[gromacs.git] / src / gromacs / mdlib / nbnxn_ocl / nbnxn_ocl_data_mgmt.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define OpenCL implementation of nbnxn_gpu_data_mgmt.h
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 *  \author Teemu Virolainen <teemu@streamcomputing.eu>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 */
#include "gmxpre.h"

#include <assert.h>
#include <math.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/gpu_utils/oclutils.h"
#include "gromacs/hardware/gpu_hw_info.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_gpu.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_gpu_jit_support.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"

#include "nbnxn_ocl_internal.h"
#include "nbnxn_ocl_types.h"
/*! \brief This parameter should be determined heuristically from the
 * kernel execution times
 *
 * This value is best for small systems on a single AMD Radeon R9 290X
 * (and about 5% faster than 40, which is the default for CUDA
 * devices). Larger simulation systems were quite insensitive to the
 * value of this parameter.
 */
static unsigned int gpu_min_ci_balanced_factor = 50;
/*! \brief Returns true if LJ combination rules are used in the non-bonded kernels.
 *
 * Full doc in nbnxn_ocl_internal.h */
bool useLjCombRule(int vdwType)
{
    return (vdwType == evdwOclCUTCOMBGEOM ||
            vdwType == evdwOclCUTCOMBLB);
}
/*! \brief Free device buffers
 *
 * If the pointers to the size variables are NULL no resetting happens.
 */
void ocl_free_buffered(cl_mem d_ptr, int *n, int *nalloc)
{
    cl_int gmx_unused cl_error;

    if (d_ptr)
    {
        cl_error = clReleaseMemObject(d_ptr);
        assert(cl_error == CL_SUCCESS);
        // TODO: handle errors
    }

    if (n)
    {
        *n = -1;
    }

    if (nalloc)
    {
        *nalloc = -1;
    }
}
/*! \brief Reallocates device buffers
 *
 * Reallocation of the memory pointed by d_ptr and copying of the data from
 * the location pointed by h_src host-side pointer is done. Allocation is
 * buffered and therefore freeing is only needed if the previously allocated
 * space is not enough.
 * The H2D copy is launched in command queue s and can be done synchronously or
 * asynchronously (the default is the latter).
 * If copy_event is not NULL, on return it will contain an event object
 * identifying the H2D copy. The event can further be used to queue a wait
 * for this operation or to query profiling information.
 * OpenCL equivalent of cu_realloc_buffered.
 */
void ocl_realloc_buffered(cl_mem *d_dest, void *h_src,
                          size_t type_size,
                          int *curr_size, int *curr_alloc_size,
                          int req_size,
                          cl_context context,
                          cl_command_queue s,
                          bool bAsync = true,
                          cl_event *copy_event = NULL)
{
    if (d_dest == NULL || req_size < 0)
    {
        return;
    }

    /* reallocate only if the data does not fit, i.e. the allocation size is
       smaller than the currently requested size */
    if (req_size > *curr_alloc_size)
    {
        cl_int gmx_unused cl_error;

        /* only free if the array has already been initialized */
        if (*curr_alloc_size >= 0)
        {
            ocl_free_buffered(*d_dest, curr_size, curr_alloc_size);
        }

        *curr_alloc_size = over_alloc_large(req_size);

        *d_dest = clCreateBuffer(context, CL_MEM_READ_WRITE, *curr_alloc_size * type_size, NULL, &cl_error);
        assert(cl_error == CL_SUCCESS);
        // TODO: handle errors, check clCreateBuffer flags
    }

    /* size could have changed without actual reallocation */
    *curr_size = req_size;

    /* upload to device */
    if (h_src)
    {
        if (bAsync)
        {
            ocl_copy_H2D_async(*d_dest, h_src, 0, *curr_size * type_size, s, copy_event);
        }
        else
        {
            ocl_copy_H2D(*d_dest, h_src, 0, *curr_size * type_size, s);
        }
    }
}
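
/* Illustrative usage sketch (hypothetical names; not called here): grow a
 * device buffer d_buf to hold n floats, tracked by (size, nalloc), and
 * upload h_data asynchronously on queue q, recording the copy event:
 *
 *     ocl_realloc_buffered(&d_buf, h_data, sizeof(cl_float),
 *                          &size, &nalloc, n, context, q, true, &ev);
 *
 * Because the allocation is over-sized via over_alloc_large(), repeated
 * calls with a slowly growing n only reallocate occasionally.
 */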
/*! \brief Releases the input OpenCL buffer */
static void free_ocl_buffer(cl_mem *buffer)
{
    cl_int gmx_unused cl_error;

    assert(NULL != buffer);

    if (*buffer)
    {
        cl_error = clReleaseMemObject(*buffer);
        assert(CL_SUCCESS == cl_error);
        *buffer = NULL;
    }
}
/*! \brief Tabulates the Ewald Coulomb force and initializes the size/scale
 *  and the table GPU array.
 *
 *  If called with an already allocated table, it just re-uploads the
 *  table.
 */
static void init_ewald_coulomb_force_table(const interaction_const_t       *ic,
                                           cl_nbparam_t                    *nbp,
                                           const gmx_device_runtime_data_t *runData)
{
    cl_mem coul_tab;
    cl_int cl_error;

    if (nbp->coulomb_tab_climg2d != NULL)
    {
        free_ocl_buffer(&(nbp->coulomb_tab_climg2d));
    }

    /* Switched from using textures to using buffers */
    // TODO: decide which alternative is most efficient - textures or buffers.
    /*
       cl_image_format array_format;

       array_format.image_channel_data_type = CL_FLOAT;
       array_format.image_channel_order     = CL_R;

       coul_tab = clCreateImage2D(runData->context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
       &array_format, tabsize, 1, 0, ftmp, &cl_error);
     */

    coul_tab = clCreateBuffer(runData->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, ic->tabq_size*sizeof(cl_float), ic->tabq_coul_F, &cl_error);
    assert(cl_error == CL_SUCCESS);
    // TODO: handle errors, check clCreateBuffer flags

    nbp->coulomb_tab_climg2d = coul_tab;
    nbp->coulomb_tab_size    = ic->tabq_size;
    nbp->coulomb_tab_scale   = ic->tabq_scale;
}
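
/* Note: the table stores the tabulated Ewald force as single-channel floats
 * (CL_R/CL_FLOAT in the commented-out image variant). The kernels are
 * expected to index it with r scaled by coulomb_tab_scale and interpolate
 * linearly between neighboring entries.
 */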
/*! \brief Initializes the atomdata structure the first time; it only gets
    filled at pair-search.
 */
static void init_atomdata_first(cl_atomdata_t *ad, int ntypes, gmx_device_runtime_data_t *runData)
{
    cl_int cl_error;

    ad->ntypes = ntypes;

    /* An element of the shift_vec device buffer has the same size as one element
       of the host side shift_vec buffer. */
    ad->shift_vec_elem_size = sizeof(*(((nbnxn_atomdata_t*)0)->shift_vec));

    // TODO: handle errors, check clCreateBuffer flags
    ad->shift_vec = clCreateBuffer(runData->context, CL_MEM_READ_WRITE, SHIFTS * ad->shift_vec_elem_size, NULL, &cl_error);
    assert(cl_error == CL_SUCCESS);
    ad->bShiftVecUploaded = false;

    /* An element of the fshift device buffer has the same size as one element
       of the host side fshift buffer. */
    ad->fshift_elem_size = sizeof(*(((cl_nb_staging_t*)0)->fshift));

    ad->fshift = clCreateBuffer(runData->context, CL_MEM_READ_WRITE, SHIFTS * ad->fshift_elem_size, NULL, &cl_error);
    assert(cl_error == CL_SUCCESS);
    // TODO: handle errors, check clCreateBuffer flags

    ad->e_lj = clCreateBuffer(runData->context, CL_MEM_READ_WRITE, sizeof(float), NULL, &cl_error);
    assert(cl_error == CL_SUCCESS);
    // TODO: handle errors, check clCreateBuffer flags

    ad->e_el = clCreateBuffer(runData->context, CL_MEM_READ_WRITE, sizeof(float), NULL, &cl_error);
    assert(cl_error == CL_SUCCESS);
    // TODO: handle errors, check clCreateBuffer flags

    /* initialize to NULL pointers to data that is not allocated here and will
       need reallocation in nbnxn_gpu_init_atomdata */
    ad->xq = NULL;
    ad->f  = NULL;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    ad->natoms = -1;
    ad->nalloc = -1;
}
/*! \brief Copies all parameters related to the cut-off from ic to nbp
 */
static void set_cutoff_parameters(cl_nbparam_t              *nbp,
                                  const interaction_const_t *ic)
{
    nbp->ewald_beta       = ic->ewaldcoeff_q;
    nbp->sh_ewald         = ic->sh_ewald;
    nbp->epsfac           = ic->epsfac;
    nbp->two_k_rf         = 2.0 * ic->k_rf;
    nbp->c_rf             = ic->c_rf;
    nbp->rvdw_sq          = ic->rvdw * ic->rvdw;
    nbp->rcoulomb_sq      = ic->rcoulomb * ic->rcoulomb;
    nbp->rlist_sq         = ic->rlist * ic->rlist;

    nbp->sh_lj_ewald      = ic->sh_lj_ewald;
    nbp->ewaldcoeff_lj    = ic->ewaldcoeff_lj;

    nbp->rvdw_switch      = ic->rvdw_switch;
    nbp->dispersion_shift = ic->dispersion_shift;
    nbp->repulsion_shift  = ic->repulsion_shift;
    nbp->vdw_switch       = ic->vdw_switch;
}
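
/* Note that the cut-offs are stored squared (rvdw_sq, rcoulomb_sq, rlist_sq):
 * the kernels can then compare them directly against squared interatomic
 * distances, avoiding a sqrt per pair.
 */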
/*! \brief Returns the kinds of electrostatics and Vdw OpenCL
 *  kernels that will be used.
 *
 * Respectively, these values are from enum eelOcl and enum
 * evdwOcl. */
static void
map_interaction_types_to_gpu_kernel_flavors(const interaction_const_t *ic,
                                            int                        combRule,
                                            int                       *gpu_eeltype,
                                            int                       *gpu_vdwtype)
{
    if (ic->vdwtype == evdwCUT)
    {
        switch (ic->vdw_modifier)
        {
            case eintmodNONE:
            case eintmodPOTSHIFT:
                switch (combRule)
                {
                    case ljcrNONE:
                        *gpu_vdwtype = evdwOclCUT;
                        break;
                    case ljcrGEOM:
                        *gpu_vdwtype = evdwOclCUTCOMBGEOM;
                        break;
                    case ljcrLB:
                        *gpu_vdwtype = evdwOclCUTCOMBLB;
                        break;
                    default:
                        gmx_incons("The requested LJ combination rule is not implemented in the OpenCL GPU accelerated kernels!");
                        break;
                }
                break;
            case eintmodFORCESWITCH:
                *gpu_vdwtype = evdwOclFSWITCH;
                break;
            case eintmodPOTSWITCH:
                *gpu_vdwtype = evdwOclPSWITCH;
                break;
            default:
                gmx_incons("The requested VdW interaction modifier is not implemented in the GPU accelerated kernels!");
                break;
        }
    }
    else if (ic->vdwtype == evdwPME)
    {
        if (ic->ljpme_comb_rule == ljcrGEOM)
        {
            *gpu_vdwtype = evdwOclEWALDGEOM;
        }
        else
        {
            *gpu_vdwtype = evdwOclEWALDLB;
        }
    }
    else
    {
        gmx_incons("The requested VdW type is not implemented in the GPU accelerated kernels!");
    }

    if (ic->eeltype == eelCUT)
    {
        *gpu_eeltype = eelOclCUT;
    }
    else if (EEL_RF(ic->eeltype))
    {
        *gpu_eeltype = eelOclRF;
    }
    else if ((EEL_PME(ic->eeltype) || ic->eeltype == eelEWALD))
    {
        /* Initially rcoulomb == rvdw, so it's surely not twin cut-off. */
        *gpu_eeltype = nbnxn_gpu_pick_ewald_kernel_type(false);
    }
    else
    {
        /* Shouldn't happen, as this is checked when choosing Verlet-scheme */
        gmx_incons("The requested electrostatics type is not implemented in the GPU accelerated kernels!");
    }
}
/*! \brief Initializes the nonbonded parameter data structure.
 */
static void init_nbparam(cl_nbparam_t                    *nbp,
                         const interaction_const_t       *ic,
                         const nbnxn_atomdata_t          *nbat,
                         const gmx_device_runtime_data_t *runData)
{
    int    ntypes, nnbfp, nnbfp_comb;
    cl_int cl_error;

    ntypes = nbat->ntype;

    set_cutoff_parameters(nbp, ic);

    map_interaction_types_to_gpu_kernel_flavors(ic,
                                                nbat->comb_rule,
                                                &(nbp->eeltype),
                                                &(nbp->vdwtype));

    if (ic->vdwtype == evdwPME)
    {
        if (ic->ljpme_comb_rule == ljcrGEOM)
        {
            assert(nbat->comb_rule == ljcrGEOM);
        }
        else
        {
            assert(nbat->comb_rule == ljcrLB);
        }
    }
    /* generate table for PME */
    nbp->coulomb_tab_climg2d = NULL;
    if (nbp->eeltype == eelOclEWALD_TAB || nbp->eeltype == eelOclEWALD_TAB_TWIN)
    {
        init_ewald_coulomb_force_table(ic, nbp, runData);
    }
    else
    // TODO: improvement needed.
    // The image2d is created here even if eeltype is not eelOclEWALD_TAB or eelOclEWALD_TAB_TWIN
    // because the OpenCL kernels don't accept NULL values for image2D parameters.
    {
        /* Switched from using textures to using buffers */
        // TODO: decide which alternative is most efficient - textures or buffers.
        /*
           cl_image_format array_format;

           array_format.image_channel_data_type = CL_FLOAT;
           array_format.image_channel_order     = CL_R;

           nbp->coulomb_tab_climg2d = clCreateImage2D(runData->context, CL_MEM_READ_WRITE,
            &array_format, 1, 1, 0, NULL, &cl_error);
         */

        nbp->coulomb_tab_climg2d = clCreateBuffer(runData->context, CL_MEM_READ_ONLY, sizeof(cl_float), NULL, &cl_error);
        // TODO: handle errors
    }

    nnbfp      = 2*ntypes*ntypes;
    nnbfp_comb = 2*ntypes;

    {
        /* Switched from using textures to using buffers */
        // TODO: decide which alternative is most efficient - textures or buffers.
        /*
           cl_image_format array_format;

           array_format.image_channel_data_type = CL_FLOAT;
           array_format.image_channel_order     = CL_R;

           nbp->nbfp_climg2d = clCreateImage2D(runData->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
            &array_format, nnbfp, 1, 0, nbat->nbfp, &cl_error);
         */

        nbp->nbfp_climg2d = clCreateBuffer(runData->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, nnbfp*sizeof(cl_float), nbat->nbfp, &cl_error);
        assert(cl_error == CL_SUCCESS);
        // TODO: handle errors

        if (ic->vdwtype == evdwPME)
        {
            /* Switched from using textures to using buffers */
            // TODO: decide which alternative is most efficient - textures or buffers.
            /* nbp->nbfp_comb_climg2d = clCreateImage2D(runData->context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                &array_format, nnbfp_comb, 1, 0, nbat->nbfp_comb, &cl_error);*/
            nbp->nbfp_comb_climg2d = clCreateBuffer(runData->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, nnbfp_comb*sizeof(cl_float), nbat->nbfp_comb, &cl_error);
            assert(cl_error == CL_SUCCESS);
            // TODO: handle errors
        }
        else
        {
            // TODO: improvement needed.
            // The image2d is created here even if vdwtype is not evdwPME because the OpenCL kernels
            // don't accept NULL values for image2D parameters.
            /* Switched from using textures to using buffers */
            // TODO: decide which alternative is most efficient - textures or buffers.
            /* nbp->nbfp_comb_climg2d = clCreateImage2D(runData->context, CL_MEM_READ_WRITE,
                &array_format, 1, 1, 0, NULL, &cl_error);*/
            nbp->nbfp_comb_climg2d = clCreateBuffer(runData->context, CL_MEM_READ_ONLY, sizeof(cl_float), NULL, &cl_error);
            assert(cl_error == CL_SUCCESS);
            // TODO: handle errors
        }
    }
}
//! This function is documented in the header file
void nbnxn_gpu_pme_loadbal_update_param(const nonbonded_verlet_t  *nbv,
                                        const interaction_const_t *ic)
{
    if (!nbv || nbv->grp[0].kernel_type != nbnxnk8x8x8_GPU)
    {
        return;
    }
    gmx_nbnxn_ocl_t *nb  = nbv->gpu_nbv;
    cl_nbparam_t    *nbp = nb->nbparam;

    set_cutoff_parameters(nbp, ic);

    nbp->eeltype = nbnxn_gpu_pick_ewald_kernel_type(ic->rcoulomb != ic->rvdw);

    init_ewald_coulomb_force_table(ic, nb->nbparam, nb->dev_rundata);
}
/*! \brief Initializes the pair list data structure.
 */
static void init_plist(cl_plist_t *pl)
{
    /* initialize to NULL pointers to data that is not allocated here and will
       need reallocation in nbnxn_gpu_init_pairlist */
    pl->sci  = NULL;
    pl->cj4  = NULL;
    pl->excl = NULL;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    pl->na_c        = -1;
    pl->nsci        = -1;
    pl->sci_nalloc  = -1;
    pl->ncj4        = -1;
    pl->cj4_nalloc  = -1;
    pl->nexcl       = -1;
    pl->excl_nalloc = -1;
    pl->bDoPrune    = false;
}
/*! \brief Initializes the timer data structure.
 */
static void init_timers(cl_timers_t gmx_unused *t, bool gmx_unused bUseTwoStreams)
{
    /* Nothing to initialize for OpenCL */
}
/*! \brief Initializes the timings data structure.
 */
static void init_timings(gmx_wallclock_gpu_t *t)
{
    int i, j;

    t->nb_h2d_t = 0.0;
    t->nb_d2h_t = 0.0;
    t->nb_c     = 0;
    t->pl_h2d_t = 0.0;
    t->pl_h2d_c = 0;
    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            t->ktime[i][j].t = 0.0;
            t->ktime[i][j].c = 0;
        }
    }
}
/*! \brief Creates the context for the OpenCL GPU given by \p devInfo.
 *
 * A fatal error results if creation fails.
 *
 * \param[inout] runtimeData runtime data including program and context
 * \param[in]    devInfo     device info struct
 * \param[in]    rank        MPI rank (for error reporting)
 */
static void
nbnxn_gpu_create_context(gmx_device_runtime_data_t *runtimeData,
                         const gmx_device_info_t   *devInfo,
                         int                        rank)
{
    cl_context_properties context_properties[3];
    cl_platform_id        platform_id;
    cl_device_id          device_id;
    cl_context            context;
    cl_int                cl_error;

    assert(runtimeData != NULL);
    assert(devInfo != NULL);

    platform_id = devInfo->ocl_gpu_id.ocl_platform_id;
    device_id   = devInfo->ocl_gpu_id.ocl_device_id;

    context_properties[0] = CL_CONTEXT_PLATFORM;
    context_properties[1] = (cl_context_properties) platform_id;
    context_properties[2] = 0; /* Terminates the list of properties */

    context = clCreateContext(context_properties, 1, &device_id, NULL, NULL, &cl_error);
    if (CL_SUCCESS != cl_error)
    {
        gmx_fatal(FARGS, "On rank %d failed to create context for GPU #%s:\n OpenCL error %d: %s",
                  rank,
                  devInfo->device_name,
                  cl_error, ocl_get_error_string(cl_error).c_str());
        return;
    }

    runtimeData->context = context;
}
/*! \brief Creates the OpenCL kernel called \p kernel_name from the program of \p nb. */
static cl_kernel nbnxn_gpu_create_kernel(gmx_nbnxn_ocl_t *nb,
                                         const char      *kernel_name)
{
    cl_kernel kernel;
    cl_int    cl_error;

    kernel = clCreateKernel(nb->dev_rundata->program, kernel_name, &cl_error);
    if (CL_SUCCESS != cl_error)
    {
        gmx_fatal(FARGS, "Failed to create kernel '%s' for GPU #%s: OpenCL error %d",
                  kernel_name,
                  nb->dev_info->device_name,
                  cl_error);
    }

    return kernel;
}
/*! \brief Clears nonbonded shift force output array and energy outputs on the GPU.
 */
static void
nbnxn_ocl_clear_e_fshift(gmx_nbnxn_ocl_t *nb)
{
    cl_int           cl_error;
    cl_atomdata_t   *adat = nb->atdat;
    cl_command_queue ls   = nb->stream[eintLocal];

    size_t           local_work_size[3]  = {1, 1, 1};
    size_t           global_work_size[3] = {1, 1, 1};

    cl_int           shifts = SHIFTS*3;

    cl_int           arg_no;

    cl_kernel        zero_e_fshift = nb->kernel_zero_e_fshift;

    local_work_size[0]  = 64;
    // Round the total number of threads up from the array size
    global_work_size[0] = ((shifts + local_work_size[0] - 1)/local_work_size[0])*local_work_size[0];

    arg_no    = 0;
    cl_error  = clSetKernelArg(zero_e_fshift, arg_no++, sizeof(cl_mem), &(adat->fshift));
    cl_error |= clSetKernelArg(zero_e_fshift, arg_no++, sizeof(cl_mem), &(adat->e_lj));
    cl_error |= clSetKernelArg(zero_e_fshift, arg_no++, sizeof(cl_mem), &(adat->e_el));
    cl_error |= clSetKernelArg(zero_e_fshift, arg_no++, sizeof(cl_uint), &shifts);
    GMX_ASSERT(cl_error == CL_SUCCESS, ocl_get_error_string(cl_error).c_str());

    cl_error = clEnqueueNDRangeKernel(ls, zero_e_fshift, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL);
    GMX_ASSERT(cl_error == CL_SUCCESS, ocl_get_error_string(cl_error).c_str());
}
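
/* The work-size arithmetic above, concretely: with SHIFTS == 45 there are
 * 45*3 = 135 floats to clear, so a work-group size of 64 yields
 * ((135 + 63)/64)*64 = 192 work items; the kernel is expected to
 * bounds-check against the shifts argument passed in.
 */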
/*! \brief Initializes the OpenCL kernel pointers of the nbnxn_ocl_ptr_t input data structure. */
static void nbnxn_gpu_init_kernels(gmx_nbnxn_ocl_t *nb)
{
    /* Init to 0 main kernel arrays */
    /* They will be later on initialized in select_nbnxn_kernel */
    memset(nb->kernel_ener_noprune_ptr, 0, sizeof(nb->kernel_ener_noprune_ptr));
    memset(nb->kernel_ener_prune_ptr, 0, sizeof(nb->kernel_ener_prune_ptr));
    memset(nb->kernel_noener_noprune_ptr, 0, sizeof(nb->kernel_noener_noprune_ptr));
    memset(nb->kernel_noener_prune_ptr, 0, sizeof(nb->kernel_noener_prune_ptr));

    /* Init auxiliary kernels */
    nb->kernel_memset_f      = nbnxn_gpu_create_kernel(nb, "memset_f");
    nb->kernel_memset_f2     = nbnxn_gpu_create_kernel(nb, "memset_f2");
    nb->kernel_memset_f3     = nbnxn_gpu_create_kernel(nb, "memset_f3");
    nb->kernel_zero_e_fshift = nbnxn_gpu_create_kernel(nb, "zero_e_fshift");
}
/*! \brief Initializes simulation constant data.
 *
 *  Initializes members of the atomdata and nbparam structs and
 *  clears e/fshift output buffers.
 */
static void nbnxn_ocl_init_const(gmx_nbnxn_ocl_t                *nb,
                                 const interaction_const_t      *ic,
                                 const nonbonded_verlet_group_t *nbv_group)
{
    init_atomdata_first(nb->atdat, nbv_group[0].nbat->ntype, nb->dev_rundata);
    init_nbparam(nb->nbparam, ic, nbv_group[0].nbat, nb->dev_rundata);
}
//! This function is documented in the header file
void nbnxn_gpu_init(gmx_nbnxn_ocl_t          **p_nb,
                    const gmx_gpu_info_t      *gpu_info,
                    const gmx_gpu_opt_t       *gpu_opt,
                    const interaction_const_t *ic,
                    nonbonded_verlet_group_t  *nbv_grp,
                    int                        my_gpu_index,
                    int                        rank,
                    gmx_bool                   bLocalAndNonlocal)
{
    gmx_nbnxn_ocl_t            *nb;
    cl_int                      cl_error;
    cl_command_queue_properties queue_properties;

    assert(gpu_info);
    assert(gpu_opt);
    assert(ic);

    if (p_nb == NULL)
    {
        return;
    }

    snew(nb, 1);
    snew(nb->atdat, 1);
    snew(nb->nbparam, 1);
    snew(nb->plist[eintLocal], 1);
    if (bLocalAndNonlocal)
    {
        snew(nb->plist[eintNonlocal], 1);
    }

    nb->bUseTwoStreams = bLocalAndNonlocal;

    snew(nb->timers, 1);
    snew(nb->timings, 1);

    /* set device info, just point it to the right GPU among the detected ones */
    nb->dev_info = gpu_info->gpu_dev + gpu_opt->dev_use[my_gpu_index];
    snew(nb->dev_rundata, 1);

    /* init to NULL the debug buffer */
    nb->debug_buffer = NULL;

    /* init nbst */
    ocl_pmalloc((void**)&nb->nbst.e_lj, sizeof(*nb->nbst.e_lj));
    ocl_pmalloc((void**)&nb->nbst.e_el, sizeof(*nb->nbst.e_el));
    ocl_pmalloc((void**)&nb->nbst.fshift, SHIFTS * sizeof(*nb->nbst.fshift));

    init_plist(nb->plist[eintLocal]);

    /* OpenCL timing disabled if GMX_DISABLE_OCL_TIMING is defined. */
    /* TODO deprecate the first env var in the 2017 release. */
    nb->bDoTime = (getenv("GMX_DISABLE_OCL_TIMING") == NULL &&
                   getenv("GMX_DISABLE_GPU_TIMING") == NULL);

    /* Create queues only after bDoTime has been initialized */
    if (nb->bDoTime)
    {
        queue_properties = CL_QUEUE_PROFILING_ENABLE;
    }
    else
    {
        queue_properties = 0;
    }

    nbnxn_gpu_create_context(nb->dev_rundata, nb->dev_info, rank);

    /* local/non-local GPU streams */
    nb->stream[eintLocal] = clCreateCommandQueue(nb->dev_rundata->context, nb->dev_info->ocl_gpu_id.ocl_device_id, queue_properties, &cl_error);
    if (CL_SUCCESS != cl_error)
    {
        gmx_fatal(FARGS, "On rank %d failed to create command queue for GPU #%s: OpenCL error %d",
                  rank,
                  nb->dev_info->device_name,
                  cl_error);
        return;
    }

    if (nb->bUseTwoStreams)
    {
        init_plist(nb->plist[eintNonlocal]);

        nb->stream[eintNonlocal] = clCreateCommandQueue(nb->dev_rundata->context, nb->dev_info->ocl_gpu_id.ocl_device_id, queue_properties, &cl_error);
        if (CL_SUCCESS != cl_error)
        {
            gmx_fatal(FARGS, "On rank %d failed to create command queue for GPU #%s: OpenCL error %d",
                      rank,
                      nb->dev_info->device_name,
                      cl_error);
            return;
        }
    }

    if (nb->bDoTime)
    {
        init_timers(nb->timers, nb->bUseTwoStreams);
        init_timings(nb->timings);
    }

    nbnxn_ocl_init_const(nb, ic, nbv_grp);

    /* Enable LJ param manual prefetch for AMD or if we request through env. var.
     * TODO: decide about NVIDIA
     */
    nb->bPrefetchLjParam =
        (getenv("GMX_OCL_DISABLE_I_PREFETCH") == NULL) &&
        ((nb->dev_info->vendor_e == OCL_VENDOR_AMD) || (getenv("GMX_OCL_ENABLE_I_PREFETCH") != NULL));

    /* NOTE: in CUDA we pick L1 cache configuration for the nbnxn kernels here,
     * but sadly this is not supported in OpenCL (yet?). Consider adding it if
     * it becomes supported.
     */
    nbnxn_gpu_compile_kernels(nb);
    nbnxn_gpu_init_kernels(nb);

    /* clear energy and shift force outputs */
    nbnxn_ocl_clear_e_fshift(nb);

    *p_nb = nb;

    if (debug)
    {
        fprintf(debug, "Initialized OpenCL data structures.\n");
    }
}
/*! \brief Clears the first natoms_clear elements of the GPU nonbonded force output array.
 */
static void nbnxn_ocl_clear_f(gmx_nbnxn_ocl_t *nb, int natoms_clear)
{
    if (natoms_clear == 0)
    {
        return;
    }

    cl_int           cl_error;
    cl_atomdata_t   *adat  = nb->atdat;
    cl_command_queue ls    = nb->stream[eintLocal];
    cl_float         value = 0.0f;

    size_t           local_work_size[3]  = {1, 1, 1};
    size_t           global_work_size[3] = {1, 1, 1};

    cl_int           arg_no;

    cl_kernel        memset_f = nb->kernel_memset_f;

    cl_uint          natoms_flat = natoms_clear * (sizeof(rvec)/sizeof(real));

    local_work_size[0]  = 64;
    // Round the total number of threads up from the array size
    global_work_size[0] = ((natoms_flat + local_work_size[0] - 1)/local_work_size[0])*local_work_size[0];

    arg_no    = 0;
    cl_error  = clSetKernelArg(memset_f, arg_no++, sizeof(cl_mem), &(adat->f));
    cl_error |= clSetKernelArg(memset_f, arg_no++, sizeof(cl_float), &value);
    cl_error |= clSetKernelArg(memset_f, arg_no++, sizeof(cl_uint), &natoms_flat);
    assert(cl_error == CL_SUCCESS);

    cl_error = clEnqueueNDRangeKernel(ls, memset_f, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL);
    assert(cl_error == CL_SUCCESS);
}
//! This function is documented in the header file
void
nbnxn_gpu_clear_outputs(gmx_nbnxn_ocl_t *nb,
                        int              flags)
{
    nbnxn_ocl_clear_f(nb, nb->atdat->natoms);
    /* clear shift force array and energies if the outputs were
       used in the current step */
    if (flags & GMX_FORCE_VIRIAL)
    {
        nbnxn_ocl_clear_e_fshift(nb);
    }

    /* kick off buffer clearing kernel to ensure concurrency with constraints/update */
    cl_int gmx_unused cl_error;
    cl_error = clFlush(nb->stream[eintLocal]);
    assert(CL_SUCCESS == cl_error);
}
//! This function is documented in the header file
void nbnxn_gpu_init_pairlist(gmx_nbnxn_ocl_t        *nb,
                             const nbnxn_pairlist_t *h_plist,
                             int                     iloc)
{
    char             sbuf[STRLEN];
    cl_command_queue stream  = nb->stream[iloc];
    cl_plist_t      *d_plist = nb->plist[iloc];

    if (d_plist->na_c < 0)
    {
        d_plist->na_c = h_plist->na_ci;
    }
    else
    {
        if (d_plist->na_c != h_plist->na_ci)
        {
            sprintf(sbuf, "In nbnxn_gpu_init_pairlist: the #atoms per cell has changed (from %d to %d)",
                    d_plist->na_c, h_plist->na_ci);
            gmx_incons(sbuf);
        }
    }

    ocl_realloc_buffered(&d_plist->sci, h_plist->sci, sizeof(nbnxn_sci_t),
                         &d_plist->nsci, &d_plist->sci_nalloc,
                         h_plist->nsci,
                         nb->dev_rundata->context,
                         stream, true, &(nb->timers->pl_h2d_sci[iloc]));

    ocl_realloc_buffered(&d_plist->cj4, h_plist->cj4, sizeof(nbnxn_cj4_t),
                         &d_plist->ncj4, &d_plist->cj4_nalloc,
                         h_plist->ncj4,
                         nb->dev_rundata->context,
                         stream, true, &(nb->timers->pl_h2d_cj4[iloc]));

    ocl_realloc_buffered(&d_plist->excl, h_plist->excl, sizeof(nbnxn_excl_t),
                         &d_plist->nexcl, &d_plist->excl_nalloc,
                         h_plist->nexcl,
                         nb->dev_rundata->context,
                         stream, true, &(nb->timers->pl_h2d_excl[iloc]));

    /* need to prune the pair list during the next step */
    d_plist->bDoPrune = true;
}
//! This function is documented in the header file
void nbnxn_gpu_upload_shiftvec(gmx_nbnxn_ocl_t        *nb,
                               const nbnxn_atomdata_t *nbatom)
{
    cl_atomdata_t   *adat = nb->atdat;
    cl_command_queue ls   = nb->stream[eintLocal];

    /* only if we have a dynamic box */
    if (nbatom->bDynamicBox || !adat->bShiftVecUploaded)
    {
        ocl_copy_H2D_async(adat->shift_vec, nbatom->shift_vec, 0,
                           SHIFTS * adat->shift_vec_elem_size, ls, NULL);
        adat->bShiftVecUploaded = true;
    }
}
//! This function is documented in the header file
void nbnxn_gpu_init_atomdata(gmx_nbnxn_ocl_t               *nb,
                             const struct nbnxn_atomdata_t *nbat)
{
    cl_int           cl_error;
    int              nalloc, natoms;
    bool             realloced;
    bool             bDoTime = nb->bDoTime;
    cl_timers_t     *timers  = nb->timers;
    cl_atomdata_t   *d_atdat = nb->atdat;
    cl_command_queue ls      = nb->stream[eintLocal];

    natoms    = nbat->natoms;
    realloced = false;

    /* need to reallocate if we have to copy more atoms than the amount of space
       available and only allocate if we haven't initialized yet, i.e d_atdat->natoms == -1 */
    if (natoms > d_atdat->nalloc)
    {
        nalloc = over_alloc_small(natoms);

        /* free up first if the arrays have already been initialized */
        if (d_atdat->nalloc != -1)
        {
            ocl_free_buffered(d_atdat->f, &d_atdat->natoms, &d_atdat->nalloc);
            ocl_free_buffered(d_atdat->xq, NULL, NULL);
            ocl_free_buffered(d_atdat->lj_comb, NULL, NULL);
            ocl_free_buffered(d_atdat->atom_types, NULL, NULL);
        }

        d_atdat->f_elem_size = sizeof(rvec);

        // TODO: handle errors, check clCreateBuffer flags
        d_atdat->f = clCreateBuffer(nb->dev_rundata->context, CL_MEM_READ_WRITE, nalloc * d_atdat->f_elem_size, NULL, &cl_error);
        assert(CL_SUCCESS == cl_error);

        // TODO: change the flag to read-only
        d_atdat->xq = clCreateBuffer(nb->dev_rundata->context, CL_MEM_READ_WRITE, nalloc * sizeof(cl_float4), NULL, &cl_error);
        assert(CL_SUCCESS == cl_error);
        // TODO: handle errors, check clCreateBuffer flags

        if (useLjCombRule(nb->nbparam->vdwtype))
        {
            // TODO: change the flag to read-only
            d_atdat->lj_comb = clCreateBuffer(nb->dev_rundata->context, CL_MEM_READ_WRITE, nalloc * sizeof(cl_float2), NULL, &cl_error);
            assert(CL_SUCCESS == cl_error);
            // TODO: handle errors, check clCreateBuffer flags
        }
        else
        {
            // TODO: change the flag to read-only
            d_atdat->atom_types = clCreateBuffer(nb->dev_rundata->context, CL_MEM_READ_WRITE, nalloc * sizeof(int), NULL, &cl_error);
            assert(CL_SUCCESS == cl_error);
            // TODO: handle errors, check clCreateBuffer flags
        }

        d_atdat->nalloc = nalloc;
        realloced       = true;
    }

    d_atdat->natoms       = natoms;
    d_atdat->natoms_local = nbat->natoms_local;

    /* need to clear GPU f output if realloc happened */
    if (realloced)
    {
        nbnxn_ocl_clear_f(nb, nalloc);
    }

    if (useLjCombRule(nb->nbparam->vdwtype))
    {
        ocl_copy_H2D_async(d_atdat->lj_comb, nbat->lj_comb, 0,
                           natoms*sizeof(cl_float2), ls, bDoTime ? &(timers->atdat) : NULL);
    }
    else
    {
        ocl_copy_H2D_async(d_atdat->atom_types, nbat->type, 0,
                           natoms*sizeof(int), ls, bDoTime ? &(timers->atdat) : NULL);
    }

    /* kick off the tasks enqueued above to ensure concurrency with the search */
    cl_error = clFlush(ls);
    assert(CL_SUCCESS == cl_error);
}
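
/* Note: xq is allocated as one cl_float4 per atom because the kernels read
 * position and charge packed together as {x, y, z, q}. Only the allocation
 * happens here; the coordinates/charges themselves are uploaded every step
 * elsewhere.
 */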
/*! \brief Releases an OpenCL kernel pointer */
void free_kernel(cl_kernel *kernel_ptr)
{
    cl_int gmx_unused cl_error;

    assert(NULL != kernel_ptr);

    if (*kernel_ptr)
    {
        cl_error = clReleaseKernel(*kernel_ptr);
        assert(cl_error == CL_SUCCESS);

        *kernel_ptr = NULL;
    }
}
/*! \brief Releases a list of OpenCL kernel pointers */
void free_kernels(cl_kernel *kernels, int count)
{
    int i;

    for (i = 0; i < count; i++)
    {
        free_kernel(kernels + i);
    }
}
/*! \brief Free the OpenCL runtime data (context and program).
 *
 *  The function releases the OpenCL context and program associated with the
 *  device that the calling PP rank is running on.
 *
 *  \param runData [in]  pointer to the structure with runtime data.
 */
static void free_gpu_device_runtime_data(gmx_device_runtime_data_t *runData)
{
    if (runData == NULL)
    {
        return;
    }

    cl_int gmx_unused cl_error;

    if (runData->context)
    {
        cl_error         = clReleaseContext(runData->context);
        runData->context = NULL;
        assert(CL_SUCCESS == cl_error);
    }

    if (runData->program)
    {
        cl_error         = clReleaseProgram(runData->program);
        runData->program = NULL;
        assert(CL_SUCCESS == cl_error);
    }
}
//! This function is documented in the header file
void nbnxn_gpu_free(gmx_nbnxn_ocl_t *nb)
{
    int kernel_count;

    /* Free kernels */
    kernel_count = sizeof(nb->kernel_ener_noprune_ptr) / sizeof(nb->kernel_ener_noprune_ptr[0][0]);
    free_kernels((cl_kernel*)nb->kernel_ener_noprune_ptr, kernel_count);

    kernel_count = sizeof(nb->kernel_ener_prune_ptr) / sizeof(nb->kernel_ener_prune_ptr[0][0]);
    free_kernels((cl_kernel*)nb->kernel_ener_prune_ptr, kernel_count);

    kernel_count = sizeof(nb->kernel_noener_noprune_ptr) / sizeof(nb->kernel_noener_noprune_ptr[0][0]);
    free_kernels((cl_kernel*)nb->kernel_noener_noprune_ptr, kernel_count);

    kernel_count = sizeof(nb->kernel_noener_prune_ptr) / sizeof(nb->kernel_noener_prune_ptr[0][0]);
    free_kernels((cl_kernel*)nb->kernel_noener_prune_ptr, kernel_count);

    free_kernel(&(nb->kernel_memset_f));
    free_kernel(&(nb->kernel_memset_f2));
    free_kernel(&(nb->kernel_memset_f3));
    free_kernel(&(nb->kernel_zero_e_fshift));

    /* Free atdat */
    free_ocl_buffer(&(nb->atdat->xq));
    free_ocl_buffer(&(nb->atdat->f));
    free_ocl_buffer(&(nb->atdat->e_lj));
    free_ocl_buffer(&(nb->atdat->e_el));
    free_ocl_buffer(&(nb->atdat->fshift));
    free_ocl_buffer(&(nb->atdat->lj_comb));
    free_ocl_buffer(&(nb->atdat->atom_types));
    free_ocl_buffer(&(nb->atdat->shift_vec));
    sfree(nb->atdat);

    /* Free nbparam */
    free_ocl_buffer(&(nb->nbparam->nbfp_climg2d));
    free_ocl_buffer(&(nb->nbparam->nbfp_comb_climg2d));
    free_ocl_buffer(&(nb->nbparam->coulomb_tab_climg2d));
    sfree(nb->nbparam);

    /* Free plist */
    free_ocl_buffer(&(nb->plist[eintLocal]->sci));
    free_ocl_buffer(&(nb->plist[eintLocal]->cj4));
    free_ocl_buffer(&(nb->plist[eintLocal]->excl));
    sfree(nb->plist[eintLocal]);
    if (nb->bUseTwoStreams)
    {
        free_ocl_buffer(&(nb->plist[eintNonlocal]->sci));
        free_ocl_buffer(&(nb->plist[eintNonlocal]->cj4));
        free_ocl_buffer(&(nb->plist[eintNonlocal]->excl));
        sfree(nb->plist[eintNonlocal]);
    }

    /* Free nbst */
    ocl_pfree(nb->nbst.e_lj);
    nb->nbst.e_lj = NULL;

    ocl_pfree(nb->nbst.e_el);
    nb->nbst.e_el = NULL;

    ocl_pfree(nb->nbst.fshift);
    nb->nbst.fshift = NULL;

    /* Free debug buffer */
    free_ocl_buffer(&nb->debug_buffer);

    /* Free command queues */
    clReleaseCommandQueue(nb->stream[eintLocal]);
    nb->stream[eintLocal] = NULL;
    if (nb->bUseTwoStreams)
    {
        clReleaseCommandQueue(nb->stream[eintNonlocal]);
        nb->stream[eintNonlocal] = NULL;
    }
    /* Free other events */
    if (nb->nonlocal_done)
    {
        clReleaseEvent(nb->nonlocal_done);
        nb->nonlocal_done = NULL;
    }
    if (nb->misc_ops_and_local_H2D_done)
    {
        clReleaseEvent(nb->misc_ops_and_local_H2D_done);
        nb->misc_ops_and_local_H2D_done = NULL;
    }

    free_gpu_device_runtime_data(nb->dev_rundata);
    sfree(nb->dev_rundata);

    /* Free timers and timings */
    sfree(nb->timers);
    sfree(nb->timings);
    sfree(nb);

    if (debug)
    {
        fprintf(debug, "Cleaned up OpenCL data structures.\n");
    }
}
//! This function is documented in the header file
gmx_wallclock_gpu_t * nbnxn_gpu_get_timings(gmx_nbnxn_ocl_t *nb)
{
    return (nb != NULL && nb->bDoTime) ? nb->timings : NULL;
}
//! This function is documented in the header file
void nbnxn_gpu_reset_timings(nonbonded_verlet_t* nbv)
{
    if (nbv->gpu_nbv && nbv->gpu_nbv->bDoTime)
    {
        init_timings(nbv->gpu_nbv->timings);
    }
}
//! This function is documented in the header file
int nbnxn_gpu_min_ci_balanced(gmx_nbnxn_ocl_t *nb)
{
    return nb != NULL ?
           gpu_min_ci_balanced_factor * nb->dev_info->compute_units : 0;
}
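
/* Illustration: on a device reporting 64 compute units this returns
 * 50 * 64 = 3200, which callers use as the heuristic minimum pair-list size
 * that still load-balances well across the device (see the doc of
 * gpu_min_ci_balanced_factor above).
 */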
//! This function is documented in the header file
gmx_bool nbnxn_gpu_is_kernel_ewald_analytical(const gmx_nbnxn_ocl_t *nb)
{
    return ((nb->nbparam->eeltype == eelOclEWALD_ANA) ||
            (nb->nbparam->eeltype == eelOclEWALD_ANA_TWIN));
}