/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "nbnxn_search.h"

#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nbnxn_internal.h"
#include "gromacs/mdlib/nbnxn_simd.h"
#include "gromacs/mdlib/nbnxn_util.h"
#include "gromacs/mdlib/ns.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"
#include "gromacs/topology/block.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"
using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
/* We shift the i-particles backward for PBC.
 * This leads to more conditionals than shifting forward.
 * We do this to get more balanced pair lists.
 */
constexpr bool c_pbcShiftBackward = true;
static void nbs_cycle_clear(nbnxn_cycle_t *cc)
{
    for (int i = 0; i < enbsCCnr; i++)
    {
        cc[i].count = 0;
        cc[i].c     = 0;
    }
}
static double Mcyc_av(const nbnxn_cycle_t *cc)
{
    return (double)cc->c*1e-6/cc->count;
}
static void nbs_cycle_print(FILE *fp, const nbnxn_search_t nbs)
{
    fprintf(fp, "ns %4d grid %4.1f search %4.1f red.f %5.3f",
            nbs->cc[enbsCCgrid].count,
            Mcyc_av(&nbs->cc[enbsCCgrid]),
            Mcyc_av(&nbs->cc[enbsCCsearch]),
            Mcyc_av(&nbs->cc[enbsCCreducef]));

    if (nbs->nthread_max > 1)
    {
        if (nbs->cc[enbsCCcombine].count > 0)
        {
            fprintf(fp, " comb %5.2f",
                    Mcyc_av(&nbs->cc[enbsCCcombine]));
        }
        fprintf(fp, " s. th");
        for (int t = 0; t < nbs->nthread_max; t++)
        {
            fprintf(fp, " %4.1f",
                    Mcyc_av(&nbs->work[t].cc[enbsCCsearch]));
        }
    }
}
/* Layout for the nonbonded NxN pair lists */
enum class NbnxnLayout
{
    NoSimd4x4, // i-cluster size 4, j-cluster size 4
    Simd4xN,   // i-cluster size 4, j-cluster size SIMD width
    Simd2xNN,  // i-cluster size 4, j-cluster size half SIMD width
    Gpu8x8x8   // i-cluster size 8, j-cluster size 8 + super-clustering
};
/* Returns the j-cluster size */
template <NbnxnLayout layout>
static constexpr int jClusterSize()
{
#if GMX_SIMD
    static_assert(layout == NbnxnLayout::NoSimd4x4 || layout == NbnxnLayout::Simd4xN || layout == NbnxnLayout::Simd2xNN, "Currently jClusterSize only supports CPU layouts");

    return layout == NbnxnLayout::Simd4xN ? GMX_SIMD_REAL_WIDTH : (layout == NbnxnLayout::Simd2xNN ? GMX_SIMD_REAL_WIDTH/2 : NBNXN_CPU_CLUSTER_I_SIZE);
#else
    static_assert(layout == NbnxnLayout::NoSimd4x4, "Currently without SIMD, jClusterSize only supports NoSimd4x4");

    return NBNXN_CPU_CLUSTER_I_SIZE;
#endif
}
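/* Illustrative mapping (not from the original source), assuming an 8-wide
 * SIMD build (GMX_SIMD_REAL_WIDTH == 8) and NBNXN_CPU_CLUSTER_I_SIZE == 4:
 *   jClusterSize<NbnxnLayout::Simd4xN>()   == 8
 *   jClusterSize<NbnxnLayout::Simd2xNN>()  == 4
 *   jClusterSize<NbnxnLayout::NoSimd4x4>() == 4
 */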
/* Returns the j-cluster index given the i-cluster index */
template <int jClusterSize>
static inline int cjFromCi(int ci)
{
    static_assert(jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2 || jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE || jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE*2, "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2)
    {
        return ci << 1;
    }
    else if (jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE)
    {
        return ci;
    }
    else
    {
        return ci >> 1;
    }
}
/* Returns the j-cluster index given the i-cluster index */
template <NbnxnLayout layout>
static inline int cjFromCi(int ci)
{
    constexpr int clusterSize = jClusterSize<layout>();

    return cjFromCi<clusterSize>(ci);
}
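/* Worked example (illustrative, assuming NBNXN_CPU_CLUSTER_I_SIZE == 4):
 * with j-cluster size 8 one j-cluster spans two i-clusters, so
 * cjFromCi<8>(5) == 5 >> 1 == 2, while with j-cluster size 2 each i-cluster
 * spans two j-clusters and cjFromCi<2>(5) == 5 << 1 == 10.
 */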
/* Returns the nbnxn coordinate data index given the i-cluster index */
template <NbnxnLayout layout>
static inline int xIndexFromCi(int ci)
{
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2 || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE*2, "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize <= NBNXN_CPU_CLUSTER_I_SIZE)
    {
        /* Coordinates are stored packed in groups of 4 */
        return ci*STRIDE_P4;
    }
    else
    {
        /* Coordinates packed in 8, i-cluster size is half the packing width */
        return (ci >> 1)*STRIDE_P8 + (ci & 1)*(c_packX8 >> 1);
    }
}
/* Returns the nbnxn coordinate data index given the j-cluster index */
template <NbnxnLayout layout>
static inline int xIndexFromCj(int cj)
{
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2 || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE*2, "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2)
    {
        /* Coordinates are stored packed in groups of 4 */
        return (cj >> 1)*STRIDE_P4 + (cj & 1)*(c_packX4 >> 1);
    }
    else if (clusterSize == NBNXN_CPU_CLUSTER_I_SIZE)
    {
        /* Coordinates are stored packed in groups of 4 */
        return cj*STRIDE_P4;
    }
    else
    {
        /* Coordinates are stored packed in groups of 8 */
        return cj*STRIDE_P8;
    }
}
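/* Worked example (illustrative, assuming c_packX4 == 4): for j-cluster
 * size 2, cj = 5 yields (5 >> 1)*STRIDE_P4 + (5 & 1)*(c_packX4 >> 1), i.e.
 * the second half of the third coordinate pack; even and odd j-clusters
 * therefore share one pack of 4 atoms.
 */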
gmx_bool nbnxn_kernel_pairlist_simple(int nb_kernel_type)
{
    if (nb_kernel_type == nbnxnkNotSet)
    {
        gmx_fatal(FARGS, "Non-bonded kernel type not set for Verlet-style pair-list.");
    }

    switch (nb_kernel_type)
    {
        case nbnxnk8x8x8_GPU:
        case nbnxnk8x8x8_PlainC:
            return FALSE;

        case nbnxnk4x4_PlainC:
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:
            return TRUE;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
            return FALSE;
    }
}
/* Initializes a single t_nblist FEP pair-list data structure */
static void nbnxn_init_pairlist_fep(t_nblist *nl)
{
    nl->type      = GMX_NBLIST_INTERACTION_FREE_ENERGY;
    nl->igeometry = GMX_NBLIST_GEOMETRY_PARTICLE_PARTICLE;
    /* The interaction functions are set in the free energy kernel function */
    nl->ivdw      = -1;
    nl->ivdwmod   = -1;
    nl->ielec     = -1;
    nl->ielecmod  = -1;

    nl->maxnri    = 0;
    nl->maxnrj    = 0;
    nl->nri       = 0;
    nl->nrj       = 0;
    nl->iinr      = nullptr;
    nl->gid       = nullptr;
    nl->shift     = nullptr;
    nl->jindex    = nullptr;
    nl->jjnr      = nullptr;
    nl->excl_fep  = nullptr;
}
void nbnxn_init_search(nbnxn_search_t            *nbs_ptr,
                       ivec                      *n_dd_cells,
                       struct gmx_domdec_zones_t *zones,
                       gmx_bool                   bFEP,
                       int                        nthread_max)
{
    nbnxn_search_t nbs = new nbnxn_search;
    *nbs_ptr = nbs;

    nbs->bFEP   = bFEP;

    nbs->DomDec = (n_dd_cells != nullptr);

    clear_ivec(nbs->dd_dim);
    int numGrids = 1;
    if (nbs->DomDec)
    {
        nbs->zones = zones;

        for (int d = 0; d < DIM; d++)
        {
            if ((*n_dd_cells)[d] > 1)
            {
                nbs->dd_dim[d] = 1;
                /* Each grid matches a DD zone */
                numGrids *= 2;
            }
        }
    }

    nbs->grid.resize(numGrids);

    nbs->nthread_max = nthread_max;

    /* Initialize the work data structures for each thread */
    snew(nbs->work, nbs->nthread_max);
    for (int t = 0; t < nbs->nthread_max; t++)
    {
        nbs->work[t].cxy_na           = nullptr;
        nbs->work[t].cxy_na_nalloc    = 0;
        nbs->work[t].sort_work        = nullptr;
        nbs->work[t].sort_work_nalloc = 0;

        snew(nbs->work[t].nbl_fep, 1);
        nbnxn_init_pairlist_fep(nbs->work[t].nbl_fep);
    }

    /* Initialize detailed nbsearch cycle counting */
    nbs->print_cycles = (getenv("GMX_NBNXN_CYCLE") != nullptr);
    nbs->search_count = 0;
    nbs_cycle_clear(nbs->cc);
    for (int t = 0; t < nbs->nthread_max; t++)
    {
        nbs_cycle_clear(nbs->work[t].cc);
    }
}
static void init_buffer_flags(nbnxn_buffer_flags_t *flags,
                              int                   natoms)
{
    flags->nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE;
    if (flags->nflag > flags->flag_nalloc)
    {
        flags->flag_nalloc = over_alloc_large(flags->nflag);
        srenew(flags->flag, flags->flag_nalloc);
    }
    for (int b = 0; b < flags->nflag; b++)
    {
        bitmask_clear(&(flags->flag[b]));
    }
}
/* Determines the cell range along one dimension that
 * the bounding box b0 - b1 sees.
 */
static void get_cell_range(real b0, real b1,
                           int nc, real c0, real s, real invs,
                           real d2, real r2, int *cf, int *cl)
{
    *cf = std::max(static_cast<int>((b0 - c0)*invs), 0);

    while (*cf > 0 && d2 + gmx::square((b0 - c0) - (*cf-1+1)*s) < r2)
    {
        (*cf)--;
    }

    *cl = std::min(static_cast<int>((b1 - c0)*invs), nc-1);
    while (*cl < nc-1 && d2 + gmx::square((*cl+1)*s - (b1 - c0)) < r2)
    {
        (*cl)++;
    }
}
/* Reference code calculating the distance^2 between two bounding boxes */
static float box_dist2(float bx0, float bx1, float by0,
                       float by1, float bz0, float bz1,
                       const nbnxn_bb_t *bb)
{
    float d2;
    float dl, dh, dm, dm0;

    d2 = 0;

    dl  = bx0 - bb->upper[BB_X];
    dh  = bb->lower[BB_X] - bx1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = by0 - bb->upper[BB_Y];
    dh  = bb->lower[BB_Y] - by1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bz0 - bb->upper[BB_Z];
    dh  = bb->lower[BB_Z] - bz1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
}
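/* Worked example (illustrative): for ranges [0,1] and [3,5] along x,
 * dl = 0 - 5 = -5 and dh = 3 - 1 = 2, so dm0 = 2 and the x gap contributes
 * 2*2 = 4 to d2; overlapping dimensions clamp to 0 and contribute nothing.
 */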
/* Plain C code calculating the distance^2 between two bounding boxes */
static float subc_bb_dist2(int si,
                           const nbnxn_bb_t *bb_i_ci,
                           int csj,
                           gmx::ArrayRef<const nbnxn_bb_t> bb_j_all)
{
    const nbnxn_bb_t *bb_i = bb_i_ci + si;
    const nbnxn_bb_t *bb_j = bb_j_all.data() + csj;

    float             d2 = 0;
    float             dl, dh, dm, dm0;

    dl  = bb_i->lower[BB_X] - bb_j->upper[BB_X];
    dh  = bb_j->lower[BB_X] - bb_i->upper[BB_X];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Y] - bb_j->upper[BB_Y];
    dh  = bb_j->lower[BB_Y] - bb_i->upper[BB_Y];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Z] - bb_j->upper[BB_Z];
    dh  = bb_j->lower[BB_Z] - bb_i->upper[BB_Z];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
}
#if NBNXN_SEARCH_BB_SIMD4

/* 4-wide SIMD code for bb distance for bb format xyz0 */
static float subc_bb_dist2_simd4(int si,
                                 const nbnxn_bb_t *bb_i_ci,
                                 int csj,
                                 gmx::ArrayRef<const nbnxn_bb_t> bb_j_all)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    Simd4Float bb_i_S0, bb_i_S1;
    Simd4Float bb_j_S0, bb_j_S1;
    Simd4Float dl_S;
    Simd4Float dh_S;
    Simd4Float dm_S;
    Simd4Float dm0_S;

    bb_i_S0 = load4(&bb_i_ci[si].lower[0]);
    bb_i_S1 = load4(&bb_i_ci[si].upper[0]);
    bb_j_S0 = load4(&bb_j_all[csj].lower[0]);
    bb_j_S1 = load4(&bb_j_all[csj].upper[0]);

    dl_S    = bb_i_S0 - bb_j_S1;
    dh_S    = bb_j_S0 - bb_i_S1;

    dm_S    = max(dl_S, dh_S);
    dm0_S   = max(dm_S, simd4SetZeroF());

    return dotProduct(dm0_S, dm0_S);
}
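/* Note on the xyz0 format (sketch, not from the original source): the fourth
 * SIMD4 lane holds the zero padding component of the bounding box, so its
 * clamped difference is zero and dotProduct over all four lanes reduces to
 * the sum of the squared x, y and z gaps.
 */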
/* Calculate bb bounding distances of bb_i[si,...,si+3] and store them in d2 */
#define SUBC_BB_DIST2_SIMD4_XXXX_INNER(si, bb_i, d2) \
    {                                                \
        int        shi;                              \
                                                     \
        Simd4Float dx_0, dy_0, dz_0;                 \
        Simd4Float dx_1, dy_1, dz_1;                 \
                                                     \
        Simd4Float mx, my, mz;                       \
        Simd4Float m0x, m0y, m0z;                    \
                                                     \
        Simd4Float d2x, d2y, d2z;                    \
        Simd4Float d2s, d2t;                         \
                                                     \
        shi = si*NNBSBB_D*DIM;                       \
                                                     \
        xi_l = load4(bb_i+shi+0*STRIDE_PBB);         \
        yi_l = load4(bb_i+shi+1*STRIDE_PBB);         \
        zi_l = load4(bb_i+shi+2*STRIDE_PBB);         \
        xi_h = load4(bb_i+shi+3*STRIDE_PBB);         \
        yi_h = load4(bb_i+shi+4*STRIDE_PBB);         \
        zi_h = load4(bb_i+shi+5*STRIDE_PBB);         \
                                                     \
        dx_0 = xi_l - xj_h;                          \
        dy_0 = yi_l - yj_h;                          \
        dz_0 = zi_l - zj_h;                          \
                                                     \
        dx_1 = xj_l - xi_h;                          \
        dy_1 = yj_l - yi_h;                          \
        dz_1 = zj_l - zi_h;                          \
                                                     \
        mx   = max(dx_0, dx_1);                      \
        my   = max(dy_0, dy_1);                      \
        mz   = max(dz_0, dz_1);                      \
                                                     \
        m0x  = max(mx, zero);                        \
        m0y  = max(my, zero);                        \
        m0z  = max(mz, zero);                        \
                                                     \
        d2x  = m0x * m0x;                            \
        d2y  = m0y * m0y;                            \
        d2z  = m0z * m0z;                            \
                                                     \
        d2s  = d2x + d2y;                            \
        d2t  = d2s + d2z;                            \
                                                     \
        store4(d2+si, d2t);                          \
    }
/* 4-wide SIMD code for nsi bb distances for bb format xxxxyyyyzzzz */
static void subc_bb_dist2_simd4_xxxx(const float *bb_j,
                                     int nsi, const float *bb_i,
                                     float *d2)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    Simd4Float xj_l, yj_l, zj_l;
    Simd4Float xj_h, yj_h, zj_h;
    Simd4Float xi_l, yi_l, zi_l;
    Simd4Float xi_h, yi_h, zi_h;

    Simd4Float zero = setZero();

    xj_l = Simd4Float(bb_j[0*STRIDE_PBB]);
    yj_l = Simd4Float(bb_j[1*STRIDE_PBB]);
    zj_l = Simd4Float(bb_j[2*STRIDE_PBB]);
    xj_h = Simd4Float(bb_j[3*STRIDE_PBB]);
    yj_h = Simd4Float(bb_j[4*STRIDE_PBB]);
    zj_h = Simd4Float(bb_j[5*STRIDE_PBB]);

    /* Here we "loop" over si (0,STRIDE_PBB) from 0 to nsi with step STRIDE_PBB.
     * But as we know the number of iterations is 1 or 2, we unroll manually.
     */
    SUBC_BB_DIST2_SIMD4_XXXX_INNER(0, bb_i, d2);
    if (STRIDE_PBB < nsi)
    {
        SUBC_BB_DIST2_SIMD4_XXXX_INNER(STRIDE_PBB, bb_i, d2);
    }
}

#endif /* NBNXN_SEARCH_BB_SIMD4 */
/* Returns whether any atom pair from two clusters is within distance sqrt(rlist2) */
static inline gmx_bool
clusterpair_in_range(const nbnxn_list_work_t *work,
                     int si,
                     int csj, int stride, const real *x_j,
                     real rlist2)
{
#if !GMX_SIMD4_HAVE_REAL

    /* Plain C version.
     * All coordinates are stored as xyzxyz...
     */

    const real *x_i = work->x_ci;

    for (int i = 0; i < c_nbnxnGpuClusterSize; i++)
    {
        int i0 = (si*c_nbnxnGpuClusterSize + i)*DIM;
        for (int j = 0; j < c_nbnxnGpuClusterSize; j++)
        {
            int  j0 = (csj*c_nbnxnGpuClusterSize + j)*stride;

            real d2 = gmx::square(x_i[i0] - x_j[j0]) + gmx::square(x_i[i0+1] - x_j[j0+1]) + gmx::square(x_i[i0+2] - x_j[j0+2]);

            if (d2 < rlist2)
            {
                return TRUE;
            }
        }
    }

    return FALSE;

#else /* !GMX_SIMD4_HAVE_REAL */

    /* 4-wide SIMD version.
     * The coordinates x_i are stored as xxxxyyyy..., x_j is stored xyzxyz...
     * Using 8-wide AVX(2) is not faster on Intel Sandy Bridge and Haswell.
     */
    static_assert(c_nbnxnGpuClusterSize == 8 || c_nbnxnGpuClusterSize == 4,
                  "A cluster is hard-coded to 4/8 atoms.");

    Simd4Real   rc2_S      = Simd4Real(rlist2);

    const real *x_i        = work->x_ci_simd;

    int         dim_stride = c_nbnxnGpuClusterSize*DIM;
    Simd4Real   ix_S0      = load4(x_i + si*dim_stride + 0*GMX_SIMD4_WIDTH);
    Simd4Real   iy_S0      = load4(x_i + si*dim_stride + 1*GMX_SIMD4_WIDTH);
    Simd4Real   iz_S0      = load4(x_i + si*dim_stride + 2*GMX_SIMD4_WIDTH);

    Simd4Real   ix_S1, iy_S1, iz_S1;
    if (c_nbnxnGpuClusterSize == 8)
    {
        ix_S1 = load4(x_i + si*dim_stride + 3*GMX_SIMD4_WIDTH);
        iy_S1 = load4(x_i + si*dim_stride + 4*GMX_SIMD4_WIDTH);
        iz_S1 = load4(x_i + si*dim_stride + 5*GMX_SIMD4_WIDTH);
    }
    /* We loop from the outer to the inner particles to maximize
     * the chance that we find a pair in range quickly and return.
     */
    int j0 = csj*c_nbnxnGpuClusterSize;
    int j1 = j0 + c_nbnxnGpuClusterSize - 1;
    while (j0 < j1)
    {
        Simd4Real jx0_S, jy0_S, jz0_S;
        Simd4Real jx1_S, jy1_S, jz1_S;

        Simd4Real dx_S0, dy_S0, dz_S0;
        Simd4Real dx_S1, dy_S1, dz_S1;
        Simd4Real dx_S2, dy_S2, dz_S2;
        Simd4Real dx_S3, dy_S3, dz_S3;

        Simd4Real rsq_S0, rsq_S1, rsq_S2, rsq_S3;

        Simd4Bool wco_S0, wco_S1, wco_S2, wco_S3;
        Simd4Bool wco_any_S01, wco_any_S23, wco_any_S;

        jx0_S = Simd4Real(x_j[j0*stride+0]);
        jy0_S = Simd4Real(x_j[j0*stride+1]);
        jz0_S = Simd4Real(x_j[j0*stride+2]);

        jx1_S = Simd4Real(x_j[j1*stride+0]);
        jy1_S = Simd4Real(x_j[j1*stride+1]);
        jz1_S = Simd4Real(x_j[j1*stride+2]);

        /* Calculate distance */
        dx_S0 = ix_S0 - jx0_S;
        dy_S0 = iy_S0 - jy0_S;
        dz_S0 = iz_S0 - jz0_S;
        dx_S2 = ix_S0 - jx1_S;
        dy_S2 = iy_S0 - jy1_S;
        dz_S2 = iz_S0 - jz1_S;
        if (c_nbnxnGpuClusterSize == 8)
        {
            dx_S1 = ix_S1 - jx0_S;
            dy_S1 = iy_S1 - jy0_S;
            dz_S1 = iz_S1 - jz0_S;
            dx_S3 = ix_S1 - jx1_S;
            dy_S3 = iy_S1 - jy1_S;
            dz_S3 = iz_S1 - jz1_S;
        }

        /* rsq = dx*dx+dy*dy+dz*dz */
        rsq_S0 = norm2(dx_S0, dy_S0, dz_S0);
        rsq_S2 = norm2(dx_S2, dy_S2, dz_S2);
        if (c_nbnxnGpuClusterSize == 8)
        {
            rsq_S1 = norm2(dx_S1, dy_S1, dz_S1);
            rsq_S3 = norm2(dx_S3, dy_S3, dz_S3);
        }

        wco_S0 = (rsq_S0 < rc2_S);
        wco_S2 = (rsq_S2 < rc2_S);
        if (c_nbnxnGpuClusterSize == 8)
        {
            wco_S1 = (rsq_S1 < rc2_S);
            wco_S3 = (rsq_S3 < rc2_S);
        }
        if (c_nbnxnGpuClusterSize == 8)
        {
            wco_any_S01 = wco_S0 || wco_S1;
            wco_any_S23 = wco_S2 || wco_S3;
            wco_any_S   = wco_any_S01 || wco_any_S23;
        }
        else
        {
            wco_any_S = wco_S0 || wco_S2;
        }

        if (anyTrue(wco_any_S))
        {
            return TRUE;
        }

        j0++;
        j1--;
    }

    return FALSE;

#endif /* !GMX_SIMD4_HAVE_REAL */
}
/* Returns the j-cluster index for index cjIndex in a cj list */
static inline int nblCj(const nbnxn_cj_t *cjList, int cjIndex)
{
    return cjList[cjIndex].cj;
}
/* Returns the j-cluster index for index cjIndex in a cj4 list */
static inline int nblCj(const nbnxn_cj4_t *cj4List, int cjIndex)
{
    return cj4List[cjIndex/c_nbnxnGpuJgroupSize].cj[cjIndex & (c_nbnxnGpuJgroupSize - 1)];
}
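/* Illustrative example (assuming c_nbnxnGpuJgroupSize == 4): cjIndex 10 is
 * stored as cj4List[10/4].cj[10 & 3], i.e. entry 2 of the third cj4 group.
 */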
/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
static unsigned int nbl_imask0(const nbnxn_pairlist_t *nbl, int cj_ind)
{
    return nbl->cj4[cj_ind/c_nbnxnGpuJgroupSize].imei[0].imask;
}
/* Ensures there is enough space for extra exclusion masks */
static void check_excl_space(nbnxn_pairlist_t *nbl, int extra)
{
    if (nbl->nexcl+extra > nbl->excl_nalloc)
    {
        nbl->excl_nalloc = over_alloc_small(nbl->nexcl+extra);
        nbnxn_realloc_void((void **)&nbl->excl,
                           nbl->nexcl*sizeof(*nbl->excl),
                           nbl->excl_nalloc*sizeof(*nbl->excl),
                           nbl->alloc, nbl->free);
    }
}
/* Ensures there is enough space for maxNumExtraClusters extra j-clusters in the list */
static void check_cell_list_space_simple(nbnxn_pairlist_t *nbl,
                                         int               maxNumExtraClusters)
{
    int cj_max;

    cj_max = nbl->ncj + maxNumExtraClusters;

    if (cj_max > nbl->cj_nalloc)
    {
        nbl->cj_nalloc = over_alloc_small(cj_max);
        nbnxn_realloc_void((void **)&nbl->cj,
                           nbl->ncj*sizeof(*nbl->cj),
                           nbl->cj_nalloc*sizeof(*nbl->cj),
                           nbl->alloc, nbl->free);

        nbnxn_realloc_void((void **)&nbl->cjOuter,
                           nbl->ncj*sizeof(*nbl->cjOuter),
                           nbl->cj_nalloc*sizeof(*nbl->cjOuter),
                           nbl->alloc, nbl->free);
    }
}
/* Ensures there is enough space for ncell extra j-clusters in the list */
static void check_cell_list_space_supersub(nbnxn_pairlist_t *nbl,
                                           int               ncell)
{
    int ncj4_max, w;

    /* We can have maximally nsupercell*c_gpuNumClusterPerCell sj lists */
    /* We can store 4 j-subcell - i-supercell pairs in one struct.
     * Since we round down, we need one extra entry.
     */
    ncj4_max = ((nbl->work->cj_ind + ncell*c_gpuNumClusterPerCell + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize);

    if (ncj4_max > nbl->cj4_nalloc)
    {
        nbl->cj4_nalloc = over_alloc_small(ncj4_max);
        nbnxn_realloc_void((void **)&nbl->cj4,
                           nbl->work->cj4_init*sizeof(*nbl->cj4),
                           nbl->cj4_nalloc*sizeof(*nbl->cj4),
                           nbl->alloc, nbl->free);
    }

    if (ncj4_max > nbl->work->cj4_init)
    {
        for (int j4 = nbl->work->cj4_init; j4 < ncj4_max; j4++)
        {
            /* No i-subcells and no excl's in the list initially */
            for (w = 0; w < c_nbnxnGpuClusterpairSplit; w++)
            {
                nbl->cj4[j4].imei[w].imask    = 0U;
                nbl->cj4[j4].imei[w].excl_ind = 0;
            }
        }
        nbl->work->cj4_init = ncj4_max;
    }
}
/* Set all excl masks for one GPU warp to no exclusions */
static void set_no_excls(nbnxn_excl_t *excl)
{
    for (int t = 0; t < c_nbnxnGpuExclSize; t++)
    {
        /* Turn all interaction bits on */
        excl->pair[t] = NBNXN_INTERACTION_MASK_ALL;
    }
}
/* Initializes a single nbnxn_pairlist_t data structure */
static void nbnxn_init_pairlist(nbnxn_pairlist_t *nbl,
                                gmx_bool          bSimple,
                                nbnxn_alloc_t    *alloc,
                                nbnxn_free_t     *free)
{
    if (alloc == nullptr)
    {
        nbl->alloc = nbnxn_alloc_aligned;
    }
    else
    {
        nbl->alloc = alloc;
    }
    if (free == nullptr)
    {
        nbl->free = nbnxn_free_aligned;
    }
    else
    {
        nbl->free = free;
    }

    nbl->bSimple     = bSimple;
    nbl->na_sc       = 0;
    nbl->na_ci       = 0;
    nbl->na_cj       = 0;
    nbl->nci         = 0;
    nbl->ci          = nullptr;
    nbl->ci_nalloc   = 0;
    nbl->nsci        = 0;
    nbl->sci         = nullptr;
    nbl->sci_nalloc  = 0;
    nbl->ncj         = 0;
    nbl->ncjInUse    = 0;
    nbl->cj          = nullptr;
    nbl->cj_nalloc   = 0;
    nbl->ncj4        = 0;
    /* We need one element extra in sj, so alloc initially with 1 */
    nbl->cj4_nalloc  = 0;
    nbl->cj4         = nullptr;
    nbl->nci_tot     = 0;

    if (!nbl->bSimple)
    {
        GMX_ASSERT(c_nbnxnGpuNumClusterPerSupercluster == c_gpuNumClusterPerCell, "The search code assumes that a super-cluster matches a search grid cell");

        GMX_ASSERT(sizeof(nbl->cj4[0].imei[0].imask)*8 >= c_nbnxnGpuJgroupSize*c_gpuNumClusterPerCell, "The i super-cluster cluster interaction mask does not contain a sufficient number of bits");
        GMX_ASSERT(sizeof(nbl->excl[0])*8 >= c_nbnxnGpuJgroupSize*c_gpuNumClusterPerCell, "The GPU exclusion mask does not contain a sufficient number of bits");

        nbl->excl        = nullptr;
        nbl->excl_nalloc = 0;
        nbl->nexcl       = 0;
        check_excl_space(nbl, 1);
        nbl->nexcl       = 1;
        set_no_excls(&nbl->excl[0]);
    }

    snew(nbl->work, 1);
    if (nbl->bSimple)
    {
        snew_aligned(nbl->work->bb_ci, 1, NBNXN_SEARCH_BB_MEM_ALIGN);
    }
    else
    {
#if NBNXN_BBXXXX
        snew_aligned(nbl->work->pbb_ci, c_gpuNumClusterPerCell/STRIDE_PBB*NNBSBB_XXXX, NBNXN_SEARCH_BB_MEM_ALIGN);
#else
        snew_aligned(nbl->work->bb_ci, c_gpuNumClusterPerCell, NBNXN_SEARCH_BB_MEM_ALIGN);
#endif
    }
    int gpu_clusterpair_nc = c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize*DIM;
    snew(nbl->work->x_ci, gpu_clusterpair_nc);
#if GMX_SIMD
    snew_aligned(nbl->work->x_ci_simd,
                 std::max(NBNXN_CPU_CLUSTER_I_SIZE*DIM*GMX_SIMD_REAL_WIDTH,
                          gpu_clusterpair_nc),
                 GMX_SIMD_REAL_WIDTH);
#endif
    snew_aligned(nbl->work->d2, c_gpuNumClusterPerCell, NBNXN_SEARCH_BB_MEM_ALIGN);

    nbl->work->sort            = nullptr;
    nbl->work->sort_nalloc     = 0;
    nbl->work->sci_sort        = nullptr;
    nbl->work->sci_sort_nalloc = 0;
}
void nbnxn_init_pairlist_set(nbnxn_pairlist_set_t *nbl_list,
                             gmx_bool bSimple, gmx_bool bCombined,
                             nbnxn_alloc_t *alloc,
                             nbnxn_free_t  *free)
{
    nbl_list->bSimple   = bSimple;
    nbl_list->bCombined = bCombined;

    nbl_list->nnbl = gmx_omp_nthreads_get(emntNonbonded);

    if (!nbl_list->bCombined &&
        nbl_list->nnbl > NBNXN_BUFFERFLAG_MAX_THREADS)
    {
        gmx_fatal(FARGS, "%d OpenMP threads were requested. Since the non-bonded force buffer reduction is prohibitively slow with more than %d threads, we do not allow this. Use %d or fewer OpenMP threads.",
                  nbl_list->nnbl, NBNXN_BUFFERFLAG_MAX_THREADS, NBNXN_BUFFERFLAG_MAX_THREADS);
    }

    snew(nbl_list->nbl, nbl_list->nnbl);
    if (bSimple && nbl_list->nnbl > 1)
    {
        snew(nbl_list->nbl_work, nbl_list->nnbl);
    }
    snew(nbl_list->nbl_fep, nbl_list->nnbl);
    /* Execute in order to avoid memory interleaving between threads */
#pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
    for (int i = 0; i < nbl_list->nnbl; i++)
    {
        try
        {
            /* Allocate the nblist data structure locally on each thread
             * to optimize memory access for NUMA architectures.
             */
            snew(nbl_list->nbl[i], 1);

            /* Only list 0 is used on the GPU, use normal allocation for i>0 */
            if (!bSimple && i == 0)
            {
                nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, alloc, free);
            }
            else
            {
                nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, nullptr, nullptr);
                if (bSimple && nbl_list->nnbl > 1)
                {
                    snew(nbl_list->nbl_work[i], 1);
                    nbnxn_init_pairlist(nbl_list->nbl_work[i], nbl_list->bSimple, nullptr, nullptr);
                }
            }

            snew(nbl_list->nbl_fep[i], 1);
            nbnxn_init_pairlist_fep(nbl_list->nbl_fep[i]);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
}
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics_simple(FILE *fp, const nbnxn_pairlist_t *nbl,
                                           const nbnxn_search_t nbs, real rl)
{
    const nbnxn_grid_t *grid;
    int                 cs[SHIFTS];
    int                 npexcl;

    grid = &nbs->grid[0];

    fprintf(fp, "nbl nci %d ncj %d\n",
            nbl->nci, nbl->ncjInUse);
    fprintf(fp, "nbl na_sc %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_sc, rl, nbl->ncjInUse, nbl->ncjInUse/(double)grid->nc,
            nbl->ncjInUse/(double)grid->nc*grid->na_sc,
            nbl->ncjInUse/(double)grid->nc*grid->na_sc/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nc*grid->na_sc/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    fprintf(fp, "nbl average j cell list length %.1f\n",
            0.25*nbl->ncjInUse/(double)std::max(nbl->nci, 1));

    for (int s = 0; s < SHIFTS; s++)
    {
        cs[s] = 0;
    }
    npexcl = 0;
    for (int i = 0; i < nbl->nci; i++)
    {
        cs[nbl->ci[i].shift & NBNXN_CI_SHIFT] +=
            nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start;

        int j = nbl->ci[i].cj_ind_start;
        while (j < nbl->ci[i].cj_ind_end &&
               nbl->cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
        {
            npexcl++;
            j++;
        }
    }
    fprintf(fp, "nbl cell pairs, total: %d excl: %d %.1f%%\n",
            nbl->ncj, npexcl, 100*npexcl/(double)std::max(nbl->ncj, 1));
    for (int s = 0; s < SHIFTS; s++)
    {
        if (cs[s] > 0)
        {
            fprintf(fp, "nbl shift %2d ncj %3d\n", s, cs[s]);
        }
    }
}
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics_supersub(FILE *fp, const nbnxn_pairlist_t *nbl,
                                             const nbnxn_search_t nbs, real rl)
{
    const nbnxn_grid_t *grid;
    int                 b;
    int                 c[c_gpuNumClusterPerCell + 1];
    double              sum_nsp, sum_nsp2;
    int                 nsp_max;

    /* This code only produces correct statistics with domain decomposition */
    grid = &nbs->grid[0];

    fprintf(fp, "nbl nsci %d ncj4 %d nsi %d excl4 %d\n",
            nbl->nsci, nbl->ncj4, nbl->nci_tot, nbl->nexcl);
    fprintf(fp, "nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_ci, rl, nbl->nci_tot, nbl->nci_tot/(double)grid->nsubc_tot,
            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c,
            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nsubc_tot*grid->na_c/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    sum_nsp  = 0;
    sum_nsp2 = 0;
    nsp_max  = 0;
    for (int si = 0; si <= c_gpuNumClusterPerCell; si++)
    {
        c[si] = 0;
    }
    for (int i = 0; i < nbl->nsci; i++)
    {
        int nsp = 0;
        for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
        {
            for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
            {
                b = 0;
                for (int si = 0; si < c_gpuNumClusterPerCell; si++)
                {
                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))
                    {
                        b++;
                    }
                }
                nsp += b;
                c[b]++;
            }
        }
        sum_nsp  += nsp;
        sum_nsp2 += nsp*nsp;
        nsp_max   = std::max(nsp_max, nsp);
    }
    if (nbl->nsci > 0)
    {
        sum_nsp  /= nbl->nsci;
        sum_nsp2 /= nbl->nsci;
    }
    fprintf(fp, "nbl #cluster-pairs: av %.1f stddev %.1f max %d\n",
            sum_nsp, std::sqrt(sum_nsp2 - sum_nsp*sum_nsp), nsp_max);

    if (nbl->ncj4 > 0)
    {
        for (b = 0; b <= c_gpuNumClusterPerCell; b++)
        {
            fprintf(fp, "nbl j-list #i-subcell %d %7d %4.1f\n",
                    b, c[b],
                    100.0*c[b]/(double)(nbl->ncj4*c_nbnxnGpuJgroupSize));
        }
    }
}
/* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp */
static void low_get_nbl_exclusions(nbnxn_pairlist_t *nbl, int cj4,
                                   int warp, nbnxn_excl_t **excl)
{
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
    {
        /* No exclusions set, make a new list entry */
        nbl->cj4[cj4].imei[warp].excl_ind = nbl->nexcl;
        nbl->nexcl++;
        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
        set_no_excls(*excl);
    }
    else
    {
        /* We already have some exclusions, new ones can be added to the list */
        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
    }
}
1083 * generates a new element and allocates extra memory, if necessary.
1085 static void get_nbl_exclusions_1(nbnxn_pairlist_t
*nbl
, int cj4
,
1086 int warp
, nbnxn_excl_t
**excl
)
1088 if (nbl
->cj4
[cj4
].imei
[warp
].excl_ind
== 0)
1090 /* We need to make a new list entry, check if we have space */
1091 check_excl_space(nbl
, 1);
1093 low_get_nbl_exclusions(nbl
, cj4
, warp
, excl
);
/* Returns pointers to the exclusion masks for cj4-unit cj4 for both warps,
 * generates a new element and allocates extra memory, if necessary.
 */
static void get_nbl_exclusions_2(nbnxn_pairlist_t *nbl, int cj4,
                                 nbnxn_excl_t **excl_w0,
                                 nbnxn_excl_t **excl_w1)
{
    /* Check for space we might need */
    check_excl_space(nbl, 2);

    low_get_nbl_exclusions(nbl, cj4, 0, excl_w0);
    low_get_nbl_exclusions(nbl, cj4, 1, excl_w1);
}
/* Sets the self exclusions i=j and pair exclusions i>j */
static void set_self_and_newton_excls_supersub(nbnxn_pairlist_t *nbl,
                                               int cj4_ind, int sj_offset,
                                               int i_cluster_in_cell)
{
    nbnxn_excl_t *excl[c_nbnxnGpuClusterpairSplit];

    /* Here we only set the self and double pair exclusions */

    static_assert(c_nbnxnGpuClusterpairSplit == 2, "");

    get_nbl_exclusions_2(nbl, cj4_ind, &excl[0], &excl[1]);

    /* Only minor < major bits set */
    for (int ej = 0; ej < nbl->na_ci; ej++)
    {
        int w = (ej >> 2);
        for (int ei = ej; ei < nbl->na_ci; ei++)
        {
            excl[w]->pair[(ej & (c_nbnxnGpuJgroupSize-1))*nbl->na_ci + ei] &=
                ~(1U << (sj_offset*c_gpuNumClusterPerCell + i_cluster_in_cell));
        }
    }
}
/* Returns a diagonal or off-diagonal interaction mask for plain C lists */
static unsigned int get_imask(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}
/* Returns a diagonal or off-diagonal interaction mask for cj-size=2 */
gmx_unused static unsigned int get_imask_simd_j2(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci*2 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_0 :
            (rdiag && ci*2+1 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_1 :
             NBNXN_INTERACTION_MASK_ALL));
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=4 */
gmx_unused static unsigned int get_imask_simd_j4(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=8 */
gmx_unused static unsigned int get_imask_simd_j8(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj*2 ? NBNXN_INTERACTION_MASK_DIAG_J8_0 :
            (rdiag && ci == cj*2+1 ? NBNXN_INTERACTION_MASK_DIAG_J8_1 :
             NBNXN_INTERACTION_MASK_ALL));
}
#if GMX_SIMD_REAL_WIDTH == 2
#define get_imask_simd_4xn  get_imask_simd_j2
#endif
#if GMX_SIMD_REAL_WIDTH == 4
#define get_imask_simd_4xn  get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 8
#define get_imask_simd_4xn  get_imask_simd_j8
#define get_imask_simd_2xnn get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 16
#define get_imask_simd_2xnn get_imask_simd_j8
#endif
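/* Illustrative mapping (not from the original source): with
 * GMX_SIMD_REAL_WIDTH == 8 the 4xN kernel pairs i-clusters of 4 with
 * j-clusters of 8 (get_imask_simd_j8), while the 2xNN kernel uses j-clusters
 * of 4 (get_imask_simd_j4), consistent with jClusterSize() above.
 */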
/* Plain C code for checking and adding cluster-pairs to the list.
 *
 * \param[in]     gridj               The j-grid
 * \param[in,out] nbl                 The pair-list to store the cluster pairs in
 * \param[in]     icluster            The index of the i-cluster
 * \param[in]     jclusterFirst       The first cluster in the j-range
 * \param[in]     jclusterLast        The last cluster in the j-range
 * \param[in]     excludeSubDiagonal  Exclude atom pairs with i-index > j-index
 * \param[in]     x_j                 Coordinates for the j-atoms, in xyz format
 * \param[in]     rlist2              The squared list cut-off
 * \param[in]     rbb2                The squared cut-off for putting cluster-pairs in the list based on bounding box distance only
 * \param[in,out] numDistanceChecks   The number of distance checks performed
 */
static void
makeClusterListSimple(const nbnxn_grid_t       *gridj,
                      nbnxn_pairlist_t         *nbl,
                      int                       icluster,
                      int                       jclusterFirst,
                      int                       jclusterLast,
                      bool                      excludeSubDiagonal,
                      const real * gmx_restrict x_j,
                      real                      rlist2,
                      float                     rbb2,
                      int * gmx_restrict        numDistanceChecks)
{
    const nbnxn_bb_t * gmx_restrict bb_ci = nbl->work->bb_ci;
    const real * gmx_restrict       x_ci  = nbl->work->x_ci;

    gmx_bool                        InRange;

    /* Find the first j-cluster in range, starting from the low end */
    InRange = FALSE;
    while (!InRange && jclusterFirst <= jclusterLast)
    {
        real d2 = subc_bb_dist2(0, bb_ci, jclusterFirst, gridj->bb);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rlist2)
        {
            int cjf_gl = gridj->cell0 + jclusterFirst;
            for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
            {
                for (int j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
                {
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rlist2);
                }
            }
            *numDistanceChecks += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
        }
        if (!InRange)
        {
            jclusterFirst++;
        }
    }
    if (!InRange)
    {
        return;
    }

    /* Find the last j-cluster in range, starting from the high end */
    InRange = FALSE;
    while (!InRange && jclusterLast > jclusterFirst)
    {
        real d2 = subc_bb_dist2(0, bb_ci, jclusterLast, gridj->bb);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rlist2)
        {
            int cjl_gl = gridj->cell0 + jclusterLast;
            for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
            {
                for (int j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
                {
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rlist2);
                }
            }
            *numDistanceChecks += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
        }
        if (!InRange)
        {
            jclusterLast--;
        }
    }

    if (jclusterFirst <= jclusterLast)
    {
        for (int jcluster = jclusterFirst; jcluster <= jclusterLast; jcluster++)
        {
            /* Store cj and the interaction mask */
            nbl->cj[nbl->ncj].cj   = gridj->cell0 + jcluster;
            nbl->cj[nbl->ncj].excl = get_imask(excludeSubDiagonal, icluster, jcluster);
            nbl->ncj++;
        }
        /* Increase the closing index in i super-cell list */
        nbl->ci[nbl->nci].cj_ind_end = nbl->ncj;
    }
}
1300 #include "gromacs/mdlib/nbnxn_search_simd_4xn.h"
1302 #ifdef GMX_NBNXN_SIMD_2XNN
1303 #include "gromacs/mdlib/nbnxn_search_simd_2xnn.h"
1306 /* Plain C or SIMD4 code for making a pair list of super-cell sci vs scj.
1307 * Checks bounding box distances and possibly atom pair distances.
1309 static void make_cluster_list_supersub(const nbnxn_grid_t
*gridi
,
1310 const nbnxn_grid_t
*gridj
,
1311 nbnxn_pairlist_t
*nbl
,
1313 gmx_bool sci_equals_scj
,
1314 int stride
, const real
*x
,
1315 real rlist2
, float rbb2
,
1316 int *numDistanceChecks
)
1318 nbnxn_list_work_t
*work
= nbl
->work
;
1321 const float *pbb_ci
= work
->pbb_ci
;
1323 const nbnxn_bb_t
*bb_ci
= work
->bb_ci
;
1326 assert(c_nbnxnGpuClusterSize
== gridi
->na_c
);
1327 assert(c_nbnxnGpuClusterSize
== gridj
->na_c
);
1329 /* We generate the pairlist mainly based on bounding-box distances
1330 * and do atom pair distance based pruning on the GPU.
1331 * Only if a j-group contains a single cluster-pair, we try to prune
1332 * that pair based on atom distances on the CPU to avoid empty j-groups.
1334 #define PRUNE_LIST_CPU_ONE 1
1335 #define PRUNE_LIST_CPU_ALL 0
1337 #if PRUNE_LIST_CPU_ONE
1341 float *d2l
= work
->d2
;
1343 for (int subc
= 0; subc
< gridj
->nsubc
[scj
]; subc
++)
1345 int cj4_ind
= nbl
->work
->cj_ind
/c_nbnxnGpuJgroupSize
;
1346 int cj_offset
= nbl
->work
->cj_ind
- cj4_ind
*c_nbnxnGpuJgroupSize
;
1347 nbnxn_cj4_t
*cj4
= &nbl
->cj4
[cj4_ind
];
1349 int cj
= scj
*c_gpuNumClusterPerCell
+ subc
;
1351 int cj_gl
= gridj
->cell0
*c_gpuNumClusterPerCell
+ cj
;
1353 /* Initialize this j-subcell i-subcell list */
1354 cj4
->cj
[cj_offset
] = cj_gl
;
1363 ci1
= gridi
->nsubc
[sci
];
1367 /* Determine all ci1 bb distances in one call with SIMD4 */
1368 subc_bb_dist2_simd4_xxxx(gridj
->pbb
.data() + (cj
>> STRIDE_PBB_2LOG
)*NNBSBB_XXXX
+ (cj
& (STRIDE_PBB
-1)),
1370 *numDistanceChecks
+= c_nbnxnGpuClusterSize
*2;
1374 unsigned int imask
= 0;
1375 /* We use a fixed upper-bound instead of ci1 to help optimization */
1376 for (int ci
= 0; ci
< c_gpuNumClusterPerCell
; ci
++)
1384 /* Determine the bb distance between ci and cj */
1385 d2l
[ci
] = subc_bb_dist2(ci
, bb_ci
, cj
, gridj
->bb
);
1386 *numDistanceChecks
+= 2;
1390 #if PRUNE_LIST_CPU_ALL
1391 /* Check if the distance is within the distance where
1392 * we use only the bounding box distance rbb,
1393 * or within the cut-off and there is at least one atom pair
1394 * within the cut-off. This check is very costly.
1396 *numDistanceChecks
+= c_nbnxnGpuClusterSize
*c_nbnxnGpuClusterSize
;
1399 clusterpair_in_range(work
, ci
, cj_gl
, stride
, x
, rlist2
)))
1401 /* Check if the distance between the two bounding boxes
1402 * in within the pair-list cut-off.
1407 /* Flag this i-subcell to be taken into account */
1408 imask
|= (1U << (cj_offset
*c_gpuNumClusterPerCell
+ ci
));
1410 #if PRUNE_LIST_CPU_ONE
1418 #if PRUNE_LIST_CPU_ONE
1419 /* If we only found 1 pair, check if any atoms are actually
1420 * within the cut-off, so we could get rid of it.
1422 if (npair
== 1 && d2l
[ci_last
] >= rbb2
&&
1423 !clusterpair_in_range(work
, ci_last
, cj_gl
, stride
, x
, rlist2
))
1425 imask
&= ~(1U << (cj_offset
*c_gpuNumClusterPerCell
+ ci_last
));
1432 /* We have a useful sj entry, close it now */
1434 /* Set the exclusions for the ci==sj entry.
1435 * Here we don't bother to check if this entry is actually flagged,
1436 * as it will nearly always be in the list.
1440 set_self_and_newton_excls_supersub(nbl
, cj4_ind
, cj_offset
, subc
);
1443 /* Copy the cluster interaction mask to the list */
1444 for (int w
= 0; w
< c_nbnxnGpuClusterpairSplit
; w
++)
1446 cj4
->imei
[w
].imask
|= imask
;
1449 nbl
->work
->cj_ind
++;
1451 /* Keep the count */
1452 nbl
->nci_tot
+= npair
;
1454 /* Increase the closing index in i super-cell list */
1455 nbl
->sci
[nbl
->nsci
].cj4_ind_end
=
1456 (nbl
->work
->cj_ind
+ c_nbnxnGpuJgroupSize
- 1)/c_nbnxnGpuJgroupSize
;
/* Returns how many contiguous j-clusters we have starting in the i-list */
template <typename CjListType>
static int numContiguousJClusters(const int         cjIndexStart,
                                  const int         cjIndexEnd,
                                  const CjListType &cjList)
{
    const int firstJCluster = nblCj(cjList, cjIndexStart);

    int       numContiguous = 0;

    while (cjIndexStart + numContiguous < cjIndexEnd &&
           nblCj(cjList, cjIndexStart + numContiguous) == firstJCluster + numContiguous)
    {
        numContiguous++;
    }

    return numContiguous;
}
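/* Illustrative example (not from the original source): for j-clusters
 * 7, 8, 9, 14, 17 starting at cjIndexStart, this returns 3: the values
 * 7, 8, 9 increase in step with the list index, while 14 breaks the run.
 */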
/* Helper struct for efficient searching for excluded atoms in a j-list */
struct JListRanges
{
    /* Constructor */
    template <typename CjListType>
    JListRanges(int               cjIndexStart,
                int               cjIndexEnd,
                const CjListType &cjList);

    int cjIndexStart; // The start index in the j-list
    int cjIndexEnd;   // The end index in the j-list
    int cjFirst;      // The j-cluster with index cjIndexStart
    int cjLast;       // The j-cluster with index cjIndexEnd-1
    int numDirect;    // Up to cjIndexStart+numDirect the j-clusters are cjFirst + the index offset
};
template <typename CjListType>
JListRanges::JListRanges(int               cjIndexStart,
                         int               cjIndexEnd,
                         const CjListType &cjList) :
    cjIndexStart(cjIndexStart),
    cjIndexEnd(cjIndexEnd)
{
    GMX_ASSERT(cjIndexEnd > cjIndexStart, "JListRanges should only be called with non-empty lists");

    cjFirst   = nblCj(cjList, cjIndexStart);
    cjLast    = nblCj(cjList, cjIndexEnd - 1);

    /* Determine how many contiguous j-cells we have starting
     * from the first i-cell. This number can be used to directly
     * calculate j-cell indices for excluded atoms.
     */
    numDirect = numContiguousJClusters(cjIndexStart, cjIndexEnd, cjList);
}
/* Return the index of \p jCluster in the given range or -1 when not present
 *
 * Note: This code is executed very often and therefore performance is
 *       important. It should be inlined and fully optimized.
 */
template <typename CjListType>
static inline int findJClusterInJList(int                jCluster,
                                      const JListRanges &ranges,
                                      const CjListType  &cjList)
{
    int index;

    if (jCluster < ranges.cjFirst + ranges.numDirect)
    {
        /* We can calculate the index directly using the offset */
        index = ranges.cjIndexStart + jCluster - ranges.cjFirst;
    }
    else
    {
        /* Search for jCluster using bisection */
        index          = -1;
        int rangeStart = ranges.cjIndexStart + ranges.numDirect;
        int rangeEnd   = ranges.cjIndexEnd;
        while (index == -1 && rangeStart < rangeEnd)
        {
            int rangeMiddle = (rangeStart + rangeEnd) >> 1;

            const int clusterMiddle = nblCj(cjList, rangeMiddle);

            if (jCluster == clusterMiddle)
            {
                index = rangeMiddle;
            }
            else if (jCluster < clusterMiddle)
            {
                rangeEnd = rangeMiddle;
            }
            else
            {
                rangeStart = rangeMiddle + 1;
            }
        }
    }

    return index;
}
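/* Worked example (illustrative): with cjIndexStart = 20, cjFirst = 7 and
 * numDirect = 3, jCluster 8 is found directly at index 20 + (8 - 7) = 21,
 * while a j-cluster beyond the contiguous run is located by bisection in
 * the remaining, monotonically increasing part of the list.
 */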
/* Set all atom-pair exclusions for a simple type list i-entry
 *
 * Set all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for simple list entry iEntry.
 */
static void
setExclusionsForSimpleIentry(const nbnxn_search_t  nbs,
                             nbnxn_pairlist_t     *nbl,
                             gmx_bool              diagRemoved,
                             int                   na_cj_2log,
                             const nbnxn_ci_t     &iEntry,
                             const t_blocka       &exclusions)
{
    if (iEntry.cj_ind_end == iEntry.cj_ind_start)
    {
        /* Empty list: no exclusions */
        return;
    }

    const JListRanges        ranges(iEntry.cj_ind_start, iEntry.cj_ind_end, nbl->cj);

    const int                iCluster = iEntry.ci;

    gmx::ArrayRef<const int> cell = nbs->cell;

    /* Loop over the atoms in the i-cluster */
    for (int i = 0; i < nbl->na_sc; i++)
    {
        const int iIndex = iCluster*nbl->na_sc + i;
        const int iAtom  = nbs->a[iIndex];
        if (iAtom >= 0)
        {
            /* Loop over the topology-based exclusions for this i-atom */
            for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1]; exclIndex++)
            {
                const int jAtom = exclusions.a[exclIndex];

                if (jAtom == iAtom)
                {
                    /* The self exclusions are already set, save some time */
                    continue;
                }

                /* Get the index of the j-atom in the nbnxn atom data */
                const int jIndex = cell[jAtom];

                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                if (diagRemoved && jIndex <= iIndex)
                {
                    continue;
                }

                const int jCluster = (jIndex >> na_cj_2log);

                /* Could the cluster be in our list? */
                if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
                {
                    const int index =
                        findJClusterInJList(jCluster, ranges, nbl->cj);

                    if (index >= 0)
                    {
                        /* We found an exclusion, clear the corresponding
                         * interaction bit.
                         */
                        const int innerJ = jIndex - (jCluster << na_cj_2log);

                        nbl->cj[index].excl &= ~(1U << ((i << na_cj_2log) + innerJ));
                    }
                }
            }
        }
    }
}
/* Add a new i-entry to the FEP list and copy the i-properties */
static inline void fep_list_new_nri_copy(t_nblist *nlist)
{
    /* Add a new i-entry */
    nlist->nri++;

    assert(nlist->nri < nlist->maxnri);

    /* Duplicate the last i-entry, except for jindex, which continues */
    nlist->iinr[nlist->nri]   = nlist->iinr[nlist->nri-1];
    nlist->shift[nlist->nri]  = nlist->shift[nlist->nri-1];
    nlist->gid[nlist->nri]    = nlist->gid[nlist->nri-1];
    nlist->jindex[nlist->nri] = nlist->nrj;
}
/* For load balancing of the free-energy lists over threads, we set
 * the maximum nrj size of an i-entry to 40. This leads to good
 * load balancing in the worst case scenario of a single perturbed
 * particle on 16 threads, while not introducing significant overhead.
 * Note that half of the perturbed pairs will anyhow end up in very small lists,
 * since non-perturbed i-particles will see few perturbed j-particles.
 */
const int max_nrj_fep = 40;
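/* Illustrative arithmetic (not from the original source): a single perturbed
 * i-particle with ~640 perturbed pairs is split into 640/40 = 16 i-entries,
 * exactly one per thread in the 16-thread worst case described above.
 */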
/* Exclude the perturbed pairs from the Verlet list. This is only done to avoid
 * singularities for overlapping particles (0/0), since the charges and
 * LJ parameters have been zeroed in the nbnxn data structure.
 * Simultaneously make a group pair list for the perturbed pairs.
 */
static void make_fep_list(const nbnxn_search_t    nbs,
                          const nbnxn_atomdata_t *nbat,
                          nbnxn_pairlist_t       *nbl,
                          gmx_bool                bDiagRemoved,
                          nbnxn_ci_t             *nbl_ci,
                          const nbnxn_grid_t     *gridi,
                          const nbnxn_grid_t     *gridj,
                          t_nblist               *nlist)
{
    int      ci, cj_ind_start, cj_ind_end, cja, cjr;
    int      nri_max;
    int      ngid, gid_i = 0, gid_j, gid;
    int      egp_shift, egp_mask;
    int      gid_cj = 0;
    int      ind_i, ind_j, ai, aj;
    int      nri;
    gmx_bool bFEP_i, bFEP_i_all;

    if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
    {
        /* Empty list */
        return;
    }

    ci = nbl_ci->ci;

    cj_ind_start = nbl_ci->cj_ind_start;
    cj_ind_end   = nbl_ci->cj_ind_end;

    /* In worst case we have alternating energy groups
     * and create #atom-pair lists, which means we need the size
     * of a cluster pair (na_ci*na_cj) times the number of cj's.
     */
    nri_max = nbl->na_ci*nbl->na_cj*(cj_ind_end - cj_ind_start);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }

    ngid = nbat->nenergrp;

    if (static_cast<std::size_t>(ngid*gridj->na_cj) > sizeof(gid_cj)*8)
    {
        gmx_fatal(FARGS, "The Verlet scheme with %dx%d kernels and free-energy only supports up to %d energy groups",
                  gridi->na_c, gridj->na_cj, (sizeof(gid_cj)*8)/gridj->na_cj);
    }

    egp_shift = nbat->neg_2log;
    egp_mask  = (1<<nbat->neg_2log) - 1;

    /* Loop over the atoms in the i sub-cell */
    bFEP_i_all = TRUE;
    for (int i = 0; i < nbl->na_ci; i++)
    {
        ind_i = ci*nbl->na_ci + i;
        ai    = nbs->a[ind_i];
        if (ai >= 0)
        {
            nri                  = nlist->nri;
            nlist->jindex[nri+1] = nlist->jindex[nri];
            nlist->iinr[nri]     = ai;
            /* The actual energy group pair index is set later */
            nlist->gid[nri]      = 0;
            nlist->shift[nri]    = nbl_ci->shift & NBNXN_CI_SHIFT;

            bFEP_i = gridi->fep[ci - gridi->cell0] & (1 << i);

            bFEP_i_all = bFEP_i_all && bFEP_i;

            if (nlist->nrj + (cj_ind_end - cj_ind_start)*nbl->na_cj > nlist->maxnrj)
            {
                nlist->maxnrj = over_alloc_small(nlist->nrj + (cj_ind_end - cj_ind_start)*nbl->na_cj);
                srenew(nlist->jjnr,     nlist->maxnrj);
                srenew(nlist->excl_fep, nlist->maxnrj);
            }

            if (ngid > 1)
            {
                gid_i = (nbat->energrp[ci] >> (egp_shift*i)) & egp_mask;
            }

            for (int cj_ind = cj_ind_start; cj_ind < cj_ind_end; cj_ind++)
            {
                unsigned int fep_cj;

                cja = nbl->cj[cj_ind].cj;

                if (gridj->na_cj == gridj->na_c)
                {
                    cjr    = cja - gridj->cell0;
                    fep_cj = gridj->fep[cjr];
                    if (ngid > 1)
                    {
                        gid_cj = nbat->energrp[cja];
                    }
                }
                else if (2*gridj->na_cj == gridj->na_c)
                {
                    cjr    = cja - gridj->cell0*2;
                    /* Extract half of the ci fep/energrp mask */
                    fep_cj = (gridj->fep[cjr>>1] >> ((cjr&1)*gridj->na_cj)) & ((1<<gridj->na_cj) - 1);
                    if (ngid > 1)
                    {
                        gid_cj = nbat->energrp[cja>>1] >> ((cja&1)*gridj->na_cj*egp_shift) & ((1<<(gridj->na_cj*egp_shift)) - 1);
                    }
                }
                else
                {
                    cjr    = cja - (gridj->cell0>>1);
                    /* Combine two ci fep masks/energrp */
                    fep_cj = gridj->fep[cjr*2] + (gridj->fep[cjr*2+1] << gridj->na_c);
                    if (ngid > 1)
                    {
                        gid_cj = nbat->energrp[cja*2] + (nbat->energrp[cja*2+1] << (gridj->na_c*egp_shift));
                    }
                }

                if (bFEP_i || fep_cj != 0)
                {
                    for (int j = 0; j < nbl->na_cj; j++)
                    {
                        /* Is this interaction perturbed and not excluded? */
                        ind_j = cja*nbl->na_cj + j;
                        aj    = nbs->a[ind_j];
                        if (aj >= 0 &&
                            (bFEP_i || (fep_cj & (1 << j))) &&
                            (!bDiagRemoved || ind_j >= ind_i))
                        {
                            if (ngid > 1)
                            {
                                gid_j = (gid_cj >> (j*egp_shift)) & egp_mask;
                                gid   = GID(gid_i, gid_j, ngid);

                                if (nlist->nrj > nlist->jindex[nri] &&
                                    nlist->gid[nri] != gid)
                                {
                                    /* Energy group pair changed: new list */
                                    fep_list_new_nri_copy(nlist);
                                    nri = nlist->nri;
                                }
                                nlist->gid[nri] = gid;
                            }

                            if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                            {
                                fep_list_new_nri_copy(nlist);
                                nri = nlist->nri;
                            }

                            /* Add it to the FEP list */
                            nlist->jjnr[nlist->nrj]     = aj;
                            nlist->excl_fep[nlist->nrj] = (nbl->cj[cj_ind].excl >> (i*nbl->na_cj + j)) & 1;
                            nlist->nrj++;

                            /* Exclude it from the normal list.
                             * Note that the charge has been set to zero,
                             * but we need to avoid 0/0, as perturbed atoms
                             * can be on top of each other.
                             */
                            nbl->cj[cj_ind].excl &= ~(1U << (i*nbl->na_cj + j));
                        }
                    }
                }
            }

            if (nlist->nrj > nlist->jindex[nri])
            {
                /* Actually add this new, non-empty, list */
                nlist->nri++;
                nlist->jindex[nlist->nri] = nlist->nrj;
            }
        }
    }

    if (bFEP_i_all)
    {
        /* All interactions are perturbed, we can skip this entry */
        nbl_ci->cj_ind_end = cj_ind_start;
        nbl->ncjInUse     -= cj_ind_end - cj_ind_start;
    }
}
/* Return the index of a j-cluster within its cj4 group */
static inline int cj_mod_cj4(int cj)
{
    return cj & (c_nbnxnGpuJgroupSize - 1);
}

/* Convert a j-cluster index to a cj4 group index */
static inline int cj_to_cj4(int cj)
{
    return cj/c_nbnxnGpuJgroupSize;
}

/* Return the index of a j-atom within a warp */
static inline int a_mod_wj(int a)
{
    return a & (c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit - 1);
}
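/* These helpers rely on the group sizes being powers of two, so the bit mask
 * is equivalent to a modulo, e.g. with c_nbnxnGpuJgroupSize == 4:
 * cj_mod_cj4(10) == (10 & 3) == 2 == 10 % 4.
 */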
/* As make_fep_list above, but for super/sub lists. */
static void make_fep_list_supersub(const nbnxn_search_t    nbs,
                                   const nbnxn_atomdata_t *nbat,
                                   nbnxn_pairlist_t       *nbl,
                                   gmx_bool                bDiagRemoved,
                                   const nbnxn_sci_t      *nbl_sci,
                                   real                    shx,
                                   real                    shy,
                                   real                    shz,
                                   real                    rlist_fep2,
                                   const nbnxn_grid_t     *gridi,
                                   const nbnxn_grid_t     *gridj,
                                   t_nblist               *nlist)
{
    int                sci, cj4_ind_start, cj4_ind_end, cjr;
    int                nri_max;
    int                c_abs;
    int                ind_i, ind_j, ai, aj;
    int                nri;
    gmx_bool           bFEP_i;
    real               xi, yi, zi;
    const nbnxn_cj4_t *cj4;

    if (nbl_sci->cj4_ind_end == nbl_sci->cj4_ind_start)
    {
        /* Empty list */
        return;
    }

    sci = nbl_sci->sci;

    cj4_ind_start = nbl_sci->cj4_ind_start;
    cj4_ind_end   = nbl_sci->cj4_ind_end;

    /* Here we process one super-cell, max #atoms na_sc, versus a list
     * cj4 entries, each with max c_nbnxnGpuJgroupSize cj's, each
     * of size na_cj atoms.
     * On the GPU we don't support energy groups (yet).
     * So for each of the na_sc i-atoms, we need max one FEP list
     * for each max_nrj_fep j-atoms.
     */
    nri_max = nbl->na_sc*nbl->na_cj*(1 + ((cj4_ind_end - cj4_ind_start)*c_nbnxnGpuJgroupSize)/max_nrj_fep);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }

    /* Loop over the atoms in the i super-cluster */
    for (int c = 0; c < c_gpuNumClusterPerCell; c++)
    {
        c_abs = sci*c_gpuNumClusterPerCell + c;

        for (int i = 0; i < nbl->na_ci; i++)
        {
            ind_i = c_abs*nbl->na_ci + i;
            ai    = nbs->a[ind_i];
            if (ai >= 0)
            {
                nri                  = nlist->nri;
                nlist->jindex[nri+1] = nlist->jindex[nri];
                nlist->iinr[nri]     = ai;
                /* With GPUs, energy groups are not supported */
                nlist->gid[nri]      = 0;
                nlist->shift[nri]    = nbl_sci->shift & NBNXN_CI_SHIFT;

                bFEP_i = (gridi->fep[c_abs - gridi->cell0*c_gpuNumClusterPerCell] & (1 << i));

                xi = nbat->x[ind_i*nbat->xstride+XX] + shx;
                yi = nbat->x[ind_i*nbat->xstride+YY] + shy;
                zi = nbat->x[ind_i*nbat->xstride+ZZ] + shz;

                if ((nlist->nrj + cj4_ind_end - cj4_ind_start)*c_nbnxnGpuJgroupSize*nbl->na_cj > nlist->maxnrj)
                {
                    nlist->maxnrj = over_alloc_small((nlist->nrj + cj4_ind_end - cj4_ind_start)*c_nbnxnGpuJgroupSize*nbl->na_cj);
                    srenew(nlist->jjnr,     nlist->maxnrj);
                    srenew(nlist->excl_fep, nlist->maxnrj);
                }

                for (int cj4_ind = cj4_ind_start; cj4_ind < cj4_ind_end; cj4_ind++)
                {
                    cj4 = &nbl->cj4[cj4_ind];

                    for (int gcj = 0; gcj < c_nbnxnGpuJgroupSize; gcj++)
                    {
                        unsigned int fep_cj;

                        if ((cj4->imei[0].imask & (1U << (gcj*c_gpuNumClusterPerCell + c))) == 0)
                        {
                            /* Skip this ci for this cj */
                            continue;
                        }

                        cjr = cj4->cj[gcj] - gridj->cell0*c_gpuNumClusterPerCell;

                        fep_cj = gridj->fep[cjr];

                        if (bFEP_i || fep_cj != 0)
                        {
                            for (int j = 0; j < nbl->na_cj; j++)
                            {
                                /* Is this interaction perturbed and not excluded? */
                                ind_j = (gridj->cell0*c_gpuNumClusterPerCell + cjr)*nbl->na_cj + j;
                                aj    = nbs->a[ind_j];
                                if (aj >= 0 &&
                                    (bFEP_i || (fep_cj & (1 << j))) &&
                                    (!bDiagRemoved || ind_j >= ind_i))
                                {
                                    nbnxn_excl_t *excl;
                                    int           excl_pair;
                                    unsigned int  excl_bit;
                                    real          dx, dy, dz;

                                    const int     jHalf = j/(c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit);
                                    get_nbl_exclusions_1(nbl, cj4_ind, jHalf, &excl);

                                    excl_pair = a_mod_wj(j)*nbl->na_ci + i;
                                    excl_bit  = (1U << (gcj*c_gpuNumClusterPerCell + c));

                                    dx = nbat->x[ind_j*nbat->xstride+XX] - xi;
                                    dy = nbat->x[ind_j*nbat->xstride+YY] - yi;
                                    dz = nbat->x[ind_j*nbat->xstride+ZZ] - zi;

                                    /* The unpruned GPU list has more than 2/3
                                     * of the atom pairs beyond rlist. Using
                                     * this list will cause a lot of overhead
                                     * in the CPU FEP kernels, especially
                                     * relative to the fast GPU kernels.
                                     * So we prune the FEP list here.
                                     */
                                    if (dx*dx + dy*dy + dz*dz < rlist_fep2)
                                    {
                                        if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                                        {
                                            fep_list_new_nri_copy(nlist);
                                            nri = nlist->nri;
                                        }

                                        /* Add it to the FEP list */
                                        nlist->jjnr[nlist->nrj]     = aj;
                                        nlist->excl_fep[nlist->nrj] = (excl->pair[excl_pair] & excl_bit) ? 1 : 0;
                                        nlist->nrj++;
                                    }

                                    /* Exclude it from the normal list.
                                     * Note that the charge and LJ parameters have
                                     * been set to zero, but we need to avoid 0/0,
                                     * as perturbed atoms can be on top of each other.
                                     */
                                    excl->pair[excl_pair] &= ~excl_bit;
                                }
                            }

                            /* Note that we could mask out this pair in imask
                             * if all i- and/or all j-particles are perturbed.
                             * But since the perturbed pairs on the CPU will
                             * take an order of magnitude more time, the GPU
                             * will finish before the CPU and there is no gain.
                             */
                        }
                    }
                }

                if (nlist->nrj > nlist->jindex[nri])
                {
                    /* Actually add this new, non-empty, list */
                    nlist->nri++;
                    nlist->jindex[nlist->nri] = nlist->nrj;
                }
            }
        }
    }
}
/* Set all atom-pair exclusions for a GPU type list i-entry
 *
 * Sets all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for i-super-cluster list entry iEntry.
 */
static void
setExclusionsForGpuIentry(const nbnxn_search_t  nbs,
                          nbnxn_pairlist_t     *nbl,
                          gmx_bool              diagRemoved,
                          const nbnxn_sci_t    &iEntry,
                          const t_blocka       &exclusions)
{
    if (iEntry.cj4_ind_end == iEntry.cj4_ind_start)
    {
        /* Empty list */
        return;
    }

    /* Set the search ranges using start and end j-cluster indices.
     * Note that here we can not use cj4_ind_end, since the last cj4
     * can be only partially filled, so we use cj_ind.
     */
    const JListRanges ranges(iEntry.cj4_ind_start*c_nbnxnGpuJgroupSize,
                             nbl->work->cj_ind,
                             nbl->cj4);

    GMX_ASSERT(nbl->na_ci == c_nbnxnGpuClusterSize, "na_ci should match the GPU cluster size");
    constexpr int            c_clusterSize      = c_nbnxnGpuClusterSize;
    constexpr int            c_superClusterSize = c_nbnxnGpuNumClusterPerSupercluster*c_nbnxnGpuClusterSize;

    const int                iSuperCluster = iEntry.sci;

    gmx::ArrayRef<const int> cell = nbs->cell;

    /* Loop over the atoms in the i super-cluster */
    for (int i = 0; i < c_superClusterSize; i++)
    {
        const int iIndex = iSuperCluster*c_superClusterSize + i;
        const int iAtom  = nbs->a[iIndex];
        if (iAtom >= 0)
        {
            const int iCluster = i/c_clusterSize;

            /* Loop over the topology-based exclusions for this i-atom */
            for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1]; exclIndex++)
            {
                const int jAtom = exclusions.a[exclIndex];

                if (jAtom == iAtom)
                {
                    /* The self exclusions are already set, save some time */
                    continue;
                }

                /* Get the index of the j-atom in the nbnxn atom data */
                const int jIndex = cell[jAtom];

                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                /* NOTE: We would like to use iIndex on the right hand side,
                 * but that makes this routine 25% slower with gcc6/7.
                 * Even using c_superClusterSize makes it slower.
                 * Either of these changes triggers peeling of the exclIndex
                 * loop, which apparently leads to far less efficient code.
                 */
                if (diagRemoved && jIndex <= iSuperCluster*nbl->na_sc + i)
                {
                    continue;
                }

                const int jCluster = jIndex/c_clusterSize;

                /* Check whether the cluster is in our list */
                if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
                {
                    const int index =
                        findJClusterInJList(jCluster, ranges, nbl->cj4);

                    if (index >= 0)
                    {
                        /* We found an exclusion, clear the corresponding
                         * interaction bit.
                         */
                        const unsigned int pairMask = (1U << (cj_mod_cj4(index)*c_gpuNumClusterPerCell + iCluster));
                        /* Check if the i-cluster interacts with the j-cluster */
                        if (nbl_imask0(nbl, index) & pairMask)
                        {
                            const int innerI = (i      & (c_clusterSize - 1));
                            const int innerJ = (jIndex & (c_clusterSize - 1));

                            /* Determine which j-half (CUDA warp) we are in */
                            const int     jHalf = innerJ/(c_clusterSize/c_nbnxnGpuClusterpairSplit);

                            nbnxn_excl_t *interactionMask;
                            get_nbl_exclusions_1(nbl, cj_to_cj4(index), jHalf, &interactionMask);

                            interactionMask->pair[a_mod_wj(innerJ)*c_clusterSize + innerI] &= ~pairMask;
                        }
                    }
                }
            }
        }
    }
}
/* Reallocate the simple ci list for at least n entries */
static void nb_realloc_ci(nbnxn_pairlist_t *nbl, int n)
{
    nbl->ci_nalloc = over_alloc_small(n);
    nbnxn_realloc_void((void **)&nbl->ci,
                       nbl->nci*sizeof(*nbl->ci),
                       nbl->ci_nalloc*sizeof(*nbl->ci),
                       nbl->alloc, nbl->free);

    nbnxn_realloc_void((void **)&nbl->ciOuter,
                       nbl->nci*sizeof(*nbl->ciOuter),
                       nbl->ci_nalloc*sizeof(*nbl->ciOuter),
                       nbl->alloc, nbl->free);
}
/* Reallocate the super-cell sci list for at least n entries */
static void nb_realloc_sci(nbnxn_pairlist_t *nbl, int n)
{
    nbl->sci_nalloc = over_alloc_small(n);
    nbnxn_realloc_void((void **)&nbl->sci,
                       nbl->nsci*sizeof(*nbl->sci),
                       nbl->sci_nalloc*sizeof(*nbl->sci),
                       nbl->alloc, nbl->free);
}
/* Make a new ci entry at index nbl->nci */
static void new_ci_entry(nbnxn_pairlist_t *nbl, int ci, int shift, int flags)
{
    if (nbl->nci + 1 > nbl->ci_nalloc)
    {
        nb_realloc_ci(nbl, nbl->nci + 1);
    }
    nbl->ci[nbl->nci].ci           = ci;
    nbl->ci[nbl->nci].shift        = shift;
    /* Store the interaction flags along with the shift */
    nbl->ci[nbl->nci].shift       |= flags;
    nbl->ci[nbl->nci].cj_ind_start = nbl->ncj;
    nbl->ci[nbl->nci].cj_ind_end   = nbl->ncj;
}
/* Make a new sci entry at index nbl->nsci */
static void new_sci_entry(nbnxn_pairlist_t *nbl, int sci, int shift)
{
    if (nbl->nsci + 1 > nbl->sci_nalloc)
    {
        nb_realloc_sci(nbl, nbl->nsci + 1);
    }
    nbl->sci[nbl->nsci].sci           = sci;
    nbl->sci[nbl->nsci].shift         = shift;
    nbl->sci[nbl->nsci].cj4_ind_start = nbl->ncj4;
    nbl->sci[nbl->nsci].cj4_ind_end   = nbl->ncj4;
}
/* Sort the simple j-list cj on exclusions.
 * Entries with exclusions will all be sorted to the beginning of the list.
 */
static void sort_cj_excl(nbnxn_cj_t *cj, int ncj,
                         nbnxn_list_work_t *work)
{
    int jnew;

    if (ncj > work->cj_nalloc)
    {
        work->cj_nalloc = over_alloc_large(ncj);
        srenew(work->cj, work->cj_nalloc);
    }

    /* Make a list of the j-cells involving exclusions */
    jnew = 0;
    for (int j = 0; j < ncj; j++)
    {
        if (cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
        {
            work->cj[jnew++] = cj[j];
        }
    }
    /* Check if there are exclusions at all or not just the first entry */
    if (!((jnew == 0) ||
          (jnew == 1 && cj[0].excl != NBNXN_INTERACTION_MASK_ALL)))
    {
        for (int j = 0; j < ncj; j++)
        {
            if (cj[j].excl == NBNXN_INTERACTION_MASK_ALL)
            {
                work->cj[jnew++] = cj[j];
            }
        }
        for (int j = 0; j < ncj; j++)
        {
            cj[j] = work->cj[j];
        }
    }
}
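/* Illustrative sketch (not part of the algorithm): for a j-list with
 * exclusion masks [ALL, E1, ALL, E2], the two passes above produce
 * [E1, E2, ALL, ALL], i.e. a stable partition that moves all entries
 * carrying exclusions to the front so the kernels can process them in
 * one contiguous block.
 */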
/* Close this simple list i entry */
static void close_ci_entry_simple(nbnxn_pairlist_t *nbl)
{
    int jlen;

    /* All content of the new ci entry has already been filled correctly,
     * we only need to increase the count here (for non-empty lists).
     */
    jlen = nbl->ci[nbl->nci].cj_ind_end - nbl->ci[nbl->nci].cj_ind_start;
    if (jlen > 0)
    {
        sort_cj_excl(nbl->cj + nbl->ci[nbl->nci].cj_ind_start, jlen, nbl->work);

        /* The counts below are used for non-bonded pair/flop counts
         * and should therefore match the available kernel setups.
         */
        if (!(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_COUL(0)))
        {
            nbl->work->ncj_noq += jlen;
        }
        else if ((nbl->ci[nbl->nci].shift & NBNXN_CI_HALF_LJ(0)) ||
                 !(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_LJ(0)))
        {
            nbl->work->ncj_hlj += jlen;
        }

        nbl->nci++;
    }
}
/* Split sci entry for load balancing on the GPU.
 * Splitting ensures we have enough lists to fully utilize the whole GPU.
 * With progBal we generate progressively smaller lists, which improves
 * load balancing. As we only know the current count on our own thread,
 * we will need to estimate the current total amount of i-entries.
 * As the lists get concatenated later, this estimate depends
 * both on nthread and our own thread index.
 */
static void split_sci_entry(nbnxn_pairlist_t *nbl,
                            int nsp_target_av,
                            gmx_bool progBal, float nsp_tot_est,
                            int thread, int nthread)
{
    int nsp_max;
    int cj4_start, cj4_end, j4len;
    int sci;
    int nsp, nsp_sci, nsp_cj4, nsp_cj4_e, nsp_cj4_p;

    if (progBal)
    {
        float nsp_est;

        /* Estimate the total numbers of ci's of the nblist combined
         * over all threads using the target number of ci's.
         */
        nsp_est = (nsp_tot_est*thread)/nthread + nbl->nci_tot;

        /* The first ci blocks should be larger, to avoid overhead.
         * The last ci blocks should be smaller, to improve load balancing.
         * The factor 3/2 makes the first block 3/2 times the target average
         * and ensures that the total number of blocks ends up equal to
         * that of equally sized blocks of size nsp_target_av.
         */
        nsp_max = static_cast<int>(nsp_target_av*(nsp_tot_est*1.5/(nsp_est + nsp_tot_est)));
    }
    else
    {
        nsp_max = nsp_target_av;
    }

    cj4_start = nbl->sci[nbl->nsci-1].cj4_ind_start;
    cj4_end   = nbl->sci[nbl->nsci-1].cj4_ind_end;
    j4len     = cj4_end - cj4_start;

    if (j4len > 1 && j4len*c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize > nsp_max)
    {
        /* Remove the last ci entry and process the cj4's again */
        nbl->nsci -= 1;

        sci       = nbl->nsci;
        nsp       = 0;
        nsp_sci   = 0;
        nsp_cj4_e = 0;
        nsp_cj4   = 0;
        for (int cj4 = cj4_start; cj4 < cj4_end; cj4++)
        {
            nsp_cj4_p = nsp_cj4;
            /* Count the number of cluster pairs in this cj4 group */
            nsp_cj4   = 0;
            for (int p = 0; p < c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize; p++)
            {
                nsp_cj4 += (nbl->cj4[cj4].imei[0].imask >> p) & 1;
            }

            /* If adding the current cj4 with nsp_cj4 pairs gets us further
             * away from our target nsp_max, split the list before this cj4.
             */
            if (nsp > 0 && nsp_max - nsp < nsp + nsp_cj4 - nsp_max)
            {
                /* Split the list at cj4 */
                nbl->sci[sci].cj4_ind_end = cj4;
                /* Create a new sci entry */
                sci++;
                nbl->nsci++;
                if (nbl->nsci + 1 > nbl->sci_nalloc)
                {
                    nb_realloc_sci(nbl, nbl->nsci + 1);
                }
                nbl->sci[sci].sci           = nbl->sci[nbl->nsci-1].sci;
                nbl->sci[sci].shift         = nbl->sci[nbl->nsci-1].shift;
                nbl->sci[sci].cj4_ind_start = cj4;
                nsp_sci                     = nsp;
                nsp_cj4_e                   = nsp_cj4_p;
                nsp                         = 0;
            }
            nsp += nsp_cj4;
        }

        /* Put the remaining cj4's in the last sci entry */
        nbl->sci[sci].cj4_ind_end = cj4_end;

        /* Possibly balance out the last two sci's
         * by moving the last cj4 of the second last sci.
         */
        if (nsp_sci - nsp_cj4_e >= nsp + nsp_cj4_e)
        {
            nbl->sci[sci-1].cj4_ind_end--;
            nbl->sci[sci].cj4_ind_start--;
        }

        nbl->nsci++;
    }
}
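/* Numeric sketch of the split criterion above (made-up numbers): with
 * nsp_max = 40, nsp = 30 pairs accumulated and nsp_cj4 = 24 in the next
 * group, keeping the current split point leaves us 40 - 30 = 10 short,
 * while adding the group overshoots by 30 + 24 - 40 = 14, so the list
 * is split before this cj4.
 */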
/* Close this super/sub list i entry */
static void close_ci_entry_supersub(nbnxn_pairlist_t *nbl,
                                    int nsp_max_av,
                                    gmx_bool progBal, float nsp_tot_est,
                                    int thread, int nthread)
{
    /* All content of the new ci entry has already been filled correctly,
     * we only need to increase the count here (for non-empty lists).
     */
    int j4len = nbl->sci[nbl->nsci].cj4_ind_end - nbl->sci[nbl->nsci].cj4_ind_start;
    if (j4len > 0)
    {
        /* We can only have complete blocks of 4 j-entries in a list,
         * so round the count up before closing.
         */
        nbl->ncj4         = (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize;
        nbl->work->cj_ind = nbl->ncj4*c_nbnxnGpuJgroupSize;

        nbl->nsci++;

        if (nsp_max_av > 0)
        {
            /* Measure the size of the new entry and potentially split it */
            split_sci_entry(nbl, nsp_max_av, progBal, nsp_tot_est,
                            thread, nthread);
        }
    }
}
/* Syncs the working array before adding another grid pair to the list */
static void sync_work(nbnxn_pairlist_t *nbl)
{
    if (!nbl->bSimple)
    {
        nbl->work->cj_ind   = nbl->ncj4*c_nbnxnGpuJgroupSize;
        nbl->work->cj4_init = nbl->ncj4;
    }
}
/* Clears an nbnxn_pairlist_t data structure */
static void clear_pairlist(nbnxn_pairlist_t *nbl)
{
    nbl->nci           = 0;
    nbl->nsci          = 0;
    nbl->ncj           = 0;
    nbl->ncjInUse      = 0;
    nbl->ncj4          = 0;
    nbl->nci_tot       = 0;
    nbl->nciOuter      = -1;
    nbl->nexcl         = 1;

    nbl->work->ncj_noq = 0;
    nbl->work->ncj_hlj = 0;
}
/* Clears a group scheme pair list */
static void clear_pairlist_fep(t_nblist *nl)
{
    nl->nri = 0;
    nl->nrj = 0;
    if (nl->jindex == nullptr)
    {
        snew(nl->jindex, 1);
    }
    nl->jindex[0] = 0;
}
/* Sets a simple list i-cell bounding box, including PBC shift */
static inline void set_icell_bb_simple(gmx::ArrayRef<const nbnxn_bb_t> bb,
                                       int ci,
                                       real shx, real shy, real shz,
                                       nbnxn_bb_t *bb_ci)
{
    bb_ci->lower[BB_X] = bb[ci].lower[BB_X] + shx;
    bb_ci->lower[BB_Y] = bb[ci].lower[BB_Y] + shy;
    bb_ci->lower[BB_Z] = bb[ci].lower[BB_Z] + shz;
    bb_ci->upper[BB_X] = bb[ci].upper[BB_X] + shx;
    bb_ci->upper[BB_Y] = bb[ci].upper[BB_Y] + shy;
    bb_ci->upper[BB_Z] = bb[ci].upper[BB_Z] + shz;
}
/* Sets a super-cell and sub cell bounding boxes, including PBC shift */
static void set_icell_bbxxxx_supersub(gmx::ArrayRef<const float> bb,
                                      int ci,
                                      real shx, real shy, real shz,
                                      float *bb_ci)
{
    int ia = ci*(c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX;
    for (int m = 0; m < (c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX; m += NNBSBB_XXXX)
    {
        for (int i = 0; i < STRIDE_PBB; i++)
        {
            bb_ci[m+0*STRIDE_PBB+i] = bb[ia+m+0*STRIDE_PBB+i] + shx;
            bb_ci[m+1*STRIDE_PBB+i] = bb[ia+m+1*STRIDE_PBB+i] + shy;
            bb_ci[m+2*STRIDE_PBB+i] = bb[ia+m+2*STRIDE_PBB+i] + shz;
            bb_ci[m+3*STRIDE_PBB+i] = bb[ia+m+3*STRIDE_PBB+i] + shx;
            bb_ci[m+4*STRIDE_PBB+i] = bb[ia+m+4*STRIDE_PBB+i] + shy;
            bb_ci[m+5*STRIDE_PBB+i] = bb[ia+m+5*STRIDE_PBB+i] + shz;
        }
    }
}
/* Sets a super-cell and sub cell bounding boxes, including PBC shift */
gmx_unused static void set_icell_bb_supersub(gmx::ArrayRef<const nbnxn_bb_t> bb,
                                             int ci,
                                             real shx, real shy, real shz,
                                             nbnxn_bb_t *bb_ci)
{
    for (int i = 0; i < c_gpuNumClusterPerCell; i++)
    {
        set_icell_bb_simple(bb, ci*c_gpuNumClusterPerCell + i,
                            shx, shy, shz,
                            &bb_ci[i]);
    }
}
/* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
static void icell_set_x_simple(int ci,
                               real shx, real shy, real shz,
                               int stride, const real *x,
                               nbnxn_list_work_t *work)
{
    int ia = ci*NBNXN_CPU_CLUSTER_I_SIZE;

    for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE; i++)
    {
        work->x_ci[i*STRIDE_XYZ+XX] = x[(ia+i)*stride+XX] + shx;
        work->x_ci[i*STRIDE_XYZ+YY] = x[(ia+i)*stride+YY] + shy;
        work->x_ci[i*STRIDE_XYZ+ZZ] = x[(ia+i)*stride+ZZ] + shz;
    }
}
/* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
static void icell_set_x_supersub(int ci,
                                 real shx, real shy, real shz,
                                 int stride, const real *x,
                                 nbnxn_list_work_t *work)
{
#if !GMX_SIMD4_HAVE_REAL

    real * x_ci = work->x_ci;

    int    ia   = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize;
    for (int i = 0; i < c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize; i++)
    {
        x_ci[i*DIM + XX] = x[(ia+i)*stride + XX] + shx;
        x_ci[i*DIM + YY] = x[(ia+i)*stride + YY] + shy;
        x_ci[i*DIM + ZZ] = x[(ia+i)*stride + ZZ] + shz;
    }

#else /* !GMX_SIMD4_HAVE_REAL */

    real * x_ci = work->x_ci_simd;

    for (int si = 0; si < c_gpuNumClusterPerCell; si++)
    {
        for (int i = 0; i < c_nbnxnGpuClusterSize; i += GMX_SIMD4_WIDTH)
        {
            int io = si*c_nbnxnGpuClusterSize + i;
            int ia = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize + io;
            for (int j = 0; j < GMX_SIMD4_WIDTH; j++)
            {
                x_ci[io*DIM + j + XX*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + XX] + shx;
                x_ci[io*DIM + j + YY*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + YY] + shy;
                x_ci[io*DIM + j + ZZ*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + ZZ] + shz;
            }
        }
    }

#endif /* !GMX_SIMD4_HAVE_REAL */
}
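/* Layout sketch for the SIMD4 branch above, assuming GMX_SIMD4_WIDTH = 4:
 * each group of 4 atoms is stored as xxxx yyyy zzzz instead of xyz xyz ...,
 * so a 4-wide load starting at x_ci[io*DIM + XX*4] fetches the x
 * coordinates of atoms io..io+3 in one aligned load. The width value is
 * illustrative; the code works for any supported SIMD4 width.
 */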
static real minimum_subgrid_size_xy(const nbnxn_grid_t *grid)
{
    if (grid->bSimple)
    {
        return std::min(grid->sx, grid->sy);
    }
    else
    {
        return std::min(grid->sx/c_gpuNumClusterPerCellX,
                        grid->sy/c_gpuNumClusterPerCellY);
    }
}
static real effective_buffer_1x1_vs_MxN(const nbnxn_grid_t *gridi,
                                        const nbnxn_grid_t *gridj)
{
    const real eff_1x1_buffer_fac_overest = 0.1;

    /* Determine an atom-pair list cut-off buffer size for atom pairs,
     * to be added to rlist (including buffer) used for MxN.
     * This is for converting an MxN list to a 1x1 list. This means we can't
     * use the normal buffer estimate, as we have an MxN list in which
     * some atom pairs beyond rlist are missing. We want to capture
     * the beneficial effect of buffering by extra pairs just outside rlist,
     * while removing the useless pairs that are further away from rlist.
     * (Also the buffer could have been set manually not using the estimate.)
     * This buffer size is an overestimate.
     * We add 10% of the smallest grid sub-cell dimensions.
     * Note that the z-size differs per cell and we don't use this,
     * so we overestimate.
     * With PME, the 10% value gives a buffer that is somewhat larger
     * than the effective buffer with a tolerance of 0.005 kJ/mol/ps.
     * Smaller tolerances or using RF lead to a smaller effective buffer,
     * so 10% gives a safe overestimate.
     */
    return eff_1x1_buffer_fac_overest*(minimum_subgrid_size_xy(gridi) +
                                       minimum_subgrid_size_xy(gridj));
}
/* Clusters at the cut-off only increase rlist by 60% of their size */
static real nbnxn_rlist_inc_outside_fac = 0.6;

/* Due to the cluster size the effective pair-list is longer than
 * that of a simple atom pair-list. This function gives the extra distance.
 */
real nbnxn_get_rlist_effective_inc(int cluster_size_j, real atom_density)
{
    int  cluster_size_i;
    real vol_inc_i, vol_inc_j;

    /* We should get this from the setup, but currently it's the same for
     * all setups, including GPUs.
     */
    cluster_size_i = NBNXN_CPU_CLUSTER_I_SIZE;

    vol_inc_i = (cluster_size_i - 1)/atom_density;
    vol_inc_j = (cluster_size_j - 1)/atom_density;

    return nbnxn_rlist_inc_outside_fac*std::cbrt(vol_inc_i + vol_inc_j);
}
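/* Worked example with made-up numbers: for cluster_size_j = 4 and an atom
 * density of 100 atoms/nm^3, vol_inc_i = vol_inc_j = 3/100 nm^3, so the
 * increment is 0.6*cbrt(0.06) ~= 0.23 nm added on top of rlist.
 */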
/* Estimates the interaction volume^2 for non-local interactions */
static real nonlocal_vol2(const struct gmx_domdec_zones_t *zones, rvec ls, real r)
{
    real cl, ca, za;
    real vold_est;
    real vol2_est_tot;

    vol2_est_tot = 0;

    /* Here we simply add up the interaction volume^2 of the 1, 2 or 3
     * one-dimensional decomposition zones that are not the home zone.
     * As these volumes are not additive, this is an overestimate, but it
     * would only be significant in the limit of small cells, where we
     * anyhow need to split the lists into as small parts as possible.
     */

    for (int z = 0; z < zones->n; z++)
    {
        if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
        {
            cl = 0;
            ca = 1;
            za = 1;
            for (int d = 0; d < DIM; d++)
            {
                if (zones->shift[z][d] == 0)
                {
                    cl += 0.5*ls[d];
                    ca *= ls[d];
                    za *= zones->size[z].x1[d] - zones->size[z].x0[d];
                }
            }

            /* 4 octants of a sphere */
            vold_est  = 0.25*M_PI*r*r*r*r;
            /* 4 quarter pie slices on the edges */
            vold_est += 4*cl*M_PI/6.0*r*r*r;
            /* One rectangular volume on a face */
            vold_est += ca*0.5*r*r;

            vol2_est_tot += vold_est*za;
        }
    }

    return vol2_est_tot;
}
/* Estimates the average size of a full j-list for super/sub setup */
static void get_nsubpair_target(const nbnxn_search_t  nbs,
                                int                   iloc,
                                real                  rlist,
                                int                   min_ci_balanced,
                                int                  *nsubpair_target,
                                float                *nsubpair_tot_est)
{
    /* The target value of 36 seems to be the optimum for Kepler.
     * Maxwell is less sensitive to the exact value.
     */
    const int           nsubpair_target_min = 36;
    const nbnxn_grid_t *grid;
    rvec                ls;
    real                r_eff_sup, vol_est, nsp_est, nsp_est_nl;

    grid = &nbs->grid[0];

    /* We don't need to balance list sizes if:
     * - We didn't request balancing.
     * - The number of grid cells >= the number of lists requested,
     *   since we will always generate at least #cells lists.
     * - We don't have any cells, since then there won't be any lists.
     */
    if (min_ci_balanced <= 0 || grid->nc >= min_ci_balanced || grid->nc == 0)
    {
        /* nsubpair_target==0 signals no balancing */
        *nsubpair_target  = 0;
        *nsubpair_tot_est = 0;

        return;
    }

    ls[XX] = (grid->c1[XX] - grid->c0[XX])/(grid->ncx*c_gpuNumClusterPerCellX);
    ls[YY] = (grid->c1[YY] - grid->c0[YY])/(grid->ncy*c_gpuNumClusterPerCellY);
    ls[ZZ] = grid->na_c/(grid->atom_density*ls[XX]*ls[YY]);

    /* The average length of the diagonal of a sub cell */
    real diagonal = std::sqrt(ls[XX]*ls[XX] + ls[YY]*ls[YY] + ls[ZZ]*ls[ZZ]);

    /* The formulas below are a heuristic estimate of the average nsj per si */
    r_eff_sup = rlist + nbnxn_rlist_inc_outside_fac*gmx::square((grid->na_c - 1.0)/grid->na_c)*0.5*diagonal;

    if (!nbs->DomDec || nbs->zones->n == 1)
    {
        nsp_est_nl = 0;
    }
    else
    {
        nsp_est_nl =
            gmx::square(grid->atom_density/grid->na_c)*
            nonlocal_vol2(nbs->zones, ls, r_eff_sup);
    }

    if (LOCAL_I(iloc))
    {
        /* Sub-cell interacts with itself */
        vol_est  = ls[XX]*ls[YY]*ls[ZZ];
        /* 6/2 rectangular volume on the faces */
        vol_est += (ls[XX]*ls[YY] + ls[XX]*ls[ZZ] + ls[YY]*ls[ZZ])*r_eff_sup;
        /* 12/2 quarter pie slices on the edges */
        vol_est += 2*(ls[XX] + ls[YY] + ls[ZZ])*0.25*M_PI*gmx::square(r_eff_sup);
        /* 4 octants of a sphere */
        vol_est += 0.5*4.0/3.0*M_PI*gmx::power3(r_eff_sup);

        /* Estimate the number of cluster pairs as the local number of
         * clusters times the volume they interact with times the density.
         */
        nsp_est = grid->nsubc_tot*vol_est*grid->atom_density/grid->na_c;

        /* Subtract the non-local pair count */
        nsp_est -= nsp_est_nl;

        /* For small cut-offs nsp_est will be an underestimate.
         * With DD nsp_est_nl is an overestimate so nsp_est can get negative.
         * So to avoid too small or negative nsp_est we set a minimum of
         * all cells interacting with all 3^3 direct neighbors (3^3-1)/2+1=14.
         * This might be a slight overestimate for small non-periodic groups of
         * atoms as will occur for a local domain with DD, but for small
         * groups of atoms we'll anyhow be limited by nsubpair_target_min,
         * so this overestimation will not matter.
         */
        nsp_est = std::max(nsp_est, grid->nsubc_tot*static_cast<real>(14));

        if (debug)
        {
            fprintf(debug, "nsp_est local %5.1f non-local %5.1f\n",
                    nsp_est, nsp_est_nl);
        }
    }
    else
    {
        nsp_est = nsp_est_nl;
    }

    /* Thus the (average) maximum j-list size should be as follows.
     * Since there is overhead, we shouldn't make the lists too small
     * (and we can't chop up j-groups) so we use a minimum target size of 36.
     */
    *nsubpair_target  = std::max(nsubpair_target_min,
                                 static_cast<int>(nsp_est/min_ci_balanced + 0.5));
    *nsubpair_tot_est = static_cast<int>(nsp_est);

    if (debug)
    {
        fprintf(debug, "nbl nsp estimate %.1f, nsubpair_target %d\n",
                nsp_est, *nsubpair_target);
    }
}
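/* Target sketch with made-up numbers: nsp_est = 7200 cluster pairs and
 * min_ci_balanced = 120 give a raw target of 60, which exceeds
 * nsubpair_target_min = 36, so each list aims for ~60 cluster pairs;
 * with nsp_est = 3000 the minimum of 36 would apply instead.
 */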
/* Debug list print function */
static void print_nblist_ci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
{
    for (int i = 0; i < nbl->nci; i++)
    {
        fprintf(fp, "ci %4d  shift %2d  ncj %3d\n",
                nbl->ci[i].ci, nbl->ci[i].shift,
                nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start);

        for (int j = nbl->ci[i].cj_ind_start; j < nbl->ci[i].cj_ind_end; j++)
        {
            fprintf(fp, "  cj %5d  imask %x\n",
                    nbl->cj[j].cj,
                    nbl->cj[j].excl);
        }
    }
}
/* Debug list print function */
static void print_nblist_sci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
{
    for (int i = 0; i < nbl->nsci; i++)
    {
        fprintf(fp, "ci %4d  shift %2d  ncj4 %2d\n",
                nbl->sci[i].sci, nbl->sci[i].shift,
                nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start);

        int ncp = 0;
        for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
        {
            for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
            {
                fprintf(fp, "  sj %5d  imask %x\n",
                        nbl->cj4[j4].cj[j],
                        nbl->cj4[j4].imei[0].imask);
                for (int si = 0; si < c_gpuNumClusterPerCell; si++)
                {
                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))
                    {
                        ncp++;
                    }
                }
            }
        }
        fprintf(fp, "ci %4d  shift %2d  ncj4 %2d ncp %3d\n",
                nbl->sci[i].sci, nbl->sci[i].shift,
                nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start,
                ncp);
    }
}
/* Combine pair lists *nbl generated on multiple threads into nblc */
static void combine_nblists(int nnbl, nbnxn_pairlist_t **nbl,
                            nbnxn_pairlist_t *nblc)
{
    int nsci, ncj4, nexcl;

    if (nblc->bSimple)
    {
        gmx_incons("combine_nblists does not support simple lists");
    }

    nsci  = nblc->nsci;
    ncj4  = nblc->ncj4;
    nexcl = nblc->nexcl;
    for (int i = 0; i < nnbl; i++)
    {
        nsci  += nbl[i]->nsci;
        ncj4  += nbl[i]->ncj4;
        nexcl += nbl[i]->nexcl;
    }

    if (nsci > nblc->sci_nalloc)
    {
        nb_realloc_sci(nblc, nsci);
    }
    if (ncj4 > nblc->cj4_nalloc)
    {
        nblc->cj4_nalloc = over_alloc_small(ncj4);
        nbnxn_realloc_void((void **)&nblc->cj4,
                           nblc->ncj4*sizeof(*nblc->cj4),
                           nblc->cj4_nalloc*sizeof(*nblc->cj4),
                           nblc->alloc, nblc->free);
    }
    if (nexcl > nblc->excl_nalloc)
    {
        nblc->excl_nalloc = over_alloc_small(nexcl);
        nbnxn_realloc_void((void **)&nblc->excl,
                           nblc->nexcl*sizeof(*nblc->excl),
                           nblc->excl_nalloc*sizeof(*nblc->excl),
                           nblc->alloc, nblc->free);
    }

    /* Each thread should copy its own data to the combined arrays,
     * as otherwise data will go back and forth between different caches.
     */
#if GMX_OPENMP && !(defined __clang_analyzer__)
    // cppcheck-suppress unreadVariable
    int nthreads = gmx_omp_nthreads_get(emntPairsearch);
#endif

#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int n = 0; n < nnbl; n++)
    {
        try
        {
            int                     sci_offset;
            int                     cj4_offset;
            int                     excl_offset;
            const nbnxn_pairlist_t *nbli;

            /* Determine the offset in the combined data for our thread */
            sci_offset  = nblc->nsci;
            cj4_offset  = nblc->ncj4;
            excl_offset = nblc->nexcl;

            for (int i = 0; i < n; i++)
            {
                sci_offset  += nbl[i]->nsci;
                cj4_offset  += nbl[i]->ncj4;
                excl_offset += nbl[i]->nexcl;
            }

            nbli = nbl[n];

            for (int i = 0; i < nbli->nsci; i++)
            {
                nblc->sci[sci_offset+i]                = nbli->sci[i];
                nblc->sci[sci_offset+i].cj4_ind_start += cj4_offset;
                nblc->sci[sci_offset+i].cj4_ind_end   += cj4_offset;
            }

            for (int j4 = 0; j4 < nbli->ncj4; j4++)
            {
                nblc->cj4[cj4_offset+j4]                   = nbli->cj4[j4];
                nblc->cj4[cj4_offset+j4].imei[0].excl_ind += excl_offset;
                nblc->cj4[cj4_offset+j4].imei[1].excl_ind += excl_offset;
            }

            for (int j4 = 0; j4 < nbli->nexcl; j4++)
            {
                nblc->excl[excl_offset+j4] = nbli->excl[j4];
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    for (int n = 0; n < nnbl; n++)
    {
        nblc->nsci    += nbl[n]->nsci;
        nblc->ncj4    += nbl[n]->ncj4;
        nblc->nci_tot += nbl[n]->nci_tot;
        nblc->nexcl   += nbl[n]->nexcl;
    }
}
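/* Offset sketch for the parallel copy above (made-up sizes): with three
 * thread-local lists holding 5, 3 and 4 sci entries, thread 2 copies its
 * entries starting at sci_offset = nblc->nsci + 5 + 3, so every thread
 * writes a disjoint range and no synchronization is needed in the loop.
 */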
static void balance_fep_lists(const nbnxn_search_t  nbs,
                              nbnxn_pairlist_set_t *nbl_lists)
{
    int       nnbl;
    int       nri_tot, nrj_tot, nrj_target;
    int       th_dest;
    t_nblist *nbld;

    nnbl = nbl_lists->nnbl;

    if (nnbl == 1)
    {
        /* Nothing to balance */
        return;
    }

    /* Count the total i-lists and pairs */
    nri_tot = 0;
    nrj_tot = 0;
    for (int th = 0; th < nnbl; th++)
    {
        nri_tot += nbl_lists->nbl_fep[th]->nri;
        nrj_tot += nbl_lists->nbl_fep[th]->nrj;
    }

    nrj_target = (nrj_tot + nnbl - 1)/nnbl;

    assert(gmx_omp_nthreads_get(emntNonbonded) == nnbl);

#pragma omp parallel for schedule(static) num_threads(nnbl)
    for (int th = 0; th < nnbl; th++)
    {
        try
        {
            t_nblist *nbl;

            nbl = nbs->work[th].nbl_fep;

            /* Note that here we allocate for the total size, instead of
             * a per-thread estimate (which is hard to obtain).
             */
            if (nri_tot > nbl->maxnri)
            {
                nbl->maxnri = over_alloc_large(nri_tot);
                reallocate_nblist(nbl);
            }
            if (nri_tot > nbl->maxnri || nrj_tot > nbl->maxnrj)
            {
                nbl->maxnrj = over_alloc_small(nrj_tot);
                srenew(nbl->jjnr, nbl->maxnrj);
                srenew(nbl->excl_fep, nbl->maxnrj);
            }

            clear_pairlist_fep(nbl);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    /* Loop over the source lists and assign and copy i-entries */
    th_dest = 0;
    nbld    = nbs->work[th_dest].nbl_fep;
    for (int th = 0; th < nnbl; th++)
    {
        t_nblist *nbls;

        nbls = nbl_lists->nbl_fep[th];

        for (int i = 0; i < nbls->nri; i++)
        {
            int nrj;

            /* The number of pairs in this i-entry */
            nrj = nbls->jindex[i+1] - nbls->jindex[i];

            /* Decide if list th_dest is too large and we should proceed
             * to the next destination list.
             */
            if (th_dest + 1 < nnbl && nbld->nrj > 0 &&
                nbld->nrj + nrj - nrj_target > nrj_target - nbld->nrj)
            {
                th_dest++;
                nbld = nbs->work[th_dest].nbl_fep;
            }

            nbld->iinr[nbld->nri]  = nbls->iinr[i];
            nbld->gid[nbld->nri]   = nbls->gid[i];
            nbld->shift[nbld->nri] = nbls->shift[i];

            for (int j = nbls->jindex[i]; j < nbls->jindex[i+1]; j++)
            {
                nbld->jjnr[nbld->nrj]     = nbls->jjnr[j];
                nbld->excl_fep[nbld->nrj] = nbls->excl_fep[j];
                nbld->nrj++;
            }
            nbld->nri++;
            nbld->jindex[nbld->nri] = nbld->nrj;
        }
    }

    /* Swap the list pointers */
    for (int th = 0; th < nnbl; th++)
    {
        t_nblist *nbl_tmp;

        nbl_tmp                = nbl_lists->nbl_fep[th];
        nbl_lists->nbl_fep[th] = nbs->work[th].nbl_fep;
        nbs->work[th].nbl_fep  = nbl_tmp;

        if (debug)
        {
            fprintf(debug, "nbl_fep[%d] nri %4d nrj %4d\n",
                    th,
                    nbl_lists->nbl_fep[th]->nri,
                    nbl_lists->nbl_fep[th]->nrj);
        }
    }
}
/* Returns the next ci to be processed by our thread */
static gmx_bool next_ci(const nbnxn_grid_t *grid,
                        int nth, int ci_block,
                        int *ci_x, int *ci_y,
                        int *ci_b, int *ci)
{
    (*ci_b)++;
    (*ci)++;

    if (*ci_b == ci_block)
    {
        /* Jump to the next block assigned to this task */
        *ci   += (nth - 1)*ci_block;
        *ci_b  = 0;
    }

    if (*ci >= grid->nc)
    {
        return FALSE;
    }

    while (*ci >= grid->cxy_ind[*ci_x*grid->ncy + *ci_y + 1])
    {
        *ci_y += 1;
        if (*ci_y == grid->ncy)
        {
            *ci_x += 1;
            *ci_y  = 0;
        }
    }

    return TRUE;
}
/* Returns the distance^2 for which we put cell pairs in the list
 * without checking atom pair distances. This is usually < rlist^2.
 */
static float boundingbox_only_distance2(const nbnxn_grid_t *gridi,
                                        const nbnxn_grid_t *gridj,
                                        real                rlist,
                                        gmx_bool            simple)
{
    /* If the distance between two sub-cell bounding boxes is less
     * than this distance, do not check the distance between
     * all particle pairs in the sub-cell, since then it is likely
     * that the box pair has atom pairs within the cut-off.
     * We use the nblist cut-off minus 0.5 times the average x/y diagonal
     * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
     * Using more than 0.5 gains at most 0.5%.
     * If forces are calculated more than twice, the performance gain
     * in the force calculation outweighs the cost of checking.
     * Note that with subcell lists, the atom-pair distance check
     * is only performed when only 1 out of 8 sub-cells is within range,
     * this is because the GPU is much faster than the CPU.
     */
    real bbx, bby;
    real rbb2;

    bbx = 0.5*(gridi->sx + gridj->sx);
    bby = 0.5*(gridi->sy + gridj->sy);
    if (!simple)
    {
        bbx /= c_gpuNumClusterPerCellX;
        bby /= c_gpuNumClusterPerCellY;
    }

    rbb2 = std::max(0.0, rlist - 0.5*std::sqrt(bbx*bbx + bby*bby));
    rbb2 = rbb2 * rbb2;

#if !GMX_DOUBLE
    return rbb2;
#else
    return (float)((1+GMX_FLOAT_EPS)*rbb2);
#endif
}
static int get_ci_block_size(const nbnxn_grid_t *gridi,
                             gmx_bool bDomDec, int nth)
{
    const int ci_block_enum      = 5;
    const int ci_block_denom     = 11;
    const int ci_block_min_atoms = 16;
    int       ci_block;

    /* Here we decide how to distribute the blocks over the threads.
     * We use prime numbers to try to avoid that the grid size becomes
     * a multiple of the number of threads, which would lead to some
     * threads getting "inner" pairs and others getting boundary pairs,
     * which in turn leads to load imbalance between threads.
     * Set the block size as 5/11/ntask times the average number of cells
     * in a y,z slab. This should ensure a quite uniform distribution
     * of the grid parts of the different threads along all three grid
     * zone boundaries with 3D domain decomposition. At the same time
     * the blocks will not become too small.
     */
    ci_block = (gridi->nc*ci_block_enum)/(ci_block_denom*gridi->ncx*nth);

    /* Ensure the blocks are not too small: avoids cache invalidation */
    if (ci_block*gridi->na_sc < ci_block_min_atoms)
    {
        ci_block = (ci_block_min_atoms + gridi->na_sc - 1)/gridi->na_sc;
    }

    /* Without domain decomposition
     * or with less than 3 blocks per task, divide in nth blocks.
     */
    if (!bDomDec || nth*3*ci_block > gridi->nc)
    {
        ci_block = (gridi->nc + nth - 1)/nth;
    }

    if (ci_block > 1 && (nth - 1)*ci_block >= gridi->nc)
    {
        /* Some threads have no work. Although reducing the block size
         * does not decrease the block count on the first few threads,
         * with GPUs better mixing of "upper" cells that have more empty
         * clusters results in a somewhat lower max load over all threads.
         * Without GPUs the regime of so few atoms per thread is less
         * performance relevant, but with 8-wide SIMD the same reasoning
         * applies, since the pair list uses 4 i-atom "sub-clusters".
         */
        ci_block--;
    }

    return ci_block;
}
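/* Example with made-up numbers: a grid of nc = 880 cells with ncx = 10
 * and nth = 8 threads gives ci_block = (880*5)/(11*10*8) = 5 cells per
 * block, so each thread walks the grid in strides of 8*5 = 40 cells.
 */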
/* Returns the number of bits to right-shift a cluster index to obtain
 * the corresponding force buffer flag index.
 */
static int getBufferFlagShift(int numAtomsPerCluster)
{
    int bufferFlagShift = 0;
    while ((numAtomsPerCluster << bufferFlagShift) < NBNXN_BUFFERFLAG_SIZE)
    {
        bufferFlagShift++;
    }

    return bufferFlagShift;
}
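/* Example, assuming NBNXN_BUFFERFLAG_SIZE = 16: a cluster of 4 atoms
 * needs a shift of 2 (4 << 2 == 16), so cluster indices 0-3 all map to
 * flag block 0, indices 4-7 to block 1, and so on.
 */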
/* Generates the part of pair-list nbl assigned to our thread */
static void nbnxn_make_pairlist_part(const nbnxn_search_t  nbs,
                                     const nbnxn_grid_t   *gridi,
                                     const nbnxn_grid_t   *gridj,
                                     nbnxn_search_work_t  *work,
                                     const nbnxn_atomdata_t *nbat,
                                     const t_blocka       &exclusions,
                                     real                  rlist,
                                     int                   nb_kernel_type,
                                     int                   ci_block,
                                     gmx_bool              bFBufferFlag,
                                     int                   nsubpair_max,
                                     gmx_bool              progBal,
                                     float                 nsubpair_tot_est,
                                     int                   th,
                                     int                   nth,
                                     nbnxn_pairlist_t     *nbl,
                                     t_nblist             *nbl_fep)
{
    int               na_cj_2log;
    matrix            box;
    real              rlist2, rl_fep2 = 0;
    float             rbb2;
    int               ci_b, ci, ci_x, ci_y, ci_xy, cj;
    ivec              shp;
    int               shift;
    real              shx, shy, shz;
    real              bx0, bx1, by0, by1, bz0, bz1;
    real              bz1_frac;
    real              d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
    int               cxf, cxl, cyf, cyf_x, cyl;
    int               numDistanceChecks;
    int               gridi_flag_shift = 0, gridj_flag_shift = 0;
    gmx_bitmask_t    *gridj_flag       = nullptr;
    int               ncj_old_i, ncj_old_j;

    nbs_cycle_start(&work->cc[enbsCCsearch]);

    if (gridj->bSimple != nbl->bSimple)
    {
        gmx_incons("Grid incompatible with pair-list");
    }

    sync_work(nbl);
    nbl->na_sc = gridj->na_sc;
    nbl->na_ci = gridj->na_c;
    nbl->na_cj = nbnxn_kernel_to_cluster_j_size(nb_kernel_type);
    na_cj_2log = get_2log(nbl->na_cj);

    nbl->rlist = rlist;

    if (bFBufferFlag)
    {
        /* Determine conversion of clusters to flag blocks */
        gridi_flag_shift = getBufferFlagShift(nbl->na_ci);
        gridj_flag_shift = getBufferFlagShift(nbl->na_cj);

        gridj_flag       = work->buffer_flags.flag;
    }

    copy_mat(nbs->box, box);

    rlist2 = nbl->rlist*nbl->rlist;

    if (nbs->bFEP && !nbl->bSimple)
    {
        /* Determine an atom-pair list cut-off distance for FEP atom pairs.
         * We should not simply use rlist, since then we would not have
         * the small, effective buffering of the NxN lists.
         * The buffer is an overestimate, but the resulting cost for pairs
         * beyond rlist is negligible compared to the FEP pairs within rlist.
         */
        rl_fep2 = nbl->rlist + effective_buffer_1x1_vs_MxN(gridi, gridj);

        if (debug)
        {
            fprintf(debug, "nbl_fep atom-pair rlist %f\n", rl_fep2);
        }
        rl_fep2 = rl_fep2*rl_fep2;
    }

    rbb2 = boundingbox_only_distance2(gridi, gridj, nbl->rlist, nbl->bSimple);

    if (debug)
    {
        fprintf(debug, "nbl bounding box only distance %f\n", std::sqrt(rbb2));
    }

    /* Set the shift range */
    for (int d = 0; d < DIM; d++)
    {
        /* Check if we need periodicity shifts.
         * Without PBC or with domain decomposition we don't need them.
         */
        if (d >= ePBC2npbcdim(nbs->ePBC) || nbs->dd_dim[d])
        {
            shp[d] = 0;
        }
        else
        {
            if (d == XX &&
                box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < std::sqrt(rlist2))
            {
                shp[d] = 2;
            }
            else
            {
                shp[d] = 1;
            }
        }
    }

    gmx::ArrayRef<const nbnxn_bb_t> bb_i;
#if NBNXN_BBXXXX
    gmx::ArrayRef<const float>      pbb_i;
    if (gridi->bSimple)
    {
        bb_i  = gridi->bb;
    }
    else
    {
        pbb_i = gridi->pbb;
    }
#else
    /* We use the normal bounding box format for both grid types */
    bb_i  = gridi->bb;
#endif
    gmx::ArrayRef<const float> bbcz_i  = gridi->bbcz;
    gmx::ArrayRef<const int>   flags_i = gridi->flags;
    gmx::ArrayRef<const float> bbcz_j  = gridj->bbcz;
    int                        cell0_i = gridi->cell0;

    if (debug)
    {
        fprintf(debug, "nbl nc_i %d col.av. %.1f ci_block %d\n",
                gridi->nc, gridi->nc/(double)(gridi->ncx*gridi->ncy), ci_block);
    }

    numDistanceChecks = 0;

    /* Initialize ci_b and ci to 1 before where we want them to start,
     * as they will both be incremented in next_ci.
     */
    ci_b = -1;
    ci   = th*ci_block - 1;
    ci_x = 0;
    ci_y = 0;
    while (next_ci(gridi, nth, ci_block, &ci_x, &ci_y, &ci_b, &ci))
    {
        if (nbl->bSimple && flags_i[ci] == 0)
        {
            continue;
        }

        ncj_old_i = nbl->ncj;

        d2cx = 0;
        if (gridj != gridi && shp[XX] == 0)
        {
            if (nbl->bSimple)
            {
                bx1 = bb_i[ci].upper[BB_X];
            }
            else
            {
                bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx;
            }
            if (bx1 < gridj->c0[XX])
            {
                d2cx = gmx::square(gridj->c0[XX] - bx1);

                if (d2cx >= rlist2)
                {
                    continue;
                }
            }
        }

        ci_xy = ci_x*gridi->ncy + ci_y;

        /* Loop over shift vectors in three dimensions */
        for (int tz = -shp[ZZ]; tz <= shp[ZZ]; tz++)
        {
            shz = tz*box[ZZ][ZZ];

            bz0 = bbcz_i[ci*NNBSBB_D  ] + shz;
            bz1 = bbcz_i[ci*NNBSBB_D+1] + shz;

            if (tz == 0)
            {
                d2z = 0;
            }
            else if (tz < 0)
            {
                d2z = gmx::square(bz1);
            }
            else
            {
                d2z = gmx::square(bz0 - box[ZZ][ZZ]);
            }

            d2z_cx = d2z + d2cx;

            if (d2z_cx >= rlist2)
            {
                continue;
            }

            bz1_frac = bz1/(gridi->cxy_ind[ci_xy+1] - gridi->cxy_ind[ci_xy]);
            if (bz1_frac < 0)
            {
                bz1_frac = 0;
            }
            /* The check with bz1_frac close to or larger than 1 comes later */

            for (int ty = -shp[YY]; ty <= shp[YY]; ty++)
            {
                shy = ty*box[YY][YY] + tz*box[ZZ][YY];

                if (nbl->bSimple)
                {
                    by0 = bb_i[ci].lower[BB_Y] + shy;
                    by1 = bb_i[ci].upper[BB_Y] + shy;
                }
                else
                {
                    by0 = gridi->c0[YY] + (ci_y  )*gridi->sy + shy;
                    by1 = gridi->c0[YY] + (ci_y+1)*gridi->sy + shy;
                }

                get_cell_range(by0, by1,
                               gridj->ncy, gridj->c0[YY], gridj->sy, gridj->inv_sy,
                               d2z_cx, rlist2,
                               &cyf, &cyl);

                if (cyf > cyl)
                {
                    continue;
                }

                d2z_cy = d2z;
                if (by1 < gridj->c0[YY])
                {
                    d2z_cy += gmx::square(gridj->c0[YY] - by1);
                }
                else if (by0 > gridj->c1[YY])
                {
                    d2z_cy += gmx::square(by0 - gridj->c1[YY]);
                }

                for (int tx = -shp[XX]; tx <= shp[XX]; tx++)
                {
                    shift = XYZ2IS(tx, ty, tz);

                    if (c_pbcShiftBackward && gridi == gridj && shift > CENTRAL)
                    {
                        continue;
                    }

                    shx = tx*box[XX][XX] + ty*box[YY][XX] + tz*box[ZZ][XX];

                    if (nbl->bSimple)
                    {
                        bx0 = bb_i[ci].lower[BB_X] + shx;
                        bx1 = bb_i[ci].upper[BB_X] + shx;
                    }
                    else
                    {
                        bx0 = gridi->c0[XX] + (ci_x  )*gridi->sx + shx;
                        bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx + shx;
                    }

                    get_cell_range(bx0, bx1,
                                   gridj->ncx, gridj->c0[XX], gridj->sx, gridj->inv_sx,
                                   d2z_cy, rlist2,
                                   &cxf, &cxl);

                    if (cxf > cxl)
                    {
                        continue;
                    }

                    if (nbl->bSimple)
                    {
                        new_ci_entry(nbl, cell0_i+ci, shift, flags_i[ci]);
                    }
                    else
                    {
                        new_sci_entry(nbl, cell0_i+ci, shift);
                    }

                    if ((!c_pbcShiftBackward || (shift == CENTRAL &&
                                                 gridi == gridj)) &&
                        cxf < ci_x)
                    {
                        /* Leave the pairs with i > j.
                         * x is the major index, so skip half of it.
                         */
                        cxf = ci_x;
                    }

                    if (nbl->bSimple)
                    {
                        set_icell_bb_simple(bb_i, ci, shx, shy, shz,
                                            nbl->work->bb_ci);
                    }
                    else
                    {
#if NBNXN_BBXXXX
                        set_icell_bbxxxx_supersub(pbb_i, ci, shx, shy, shz,
                                                  nbl->work->pbb_ci);
#else
                        set_icell_bb_supersub(bb_i, ci, shx, shy, shz,
                                              nbl->work->bb_ci);
#endif
                    }

                    nbs->icell_set_x(cell0_i+ci, shx, shy, shz,
                                     nbat->xstride, nbat->x,
                                     nbl->work);

                    for (int cx = cxf; cx <= cxl; cx++)
                    {
                        d2zx = d2z;
                        if (gridj->c0[XX] + cx*gridj->sx > bx1)
                        {
                            d2zx += gmx::square(gridj->c0[XX] + cx*gridj->sx - bx1);
                        }
                        else if (gridj->c0[XX] + (cx+1)*gridj->sx < bx0)
                        {
                            d2zx += gmx::square(gridj->c0[XX] + (cx+1)*gridj->sx - bx0);
                        }

                        if (gridi == gridj &&
                            cx == 0 &&
                            (!c_pbcShiftBackward || shift == CENTRAL) &&
                            cyf < ci_y)
                        {
                            /* Leave the pairs with i > j.
                             * Skip half of y when i and j have the same x.
                             */
                            cyf_x = ci_y;
                        }
                        else
                        {
                            cyf_x = cyf;
                        }

                        for (int cy = cyf_x; cy <= cyl; cy++)
                        {
                            const int columnStart = gridj->cxy_ind[cx*gridj->ncy + cy];
                            const int columnEnd   = gridj->cxy_ind[cx*gridj->ncy + cy + 1];

                            d2zxy = d2zx;
                            if (gridj->c0[YY] + cy*gridj->sy > by1)
                            {
                                d2zxy += gmx::square(gridj->c0[YY] + cy*gridj->sy - by1);
                            }
                            else if (gridj->c0[YY] + (cy+1)*gridj->sy < by0)
                            {
                                d2zxy += gmx::square(gridj->c0[YY] + (cy+1)*gridj->sy - by0);
                            }
                            if (columnStart < columnEnd && d2zxy < rlist2)
                            {
                                /* To improve efficiency in the common case
                                 * of a homogeneous particle distribution,
                                 * we estimate the index of the middle cell
                                 * in range (midCell). We search down and up
                                 * starting from this index.
                                 *
                                 * Note that the bbcz_j array contains bounds
                                 * for i-clusters, thus for clusters of 4 atoms.
                                 * For the common case where the j-cluster size
                                 * is 8, we could step with a stride of 2,
                                 * but we do not do this because it would
                                 * complicate this code even more.
                                 */
                                int midCell = columnStart + static_cast<int>(bz1_frac*(columnEnd - columnStart));
                                if (midCell >= columnEnd)
                                {
                                    midCell = columnEnd - 1;
                                }

                                d2xy = d2zxy - d2z;

                                /* Find the lowest cell that can possibly
                                 * be within range.
                                 * Check if we hit the bottom of the grid,
                                 * if the j-cell is below the i-cell and if so,
                                 * if it is within range.
                                 */
                                int downTestCell = midCell;
                                while (downTestCell >= columnStart &&
                                       (bbcz_j[downTestCell*NNBSBB_D + 1] >= bz0 ||
                                        d2xy + gmx::square(bbcz_j[downTestCell*NNBSBB_D + 1] - bz0) < rlist2))
                                {
                                    downTestCell--;
                                }
                                int firstCell = downTestCell + 1;

                                /* Find the highest cell that can possibly
                                 * be within range.
                                 * Check if we hit the top of the grid,
                                 * if the j-cell is above the i-cell and if so,
                                 * if it is within range.
                                 */
                                int upTestCell = midCell + 1;
                                while (upTestCell < columnEnd &&
                                       (bbcz_j[upTestCell*NNBSBB_D] <= bz1 ||
                                        d2xy + gmx::square(bbcz_j[upTestCell*NNBSBB_D] - bz1) < rlist2))
                                {
                                    upTestCell++;
                                }
                                int lastCell = upTestCell - 1;

#define NBNXN_REFCODE 0
#if NBNXN_REFCODE
                                {
                                    /* Simple reference code, for debugging,
                                     * overrides the more complex code above.
                                     */
                                    firstCell = columnEnd;
                                    lastCell  = -1;
                                    for (int k = columnStart; k < columnEnd; k++)
                                    {
                                        if (d2xy + gmx::square(bbcz_j[k*NNBSBB_D + 1] - bz0) < rlist2 &&
                                            k < firstCell)
                                        {
                                            firstCell = k;
                                        }
                                        if (d2xy + gmx::square(bbcz_j[k*NNBSBB_D] - bz1) < rlist2 &&
                                            k > lastCell)
                                        {
                                            lastCell = k;
                                        }
                                    }
                                }
#endif

                                if (gridi == gridj)
                                {
                                    /* We want each atom/cell pair only once,
                                     * only use cj >= ci.
                                     */
                                    if (!c_pbcShiftBackward || shift == CENTRAL)
                                    {
                                        firstCell = std::max(firstCell, ci);
                                    }
                                }

                                if (firstCell <= lastCell)
                                {
                                    GMX_ASSERT(firstCell >= columnStart && lastCell < columnEnd, "The range should reside within the current grid column");

                                    /* For f buffer flags with simple lists */
                                    ncj_old_j = nbl->ncj;

                                    if (nbl->bSimple)
                                    {
                                        /* We have a maximum of 2 j-clusters
                                         * per i-cluster sized cell.
                                         */
                                        check_cell_list_space_simple(nbl, 2*(lastCell - firstCell + 1));
                                    }
                                    else
                                    {
                                        check_cell_list_space_supersub(nbl, lastCell - firstCell + 1);
                                    }

                                    switch (nb_kernel_type)
                                    {
                                        case nbnxnk4x4_PlainC:
                                            makeClusterListSimple(gridj,
                                                                  nbl, ci, firstCell, lastCell,
                                                                  (gridi == gridj && shift == CENTRAL),
                                                                  nbat->x,
                                                                  rlist2, rbb2,
                                                                  &numDistanceChecks);
                                            break;
#ifdef GMX_NBNXN_SIMD_4XN
                                        case nbnxnk4xN_SIMD_4xN:
                                            makeClusterListSimd4xn(gridj,
                                                                   nbl, ci, firstCell, lastCell,
                                                                   (gridi == gridj && shift == CENTRAL),
                                                                   nbat->x,
                                                                   rlist2, rbb2,
                                                                   &numDistanceChecks);
                                            break;
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
                                        case nbnxnk4xN_SIMD_2xNN:
                                            makeClusterListSimd2xnn(gridj,
                                                                    nbl, ci, firstCell, lastCell,
                                                                    (gridi == gridj && shift == CENTRAL),
                                                                    nbat->x,
                                                                    rlist2, rbb2,
                                                                    &numDistanceChecks);
                                            break;
#endif
                                        case nbnxnk8x8x8_PlainC:
                                        case nbnxnk8x8x8_GPU:
                                            for (cj = firstCell; cj <= lastCell; cj++)
                                            {
                                                make_cluster_list_supersub(gridi, gridj,
                                                                           nbl, ci, cj,
                                                                           (gridi == gridj && shift == CENTRAL && ci == cj),
                                                                           nbat->xstride, nbat->x,
                                                                           rlist2, rbb2,
                                                                           &numDistanceChecks);
                                            }
                                            break;
                                    }

                                    if (bFBufferFlag && nbl->ncj > ncj_old_j)
                                    {
                                        int cbf = nbl->cj[ncj_old_j].cj >> gridj_flag_shift;
                                        int cbl = nbl->cj[nbl->ncj-1].cj >> gridj_flag_shift;
                                        for (int cb = cbf; cb <= cbl; cb++)
                                        {
                                            bitmask_init_bit(&gridj_flag[cb], th);
                                        }
                                    }

                                    nbl->ncjInUse += nbl->ncj - ncj_old_j;
                                }
                            }
                        }
                    }

                    /* Set the exclusions for this ci list */
                    if (nbl->bSimple)
                    {
                        setExclusionsForSimpleIentry(nbs,
                                                     nbl,
                                                     shift == CENTRAL && gridi == gridj,
                                                     na_cj_2log,
                                                     nbl->ci[nbl->nci],
                                                     exclusions);

                        if (nbs->bFEP)
                        {
                            make_fep_list(nbs, nbat, nbl,
                                          shift == CENTRAL && gridi == gridj,
                                          &(nbl->ci[nbl->nci]),
                                          gridi, gridj, nbl_fep);
                        }
                    }
                    else
                    {
                        setExclusionsForGpuIentry(nbs,
                                                  nbl,
                                                  shift == CENTRAL && gridi == gridj,
                                                  nbl->sci[nbl->nsci],
                                                  exclusions);

                        if (nbs->bFEP)
                        {
                            make_fep_list_supersub(nbs, nbat, nbl,
                                                   shift == CENTRAL && gridi == gridj,
                                                   &(nbl->sci[nbl->nsci]),
                                                   shx, shy, shz,
                                                   rl_fep2,
                                                   gridi, gridj, nbl_fep);
                        }
                    }

                    /* Close this ci list */
                    if (nbl->bSimple)
                    {
                        close_ci_entry_simple(nbl);
                    }
                    else
                    {
                        close_ci_entry_supersub(nbl,
                                                nsubpair_max,
                                                progBal, nsubpair_tot_est,
                                                th, nth);
                    }
                }
            }
        }

        if (bFBufferFlag && nbl->ncj > ncj_old_i)
        {
            bitmask_init_bit(&(work->buffer_flags.flag[(gridi->cell0+ci)>>gridi_flag_shift]), th);
        }
    }

    work->ndistc = numDistanceChecks;

    nbs_cycle_stop(&work->cc[enbsCCsearch]);

    GMX_ASSERT(nbl->ncjInUse == nbl->ncj || nbs->bFEP, "Without free-energy all cj pair-list entries should be in use. Note that subsequent code does not make use of the equality, this check is only here to catch bugs");

    if (debug)
    {
        fprintf(debug, "number of distance checks %d\n", numDistanceChecks);

        if (nbl->bSimple)
        {
            print_nblist_statistics_simple(debug, nbl, nbs, rlist);
        }
        else
        {
            print_nblist_statistics_supersub(debug, nbl, nbs, rlist);
        }

        if (nbs->bFEP)
        {
            fprintf(debug, "nbl FEP list pairs: %d\n", nbl_fep->nrj);
        }
    }
}
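/* Shift-loop sketch: with shp = {1, 1, 1} the triple (tz, ty, tx) loop
 * above visits the 27 periodic images of the i-cell; with
 * c_pbcShiftBackward all images with shift > CENTRAL are skipped for
 * intra-grid pairs, so each i/j cell pair is generated exactly once.
 * The shp values here are illustrative.
 */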
static void reduce_buffer_flags(const nbnxn_search_t        nbs,
                                int                         nsrc,
                                const nbnxn_buffer_flags_t *dest)
{
    for (int s = 0; s < nsrc; s++)
    {
        gmx_bitmask_t * flag = nbs->work[s].buffer_flags.flag;

        for (int b = 0; b < dest->nflag; b++)
        {
            bitmask_union(&(dest->flag[b]), flag[b]);
        }
    }
}
static void print_reduction_cost(const nbnxn_buffer_flags_t *flags, int nout)
{
    int           nelem, nkeep, ncopy, nred, out;
    gmx_bitmask_t mask_0;

    nelem = 0;
    nkeep = 0;
    ncopy = 0;
    nred  = 0;
    bitmask_init_bit(&mask_0, 0);
    for (int b = 0; b < flags->nflag; b++)
    {
        if (bitmask_is_equal(flags->flag[b], mask_0))
        {
            /* Only flag 0 is set, no copy of reduction required */
            nelem++;
            nkeep++;
        }
        else if (!bitmask_is_zero(flags->flag[b]))
        {
            int c = 0;
            for (out = 0; out < nout; out++)
            {
                if (bitmask_is_set(flags->flag[b], out))
                {
                    c++;
                }
            }
            nelem += c;
            if (c == 1)
            {
                ncopy++;
            }
            else
            {
                nred += c;
            }
        }
    }

    fprintf(debug, "nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
            flags->nflag, nout,
            nelem/(double)(flags->nflag),
            nkeep/(double)(flags->nflag),
            ncopy/(double)(flags->nflag),
            nred/(double)(flags->nflag));
}
/* Copies the list entries from src to dest when cjStart <= *cjGlobal < cjEnd.
 * *cjGlobal is updated with the cj count in src.
 * When setFlags==true, flag bit t is set in flag for all i and j clusters.
 */
template<bool setFlags>
static void copySelectedListRange(const nbnxn_ci_t * gmx_restrict srcCi,
                                  const nbnxn_pairlist_t * gmx_restrict src,
                                  nbnxn_pairlist_t * gmx_restrict dest,
                                  gmx_bitmask_t *flag,
                                  int iFlagShift, int jFlagShift, int t)
{
    int ncj = srcCi->cj_ind_end - srcCi->cj_ind_start;

    if (dest->nci + 1 >= dest->ci_nalloc)
    {
        nb_realloc_ci(dest, dest->nci + 1);
    }
    check_cell_list_space_simple(dest, ncj);

    dest->ci[dest->nci]              = *srcCi;
    dest->ci[dest->nci].cj_ind_start = dest->ncj;
    dest->ci[dest->nci].cj_ind_end   = dest->ncj + ncj;

    if (setFlags)
    {
        bitmask_init_bit(&flag[srcCi->ci >> iFlagShift], t);
    }

    for (int j = srcCi->cj_ind_start; j < srcCi->cj_ind_end; j++)
    {
        dest->cj[dest->ncj++] = src->cj[j];

        if (setFlags)
        {
            /* NOTE: This is relatively expensive, since this
             * operation is done for all elements in the list,
             * whereas at list generation this is done only
             * once for each flag entry.
             */
            bitmask_init_bit(&flag[src->cj[j].cj >> jFlagShift], t);
        }
    }

    dest->nci++;
}
/* This routine re-balances the pairlists such that all are nearly equally
 * sized. Only whole i-entries are moved between lists. These are moved
 * between the ends of the lists, such that the buffer reduction cost should
 * not change significantly.
 * Note that all original reduction flags are currently kept. This can lead
 * to reduction of parts of the force buffer that could be avoided. But since
 * the original lists are quite balanced, this will only give minor overhead.
 */
static void rebalanceSimpleLists(int numLists,
                                 nbnxn_pairlist_t * const * const srcSet,
                                 nbnxn_pairlist_t **destSet,
                                 nbnxn_search_work_t *searchWork)
{
    int ncjTotal = 0;
    for (int s = 0; s < numLists; s++)
    {
        ncjTotal += srcSet[s]->ncjInUse;
    }
    int ncjTarget = (ncjTotal + numLists - 1)/numLists;

#pragma omp parallel num_threads(numLists)
    {
        int t       = gmx_omp_get_thread_num();

        int cjStart = ncjTarget* t;
        int cjEnd   = ncjTarget*(t + 1);

        /* The destination pair-list for task/thread t */
        nbnxn_pairlist_t *dest = destSet[t];

        clear_pairlist(dest);
        dest->bSimple = srcSet[0]->bSimple;
        dest->na_ci   = srcSet[0]->na_ci;
        dest->na_cj   = srcSet[0]->na_cj;

        /* Note that the flags in the work struct (still) contain flags
         * for all entries that are present in srcSet->nbl[t].
         */
        gmx_bitmask_t *flag       = searchWork[t].buffer_flags.flag;

        int            iFlagShift = getBufferFlagShift(dest->na_ci);
        int            jFlagShift = getBufferFlagShift(dest->na_cj);

        int            cjGlobal   = 0;
        for (int s = 0; s < numLists && cjGlobal < cjEnd; s++)
        {
            const nbnxn_pairlist_t *src = srcSet[s];

            if (cjGlobal + src->ncjInUse > cjStart)
            {
                for (int i = 0; i < src->nci && cjGlobal < cjEnd; i++)
                {
                    const nbnxn_ci_t *srcCi = &src->ci[i];
                    int               ncj   = srcCi->cj_ind_end - srcCi->cj_ind_start;
                    if (cjGlobal >= cjStart)
                    {
                        /* If the source list is not our own, we need to set
                         * extra flags (the template bool parameter).
                         */
                        if (s != t)
                        {
                            copySelectedListRange
                            <true>
                                (srcCi, src, dest,
                                flag, iFlagShift, jFlagShift, t);
                        }
                        else
                        {
                            copySelectedListRange
                            <false>
                                (srcCi, src,
                                dest, flag, iFlagShift, jFlagShift, t);
                        }
                    }
                    cjGlobal += ncj;
                }
            }
            else
            {
                cjGlobal += src->ncjInUse;
            }
        }

        dest->ncjInUse = dest->ncj;
    }

    int ncjTotalNew = 0;
    for (int s = 0; s < numLists; s++)
    {
        ncjTotalNew += destSet[s]->ncjInUse;
    }
    GMX_RELEASE_ASSERT(ncjTotalNew == ncjTotal, "The total size of the lists before and after rebalancing should match");
}
/* Returns whether the pairlists are so imbalanced that it is worth rebalancing. */
static bool checkRebalanceSimpleLists(const nbnxn_pairlist_set_t *listSet)
{
    int numLists = listSet->nnbl;
    int ncjMax   = 0;
    int ncjTotal = 0;
    for (int s = 0; s < numLists; s++)
    {
        ncjMax    = std::max(ncjMax, listSet->nbl[s]->ncjInUse);
        ncjTotal += listSet->nbl[s]->ncjInUse;
    }
    if (debug)
    {
        fprintf(debug, "Pair-list ncjMax %d ncjTotal %d\n", ncjMax, ncjTotal);
    }
    /* The rebalancing adds 3% extra time to the search. Heuristically we
     * determined that under common conditions the non-bonded kernel balance
     * improvement will outweigh this when the imbalance is more than 3%.
     * But this will, obviously, depend on search vs kernel time and nstlist.
     */
    const real rebalanceTolerance = 1.03;

    return numLists*ncjMax > ncjTotal*rebalanceTolerance;
}
/* Perform a count (linear) sort to sort the smaller lists to the end.
 * This avoids load imbalance on the GPU, as large lists will be
 * scheduled and executed first and the smaller lists later.
 * Load balancing between multi-processors only happens at the end
 * and there smaller lists lead to more effective load balancing.
 * The sorting is done on the cj4 count, not on the actual pair counts.
 * Not only does this make the sort faster, but it also results in
 * better load balancing than using a list sorted on exact load.
 * This function swaps the pointer in the pair list to avoid a copy operation.
 */
static void sort_sci(nbnxn_pairlist_t *nbl)
{
    nbnxn_list_work_t *work;
    int                m, s0, s1;
    nbnxn_sci_t       *sci_sort;

    if (nbl->ncj4 <= nbl->nsci)
    {
        /* nsci = 0 or all sci have size 1, sorting won't change the order */
        return;
    }

    work = nbl->work;

    /* We will distinguish differences up to double the average */
    m = (2*nbl->ncj4)/nbl->nsci;

    if (m + 1 > work->sort_nalloc)
    {
        work->sort_nalloc = over_alloc_large(m + 1);
        srenew(work->sort, work->sort_nalloc);
    }

    if (work->sci_sort_nalloc != nbl->sci_nalloc)
    {
        work->sci_sort_nalloc = nbl->sci_nalloc;
        nbnxn_realloc_void((void **)&work->sci_sort,
                           0,
                           work->sci_sort_nalloc*sizeof(*work->sci_sort),
                           nbl->alloc, nbl->free);
    }

    /* Count the entries of each size */
    for (int i = 0; i <= m; i++)
    {
        work->sort[i] = 0;
    }
    for (int s = 0; s < nbl->nsci; s++)
    {
        int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
        work->sort[i]++;
    }
    /* Calculate the offset for each count */
    s0            = work->sort[m];
    work->sort[m] = 0;
    for (int i = m - 1; i >= 0; i--)
    {
        s1            = work->sort[i];
        work->sort[i] = work->sort[i + 1] + s0;
        s0            = s1;
    }

    /* Sort entries directly into place */
    sci_sort = work->sci_sort;
    for (int s = 0; s < nbl->nsci; s++)
    {
        int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
        sci_sort[work->sort[i]++] = nbl->sci[s];
    }

    /* Swap the sci pointers so we use the new, sorted list */
    work->sci_sort = nbl->sci;
    nbl->sci       = sci_sort;
}
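/* Count-sort sketch (made-up sizes): for sci sizes {1, 3, 1, 2} and m = 3,
 * the counts per size are {1: 2, 2: 1, 3: 1} and the descending offsets
 * become {3: 0, 2: 1, 1: 2}, so the size-3 entry lands first and the two
 * size-1 entries last, giving the large-lists-first order the GPU wants.
 */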
/* Make a local or non-local pair-list, depending on iloc */
void nbnxn_make_pairlist(const nbnxn_search_t  nbs,
                         nbnxn_atomdata_t     *nbat,
                         const t_blocka       *excl,
                         real                  rlist,
                         int                   min_ci_balanced,
                         nbnxn_pairlist_set_t *nbl_list,
                         int                   iloc,
                         int                   nb_kernel_type,
                         t_nrnb               *nrnb)
{
    nbnxn_grid_t      *gridi, *gridj;
    int                nzi, zj0, zj1;
    int                nsubpair_target;
    float              nsubpair_tot_est;
    int                nnbl;
    nbnxn_pairlist_t **nbl;
    int                ci_block;
    gmx_bool           CombineNBLists;
    gmx_bool           progBal;
    int                np_tot, np_noq, np_hlj, nap;

    nnbl           = nbl_list->nnbl;
    nbl            = nbl_list->nbl;
    CombineNBLists = nbl_list->bCombined;

    if (debug)
    {
        fprintf(debug, "ns making %d nblists\n", nnbl);
    }

    nbat->bUseBufferFlags = (nbat->nout > 1);
    /* We should re-init the flags before making the first list */
    if (nbat->bUseBufferFlags && LOCAL_I(iloc))
    {
        init_buffer_flags(&nbat->buffer_flags, nbat->natoms);
    }

    if (nbl_list->bSimple)
    {
#if GMX_SIMD
        switch (nb_kernel_type)
        {
#ifdef GMX_NBNXN_SIMD_4XN
            case nbnxnk4xN_SIMD_4xN:
                nbs->icell_set_x = icell_set_x_simd_4xn;
                break;
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
            case nbnxnk4xN_SIMD_2xNN:
                nbs->icell_set_x = icell_set_x_simd_2xnn;
                break;
#endif
            default:
                nbs->icell_set_x = icell_set_x_simple;
                break;
        }
#else   // GMX_SIMD
        /* MSVC 2013 complains about switch statements without case */
        nbs->icell_set_x = icell_set_x_simple;
#endif  // GMX_SIMD
    }
    else
    {
        nbs->icell_set_x = icell_set_x_supersub;
    }

    if (LOCAL_I(iloc))
    {
        /* Only zone (grid) 0 vs 0 */
        nzi = 1;
        zj0 = 0;
        zj1 = 1;
    }
    else
    {
        nzi = nbs->zones->nizone;
    }

    if (!nbl_list->bSimple && min_ci_balanced > 0)
    {
        get_nsubpair_target(nbs, iloc, rlist, min_ci_balanced,
                            &nsubpair_target, &nsubpair_tot_est);
    }
    else
    {
        nsubpair_target  = 0;
        nsubpair_tot_est = 0;
    }

    /* Clear all pair-lists */
    for (int th = 0; th < nnbl; th++)
    {
        clear_pairlist(nbl[th]);

        if (nbs->bFEP)
        {
            clear_pairlist_fep(nbl_list->nbl_fep[th]);
        }
    }

    for (int zi = 0; zi < nzi; zi++)
    {
        gridi = &nbs->grid[zi];

        if (NONLOCAL_I(iloc))
        {
            zj0 = nbs->zones->izone[zi].j0;
            zj1 = nbs->zones->izone[zi].j1;
            if (zi == 0)
            {
                zj0++;
            }
        }
        for (int zj = zj0; zj < zj1; zj++)
        {
            gridj = &nbs->grid[zj];

            if (debug)
            {
                fprintf(debug, "ns search grid %d vs %d\n", zi, zj);
            }

            nbs_cycle_start(&nbs->cc[enbsCCsearch]);

            ci_block = get_ci_block_size(gridi, nbs->DomDec, nnbl);

            /* With GPU: generate progressively smaller lists for
             * load balancing for local only or non-local with 2 zones.
             */
            progBal = (LOCAL_I(iloc) || nbs->zones->n <= 2);

#pragma omp parallel for num_threads(nnbl) schedule(static)
            for (int th = 0; th < nnbl; th++)
            {
                try
                {
                    /* Re-init the thread-local work flag data before making
                     * the first list (not an elegant conditional).
                     */
                    if (nbat->bUseBufferFlags && ((zi == 0 && zj == 0)))
                    {
                        init_buffer_flags(&nbs->work[th].buffer_flags, nbat->natoms);
                    }

                    if (CombineNBLists && th > 0)
                    {
                        clear_pairlist(nbl[th]);
                    }

                    /* Divide the i super cell equally over the nblists */
                    nbnxn_make_pairlist_part(nbs, gridi, gridj,
                                             &nbs->work[th], nbat, *excl,
                                             rlist,
                                             nb_kernel_type,
                                             ci_block,
                                             nbat->bUseBufferFlags,
                                             nsubpair_target,
                                             progBal, nsubpair_tot_est,
                                             th, nnbl,
                                             nbl[th],
                                             nbl_list->nbl_fep[th]);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }
            nbs_cycle_stop(&nbs->cc[enbsCCsearch]);

            np_tot = 0;
            np_noq = 0;
            np_hlj = 0;
            for (int th = 0; th < nnbl; th++)
            {
                inc_nrnb(nrnb, eNR_NBNXN_DIST2, nbs->work[th].ndistc);

                if (nbl_list->bSimple)
                {
                    np_tot += nbl[th]->ncj;
                    np_noq += nbl[th]->work->ncj_noq;
                    np_hlj += nbl[th]->work->ncj_hlj;
                }
                else
                {
                    /* This count ignores potential subsequent pair pruning */
                    np_tot += nbl[th]->nci_tot;
                }
            }
            nap                   = nbl[0]->na_ci*nbl[0]->na_cj;
            nbl_list->natpair_ljq = (np_tot - np_noq)*nap - np_hlj*nap/2;
            nbl_list->natpair_lj  = np_noq*nap;
            nbl_list->natpair_q   = np_hlj*nap/2;

            if (CombineNBLists && nnbl > 1)
            {
                nbs_cycle_start(&nbs->cc[enbsCCcombine]);

                combine_nblists(nnbl-1, nbl+1, nbl[0]);

                nbs_cycle_stop(&nbs->cc[enbsCCcombine]);
            }
        }
    }

    if (nbl_list->bSimple)
    {
        if (nnbl > 1 && checkRebalanceSimpleLists(nbl_list))
        {
            rebalanceSimpleLists(nbl_list->nnbl, nbl_list->nbl, nbl_list->nbl_work, nbs->work);

            /* Swap the pointer of the sets of pair lists */
            nbnxn_pairlist_t **tmp = nbl_list->nbl;
            nbl_list->nbl          = nbl_list->nbl_work;
            nbl_list->nbl_work     = tmp;
        }
    }
    else
    {
        /* Sort the entries on size, large ones first */
        if (CombineNBLists || nnbl == 1)
        {
            sort_sci(nbl[0]);
        }
        else
        {
#pragma omp parallel for num_threads(nnbl) schedule(static)
            for (int th = 0; th < nnbl; th++)
            {
                try
                {
                    sort_sci(nbl[th]);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }
        }
    }

    if (nbat->bUseBufferFlags)
    {
        reduce_buffer_flags(nbs, nbl_list->nnbl, &nbat->buffer_flags);
    }

    if (nbs->bFEP)
    {
        /* Balance the free-energy lists over all the threads */
        balance_fep_lists(nbs, nbl_list);
    }

    /* This is a fresh list, so not pruned, stored using ci and nci.
     * ciOuter and nciOuter are invalid at this point.
     */
    GMX_ASSERT(nbl_list->nbl[0]->nciOuter == -1, "nciOuter should have been set to -1 to signal that it is invalid");

    /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
    if (LOCAL_I(iloc))
    {
        nbs->search_count++;
    }
    if (nbs->print_cycles &&
        (!nbs->DomDec || !LOCAL_I(iloc)) &&
        nbs->search_count % 100 == 0)
    {
        nbs_cycle_print(stderr, nbs);
    }

    /* If we have more than one list, they were either rebalanced (CPU)
     * or combined (GPU), so we should dump the final result to debug.
     */
    if (debug && nbl_list->nnbl > 1)
    {
        if (nbl_list->bSimple)
        {
            for (int t = 0; t < nbl_list->nnbl; t++)
            {
                print_nblist_statistics_simple(debug, nbl_list->nbl[t], nbs, rlist);
            }
        }
        else
        {
            print_nblist_statistics_supersub(debug, nbl_list->nbl[0], nbs, rlist);
        }
    }

    if (debug)
    {
        if (nbl_list->bSimple)
        {
            for (int t = 0; t < nbl_list->nnbl; t++)
            {
                print_nblist_ci_cj(debug, nbl_list->nbl[t]);
            }
        }
        else
        {
            print_nblist_sci_cj(debug, nbl_list->nbl[0]);
        }

        if (nbat->bUseBufferFlags)
        {
            print_reduction_cost(&nbat->buffer_flags, nbl_list->nnbl);
        }
    }
}
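/* Pair-count sketch for the natpair accounting above (made-up numbers):
 * with na_ci = na_cj = 4, nap = 16 atom pairs per cj entry; np_tot = 1000,
 * np_noq = 100 and np_hlj = 50 give natpair_ljq = (1000-100)*16 - 50*16/2
 * = 14000 LJ+Coulomb pairs, used only for flop accounting via nrnb.
 */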
void nbnxnPrepareListForDynamicPruning(nbnxn_pairlist_set_t *listSet)
{
    /* TODO: Restructure the lists so we have actual outer and inner
     *       list objects so we can set a single pointer instead of
     *       swapping several pointers.
     */

    for (int i = 0; i < listSet->nnbl; i++)
    {
        /* The search produced a list in ci/cj.
         * Swap the list pointers so that the outer list is in ciOuter,cjOuter
         * and we can prune that to get an inner list in ci/cj.
         */
        nbnxn_pairlist_t *list = listSet->nbl[i];
        list->nciOuter         = list->nci;

        nbnxn_ci_t       *ciTmp = list->ciOuter;
        list->ciOuter           = list->ci;
        list->ci                = ciTmp;

        nbnxn_cj_t       *cjTmp = list->cjOuter;
        list->cjOuter           = list->cj;
        list->cj                = cjTmp;

        /* Signal that this inner list is currently invalid */