/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "nbnxn_search.h"

#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nbnxn_internal.h"
#include "gromacs/mdlib/nbnxn_simd.h"
#include "gromacs/mdlib/nbnxn_util.h"
#include "gromacs/mdlib/ns.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"
#include "gromacs/topology/block.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"
using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
/* We shift the i-particles backward for PBC.
 * This leads to more conditionals than shifting forward.
 * We do this to get more balanced pair lists.
 */
constexpr bool c_pbcShiftBackward = true;
static void nbs_cycle_clear(nbnxn_cycle_t *cc)
{
    for (int i = 0; i < enbsCCnr; i++)
    {
        cc[i].count = 0;
        cc[i].c     = 0;
    }
}
static double Mcyc_av(const nbnxn_cycle_t *cc)
{
    return (double)cc->c*1e-6/cc->count;
}
static void nbs_cycle_print(FILE *fp, const nbnxn_search_t nbs)
{
    fprintf(fp, "ns %4d grid %4.1f search %4.1f red.f %5.3f",
            nbs->cc[enbsCCgrid].count,
            Mcyc_av(&nbs->cc[enbsCCgrid]),
            Mcyc_av(&nbs->cc[enbsCCsearch]),
            Mcyc_av(&nbs->cc[enbsCCreducef]));

    if (nbs->work.size() > 1)
    {
        if (nbs->cc[enbsCCcombine].count > 0)
        {
            fprintf(fp, " comb %5.2f",
                    Mcyc_av(&nbs->cc[enbsCCcombine]));
        }
        fprintf(fp, " s. th");
        for (const nbnxn_search_work_t &work : nbs->work)
        {
            fprintf(fp, " %4.1f",
                    Mcyc_av(&work.cc[enbsCCsearch]));
        }
    }
    fprintf(fp, "\n");
}
/* Layout for the nonbonded NxN pair lists */
enum class NbnxnLayout
{
    NoSimd4x4, // i-cluster size 4, j-cluster size 4
    Simd4xN,   // i-cluster size 4, j-cluster size SIMD width
    Simd2xNN,  // i-cluster size 4, j-cluster size half SIMD width
    Gpu8x8x8   // i-cluster size 8, j-cluster size 8 + super-clustering
};
/* Returns the j-cluster size */
template <NbnxnLayout layout>
static constexpr int jClusterSize()
{
#if GMX_SIMD
    static_assert(layout == NbnxnLayout::NoSimd4x4 || layout == NbnxnLayout::Simd4xN || layout == NbnxnLayout::Simd2xNN, "Currently jClusterSize only supports CPU layouts");

    return layout == NbnxnLayout::Simd4xN ? GMX_SIMD_REAL_WIDTH : (layout == NbnxnLayout::Simd2xNN ? GMX_SIMD_REAL_WIDTH/2 : NBNXN_CPU_CLUSTER_I_SIZE);
#else
    static_assert(layout == NbnxnLayout::NoSimd4x4, "Currently without SIMD, jClusterSize only supports NoSimd4x4");

    return NBNXN_CPU_CLUSTER_I_SIZE;
#endif
}
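/* Editor's note (illustration, not in the original source): assuming
 * NBNXN_CPU_CLUSTER_I_SIZE = 4 and 8-wide SIMD (GMX_SIMD_REAL_WIDTH = 8),
 * the layouts above map to j-cluster sizes as:
 *   NoSimd4x4 -> 4,  Simd4xN -> 8 (the SIMD width),  Simd2xNN -> 4 (half the width).
 * With 4-wide SIMD, Simd4xN -> 4 and Simd2xNN -> 2.
 */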
/* Returns the j-cluster index given the i-cluster index */
template <int jClusterSize>
static inline int cjFromCi(int ci)
{
    static_assert(jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2 || jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE || jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE*2, "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2)
    {
        return ci << 1;
    }
    else if (jClusterSize == NBNXN_CPU_CLUSTER_I_SIZE)
    {
        return ci;
    }
    else
    {
        return ci >> 1;
    }
}
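/* Editor's note (worked example, not in the original source): with
 * NBNXN_CPU_CLUSTER_I_SIZE = 4, the mapping from i-cluster index ci to the
 * first matching j-cluster index is:
 *   jClusterSize 2: cj = ci << 1  (two j-clusters cover one i-cluster)
 *   jClusterSize 4: cj = ci       (identical clustering)
 *   jClusterSize 8: cj = ci >> 1  (one j-cluster covers two i-clusters)
 */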
/* Returns the j-cluster index given the i-cluster index */
template <NbnxnLayout layout>
static inline int cjFromCi(int ci)
{
    constexpr int clusterSize = jClusterSize<layout>();

    return cjFromCi<clusterSize>(ci);
}
/* Returns the nbnxn coordinate data index given the i-cluster index */
template <NbnxnLayout layout>
static inline int xIndexFromCi(int ci)
{
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2 || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE*2, "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize <= NBNXN_CPU_CLUSTER_I_SIZE)
    {
        /* Coordinates are stored packed in groups of 4 */
        return ci*STRIDE_P4;
    }
    else
    {
        /* Coordinates packed in 8, i-cluster size is half the packing width */
        return (ci >> 1)*STRIDE_P8 + (ci & 1)*(c_packX8 >> 1);
    }
}
/* Returns the nbnxn coordinate data index given the j-cluster index */
template <NbnxnLayout layout>
static inline int xIndexFromCj(int cj)
{
    constexpr int clusterSize = jClusterSize<layout>();

    static_assert(clusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2 || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE || clusterSize == NBNXN_CPU_CLUSTER_I_SIZE*2, "Only j-cluster sizes 2, 4 and 8 are currently implemented");

    if (clusterSize == NBNXN_CPU_CLUSTER_I_SIZE/2)
    {
        /* Coordinates are stored packed in groups of 4 */
        return (cj >> 1)*STRIDE_P4 + (cj & 1)*(c_packX4 >> 1);
    }
    else if (clusterSize == NBNXN_CPU_CLUSTER_I_SIZE)
    {
        /* Coordinates are stored packed in groups of 4 */
        return cj*STRIDE_P4;
    }
    else
    {
        /* Coordinates are stored packed in groups of 8 */
        return cj*STRIDE_P8;
    }
}
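/* Editor's note (worked example, not in the original source): for j-cluster
 * size 2 the coordinates stay packed in groups of c_packX4 atoms, so two
 * consecutive j-clusters share one pack. E.g. cj = 5 gives
 * (5 >> 1)*STRIDE_P4 + (5 & 1)*(c_packX4 >> 1) = 2*STRIDE_P4 + c_packX4/2,
 * i.e. the second half of the third pack. The actual values of c_packX4 and
 * STRIDE_P4 depend on the build configuration.
 */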
gmx_bool nbnxn_kernel_pairlist_simple(int nb_kernel_type)
{
    if (nb_kernel_type == nbnxnkNotSet)
    {
        gmx_fatal(FARGS, "Non-bonded kernel type not set for Verlet-style pair-list.");
    }

    switch (nb_kernel_type)
    {
        case nbnxnk8x8x8_GPU:
        case nbnxnk8x8x8_PlainC:
            return FALSE;

        case nbnxnk4x4_PlainC:
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:
            return TRUE;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
            return FALSE;
    }
}
/* Initializes a single t_nblist data structure for free-energy interactions */
static void nbnxn_init_pairlist_fep(t_nblist *nl)
{
    nl->type      = GMX_NBLIST_INTERACTION_FREE_ENERGY;
    nl->igeometry = GMX_NBLIST_GEOMETRY_PARTICLE_PARTICLE;
    /* The interaction functions are set in the free energy kernel function */

    nl->maxnri    = 0;
    nl->maxnrj    = 0;
    nl->nri       = 0;
    nl->nrj       = 0;
    nl->iinr      = nullptr;
    nl->gid       = nullptr;
    nl->shift     = nullptr;
    nl->jindex    = nullptr;
    nl->jjnr      = nullptr;
    nl->excl_fep  = nullptr;
}
static void free_nblist(t_nblist *nl)
{
    sfree(nl->iinr);
    sfree(nl->gid);
    sfree(nl->shift);
    sfree(nl->jindex);
    sfree(nl->jjnr);
    sfree(nl->excl_fep);
}
nbnxn_search_work_t::nbnxn_search_work_t() :
    buffer_flags({0, nullptr, 0}),
    nbl_fep(new t_nblist)
{
    nbnxn_init_pairlist_fep(nbl_fep.get());

    nbs_cycle_clear(cc);
}
nbnxn_search_work_t::~nbnxn_search_work_t()
{
    sfree(buffer_flags.flag);

    free_nblist(nbl_fep.get());
}
nbnxn_search::nbnxn_search(const ivec               *n_dd_cells,
                           const gmx_domdec_zones_t *zones,
                           gmx_bool                   bFEP,
                           int                        nthread_max) :
    bFEP(bFEP),
    ePBC(epbcNONE), // The correct value will be set during the gridding
    zones(zones),
    DomDec(n_dd_cells != nullptr),
    work(nthread_max)
{
    // The correct value will be set during the gridding
    clear_mat(box);

    int numGrids = 1;
    if (DomDec)
    {
        for (int d = 0; d < DIM; d++)
        {
            if ((*n_dd_cells)[d] > 1)
            {
                /* Each grid matches a DD zone */
                numGrids *= 2;
            }
        }
    }
    grid.resize(numGrids);

    /* Initialize detailed nbsearch cycle counting */
    print_cycles = (getenv("GMX_NBNXN_CYCLE") != nullptr);
    nbs_cycle_clear(cc);
}
nbnxn_search *nbnxn_init_search(const ivec               *n_dd_cells,
                                const gmx_domdec_zones_t *zones,
                                gmx_bool                   bFEP,
                                int                        nthread_max)
{
    return new nbnxn_search(n_dd_cells, zones, bFEP, nthread_max);
}
static void init_buffer_flags(nbnxn_buffer_flags_t *flags,
                              int                   natoms)
{
    flags->nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE;
    if (flags->nflag > flags->flag_nalloc)
    {
        flags->flag_nalloc = over_alloc_large(flags->nflag);
        srenew(flags->flag, flags->flag_nalloc);
    }
    for (int b = 0; b < flags->nflag; b++)
    {
        bitmask_clear(&(flags->flag[b]));
    }
}
/* Determines the cell range along one dimension that
 * the bounding box b0 - b1 sees.
 */
template<int dim>
static void get_cell_range(real b0, real b1,
                           const nbnxn_grid_t &gridj,
                           real d2, real r2, int *cf, int *cl)
{
    real distanceInCells = (b0 - gridj.c0[dim])*gridj.invCellSize[dim];
    *cf                  = std::max(static_cast<int>(distanceInCells), 0);

    while (*cf > 0 &&
           d2 + gmx::square((b0 - gridj.c0[dim]) - (*cf - 1 + 1)*gridj.cellSize[dim]) < r2)
    {
        (*cf)--;
    }

    *cl = std::min(static_cast<int>((b1 - gridj.c0[dim])*gridj.invCellSize[dim]), gridj.numCells[dim] - 1);
    while (*cl < gridj.numCells[dim] - 1 &&
           d2 + gmx::square((*cl + 1)*gridj.cellSize[dim] - (b1 - gridj.c0[dim])) < r2)
    {
        (*cl)++;
    }
}
/* Reference code calculating the distance^2 between two bounding boxes */
static float box_dist2(float bx0, float bx1, float by0,
                       float by1, float bz0, float bz1,
                       const nbnxn_bb_t *bb)
{
    float d2;
    float dl, dh, dm, dm0;

    d2 = 0;

    dl  = bx0 - bb->upper[BB_X];
    dh  = bb->lower[BB_X] - bx1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = by0 - bb->upper[BB_Y];
    dh  = bb->lower[BB_Y] - by1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bz0 - bb->upper[BB_Z];
    dh  = bb->lower[BB_Z] - bz1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
}
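/* Editor's note (worked example, not in the original source): per dimension
 * the distance between intervals [lo_i,hi_i] and [lo_j,hi_j] is
 *   d = max(0, max(lo_i - hi_j, lo_j - hi_i))
 * which is 0 when the intervals overlap. E.g. intervals [0,1] and [3,5]
 * give d = max(0, max(0 - 5, 3 - 1)) = 2; the function returns the sum of
 * the squared per-dimension distances.
 */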
/* Plain C code calculating the distance^2 between two bounding boxes */
static float subc_bb_dist2(int si,
                           const nbnxn_bb_t *bb_i_ci,
                           int csj,
                           gmx::ArrayRef<const nbnxn_bb_t> bb_j_all)
{
    const nbnxn_bb_t *bb_i = bb_i_ci + si;
    const nbnxn_bb_t *bb_j = bb_j_all.data() + csj;

    float             d2 = 0;
    float             dl, dh, dm, dm0;

    dl  = bb_i->lower[BB_X] - bb_j->upper[BB_X];
    dh  = bb_j->lower[BB_X] - bb_i->upper[BB_X];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Y] - bb_j->upper[BB_Y];
    dh  = bb_j->lower[BB_Y] - bb_i->upper[BB_Y];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Z] - bb_j->upper[BB_Z];
    dh  = bb_j->lower[BB_Z] - bb_i->upper[BB_Z];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
}
#if NBNXN_SEARCH_BB_SIMD4

/* 4-wide SIMD code for bb distance for bb format xyz0 */
static float subc_bb_dist2_simd4(int si,
                                 const nbnxn_bb_t *bb_i_ci,
                                 int csj,
                                 gmx::ArrayRef<const nbnxn_bb_t> bb_j_all)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    Simd4Float bb_i_S0, bb_i_S1;
    Simd4Float bb_j_S0, bb_j_S1;
    Simd4Float dl_S;
    Simd4Float dh_S;
    Simd4Float dm_S;
    Simd4Float dm0_S;

    bb_i_S0 = load4(&bb_i_ci[si].lower[0]);
    bb_i_S1 = load4(&bb_i_ci[si].upper[0]);
    bb_j_S0 = load4(&bb_j_all[csj].lower[0]);
    bb_j_S1 = load4(&bb_j_all[csj].upper[0]);

    dl_S  = bb_i_S0 - bb_j_S1;
    dh_S  = bb_j_S0 - bb_i_S1;

    dm_S  = max(dl_S, dh_S);
    dm0_S = max(dm_S, simd4SetZeroF());

    return dotProduct(dm0_S, dm0_S);
}
/* Calculate bb bounding distances of bb_i[si,...,si+3] and store them in d2 */
#define SUBC_BB_DIST2_SIMD4_XXXX_INNER(si, bb_i, d2) \
    {                                                \
        int        shi;                              \
                                                     \
        Simd4Float dx_0, dy_0, dz_0;                 \
        Simd4Float dx_1, dy_1, dz_1;                 \
                                                     \
        Simd4Float mx, my, mz;                       \
        Simd4Float m0x, m0y, m0z;                    \
                                                     \
        Simd4Float d2x, d2y, d2z;                    \
        Simd4Float d2s, d2t;                         \
                                                     \
        shi = si*NNBSBB_D*DIM;                       \
                                                     \
        xi_l = load4(bb_i+shi+0*STRIDE_PBB);         \
        yi_l = load4(bb_i+shi+1*STRIDE_PBB);         \
        zi_l = load4(bb_i+shi+2*STRIDE_PBB);         \
        xi_h = load4(bb_i+shi+3*STRIDE_PBB);         \
        yi_h = load4(bb_i+shi+4*STRIDE_PBB);         \
        zi_h = load4(bb_i+shi+5*STRIDE_PBB);         \
                                                     \
        dx_0 = xi_l - xj_h;                          \
        dy_0 = yi_l - yj_h;                          \
        dz_0 = zi_l - zj_h;                          \
                                                     \
        dx_1 = xj_l - xi_h;                          \
        dy_1 = yj_l - yi_h;                          \
        dz_1 = zj_l - zi_h;                          \
                                                     \
        mx   = max(dx_0, dx_1);                      \
        my   = max(dy_0, dy_1);                      \
        mz   = max(dz_0, dz_1);                      \
                                                     \
        m0x  = max(mx, zero);                        \
        m0y  = max(my, zero);                        \
        m0z  = max(mz, zero);                        \
                                                     \
        d2x  = m0x * m0x;                            \
        d2y  = m0y * m0y;                            \
        d2z  = m0z * m0z;                            \
                                                     \
        d2s  = d2x + d2y;                            \
        d2t  = d2s + d2z;                            \
                                                     \
        store4(d2+si, d2t);                          \
    }
/* 4-wide SIMD code for nsi bb distances for bb format xxxxyyyyzzzz */
static void subc_bb_dist2_simd4_xxxx(const float *bb_j,
                                     int nsi, const float *bb_i,
                                     float *d2)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    Simd4Float xj_l, yj_l, zj_l;
    Simd4Float xj_h, yj_h, zj_h;
    Simd4Float xi_l, yi_l, zi_l;
    Simd4Float xi_h, yi_h, zi_h;

    Simd4Float zero = setZero();

    xj_l = Simd4Float(bb_j[0*STRIDE_PBB]);
    yj_l = Simd4Float(bb_j[1*STRIDE_PBB]);
    zj_l = Simd4Float(bb_j[2*STRIDE_PBB]);
    xj_h = Simd4Float(bb_j[3*STRIDE_PBB]);
    yj_h = Simd4Float(bb_j[4*STRIDE_PBB]);
    zj_h = Simd4Float(bb_j[5*STRIDE_PBB]);

    /* Here we "loop" over si (0,STRIDE_PBB) from 0 to nsi with step STRIDE_PBB.
     * But as we know the number of iterations is 1 or 2, we unroll manually.
     */
    SUBC_BB_DIST2_SIMD4_XXXX_INNER(0, bb_i, d2);
    if (STRIDE_PBB < nsi)
    {
        SUBC_BB_DIST2_SIMD4_XXXX_INNER(STRIDE_PBB, bb_i, d2);
    }
}

#endif /* NBNXN_SEARCH_BB_SIMD4 */
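/* Editor's note (not in the original source): in the pbb ("xxxxyyyyzzzz")
 * format the bounds of STRIDE_PBB bounding boxes are stored transposed:
 *   x_lo[0..3] y_lo[0..3] z_lo[0..3] x_hi[0..3] y_hi[0..3] z_hi[0..3]
 * so one 4-wide SIMD load fetches the same bound of four boxes at once,
 * which is what SUBC_BB_DIST2_SIMD4_XXXX_INNER relies on above.
 */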
/* Returns if any atom pair from two clusters is within distance sqrt(rlist2) */
static inline gmx_bool
clusterpair_in_range(const nbnxn_list_work_t *work,
                     int si,
                     int csj, int stride, const real *x_j,
                     real rlist2)
{
#if !GMX_SIMD4_HAVE_REAL

    /* Plain C version.
     * All coordinates are stored as xyzxyz...
     */

    const real *x_i = work->x_ci;

    for (int i = 0; i < c_nbnxnGpuClusterSize; i++)
    {
        int i0 = (si*c_nbnxnGpuClusterSize + i)*DIM;
        for (int j = 0; j < c_nbnxnGpuClusterSize; j++)
        {
            int j0 = (csj*c_nbnxnGpuClusterSize + j)*stride;

            real d2 = gmx::square(x_i[i0] - x_j[j0]) + gmx::square(x_i[i0+1] - x_j[j0+1]) + gmx::square(x_i[i0+2] - x_j[j0+2]);

            if (d2 < rlist2)
            {
                return TRUE;
            }
        }
    }

    return FALSE;

#else /* !GMX_SIMD4_HAVE_REAL */

    /* 4-wide SIMD version.
     * The coordinates x_i are stored as xxxxyyyy..., x_j is stored xyzxyz...
     * Using 8-wide AVX(2) is not faster on Intel Sandy Bridge and Haswell.
     */
    static_assert(c_nbnxnGpuClusterSize == 8 || c_nbnxnGpuClusterSize == 4,
                  "A cluster is hard-coded to 4/8 atoms.");

    Simd4Real   rc2_S      = Simd4Real(rlist2);

    const real *x_i        = work->x_ci_simd;

    int         dim_stride = c_nbnxnGpuClusterSize*DIM;
    Simd4Real   ix_S0      = load4(x_i + si*dim_stride + 0*GMX_SIMD4_WIDTH);
    Simd4Real   iy_S0      = load4(x_i + si*dim_stride + 1*GMX_SIMD4_WIDTH);
    Simd4Real   iz_S0      = load4(x_i + si*dim_stride + 2*GMX_SIMD4_WIDTH);

    Simd4Real   ix_S1, iy_S1, iz_S1;
    if (c_nbnxnGpuClusterSize == 8)
    {
        ix_S1 = load4(x_i + si*dim_stride + 3*GMX_SIMD4_WIDTH);
        iy_S1 = load4(x_i + si*dim_stride + 4*GMX_SIMD4_WIDTH);
        iz_S1 = load4(x_i + si*dim_stride + 5*GMX_SIMD4_WIDTH);
    }
    /* We loop from the outer to the inner particles to maximize
     * the chance that we find a pair in range quickly and return.
     */
    int j0 = csj*c_nbnxnGpuClusterSize;
    int j1 = j0 + c_nbnxnGpuClusterSize - 1;
    while (j0 < j1)
    {
        Simd4Real jx0_S, jy0_S, jz0_S;
        Simd4Real jx1_S, jy1_S, jz1_S;

        Simd4Real dx_S0, dy_S0, dz_S0;
        Simd4Real dx_S1, dy_S1, dz_S1;
        Simd4Real dx_S2, dy_S2, dz_S2;
        Simd4Real dx_S3, dy_S3, dz_S3;

        Simd4Real rsq_S0, rsq_S1, rsq_S2, rsq_S3;

        Simd4Bool wco_S0, wco_S1, wco_S2, wco_S3;
        Simd4Bool wco_any_S01, wco_any_S23, wco_any_S;

        jx0_S = Simd4Real(x_j[j0*stride+0]);
        jy0_S = Simd4Real(x_j[j0*stride+1]);
        jz0_S = Simd4Real(x_j[j0*stride+2]);

        jx1_S = Simd4Real(x_j[j1*stride+0]);
        jy1_S = Simd4Real(x_j[j1*stride+1]);
        jz1_S = Simd4Real(x_j[j1*stride+2]);

        /* Calculate distance */
        dx_S0 = ix_S0 - jx0_S;
        dy_S0 = iy_S0 - jy0_S;
        dz_S0 = iz_S0 - jz0_S;
        dx_S2 = ix_S0 - jx1_S;
        dy_S2 = iy_S0 - jy1_S;
        dz_S2 = iz_S0 - jz1_S;
        if (c_nbnxnGpuClusterSize == 8)
        {
            dx_S1 = ix_S1 - jx0_S;
            dy_S1 = iy_S1 - jy0_S;
            dz_S1 = iz_S1 - jz0_S;
            dx_S3 = ix_S1 - jx1_S;
            dy_S3 = iy_S1 - jy1_S;
            dz_S3 = iz_S1 - jz1_S;
        }

        /* rsq = dx*dx+dy*dy+dz*dz */
        rsq_S0 = norm2(dx_S0, dy_S0, dz_S0);
        rsq_S2 = norm2(dx_S2, dy_S2, dz_S2);
        if (c_nbnxnGpuClusterSize == 8)
        {
            rsq_S1 = norm2(dx_S1, dy_S1, dz_S1);
            rsq_S3 = norm2(dx_S3, dy_S3, dz_S3);
        }

        wco_S0 = (rsq_S0 < rc2_S);
        wco_S2 = (rsq_S2 < rc2_S);
        if (c_nbnxnGpuClusterSize == 8)
        {
            wco_S1 = (rsq_S1 < rc2_S);
            wco_S3 = (rsq_S3 < rc2_S);
        }
        if (c_nbnxnGpuClusterSize == 8)
        {
            wco_any_S01 = wco_S0 || wco_S1;
            wco_any_S23 = wco_S2 || wco_S3;
            wco_any_S   = wco_any_S01 || wco_any_S23;
        }
        else
        {
            wco_any_S = wco_S0 || wco_S2;
        }

        if (anyTrue(wco_any_S))
        {
            return TRUE;
        }

        j0++;
        j1--;
    }

    return FALSE;

#endif /* !GMX_SIMD4_HAVE_REAL */
}
/* Returns the j-cluster index for index cjIndex in a cj list */
static inline int nblCj(const nbnxn_cj_t *cjList, int cjIndex)
{
    return cjList[cjIndex].cj;
}

/* Returns the j-cluster index for index cjIndex in a cj4 list */
static inline int nblCj(const nbnxn_cj4_t *cj4List, int cjIndex)
{
    return cj4List[cjIndex/c_nbnxnGpuJgroupSize].cj[cjIndex & (c_nbnxnGpuJgroupSize - 1)];
}
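/* Editor's note (worked example, not in the original source): cj4 groups
 * store c_nbnxnGpuJgroupSize j-clusters each, so with a group size of 4,
 * cjIndex = 10 resolves to cj4List[10/4].cj[10 & 3] = cj4List[2].cj[2].
 */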
/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
static unsigned int nbl_imask0(const nbnxn_pairlist_t *nbl, int cj_ind)
{
    return nbl->cj4[cj_ind/c_nbnxnGpuJgroupSize].imei[0].imask;
}
/* Ensures there is enough space for extra exclusion masks */
static void check_excl_space(nbnxn_pairlist_t *nbl, int extra)
{
    if (nbl->nexcl + extra > nbl->excl_nalloc)
    {
        nbl->excl_nalloc = over_alloc_small(nbl->nexcl + extra);
        nbnxn_realloc_void((void **)&nbl->excl,
                           nbl->nexcl*sizeof(*nbl->excl),
                           nbl->excl_nalloc*sizeof(*nbl->excl),
                           nbl->alloc, nbl->free);
    }
}
/* Ensures there is enough space for maxNumExtraClusters extra j-clusters in the list */
static void check_cell_list_space_simple(nbnxn_pairlist_t *nbl,
                                         int               maxNumExtraClusters)
{
    int cj_max;

    cj_max = nbl->ncj + maxNumExtraClusters;

    if (cj_max > nbl->cj_nalloc)
    {
        nbl->cj_nalloc = over_alloc_small(cj_max);
        nbnxn_realloc_void((void **)&nbl->cj,
                           nbl->ncj*sizeof(*nbl->cj),
                           nbl->cj_nalloc*sizeof(*nbl->cj),
                           nbl->alloc, nbl->free);

        nbnxn_realloc_void((void **)&nbl->cjOuter,
                           nbl->ncj*sizeof(*nbl->cjOuter),
                           nbl->cj_nalloc*sizeof(*nbl->cjOuter),
                           nbl->alloc, nbl->free);
    }
}
/* Ensures there is enough space for ncell extra j-clusters in the list */
static void check_cell_list_space_supersub(nbnxn_pairlist_t *nbl,
                                           int               ncell)
{
    int ncj4_max;

    /* We can have maximally nsupercell*c_gpuNumClusterPerCell sj lists */
    /* We can store 4 j-subcell - i-supercell pairs in one struct.
     * Since we round down, we need one extra entry.
     */
    ncj4_max = ((nbl->work->cj_ind + ncell*c_gpuNumClusterPerCell + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize);

    if (ncj4_max > nbl->cj4_nalloc)
    {
        nbl->cj4_nalloc = over_alloc_small(ncj4_max);
        nbnxn_realloc_void((void **)&nbl->cj4,
                           nbl->work->cj4_init*sizeof(*nbl->cj4),
                           nbl->cj4_nalloc*sizeof(*nbl->cj4),
                           nbl->alloc, nbl->free);
    }

    if (ncj4_max > nbl->work->cj4_init)
    {
        for (int j4 = nbl->work->cj4_init; j4 < ncj4_max; j4++)
        {
            /* No i-subcells and no excl's in the list initially */
            for (int w = 0; w < c_nbnxnGpuClusterpairSplit; w++)
            {
                nbl->cj4[j4].imei[w].imask    = 0U;
                nbl->cj4[j4].imei[w].excl_ind = 0;
            }
        }
        nbl->work->cj4_init = ncj4_max;
    }
}
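/* Editor's note (worked example, not in the original source): the ncj4_max
 * expression above is plain round-up division. E.g. with cj_ind = 3,
 * ncell = 2, c_gpuNumClusterPerCell = 8 and c_nbnxnGpuJgroupSize = 4:
 * ncj4_max = (3 + 16 + 3)/4 = 5 cj4 entries cover the worst case.
 */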
/* Sets all excl masks for one GPU warp to no exclusions */
static void set_no_excls(nbnxn_excl_t *excl)
{
    for (int t = 0; t < c_nbnxnGpuExclSize; t++)
    {
        /* Turn all interaction bits on */
        excl->pair[t] = NBNXN_INTERACTION_MASK_ALL;
    }
}
/* Initializes a single nbnxn_pairlist_t data structure */
static void nbnxn_init_pairlist(nbnxn_pairlist_t *nbl,
                                gmx_bool          bSimple,
                                nbnxn_alloc_t    *alloc,
                                nbnxn_free_t     *free)
{
    if (alloc == nullptr)
    {
        nbl->alloc = nbnxn_alloc_aligned;
    }
    else
    {
        nbl->alloc = alloc;
    }
    if (free == nullptr)
    {
        nbl->free = nbnxn_free_aligned;
    }
    else
    {
        nbl->free = free;
    }

    nbl->bSimple     = bSimple;
    nbl->na_sc       = 0;
    nbl->na_ci       = 0;
    nbl->na_cj       = 0;
    nbl->nci         = 0;
    nbl->ci          = nullptr;
    nbl->ci_nalloc   = 0;
    nbl->nsci        = 0;
    nbl->sci         = nullptr;
    nbl->sci_nalloc  = 0;
    nbl->ncj         = 0;
    nbl->ncjInUse    = 0;
    nbl->cj          = nullptr;
    nbl->cj_nalloc   = 0;
    nbl->ncj4        = 0;
    /* We need one element extra in sj, so alloc initially with 1 */
    nbl->cj4_nalloc  = 0;
    nbl->cj4         = nullptr;
    nbl->nci_tot     = 0;

    if (!nbl->bSimple)
    {
        GMX_ASSERT(c_nbnxnGpuNumClusterPerSupercluster == c_gpuNumClusterPerCell, "The search code assumes that a super-cluster matches a search grid cell");

        GMX_ASSERT(sizeof(nbl->cj4[0].imei[0].imask)*8 >= c_nbnxnGpuJgroupSize*c_gpuNumClusterPerCell, "The i super-cluster cluster interaction mask does not contain a sufficient number of bits");
        GMX_ASSERT(sizeof(nbl->excl[0])*8 >= c_nbnxnGpuJgroupSize*c_gpuNumClusterPerCell, "The GPU exclusion mask does not contain a sufficient number of bits");

        nbl->excl        = nullptr;
        nbl->excl_nalloc = 0;
        nbl->nexcl       = 0;
        check_excl_space(nbl, 1);
        nbl->nexcl       = 1;
        set_no_excls(&nbl->excl[0]);
    }

    snew(nbl->work, 1);
    if (nbl->bSimple)
    {
        snew_aligned(nbl->work->bb_ci, 1, NBNXN_SEARCH_BB_MEM_ALIGN);
    }
    else
    {
#if NBNXN_BBXXXX
        snew_aligned(nbl->work->pbb_ci, c_gpuNumClusterPerCell/STRIDE_PBB*NNBSBB_XXXX, NBNXN_SEARCH_BB_MEM_ALIGN);
#else
        snew_aligned(nbl->work->bb_ci, c_gpuNumClusterPerCell, NBNXN_SEARCH_BB_MEM_ALIGN);
#endif
    }
    int gpu_clusterpair_nc = c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize*DIM;
    snew(nbl->work->x_ci, gpu_clusterpair_nc);
#if GMX_SIMD
    snew_aligned(nbl->work->x_ci_simd,
                 std::max(NBNXN_CPU_CLUSTER_I_SIZE*DIM*GMX_SIMD_REAL_WIDTH,
                          gpu_clusterpair_nc),
                 GMX_SIMD_REAL_WIDTH);
#endif
    snew_aligned(nbl->work->d2, c_gpuNumClusterPerCell, NBNXN_SEARCH_BB_MEM_ALIGN);

    nbl->work->sort            = nullptr;
    nbl->work->sort_nalloc     = 0;
    nbl->work->sci_sort        = nullptr;
    nbl->work->sci_sort_nalloc = 0;
}
void nbnxn_init_pairlist_set(nbnxn_pairlist_set_t *nbl_list,
                             gmx_bool bSimple, gmx_bool bCombined,
                             nbnxn_alloc_t *alloc,
                             nbnxn_free_t  *free)
{
    nbl_list->bSimple   = bSimple;
    nbl_list->bCombined = bCombined;

    nbl_list->nnbl      = gmx_omp_nthreads_get(emntNonbonded);

    if (!nbl_list->bCombined &&
        nbl_list->nnbl > NBNXN_BUFFERFLAG_MAX_THREADS)
    {
        gmx_fatal(FARGS, "%d OpenMP threads were requested. Since the non-bonded force buffer reduction is prohibitively slow with more than %d threads, we do not allow this. Use %d or less OpenMP threads.",
                  nbl_list->nnbl, NBNXN_BUFFERFLAG_MAX_THREADS, NBNXN_BUFFERFLAG_MAX_THREADS);
    }

    snew(nbl_list->nbl, nbl_list->nnbl);
    if (bSimple && nbl_list->nnbl > 1)
    {
        snew(nbl_list->nbl_work, nbl_list->nnbl);
    }
    snew(nbl_list->nbl_fep, nbl_list->nnbl);
    /* Execute in order to avoid memory interleaving between threads */
#pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
    for (int i = 0; i < nbl_list->nnbl; i++)
    {
        try
        {
            /* Allocate the nblist data structure locally on each thread
             * to optimize memory access for NUMA architectures.
             */
            snew(nbl_list->nbl[i], 1);

            /* Only list 0 is used on the GPU, use normal allocation for i>0 */
            if (!bSimple && i == 0)
            {
                nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, alloc, free);
            }
            else
            {
                nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, nullptr, nullptr);
                if (bSimple && nbl_list->nnbl > 1)
                {
                    snew(nbl_list->nbl_work[i], 1);
                    nbnxn_init_pairlist(nbl_list->nbl_work[i], nbl_list->bSimple, nullptr, nullptr);
                }
            }

            snew(nbl_list->nbl_fep[i], 1);
            nbnxn_init_pairlist_fep(nbl_list->nbl_fep[i]);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
}
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics_simple(FILE *fp, const nbnxn_pairlist_t *nbl,
                                           const nbnxn_search_t nbs, real rl)
{
    const nbnxn_grid_t *grid;
    int                 cs[SHIFTS];
    int                 npexcl;

    grid = &nbs->grid[0];

    fprintf(fp, "nbl nci %d ncj %d\n",
            nbl->nci, nbl->ncjInUse);
    fprintf(fp, "nbl na_sc %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_sc, rl, nbl->ncjInUse, nbl->ncjInUse/(double)grid->nc,
            nbl->ncjInUse/(double)grid->nc*grid->na_sc,
            nbl->ncjInUse/(double)grid->nc*grid->na_sc/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nc*grid->na_sc/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    fprintf(fp, "nbl average j cell list length %.1f\n",
            0.25*nbl->ncjInUse/(double)std::max(nbl->nci, 1));

    for (int s = 0; s < SHIFTS; s++)
    {
        cs[s] = 0;
    }
    npexcl = 0;
    for (int i = 0; i < nbl->nci; i++)
    {
        cs[nbl->ci[i].shift & NBNXN_CI_SHIFT] +=
            nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start;

        int j = nbl->ci[i].cj_ind_start;
        while (j < nbl->ci[i].cj_ind_end &&
               nbl->cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
        {
            npexcl++;
            j++;
        }
    }
    fprintf(fp, "nbl cell pairs, total: %d excl: %d %.1f%%\n",
            nbl->ncj, npexcl, 100*npexcl/(double)std::max(nbl->ncj, 1));
    for (int s = 0; s < SHIFTS; s++)
    {
        if (cs[s] > 0)
        {
            fprintf(fp, "nbl shift %2d ncj %3d\n", s, cs[s]);
        }
    }
}
/* Print statistics of pair lists, used for debug output */
static void print_nblist_statistics_supersub(FILE *fp, const nbnxn_pairlist_t *nbl,
                                             const nbnxn_search_t nbs, real rl)
{
    const nbnxn_grid_t *grid;
    int                 b;
    int                 c[c_gpuNumClusterPerCell + 1];
    double              sum_nsp, sum_nsp2;
    int                 nsp_max;

    /* This code only produces correct statistics with domain decomposition */
    grid = &nbs->grid[0];

    fprintf(fp, "nbl nsci %d ncj4 %d nsi %d excl4 %d\n",
            nbl->nsci, nbl->ncj4, nbl->nci_tot, nbl->nexcl);
    fprintf(fp, "nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_ci, rl, nbl->nci_tot, nbl->nci_tot/(double)grid->nsubc_tot,
            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c,
            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nsubc_tot*grid->na_c/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    sum_nsp  = 0;
    sum_nsp2 = 0;
    nsp_max  = 0;
    for (int si = 0; si <= c_gpuNumClusterPerCell; si++)
    {
        c[si] = 0;
    }
    for (int i = 0; i < nbl->nsci; i++)
    {
        int nsp = 0;
        for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
        {
            for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
            {
                b = 0;
                for (int si = 0; si < c_gpuNumClusterPerCell; si++)
                {
                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))
                    {
                        b++;
                    }
                }
                nsp += b;
                c[b]++;
            }
        }
        sum_nsp  += nsp;
        sum_nsp2 += nsp*nsp;
        nsp_max   = std::max(nsp_max, nsp);
    }
    if (nbl->nsci > 0)
    {
        sum_nsp  /= nbl->nsci;
        sum_nsp2 /= nbl->nsci;
    }
    fprintf(fp, "nbl #cluster-pairs: av %.1f stddev %.1f max %d\n",
            sum_nsp, std::sqrt(sum_nsp2 - sum_nsp*sum_nsp), nsp_max);

    if (nbl->ncj4 > 0)
    {
        for (b = 0; b <= c_gpuNumClusterPerCell; b++)
        {
            fprintf(fp, "nbl j-list #i-subcell %d %7d %4.1f\n",
                    b, c[b],
                    100.0*c[b]/(double)(nbl->ncj4*c_nbnxnGpuJgroupSize));
        }
    }
}
/* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp */
static void low_get_nbl_exclusions(nbnxn_pairlist_t *nbl, int cj4,
                                   int warp, nbnxn_excl_t **excl)
{
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
    {
        /* No exclusions set, make a new list entry */
        nbl->cj4[cj4].imei[warp].excl_ind = nbl->nexcl;
        nbl->nexcl++;
        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
        set_no_excls(*excl);
    }
    else
    {
        /* We already have some exclusions, new ones can be added to the list */
        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
    }
}
/* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp,
 * generates a new element and allocates extra memory, if necessary.
 */
static void get_nbl_exclusions_1(nbnxn_pairlist_t *nbl, int cj4,
                                 int warp, nbnxn_excl_t **excl)
{
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
    {
        /* We need to make a new list entry, check if we have space */
        check_excl_space(nbl, 1);
    }
    low_get_nbl_exclusions(nbl, cj4, warp, excl);
}
/* Returns pointers to the exclusion masks for cj4-unit cj4 for both warps,
 * generates a new element and allocates extra memory, if necessary.
 */
static void get_nbl_exclusions_2(nbnxn_pairlist_t *nbl, int cj4,
                                 nbnxn_excl_t **excl_w0,
                                 nbnxn_excl_t **excl_w1)
{
    /* Check for space we might need */
    check_excl_space(nbl, 2);

    low_get_nbl_exclusions(nbl, cj4, 0, excl_w0);
    low_get_nbl_exclusions(nbl, cj4, 1, excl_w1);
}
/* Sets the self exclusions i=j and pair exclusions i>j */
static void set_self_and_newton_excls_supersub(nbnxn_pairlist_t *nbl,
                                               int cj4_ind, int sj_offset,
                                               int i_cluster_in_cell)
{
    nbnxn_excl_t *excl[c_nbnxnGpuClusterpairSplit];

    /* Here we only set the self and double pair exclusions */

    static_assert(c_nbnxnGpuClusterpairSplit == 2, "");

    get_nbl_exclusions_2(nbl, cj4_ind, &excl[0], &excl[1]);

    /* Only minor < major bits set */
    for (int ej = 0; ej < nbl->na_ci; ej++)
    {
        int w = (ej >> 2);
        for (int ei = ej; ei < nbl->na_ci; ei++)
        {
            excl[w]->pair[(ej & (c_nbnxnGpuJgroupSize-1))*nbl->na_ci + ei] &=
                ~(1U << (sj_offset*c_gpuNumClusterPerCell + i_cluster_in_cell));
        }
    }
}
/* Returns a diagonal or off-diagonal interaction mask for plain C lists */
static unsigned int get_imask(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}
/* Returns a diagonal or off-diagonal interaction mask for cj-size=2 */
gmx_unused static unsigned int get_imask_simd_j2(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci*2 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_0 :
            (rdiag && ci*2+1 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_1 :
             NBNXN_INTERACTION_MASK_ALL));
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=4 */
gmx_unused static unsigned int get_imask_simd_j4(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=8 */
gmx_unused static unsigned int get_imask_simd_j8(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj*2 ? NBNXN_INTERACTION_MASK_DIAG_J8_0 :
            (rdiag && ci == cj*2+1 ? NBNXN_INTERACTION_MASK_DIAG_J8_1 :
             NBNXN_INTERACTION_MASK_ALL));
}
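/* Editor's note (not in the original source): with j-clusters half the
 * i-cluster size, each i-cluster overlaps two consecutive j-clusters, so two
 * diagonal masks (NBNXN_INTERACTION_MASK_DIAG_J2_0/1) are needed; with
 * j-clusters twice the i-cluster size, two i-clusters fall in one j-cluster
 * and the two half masks (NBNXN_INTERACTION_MASK_DIAG_J8_0/1) select the
 * correct half of the diagonal.
 */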
#if GMX_SIMD_REAL_WIDTH == 2
#define get_imask_simd_4xn  get_imask_simd_j2
#endif
#if GMX_SIMD_REAL_WIDTH == 4
#define get_imask_simd_4xn  get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 8
#define get_imask_simd_4xn  get_imask_simd_j8
#define get_imask_simd_2xnn get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 16
#define get_imask_simd_2xnn get_imask_simd_j8
#endif
/* Plain C code for checking and adding cluster-pairs to the list.
 *
 * \param[in]     gridj               The j-grid
 * \param[in,out] nbl                 The pair-list to store the cluster pairs in
 * \param[in]     icluster            The index of the i-cluster
 * \param[in]     jclusterFirst       The first cluster in the j-range
 * \param[in]     jclusterLast        The last cluster in the j-range
 * \param[in]     excludeSubDiagonal  Exclude atom pairs with i-index > j-index
 * \param[in]     x_j                 Coordinates for the j-atoms, in xyz format
 * \param[in]     rlist2              The squared list cut-off
 * \param[in]     rbb2                The squared cut-off for putting cluster-pairs in the list based on bounding box distance only
 * \param[in,out] numDistanceChecks   The number of distance checks performed
 */
static void
makeClusterListSimple(const nbnxn_grid_t *      gridj,
                      nbnxn_pairlist_t *        nbl,
                      int                       icluster,
                      int                       jclusterFirst,
                      int                       jclusterLast,
                      bool                      excludeSubDiagonal,
                      const real * gmx_restrict x_j,
                      real                      rlist2,
                      float                     rbb2,
                      int * gmx_restrict        numDistanceChecks)
{
    const nbnxn_bb_t * gmx_restrict bb_ci = nbl->work->bb_ci;
    const real * gmx_restrict       x_ci  = nbl->work->x_ci;

    gmx_bool                        InRange;

    InRange = FALSE;
    while (!InRange && jclusterFirst <= jclusterLast)
    {
        real d2 = subc_bb_dist2(0, bb_ci, jclusterFirst, gridj->bb);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rlist2)
        {
            int cjf_gl = gridj->cell0 + jclusterFirst;
            for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
            {
                for (int j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
                {
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rlist2);
                }
            }
            *numDistanceChecks += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
        }
        if (!InRange)
        {
            jclusterFirst++;
        }
    }
    if (!InRange)
    {
        return;
    }

    InRange = FALSE;
    while (!InRange && jclusterLast > jclusterFirst)
    {
        real d2 = subc_bb_dist2(0, bb_ci, jclusterLast, gridj->bb);
        *numDistanceChecks += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rlist2)
        {
            int cjl_gl = gridj->cell0 + jclusterLast;
            for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
            {
                for (int j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
                {
                    InRange = InRange ||
                        (gmx::square(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
                         gmx::square(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rlist2);
                }
            }
            *numDistanceChecks += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
        }
        if (!InRange)
        {
            jclusterLast--;
        }
    }

    if (jclusterFirst <= jclusterLast)
    {
        for (int jcluster = jclusterFirst; jcluster <= jclusterLast; jcluster++)
        {
            /* Store cj and the interaction mask */
            nbl->cj[nbl->ncj].cj   = gridj->cell0 + jcluster;
            nbl->cj[nbl->ncj].excl = get_imask(excludeSubDiagonal, icluster, jcluster);
            nbl->ncj++;
        }
        /* Increase the closing index in the i cluster list */
        nbl->ci[nbl->nci].cj_ind_end = nbl->ncj;
    }
}
#ifdef GMX_NBNXN_SIMD_4XN
#include "gromacs/mdlib/nbnxn_search_simd_4xn.h"
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
#include "gromacs/mdlib/nbnxn_search_simd_2xnn.h"
#endif
/* Plain C or SIMD4 code for making a pair list of super-cell sci vs scj.
 * Checks bounding box distances and possibly atom pair distances.
 */
static void make_cluster_list_supersub(const nbnxn_grid_t *gridi,
                                       const nbnxn_grid_t *gridj,
                                       nbnxn_pairlist_t   *nbl,
                                       int                 sci,
                                       int                 scj,
                                       gmx_bool            sci_equals_scj,
                                       int                 stride, const real *x,
                                       real                rlist2, float rbb2,
                                       int                *numDistanceChecks)
{
    nbnxn_list_work_t *work   = nbl->work;

#if NBNXN_BBXXXX
    const float       *pbb_ci = work->pbb_ci;
#else
    const nbnxn_bb_t  *bb_ci  = work->bb_ci;
#endif

    assert(c_nbnxnGpuClusterSize == gridi->na_c);
    assert(c_nbnxnGpuClusterSize == gridj->na_c);

    /* We generate the pairlist mainly based on bounding-box distances
     * and do atom pair distance based pruning on the GPU.
     * Only if a j-group contains a single cluster-pair, we try to prune
     * that pair based on atom distances on the CPU to avoid empty j-groups.
     */
#define PRUNE_LIST_CPU_ONE 1
#define PRUNE_LIST_CPU_ALL 0

#if PRUNE_LIST_CPU_ONE
    int    ci_last = -1;
#endif

    float *d2l = work->d2;

    for (int subc = 0; subc < gridj->nsubc[scj]; subc++)
    {
        int          cj4_ind   = nbl->work->cj_ind/c_nbnxnGpuJgroupSize;
        int          cj_offset = nbl->work->cj_ind - cj4_ind*c_nbnxnGpuJgroupSize;
        nbnxn_cj4_t *cj4       = &nbl->cj4[cj4_ind];

        int          cj        = scj*c_gpuNumClusterPerCell + subc;

        int          cj_gl     = gridj->cell0*c_gpuNumClusterPerCell + cj;

        /* Initialize this j-subcell i-subcell list */
        cj4->cj[cj_offset] = cj_gl;

        int ci1;
        if (sci_equals_scj)
        {
            ci1 = subc + 1;
        }
        else
        {
            ci1 = gridi->nsubc[sci];
        }

#if NBNXN_BBXXXX
        /* Determine all ci1 bb distances in one call with SIMD4 */
        subc_bb_dist2_simd4_xxxx(gridj->pbb.data() + (cj >> STRIDE_PBB_2LOG)*NNBSBB_XXXX + (cj & (STRIDE_PBB-1)),
                                 ci1, pbb_ci, d2l);
        *numDistanceChecks += c_nbnxnGpuClusterSize*2;
#endif

        int          npair = 0;
        unsigned int imask = 0;
        /* We use a fixed upper-bound instead of ci1 to help optimization */
        for (int ci = 0; ci < c_gpuNumClusterPerCell; ci++)
        {
            if (ci == ci1)
            {
                break;
            }

#if !NBNXN_BBXXXX
            /* Determine the bb distance between ci and cj */
            d2l[ci]             = subc_bb_dist2(ci, bb_ci, cj, gridj->bb);
            *numDistanceChecks += 2;
#endif
            float d2 = d2l[ci];

#if PRUNE_LIST_CPU_ALL
            /* Check if the distance is within the distance where
             * we use only the bounding box distance rbb,
             * or within the cut-off and there is at least one atom pair
             * within the cut-off. This check is very costly.
             */
            *numDistanceChecks += c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize;
            if (d2 < rbb2 ||
                (d2 < rlist2 &&
                 clusterpair_in_range(work, ci, cj_gl, stride, x, rlist2)))
#else
            /* Check if the distance between the two bounding boxes
             * is within the pair-list cut-off.
             */
            if (d2 < rlist2)
#endif
            {
                /* Flag this i-subcell to be taken into account */
                imask |= (1U << (cj_offset*c_gpuNumClusterPerCell + ci));

#if PRUNE_LIST_CPU_ONE
                ci_last = ci;
#endif

                npair++;
            }
        }

#if PRUNE_LIST_CPU_ONE
        /* If we only found 1 pair, check if any atoms are actually
         * within the cut-off, so we could get rid of it.
         */
        if (npair == 1 && d2l[ci_last] >= rbb2 &&
            !clusterpair_in_range(work, ci_last, cj_gl, stride, x, rlist2))
        {
            /* Remove the cluster pair from the list */
            imask &= ~(1U << (cj_offset*c_gpuNumClusterPerCell + ci_last));
            npair--;
        }
#endif

        if (npair > 0)
        {
            /* We have a useful sj entry, close it now */

            /* Set the exclusions for the ci==sj entry.
             * Here we don't bother to check if this entry is actually flagged,
             * as it will nearly always be in the list.
             */
            if (sci_equals_scj)
            {
                set_self_and_newton_excls_supersub(nbl, cj4_ind, cj_offset, subc);
            }

            /* Copy the cluster interaction mask to the list */
            for (int w = 0; w < c_nbnxnGpuClusterpairSplit; w++)
            {
                cj4->imei[w].imask |= imask;
            }

            nbl->work->cj_ind++;

            /* Keep the count */
            nbl->nci_tot += npair;

            /* Increase the closing index in i super-cell list */
            nbl->sci[nbl->nsci].cj4_ind_end =
                (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize;
        }
    }
}
/* Returns how many contiguous j-clusters we have starting in the i-list */
template <typename CjListType>
static int numContiguousJClusters(const int         cjIndexStart,
                                  const int         cjIndexEnd,
                                  const CjListType &cjList)
{
    const int firstJCluster = nblCj(cjList, cjIndexStart);

    int       numContiguous = 0;

    while (cjIndexStart + numContiguous < cjIndexEnd &&
           nblCj(cjList, cjIndexStart + numContiguous) == firstJCluster + numContiguous)
    {
        numContiguous++;
    }

    return numContiguous;
}
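/* Editor's note (worked example, not in the original source): for the
 * j-cluster sequence 7, 8, 9, 12, 13 starting at cjIndexStart, the first
 * three entries are contiguous (7, 8, 9), so the function returns 3.
 */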
/* Helper struct for efficient searching for excluded atoms in a j-list */
struct JListRanges
{
    /* Constructor */
    template <typename CjListType>
    JListRanges(int               cjIndexStart,
                int               cjIndexEnd,
                const CjListType &cjList);

    int cjIndexStart; // The start index in the j-list
    int cjIndexEnd;   // The end index in the j-list
    int cjFirst;      // The j-cluster with index cjIndexStart
    int cjLast;       // The j-cluster with index cjIndexEnd-1
    int numDirect;    // Up to cjIndexStart+numDirect the j-clusters are cjFirst + the index offset
};
template <typename CjListType>
JListRanges::JListRanges(int               cjIndexStart,
                         int               cjIndexEnd,
                         const CjListType &cjList) :
    cjIndexStart(cjIndexStart),
    cjIndexEnd(cjIndexEnd)
{
    GMX_ASSERT(cjIndexEnd > cjIndexStart, "JListRanges should only be called with non-empty lists");

    cjFirst   = nblCj(cjList, cjIndexStart);
    cjLast    = nblCj(cjList, cjIndexEnd - 1);

    /* Determine how many contiguous j-cells we have starting
     * from the first i-cell. This number can be used to directly
     * calculate j-cell indices for excluded atoms.
     */
    numDirect = numContiguousJClusters(cjIndexStart, cjIndexEnd, cjList);
}
/* Return the index of \p jCluster in the given range or -1 when not present
 *
 * Note: This code is executed very often and therefore performance is
 *       important. It should be inlined and fully optimized.
 */
template <typename CjListType>
static inline int findJClusterInJList(int                jCluster,
                                      const JListRanges &ranges,
                                      const CjListType  &cjList)
{
    int index;

    if (jCluster < ranges.cjFirst + ranges.numDirect)
    {
        /* We can calculate the index directly using the offset */
        index = ranges.cjIndexStart + jCluster - ranges.cjFirst;
    }
    else
    {
        /* Search for jCluster using bisection */
        index          = -1;
        int rangeStart = ranges.cjIndexStart + ranges.numDirect;
        int rangeEnd   = ranges.cjIndexEnd;
        int rangeMiddle;
        while (index == -1 && rangeStart < rangeEnd)
        {
            rangeMiddle = (rangeStart + rangeEnd) >> 1;

            const int clusterMiddle = nblCj(cjList, rangeMiddle);

            if (jCluster == clusterMiddle)
            {
                index = rangeMiddle;
            }
            else if (jCluster < clusterMiddle)
            {
                rangeEnd = rangeMiddle;
            }
            else
            {
                rangeStart = rangeMiddle + 1;
            }
        }
    }

    return index;
}
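/* Editor's note (worked example, not in the original source): for a list
 * with cjFirst = 10 and numDirect = 3, jCluster = 11 is found directly at
 * cjIndexStart + (11 - 10); any jCluster >= 13 falls back to bisection over
 * the remaining (sorted) part of the list.
 */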
/* Set all atom-pair exclusions for a simple type list i-entry
 *
 * Set all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for simple list entry iEntry.
 */
static void
setExclusionsForSimpleIentry(const nbnxn_search_t  nbs,
                             nbnxn_pairlist_t     *nbl,
                             gmx_bool              diagRemoved,
                             int                   na_cj_2log,
                             const nbnxn_ci_t     &iEntry,
                             const t_blocka       &exclusions)
{
    if (iEntry.cj_ind_end == iEntry.cj_ind_start)
    {
        /* Empty list: no exclusions */
        return;
    }

    const JListRanges        ranges(iEntry.cj_ind_start, iEntry.cj_ind_end, nbl->cj);

    const int                iCluster = iEntry.ci;

    gmx::ArrayRef<const int> cell = nbs->cell;

    /* Loop over the atoms in the i-cluster */
    for (int i = 0; i < nbl->na_sc; i++)
    {
        const int iIndex = iCluster*nbl->na_sc + i;
        const int iAtom  = nbs->a[iIndex];
        if (iAtom >= 0)
        {
            /* Loop over the topology-based exclusions for this i-atom */
            for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1]; exclIndex++)
            {
                const int jAtom = exclusions.a[exclIndex];

                if (jAtom == iAtom)
                {
                    /* The self exclusions are already set, save some time */
                    continue;
                }

                /* Get the index of the j-atom in the nbnxn atom data */
                const int jIndex = cell[jAtom];

                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                if (diagRemoved && jIndex <= iIndex)
                {
                    continue;
                }

                const int jCluster = (jIndex >> na_cj_2log);

                /* Could the cluster cj be in our list? */
                if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
                {
                    const int index =
                        findJClusterInJList(jCluster, ranges, nbl->cj);

                    if (index >= 0)
                    {
                        /* We found an exclusion, clear the corresponding
                         * interaction bit.
                         */
                        const int innerJ = jIndex - (jCluster << na_cj_2log);

                        nbl->cj[index].excl &= ~(1U << ((i << na_cj_2log) + innerJ));
                    }
                }
            }
        }
    }
}
/* Add a new i-entry to the FEP list and copy the i-properties */
static inline void fep_list_new_nri_copy(t_nblist *nlist)
{
    /* Add a new i-entry */
    nlist->nri++;

    assert(nlist->nri < nlist->maxnri);

    /* Duplicate the last i-entry, except for jindex, which continues */
    nlist->iinr[nlist->nri]   = nlist->iinr[nlist->nri-1];
    nlist->shift[nlist->nri]  = nlist->shift[nlist->nri-1];
    nlist->gid[nlist->nri]    = nlist->gid[nlist->nri-1];
    nlist->jindex[nlist->nri] = nlist->nrj;
}
/* For load balancing of the free-energy lists over threads, we set
 * the maximum nrj size of an i-entry to 40. This leads to good
 * load balancing in the worst-case scenario of a single perturbed
 * particle on 16 threads, while not introducing significant overhead.
 * Note that half of the perturbed pairs will anyhow end up in very small lists,
 * since non-perturbed i-particles will see few perturbed j-particles.
 */
const int max_nrj_fep = 40;
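/* Editor's note (not in the original source): a rough sizing argument for
 * the value 40: a single perturbed particle within a ~1 nm cut-off in water
 * sees on the order of a few hundred j-atoms, so capping each i-entry at
 * 40 j-atoms splits that work into enough entries to spread over ~16 threads.
 */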
/* Exclude the perturbed pairs from the Verlet list. This is only done to avoid
 * singularities for overlapping particles (0/0), since the charges and
 * LJ parameters have been zeroed in the nbnxn data structure.
 * Simultaneously make a group pair list for the perturbed pairs.
 */
static void make_fep_list(const nbnxn_search_t    nbs,
                          const nbnxn_atomdata_t *nbat,
                          nbnxn_pairlist_t       *nbl,
                          gmx_bool                bDiagRemoved,
                          nbnxn_ci_t             *nbl_ci,
                          const nbnxn_grid_t     *gridi,
                          const nbnxn_grid_t     *gridj,
                          t_nblist               *nlist)
{
    int      ci, cj_ind_start, cj_ind_end, cja, cjr;
    int      nri_max;
    int      ngid, gid_i = 0, gid_j, gid;
    int      egp_shift, egp_mask;
    int      gid_cj = 0;
    int      ind_i, ind_j, ai, aj;
    int      nri;
    gmx_bool bFEP_i, bFEP_i_all;

    if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
    {
        /* Empty list */
        return;
    }

    ci = nbl_ci->ci;

    cj_ind_start = nbl_ci->cj_ind_start;
    cj_ind_end   = nbl_ci->cj_ind_end;

    /* In worst case we have alternating energy groups
     * and create #atom-pair lists, which means we need the size
     * of a cluster pair (na_ci*na_cj) times the number of cj's.
     */
    nri_max = nbl->na_ci*nbl->na_cj*(cj_ind_end - cj_ind_start);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }

    ngid = nbat->nenergrp;

    if (static_cast<std::size_t>(ngid*gridj->na_cj) > sizeof(gid_cj)*8)
    {
        gmx_fatal(FARGS, "The Verlet scheme with %dx%d kernels and free-energy only supports up to %d energy groups",
                  gridi->na_c, gridj->na_cj, (sizeof(gid_cj)*8)/gridj->na_cj);
    }

    egp_shift = nbat->neg_2log;
    egp_mask  = (1<<nbat->neg_2log) - 1;

    /* Loop over the atoms in the i sub-cell */
    bFEP_i_all = TRUE;
    for (int i = 0; i < nbl->na_ci; i++)
    {
        ind_i = ci*nbl->na_ci + i;
        ai    = nbs->a[ind_i];
        if (ai >= 0)
        {
            nri                  = nlist->nri;
            nlist->jindex[nri+1] = nlist->jindex[nri];
            nlist->iinr[nri]     = ai;
            /* The actual energy group pair index is set later */
            nlist->gid[nri]      = 0;
            nlist->shift[nri]    = nbl_ci->shift & NBNXN_CI_SHIFT;

            bFEP_i = gridi->fep[ci - gridi->cell0] & (1 << i);

            bFEP_i_all = bFEP_i_all && bFEP_i;

            if (nlist->nrj + (cj_ind_end - cj_ind_start)*nbl->na_cj > nlist->maxnrj)
            {
                nlist->maxnrj = over_alloc_small(nlist->nrj + (cj_ind_end - cj_ind_start)*nbl->na_cj);
                srenew(nlist->jjnr,     nlist->maxnrj);
                srenew(nlist->excl_fep, nlist->maxnrj);
            }

            if (ngid > 1)
            {
                gid_i = (nbat->energrp[ci] >> (egp_shift*i)) & egp_mask;
            }

            for (int cj_ind = cj_ind_start; cj_ind < cj_ind_end; cj_ind++)
            {
                unsigned int fep_cj;

                cja = nbl->cj[cj_ind].cj;

                if (gridj->na_cj == gridj->na_c)
                {
                    cjr    = cja - gridj->cell0;
                    fep_cj = gridj->fep[cjr];
                    if (ngid > 1)
                    {
                        gid_cj = nbat->energrp[cja];
                    }
                }
                else if (2*gridj->na_cj == gridj->na_c)
                {
                    cjr    = cja - gridj->cell0*2;
                    /* Extract half of the ci fep/energrp mask */
                    fep_cj = (gridj->fep[cjr>>1] >> ((cjr&1)*gridj->na_cj)) & ((1<<gridj->na_cj) - 1);
                    if (ngid > 1)
                    {
                        gid_cj = nbat->energrp[cja>>1] >> ((cja&1)*gridj->na_cj*egp_shift) & ((1<<(gridj->na_cj*egp_shift)) - 1);
                    }
                }
                else
                {
                    cjr    = cja - (gridj->cell0>>1);
                    /* Combine two ci fep masks/energrp */
                    fep_cj = gridj->fep[cjr*2] + (gridj->fep[cjr*2+1] << gridj->na_c);
                    if (ngid > 1)
                    {
                        gid_cj = nbat->energrp[cja*2] + (nbat->energrp[cja*2+1] << (gridj->na_c*egp_shift));
                    }
                }

                if (bFEP_i || fep_cj != 0)
                {
                    for (int j = 0; j < nbl->na_cj; j++)
                    {
                        /* Is this interaction perturbed and not excluded? */
                        ind_j = cja*nbl->na_cj + j;
                        aj    = nbs->a[ind_j];
                        if (aj >= 0 &&
                            (bFEP_i || (fep_cj & (1 << j))) &&
                            (!bDiagRemoved || ind_j >= ind_i))
                        {
                            if (ngid > 1)
                            {
                                gid_j = (gid_cj >> (j*egp_shift)) & egp_mask;
                                gid   = GID(gid_i, gid_j, ngid);

                                if (nlist->nrj > nlist->jindex[nri] &&
                                    nlist->gid[nri] != gid)
                                {
                                    /* Energy group pair changed: new list */
                                    fep_list_new_nri_copy(nlist);
                                    nri = nlist->nri;
                                }
                                nlist->gid[nri] = gid;
                            }

                            if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                            {
                                fep_list_new_nri_copy(nlist);
                                nri = nlist->nri;
                            }

                            /* Add it to the FEP list */
                            nlist->jjnr[nlist->nrj]     = aj;
                            nlist->excl_fep[nlist->nrj] = (nbl->cj[cj_ind].excl >> (i*nbl->na_cj + j)) & 1;
                            nlist->nrj++;

                            /* Exclude it from the normal list.
                             * Note that the charge has been set to zero,
                             * but we need to avoid 0/0, as perturbed atoms
                             * can be on top of each other.
                             */
                            nbl->cj[cj_ind].excl &= ~(1U << (i*nbl->na_cj + j));
                        }
                    }
                }
            }

            if (nlist->nrj > nlist->jindex[nri])
            {
                /* Actually add this new, non-empty, list */
                nlist->nri++;
                nlist->jindex[nlist->nri] = nlist->nrj;
            }
        }
    }

    if (bFEP_i_all)
    {
        /* All interactions are perturbed, we can skip this entry */
        nbl_ci->cj_ind_end = cj_ind_start;
        nbl->ncjInUse     -= cj_ind_end - cj_ind_start;
    }
}
/* Return the index of a j-cluster within its cj4 group */
static inline int cj_mod_cj4(int cj)
{
    return cj & (c_nbnxnGpuJgroupSize - 1);
}

/* Convert a j-cluster index to a cj4 group index */
static inline int cj_to_cj4(int cj)
{
    return cj/c_nbnxnGpuJgroupSize;
}

/* Return the index of a j-atom within a warp */
static inline int a_mod_wj(int a)
{
    return a & (c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit - 1);
}
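/* Editor's note (worked example, not in the original source): with
 * c_nbnxnGpuClusterSize = 8 and c_nbnxnGpuClusterpairSplit = 2,
 * a_mod_wj(a) = a & 3, so j-atoms 0-3 and 4-7 map to slots 0-3 of their
 * respective warp halves.
 */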
/* As make_fep_list above, but for super/sub lists. */
static void make_fep_list_supersub(const nbnxn_search_t    nbs,
                                   const nbnxn_atomdata_t *nbat,
                                   nbnxn_pairlist_t       *nbl,
                                   gmx_bool                bDiagRemoved,
                                   const nbnxn_sci_t      *nbl_sci,
                                   real                    shx,
                                   real                    shy,
                                   real                    shz,
                                   real                    rlist_fep2,
                                   const nbnxn_grid_t     *gridi,
                                   const nbnxn_grid_t     *gridj,
                                   t_nblist               *nlist)
{
    int                sci, cj4_ind_start, cj4_ind_end, cjr;
    int                nri_max;
    int                c_abs;
    int                ind_i, ind_j, ai, aj;
    int                nri;
    gmx_bool           bFEP_i;
    real               xi, yi, zi;
    const nbnxn_cj4_t *cj4;

    if (nbl_sci->cj4_ind_end == nbl_sci->cj4_ind_start)
    {
        /* Empty list */
        return;
    }

    sci = nbl_sci->sci;

    cj4_ind_start = nbl_sci->cj4_ind_start;
    cj4_ind_end   = nbl_sci->cj4_ind_end;

    /* Here we process one super-cell, max #atoms na_sc, versus a list
     * cj4 entries, each with max c_nbnxnGpuJgroupSize cj's, each
     * of size na_cj atoms.
     * On the GPU we don't support energy groups (yet).
     * So for each of the na_sc i-atoms, we need max one FEP list
     * for each max_nrj_fep j-atoms.
     */
    nri_max = nbl->na_sc*nbl->na_cj*(1 + ((cj4_ind_end - cj4_ind_start)*c_nbnxnGpuJgroupSize)/max_nrj_fep);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }

    /* Loop over the atoms in the i super-cluster */
    for (int c = 0; c < c_gpuNumClusterPerCell; c++)
    {
        c_abs = sci*c_gpuNumClusterPerCell + c;

        for (int i = 0; i < nbl->na_ci; i++)
        {
            ind_i = c_abs*nbl->na_ci + i;
            ai    = nbs->a[ind_i];
            if (ai >= 0)
            {
                nri                  = nlist->nri;
                nlist->jindex[nri+1] = nlist->jindex[nri];
                nlist->iinr[nri]     = ai;
                /* With GPUs, energy groups are not supported */
                nlist->gid[nri]      = 0;
                nlist->shift[nri]    = nbl_sci->shift & NBNXN_CI_SHIFT;

                bFEP_i = (gridi->fep[c_abs - gridi->cell0*c_gpuNumClusterPerCell] & (1 << i));

                xi = nbat->x[ind_i*nbat->xstride+XX] + shx;
                yi = nbat->x[ind_i*nbat->xstride+YY] + shy;
                zi = nbat->x[ind_i*nbat->xstride+ZZ] + shz;

                if ((nlist->nrj + cj4_ind_end - cj4_ind_start)*c_nbnxnGpuJgroupSize*nbl->na_cj > nlist->maxnrj)
                {
                    nlist->maxnrj = over_alloc_small((nlist->nrj + cj4_ind_end - cj4_ind_start)*c_nbnxnGpuJgroupSize*nbl->na_cj);
                    srenew(nlist->jjnr,     nlist->maxnrj);
                    srenew(nlist->excl_fep, nlist->maxnrj);
                }

                for (int cj4_ind = cj4_ind_start; cj4_ind < cj4_ind_end; cj4_ind++)
                {
                    cj4 = &nbl->cj4[cj4_ind];

                    for (int gcj = 0; gcj < c_nbnxnGpuJgroupSize; gcj++)
                    {
                        unsigned int fep_cj;

                        if ((cj4->imei[0].imask & (1U << (gcj*c_gpuNumClusterPerCell + c))) == 0)
                        {
                            /* Skip this ci for this cj */
                            continue;
                        }

                        cjr = cj4->cj[gcj] - gridj->cell0*c_gpuNumClusterPerCell;

                        fep_cj = gridj->fep[cjr];

                        if (bFEP_i || fep_cj != 0)
                        {
                            for (int j = 0; j < nbl->na_cj; j++)
                            {
                                /* Is this interaction perturbed and not excluded? */
                                ind_j = (gridj->cell0*c_gpuNumClusterPerCell + cjr)*nbl->na_cj + j;
                                aj    = nbs->a[ind_j];
                                if (aj >= 0 &&
                                    (bFEP_i || (fep_cj & (1 << j))) &&
                                    (!bDiagRemoved || ind_j >= ind_i))
                                {
                                    nbnxn_excl_t *excl;
                                    int           excl_pair;
                                    unsigned int  excl_bit;
                                    real          dx, dy, dz;

                                    const int     jHalf = j/(c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit);
                                    get_nbl_exclusions_1(nbl, cj4_ind, jHalf, &excl);

                                    excl_pair = a_mod_wj(j)*nbl->na_ci + i;
                                    excl_bit  = (1U << (gcj*c_gpuNumClusterPerCell + c));

                                    dx = nbat->x[ind_j*nbat->xstride+XX] - xi;
                                    dy = nbat->x[ind_j*nbat->xstride+YY] - yi;
                                    dz = nbat->x[ind_j*nbat->xstride+ZZ] - zi;

                                    /* The unpruned GPU list has more than 2/3
                                     * of the atom pairs beyond rlist. Using
                                     * this list will cause a lot of overhead
                                     * in the CPU FEP kernels, especially
                                     * relative to the fast GPU kernels.
                                     * So we prune the FEP list here.
                                     */
                                    if (dx*dx + dy*dy + dz*dz < rlist_fep2)
                                    {
                                        if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                                        {
                                            fep_list_new_nri_copy(nlist);
                                            nri = nlist->nri;
                                        }

                                        /* Add it to the FEP list */
                                        nlist->jjnr[nlist->nrj]     = aj;
                                        nlist->excl_fep[nlist->nrj] = (excl->pair[excl_pair] & excl_bit) ? 1 : 0;
                                        nlist->nrj++;
                                    }

                                    /* Exclude it from the normal list.
                                     * Note that the charge and LJ parameters have
                                     * been set to zero, but we need to avoid 0/0,
                                     * as perturbed atoms can be on top of each other.
                                     */
                                    excl->pair[excl_pair] &= ~excl_bit;
                                }
                            }

                            /* Note that we could mask out this pair in imask
                             * if all i- and/or all j-particles are perturbed.
                             * But since the perturbed pairs on the CPU will
                             * take an order of magnitude more time, the GPU
                             * will finish before the CPU and there is no gain.
                             */
                        }
                    }
                }

                if (nlist->nrj > nlist->jindex[nri])
                {
                    /* Actually add this new, non-empty, list */
                    nlist->nri++;
                    nlist->jindex[nlist->nri] = nlist->nrj;
                }
            }
        }
    }
}
/* Set all atom-pair exclusions for a GPU type list i-entry
 *
 * Sets all atom-pair exclusions from the topology stored in exclusions
 * as masks in the pair-list for i-super-cluster list entry iEntry.
 */
static void
setExclusionsForGpuIentry(const nbnxn_search_t  nbs,
                          nbnxn_pairlist_t     *nbl,
                          gmx_bool              diagRemoved,
                          const nbnxn_sci_t    &iEntry,
                          const t_blocka       &exclusions)
{
    if (iEntry.cj4_ind_end == iEntry.cj4_ind_start)
    {
        /* Empty list: no exclusions */
        return;
    }

    /* Set the search ranges using start and end j-cluster indices.
     * Note that here we can not use cj4_ind_end, since the last cj4
     * can be only partially filled, so we use cj_ind.
     */
    const JListRanges ranges(iEntry.cj4_ind_start*c_nbnxnGpuJgroupSize,
                             nbl->work->cj_ind,
                             nbl->cj4);

    GMX_ASSERT(nbl->na_ci == c_nbnxnGpuClusterSize, "na_ci should match the GPU cluster size");
    constexpr int            c_clusterSize      = c_nbnxnGpuClusterSize;
    constexpr int            c_superClusterSize = c_nbnxnGpuNumClusterPerSupercluster*c_nbnxnGpuClusterSize;

    const int                iSuperCluster = iEntry.sci;

    gmx::ArrayRef<const int> cell = nbs->cell;

    /* Loop over the atoms in the i super-cluster */
    for (int i = 0; i < c_superClusterSize; i++)
    {
        const int iIndex = iSuperCluster*c_superClusterSize + i;
        const int iAtom  = nbs->a[iIndex];
        if (iAtom >= 0)
        {
            const int iCluster = i/c_clusterSize;

            /* Loop over the topology-based exclusions for this i-atom */
            for (int exclIndex = exclusions.index[iAtom]; exclIndex < exclusions.index[iAtom + 1]; exclIndex++)
            {
                const int jAtom = exclusions.a[exclIndex];

                if (jAtom == iAtom)
                {
                    /* The self exclusions are already set, save some time */
                    continue;
                }

                /* Get the index of the j-atom in the nbnxn atom data */
                const int jIndex = cell[jAtom];

                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                /* NOTE: We would like to use iIndex on the right hand side,
                 * but that makes this routine 25% slower with gcc6/7.
                 * Even using c_superClusterSize makes it slower.
                 * Either of these changes triggers peeling of the exclIndex
                 * loop, which apparently leads to far less efficient code.
                 */
                if (diagRemoved && jIndex <= iSuperCluster*nbl->na_sc + i)
                {
                    continue;
                }

                const int jCluster = jIndex/c_clusterSize;

                /* Check whether the cluster is in our list */
                if (jCluster >= ranges.cjFirst && jCluster <= ranges.cjLast)
                {
                    const int index =
                        findJClusterInJList(jCluster, ranges, nbl->cj4);

                    if (index >= 0)
                    {
                        /* We found an exclusion, clear the corresponding
                         * interaction bit.
                         */
                        const unsigned int pairMask = (1U << (cj_mod_cj4(index)*c_gpuNumClusterPerCell + iCluster));
                        /* Check if the i-cluster interacts with the j-cluster */
                        if (nbl_imask0(nbl, index) & pairMask)
                        {
                            const int innerI = (i      & (c_clusterSize - 1));
                            const int innerJ = (jIndex & (c_clusterSize - 1));

                            /* Determine which j-half (CUDA warp) we are in */
                            const int     jHalf = innerJ/(c_clusterSize/c_nbnxnGpuClusterpairSplit);

                            nbnxn_excl_t *interactionMask;
                            get_nbl_exclusions_1(nbl, cj_to_cj4(index), jHalf, &interactionMask);

                            interactionMask->pair[a_mod_wj(innerJ)*c_clusterSize + innerI] &= ~pairMask;
                        }
                    }
                }
            }
        }
    }
}
/* Reallocate the simple ci list for at least n entries */
static void nb_realloc_ci(nbnxn_pairlist_t *nbl, int n)
{
    nbl->ci_nalloc = over_alloc_small(n);
    nbnxn_realloc_void((void **)&nbl->ci,
                       nbl->nci*sizeof(*nbl->ci),
                       nbl->ci_nalloc*sizeof(*nbl->ci),
                       nbl->alloc, nbl->free);

    nbnxn_realloc_void((void **)&nbl->ciOuter,
                       nbl->nci*sizeof(*nbl->ciOuter),
                       nbl->ci_nalloc*sizeof(*nbl->ciOuter),
                       nbl->alloc, nbl->free);
}
/* Reallocate the super-cell sci list for at least n entries */
static void nb_realloc_sci(nbnxn_pairlist_t *nbl, int n)
{
    nbl->sci_nalloc = over_alloc_small(n);
    nbnxn_realloc_void((void **)&nbl->sci,
                       nbl->nsci*sizeof(*nbl->sci),
                       nbl->sci_nalloc*sizeof(*nbl->sci),
                       nbl->alloc, nbl->free);
}
/* Make a new ci entry at index nbl->nci */
static void new_ci_entry(nbnxn_pairlist_t *nbl, int ci, int shift, int flags)
{
    if (nbl->nci + 1 > nbl->ci_nalloc)
    {
        nb_realloc_ci(nbl, nbl->nci + 1);
    }
    nbl->ci[nbl->nci].ci           = ci;
    nbl->ci[nbl->nci].shift        = shift;
    /* Store the interaction flags along with the shift */
    nbl->ci[nbl->nci].shift       |= flags;
    nbl->ci[nbl->nci].cj_ind_start = nbl->ncj;
    nbl->ci[nbl->nci].cj_ind_end   = nbl->ncj;
}
/* Make a new sci entry at index nbl->nsci */
static void new_sci_entry(nbnxn_pairlist_t *nbl, int sci, int shift)
{
    if (nbl->nsci + 1 > nbl->sci_nalloc)
    {
        nb_realloc_sci(nbl, nbl->nsci + 1);
    }
    nbl->sci[nbl->nsci].sci           = sci;
    nbl->sci[nbl->nsci].shift         = shift;
    nbl->sci[nbl->nsci].cj4_ind_start = nbl->ncj4;
    nbl->sci[nbl->nsci].cj4_ind_end   = nbl->ncj4;
}
/* Sort the simple j-list cj on exclusions.
 * Entries with exclusions will all be sorted to the beginning of the list.
 */
static void sort_cj_excl(nbnxn_cj_t *cj, int ncj,
                         nbnxn_list_work_t *work)
{
    if (ncj > work->cj_nalloc)
    {
        work->cj_nalloc = over_alloc_large(ncj);
        srenew(work->cj, work->cj_nalloc);
    }

    /* Make a list of the j-cells involving exclusions */
    int jnew = 0;
    for (int j = 0; j < ncj; j++)
    {
        if (cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
        {
            work->cj[jnew++] = cj[j];
        }
    }
    /* Check if there are exclusions at all or not just the first entry */
    if (!((jnew == 0) ||
          (jnew == 1 && cj[0].excl != NBNXN_INTERACTION_MASK_ALL)))
    {
        for (int j = 0; j < ncj; j++)
        {
            if (cj[j].excl == NBNXN_INTERACTION_MASK_ALL)
            {
                work->cj[jnew++] = cj[j];
            }
        }
        for (int j = 0; j < ncj; j++)
        {
            cj[j] = work->cj[j];
        }
    }
}
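
/* The two passes above form a stable partition: entries with exclusions keep
 * their relative order and move to the front. A minimal equivalent sketch
 * using the standard library (illustration only; assumes <algorithm> is
 * included). The manual two-pass version above is kept because it reuses the
 * pre-allocated work buffer instead of allocating inside the algorithm.
 */
gmx_unused static void sortCjExclReference(nbnxn_cj_t *cj, int ncj)
{
    std::stable_partition(cj, cj + ncj,
                          [](const nbnxn_cj_t &e)
                          { return e.excl != NBNXN_INTERACTION_MASK_ALL; });
}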
/* Close this simple list i entry */
static void close_ci_entry_simple(nbnxn_pairlist_t *nbl)
{
    /* All content of the new ci entry has already been filled correctly,
     * we only need to increase the count here (for non-empty lists).
     */
    int jlen = nbl->ci[nbl->nci].cj_ind_end - nbl->ci[nbl->nci].cj_ind_start;
    if (jlen > 0)
    {
        sort_cj_excl(nbl->cj + nbl->ci[nbl->nci].cj_ind_start, jlen, nbl->work);

        /* The counts below are used for non-bonded pair/flop counts
         * and should therefore match the available kernel setups.
         */
        if (!(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_COUL(0)))
        {
            nbl->work->ncj_noq += jlen;
        }
        else if ((nbl->ci[nbl->nci].shift & NBNXN_CI_HALF_LJ(0)) ||
                 !(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_LJ(0)))
        {
            nbl->work->ncj_hlj += jlen;
        }

        nbl->nci++;
    }
}
/* Split sci entry for load balancing on the GPU.
 * Splitting ensures we have enough lists to fully utilize the whole GPU.
 * With progBal we generate progressively smaller lists, which improves
 * load balancing. As we only know the current count on our own thread,
 * we will need to estimate the current total amount of i-entries.
 * As the lists get concatenated later, this estimate depends
 * both on nthread and our own thread index.
 */
static void split_sci_entry(nbnxn_pairlist_t *nbl,
                            int nsp_target_av,
                            gmx_bool progBal, float nsp_tot_est,
                            int thread, int nthread)
{
    int nsp_max;
    int cj4_start, cj4_end, j4len;
    int nsp, nsp_sci, nsp_cj4, nsp_cj4_e, nsp_cj4_p;

    if (progBal)
    {
        float nsp_est;

        /* Estimate the total numbers of ci's of the nblist combined
         * over all threads using the target number of ci's.
         */
        nsp_est = (nsp_tot_est*thread)/nthread + nbl->nci_tot;

        /* The first ci blocks should be larger, to avoid overhead.
         * The last ci blocks should be smaller, to improve load balancing.
         * The factor 3/2 makes the first block 3/2 times the target average
         * and ensures that the total number of blocks end up equal to
         * that of equally sized blocks of size nsp_target_av.
         */
        nsp_max = static_cast<int>(nsp_target_av*(nsp_tot_est*1.5/(nsp_est + nsp_tot_est)));
    }
    else
    {
        nsp_max = nsp_target_av;
    }

    cj4_start = nbl->sci[nbl->nsci-1].cj4_ind_start;
    cj4_end   = nbl->sci[nbl->nsci-1].cj4_ind_end;
    j4len     = cj4_end - cj4_start;

    if (j4len > 1 && j4len*c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize > nsp_max)
    {
        /* Remove the last ci entry and process the cj4's again */
        nbl->nsci -= 1;

        int sci   = nbl->nsci;
        nsp       = 0;
        nsp_sci   = 0;
        nsp_cj4_e = 0;
        nsp_cj4   = 0;
        for (int cj4 = cj4_start; cj4 < cj4_end; cj4++)
        {
            nsp_cj4_p = nsp_cj4;
            /* Count the number of cluster pairs in this cj4 group */
            nsp_cj4   = 0;
            for (int p = 0; p < c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize; p++)
            {
                nsp_cj4 += (nbl->cj4[cj4].imei[0].imask >> p) & 1;
            }

            /* If adding the current cj4 with nsp_cj4 pairs gets us further
             * away from our target nsp_max, split the list before this cj4.
             */
            if (nsp > 0 && nsp_max - nsp < nsp + nsp_cj4 - nsp_max)
            {
                /* Split the list at cj4 */
                nbl->sci[sci].cj4_ind_end = cj4;
                /* Create a new sci entry */
                sci++;
                nbl->nsci++;
                if (nbl->nsci+1 > nbl->sci_nalloc)
                {
                    nb_realloc_sci(nbl, nbl->nsci+1);
                }
                nbl->sci[sci].sci           = nbl->sci[nbl->nsci-1].sci;
                nbl->sci[sci].shift         = nbl->sci[nbl->nsci-1].shift;
                nbl->sci[sci].cj4_ind_start = cj4;
                nsp_sci                     = nsp;
                nsp_cj4_e                   = nsp_cj4_p;
                nsp                         = 0;
            }
            nsp += nsp_cj4;
        }

        /* Put the remaining cj4's in the last sci entry */
        nbl->sci[sci].cj4_ind_end = cj4_end;

        /* Possibly balance out the last two sci's
         * by moving the last cj4 of the second last sci.
         */
        if (nsp_sci - nsp_cj4_e >= nsp + nsp_cj4_e)
        {
            nbl->sci[sci-1].cj4_ind_end--;
            nbl->sci[sci].cj4_ind_start--;
        }

        nbl->nsci++;
    }
}
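
/* Worked example of the progressive target above (illustration only; all
 * numbers here are assumed inputs, not values taken from a real run):
 * with nsp_target_av = 36, a thread at the start of the search
 * (nsp_est ~ 0) gets nsp_max = 36*1.5 = 54, while a thread near the end
 * (nsp_est ~ nsp_tot_est) gets nsp_max = 36*1.5/2 = 27, so blocks shrink
 * smoothly from 1.5x to 0.75x the target average.
 */
gmx_unused static int exampleProgressiveNspMax(float nsp_est)
{
    const int   nsp_target_av = 36;    /* assumed target average */
    const float nsp_tot_est   = 10000; /* assumed total pair estimate */

    return static_cast<int>(nsp_target_av*(nsp_tot_est*1.5/(nsp_est + nsp_tot_est)));
}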
/* Close this super/sub list i entry */
static void close_ci_entry_supersub(nbnxn_pairlist_t *nbl,
                                    int nsp_max_av,
                                    gmx_bool progBal, float nsp_tot_est,
                                    int thread, int nthread)
{
    /* All content of the new ci entry has already been filled correctly,
     * we only need to increase the count here (for non-empty lists).
     */
    int j4len = nbl->sci[nbl->nsci].cj4_ind_end - nbl->sci[nbl->nsci].cj4_ind_start;
    if (j4len > 0)
    {
        /* We can only have complete blocks of 4 j-entries in a list,
         * so round the count up before closing.
         */
        nbl->ncj4         = (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize;
        nbl->work->cj_ind = nbl->ncj4*c_nbnxnGpuJgroupSize;

        nbl->nsci++;

        if (nsp_max_av > 0)
        {
            /* Measure the size of the new entry and potentially split it */
            split_sci_entry(nbl, nsp_max_av, progBal, nsp_tot_est,
                            thread, nthread);
        }
    }
}
/* Syncs the working array before adding another grid pair to the list */
static void sync_work(nbnxn_pairlist_t *nbl)
{
    if (!nbl->bSimple)
    {
        nbl->work->cj_ind   = nbl->ncj4*c_nbnxnGpuJgroupSize;
        nbl->work->cj4_init = nbl->ncj4;
    }
}
/* Clears an nbnxn_pairlist_t data structure */
static void clear_pairlist(nbnxn_pairlist_t *nbl)
{
    nbl->nci           = 0;
    nbl->nsci          = 0;
    nbl->ncj           = 0;
    nbl->ncjInUse      = 0;
    nbl->ncj4          = 0;
    nbl->nci_tot       = 0;
    nbl->nciOuter      = -1;
    nbl->nexcl         = 1;

    nbl->work->ncj_noq = 0;
    nbl->work->ncj_hlj = 0;
}
/* Clears a group scheme pair list */
static void clear_pairlist_fep(t_nblist *nl)
{
    nl->nri = 0;
    nl->nrj = 0;
    if (nl->jindex == nullptr)
    {
        snew(nl->jindex, 1);
    }
    nl->jindex[0] = 0;
}
/* Sets a simple list i-cell bounding box, including PBC shift */
static inline void set_icell_bb_simple(gmx::ArrayRef<const nbnxn_bb_t> bb,
                                       int ci,
                                       real shx, real shy, real shz,
                                       nbnxn_bb_t *bb_ci)
{
    bb_ci->lower[BB_X] = bb[ci].lower[BB_X] + shx;
    bb_ci->lower[BB_Y] = bb[ci].lower[BB_Y] + shy;
    bb_ci->lower[BB_Z] = bb[ci].lower[BB_Z] + shz;
    bb_ci->upper[BB_X] = bb[ci].upper[BB_X] + shx;
    bb_ci->upper[BB_Y] = bb[ci].upper[BB_Y] + shy;
    bb_ci->upper[BB_Z] = bb[ci].upper[BB_Z] + shz;
}
/* Sets a super-cell and sub cell bounding boxes, including PBC shift */
static void set_icell_bbxxxx_supersub(gmx::ArrayRef<const float> bb,
                                      int ci,
                                      real shx, real shy, real shz,
                                      float *bb_ci)
{
    int ia = ci*(c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX;
    for (int m = 0; m < (c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX; m += NNBSBB_XXXX)
    {
        for (int i = 0; i < STRIDE_PBB; i++)
        {
            bb_ci[m+0*STRIDE_PBB+i] = bb[ia+m+0*STRIDE_PBB+i] + shx;
            bb_ci[m+1*STRIDE_PBB+i] = bb[ia+m+1*STRIDE_PBB+i] + shy;
            bb_ci[m+2*STRIDE_PBB+i] = bb[ia+m+2*STRIDE_PBB+i] + shz;
            bb_ci[m+3*STRIDE_PBB+i] = bb[ia+m+3*STRIDE_PBB+i] + shx;
            bb_ci[m+4*STRIDE_PBB+i] = bb[ia+m+4*STRIDE_PBB+i] + shy;
            bb_ci[m+5*STRIDE_PBB+i] = bb[ia+m+5*STRIDE_PBB+i] + shz;
        }
    }
}
/* Sets a super-cell and sub cell bounding boxes, including PBC shift */
gmx_unused static void set_icell_bb_supersub(gmx::ArrayRef<const nbnxn_bb_t> bb,
                                             int ci,
                                             real shx, real shy, real shz,
                                             nbnxn_bb_t *bb_ci)
{
    for (int i = 0; i < c_gpuNumClusterPerCell; i++)
    {
        set_icell_bb_simple(bb, ci*c_gpuNumClusterPerCell+i,
                            shx, shy, shz,
                            &bb_ci[i]);
    }
}
/* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
static void icell_set_x_simple(int ci,
                               real shx, real shy, real shz,
                               int stride, const real *x,
                               nbnxn_list_work_t *work)
{
    int ia = ci*NBNXN_CPU_CLUSTER_I_SIZE;

    for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE; i++)
    {
        work->x_ci[i*STRIDE_XYZ+XX] = x[(ia+i)*stride+XX] + shx;
        work->x_ci[i*STRIDE_XYZ+YY] = x[(ia+i)*stride+YY] + shy;
        work->x_ci[i*STRIDE_XYZ+ZZ] = x[(ia+i)*stride+ZZ] + shz;
    }
}
/* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
static void icell_set_x_supersub(int ci,
                                 real shx, real shy, real shz,
                                 int stride, const real *x,
                                 nbnxn_list_work_t *work)
{
#if !GMX_SIMD4_HAVE_REAL

    real * x_ci = work->x_ci;

    int    ia   = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize;
    for (int i = 0; i < c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize; i++)
    {
        x_ci[i*DIM + XX] = x[(ia+i)*stride + XX] + shx;
        x_ci[i*DIM + YY] = x[(ia+i)*stride + YY] + shy;
        x_ci[i*DIM + ZZ] = x[(ia+i)*stride + ZZ] + shz;
    }

#else /* !GMX_SIMD4_HAVE_REAL */

    real * x_ci = work->x_ci_simd;

    for (int si = 0; si < c_gpuNumClusterPerCell; si++)
    {
        for (int i = 0; i < c_nbnxnGpuClusterSize; i += GMX_SIMD4_WIDTH)
        {
            int io = si*c_nbnxnGpuClusterSize + i;
            int ia = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize + io;
            for (int j = 0; j < GMX_SIMD4_WIDTH; j++)
            {
                x_ci[io*DIM + j + XX*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + XX] + shx;
                x_ci[io*DIM + j + YY*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + YY] + shy;
                x_ci[io*DIM + j + ZZ*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + ZZ] + shz;
            }
        }
    }

#endif /* !GMX_SIMD4_HAVE_REAL */
}
static real minimum_subgrid_size_xy(const nbnxn_grid_t *grid)
{
    if (grid->bSimple)
    {
        return std::min(grid->cellSize[XX], grid->cellSize[YY]);
    }
    else
    {
        return std::min(grid->cellSize[XX]/c_gpuNumClusterPerCellX,
                        grid->cellSize[YY]/c_gpuNumClusterPerCellY);
    }
}
static real effective_buffer_1x1_vs_MxN(const nbnxn_grid_t *gridi,
                                        const nbnxn_grid_t *gridj)
{
    const real eff_1x1_buffer_fac_overest = 0.1;

    /* Determine an atom-pair list cut-off buffer size for atom pairs,
     * to be added to rlist (including buffer) used for MxN.
     * This is for converting an MxN list to a 1x1 list. This means we can't
     * use the normal buffer estimate, as we have an MxN list in which
     * some atom pairs beyond rlist are missing. We want to capture
     * the beneficial effect of buffering by extra pairs just outside rlist,
     * while removing the useless pairs that are further away from rlist.
     * (Also the buffer could have been set manually not using the estimate.)
     * This buffer size is an overestimate.
     * We add 10% of the smallest grid sub-cell dimensions.
     * Note that the z-size differs per cell and we don't use this,
     * so we overestimate.
     * With PME, the 10% value gives a buffer that is somewhat larger
     * than the effective buffer with a tolerance of 0.005 kJ/mol/ps.
     * Smaller tolerances or using RF lead to a smaller effective buffer,
     * so 10% gives a safe overestimate.
     */
    return eff_1x1_buffer_fac_overest*(minimum_subgrid_size_xy(gridi) +
                                       minimum_subgrid_size_xy(gridj));
}
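
/* Worked example (illustration only; the sub-cell sizes below are assumed
 * inputs): with x/y sub-cell dimensions of 0.3 nm on both grids, the extra
 * 1x1 buffer is 0.1*(0.3 + 0.3) = 0.06 nm on top of rlist.
 */
gmx_unused static real exampleEffective1x1Buffer()
{
    const real minSubGridSizeI = 0.3; /* nm, assumed i-grid sub-cell size */
    const real minSubGridSizeJ = 0.3; /* nm, assumed j-grid sub-cell size */

    return 0.1*(minSubGridSizeI + minSubGridSizeJ); /* = 0.06 nm */
}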
/* Clusters at the cut-off only increase rlist by 60% of their size */
static real nbnxn_rlist_inc_outside_fac = 0.6;

/* Due to the cluster size the effective pair-list is longer than
 * that of a simple atom pair-list. This function gives the extra distance.
 */
real nbnxn_get_rlist_effective_inc(int cluster_size_j, real atom_density)
{
    int  cluster_size_i;
    real vol_inc_i, vol_inc_j;

    /* We should get this from the setup, but currently it's the same for
     * all setups, including GPUs.
     */
    cluster_size_i = NBNXN_CPU_CLUSTER_I_SIZE;

    vol_inc_i = (cluster_size_i - 1)/atom_density;
    vol_inc_j = (cluster_size_j - 1)/atom_density;

    return nbnxn_rlist_inc_outside_fac*std::cbrt(vol_inc_i + vol_inc_j);
}
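
/* Worked example (illustration only; the density is an assumed input): at
 * roughly the atom density of water, ~100 atoms/nm^3, with i- and j-cluster
 * sizes of 4, the effective rlist increase is
 * 0.6*cbrt(0.03 + 0.03) ~= 0.235 nm.
 */
gmx_unused static real exampleRlistEffectiveInc()
{
    const real atomDensity = 100; /* atoms/nm^3, roughly water; assumed */

    return nbnxn_get_rlist_effective_inc(4, atomDensity);
}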
/* Estimates the interaction volume^2 for non-local interactions */
static real nonlocal_vol2(const struct gmx_domdec_zones_t *zones, rvec ls, real r)
{
    real vol2_est_tot = 0;

    /* Here we simply add up the volumes^2 of the 1, 2 or 3 1D-decomposition
     * non-home interaction zones. As these volumes are not additive,
     * this is an overestimate, but it would only be significant in the limit
     * of small cells, where we anyhow need to split the lists into
     * as small parts as possible.
     */
    for (int z = 0; z < zones->n; z++)
    {
        if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
        {
            real cl = 0;
            real ca = 1;
            real za = 1;
            for (int d = 0; d < DIM; d++)
            {
                if (zones->shift[z][d] == 0)
                {
                    cl += 0.5*ls[d];
                    ca *= ls[d];
                    za *= zones->size[z].x1[d] - zones->size[z].x0[d];
                }
            }

            /* 4 octants of a sphere */
            real vold_est  = 0.25*M_PI*r*r*r*r;
            /* 4 quarter pie slices on the edges */
            vold_est      += 4*cl*M_PI/6.0*r*r*r;
            /* One rectangular volume on a face */
            vold_est      += ca*0.5*r*r;

            vol2_est_tot  += vold_est*za;
        }
    }

    return vol2_est_tot;
}
/* Estimates the average size of a full j-list for super/sub setup */
static void get_nsubpair_target(const nbnxn_search_t nbs,
                                int iloc,
                                real rlist,
                                int min_ci_balanced,
                                int *nsubpair_target,
                                float *nsubpair_tot_est)
{
    /* The target value of 36 seems to be the optimum for Kepler.
     * Maxwell is less sensitive to the exact value.
     */
    const int           nsubpair_target_min = 36;
    const nbnxn_grid_t *grid;
    rvec                ls;
    real                r_eff_sup, vol_est, nsp_est, nsp_est_nl;

    grid = &nbs->grid[0];

    /* We don't need to balance list sizes if:
     * - We didn't request balancing.
     * - The number of grid cells >= the number of lists requested,
     *   since we will always generate at least #cells lists.
     * - We don't have any cells, since then there won't be any lists.
     */
    if (min_ci_balanced <= 0 || grid->nc >= min_ci_balanced || grid->nc == 0)
    {
        /* nsubpair_target==0 signals no balancing */
        *nsubpair_target  = 0;
        *nsubpair_tot_est = 0;

        return;
    }

    ls[XX] = (grid->c1[XX] - grid->c0[XX])/(grid->numCells[XX]*c_gpuNumClusterPerCellX);
    ls[YY] = (grid->c1[YY] - grid->c0[YY])/(grid->numCells[YY]*c_gpuNumClusterPerCellY);
    ls[ZZ] = grid->na_c/(grid->atom_density*ls[XX]*ls[YY]);

    /* The average length of the diagonal of a sub cell */
    real diagonal = std::sqrt(ls[XX]*ls[XX] + ls[YY]*ls[YY] + ls[ZZ]*ls[ZZ]);

    /* The formulas below are a heuristic estimate of the average nsj per si */
    r_eff_sup = rlist + nbnxn_rlist_inc_outside_fac*gmx::square((grid->na_c - 1.0)/grid->na_c)*0.5*diagonal;

    if (!nbs->DomDec || nbs->zones->n == 1)
    {
        nsp_est_nl = 0;
    }
    else
    {
        nsp_est_nl =
            gmx::square(grid->atom_density/grid->na_c)*
            nonlocal_vol2(nbs->zones, ls, r_eff_sup);
    }

    if (LOCAL_I(iloc))
    {
        /* Sub-cell interacts with itself */
        vol_est  = ls[XX]*ls[YY]*ls[ZZ];
        /* 6/2 rectangular volume on the faces */
        vol_est += (ls[XX]*ls[YY] + ls[XX]*ls[ZZ] + ls[YY]*ls[ZZ])*r_eff_sup;
        /* 12/2 quarter pie slices on the edges */
        vol_est += 2*(ls[XX] + ls[YY] + ls[ZZ])*0.25*M_PI*gmx::square(r_eff_sup);
        /* 4 octants of a sphere */
        vol_est += 0.5*4.0/3.0*M_PI*gmx::power3(r_eff_sup);

        /* Estimate the number of cluster pairs as the local number of
         * clusters times the volume they interact with times the density.
         */
        nsp_est = grid->nsubc_tot*vol_est*grid->atom_density/grid->na_c;

        /* Subtract the non-local pair count */
        nsp_est -= nsp_est_nl;

        /* For small cut-offs nsp_est will be an underestimate.
         * With DD nsp_est_nl is an overestimate so nsp_est can get negative.
         * So to avoid too small or negative nsp_est we set a minimum of
         * all cells interacting with all 3^3 direct neighbors (3^3-1)/2+1=14.
         * This might be a slight overestimate for small non-periodic groups of
         * atoms as will occur for a local domain with DD, but for small
         * groups of atoms we'll anyhow be limited by nsubpair_target_min,
         * so this overestimation will not matter.
         */
        nsp_est = std::max(nsp_est, grid->nsubc_tot*static_cast<real>(14));

        if (debug)
        {
            fprintf(debug, "nsp_est local %5.1f non-local %5.1f\n",
                    nsp_est, nsp_est_nl);
        }
    }
    else
    {
        nsp_est = nsp_est_nl;
    }

    /* Thus the (average) maximum j-list size should be as follows.
     * Since there is overhead, we shouldn't make the lists too small
     * (and we can't chop up j-groups) so we use a minimum target size of 36.
     */
    *nsubpair_target  = std::max(nsubpair_target_min,
                                 static_cast<int>(nsp_est/min_ci_balanced + 0.5));
    *nsubpair_tot_est = static_cast<int>(nsp_est);

    if (debug)
    {
        fprintf(debug, "nbl nsp estimate %.1f, nsubpair_target %d\n",
                nsp_est, *nsubpair_target);
    }
}
/* Debug list print function */
static void print_nblist_ci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
{
    for (int i = 0; i < nbl->nci; i++)
    {
        fprintf(fp, "ci %4d  shift %2d  ncj %3d\n",
                nbl->ci[i].ci, nbl->ci[i].shift,
                nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start);

        for (int j = nbl->ci[i].cj_ind_start; j < nbl->ci[i].cj_ind_end; j++)
        {
            fprintf(fp, "  cj %5d  imask %x\n",
                    nbl->cj[j].cj,
                    nbl->cj[j].excl);
        }
    }
}
/* Debug list print function */
static void print_nblist_sci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
{
    for (int i = 0; i < nbl->nsci; i++)
    {
        fprintf(fp, "ci %4d  shift %2d  ncj4 %2d\n",
                nbl->sci[i].sci, nbl->sci[i].shift,
                nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start);

        int ncp = 0;
        for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
        {
            for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
            {
                fprintf(fp, "  sj %5d  imask %x\n",
                        nbl->cj4[j4].cj[j],
                        nbl->cj4[j4].imei[0].imask);
                for (int si = 0; si < c_gpuNumClusterPerCell; si++)
                {
                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))
                    {
                        ncp++;
                    }
                }
            }
        }
        fprintf(fp, "ci %4d  shift %2d  ncj4 %2d ncp %3d\n",
                nbl->sci[i].sci, nbl->sci[i].shift,
                nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start,
                ncp);
    }
}
/* Combine the pair lists *nbl generated on multiple threads into nblc */
static void combine_nblists(int nnbl, nbnxn_pairlist_t **nbl,
                            nbnxn_pairlist_t *nblc)
{
    int nsci, ncj4, nexcl;

    if (nblc->bSimple)
    {
        gmx_incons("combine_nblists does not support simple lists");
    }

    nsci  = nblc->nsci;
    ncj4  = nblc->ncj4;
    nexcl = nblc->nexcl;
    for (int i = 0; i < nnbl; i++)
    {
        nsci  += nbl[i]->nsci;
        ncj4  += nbl[i]->ncj4;
        nexcl += nbl[i]->nexcl;
    }

    if (nsci > nblc->sci_nalloc)
    {
        nb_realloc_sci(nblc, nsci);
    }
    if (ncj4 > nblc->cj4_nalloc)
    {
        nblc->cj4_nalloc = over_alloc_small(ncj4);
        nbnxn_realloc_void((void **)&nblc->cj4,
                           nblc->ncj4*sizeof(*nblc->cj4),
                           nblc->cj4_nalloc*sizeof(*nblc->cj4),
                           nblc->alloc, nblc->free);
    }
    if (nexcl > nblc->excl_nalloc)
    {
        nblc->excl_nalloc = over_alloc_small(nexcl);
        nbnxn_realloc_void((void **)&nblc->excl,
                           nblc->nexcl*sizeof(*nblc->excl),
                           nblc->excl_nalloc*sizeof(*nblc->excl),
                           nblc->alloc, nblc->free);
    }

    /* Each thread should copy its own data to the combined arrays,
     * as otherwise data will go back and forth between different caches.
     */
#if GMX_OPENMP && !(defined __clang_analyzer__)
    // cppcheck-suppress unreadVariable
    int nthreads = gmx_omp_nthreads_get(emntPairsearch);
#endif

#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int n = 0; n < nnbl; n++)
    {
        try
        {
            int                     sci_offset;
            int                     cj4_offset;
            int                     excl_offset;
            const nbnxn_pairlist_t *nbli;

            /* Determine the offset in the combined data for our thread */
            sci_offset  = nblc->nsci;
            cj4_offset  = nblc->ncj4;
            excl_offset = nblc->nexcl;

            for (int i = 0; i < n; i++)
            {
                sci_offset  += nbl[i]->nsci;
                cj4_offset  += nbl[i]->ncj4;
                excl_offset += nbl[i]->nexcl;
            }

            nbli = nbl[n];

            for (int i = 0; i < nbli->nsci; i++)
            {
                nblc->sci[sci_offset+i]                = nbli->sci[i];
                nblc->sci[sci_offset+i].cj4_ind_start += cj4_offset;
                nblc->sci[sci_offset+i].cj4_ind_end   += cj4_offset;
            }

            for (int j4 = 0; j4 < nbli->ncj4; j4++)
            {
                nblc->cj4[cj4_offset+j4]                   = nbli->cj4[j4];
                nblc->cj4[cj4_offset+j4].imei[0].excl_ind += excl_offset;
                nblc->cj4[cj4_offset+j4].imei[1].excl_ind += excl_offset;
            }

            for (int j4 = 0; j4 < nbli->nexcl; j4++)
            {
                nblc->excl[excl_offset+j4] = nbli->excl[j4];
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    for (int n = 0; n < nnbl; n++)
    {
        nblc->nsci    += nbl[n]->nsci;
        nblc->ncj4    += nbl[n]->ncj4;
        nblc->nci_tot += nbl[n]->nci_tot;
        nblc->nexcl   += nbl[n]->nexcl;
    }
}
static void balance_fep_lists(const nbnxn_search_t nbs,
                              nbnxn_pairlist_set_t *nbl_lists)
{
    int       nnbl;
    int       nri_tot, nrj_tot, nrj_target;
    int       th_dest;
    t_nblist *nbld;

    nnbl = nbl_lists->nnbl;

    if (nnbl == 1)
    {
        /* Nothing to balance */
        return;
    }

    /* Count the total i-lists and pairs */
    nri_tot = 0;
    nrj_tot = 0;
    for (int th = 0; th < nnbl; th++)
    {
        nri_tot += nbl_lists->nbl_fep[th]->nri;
        nrj_tot += nbl_lists->nbl_fep[th]->nrj;
    }

    nrj_target = (nrj_tot + nnbl - 1)/nnbl;

    assert(gmx_omp_nthreads_get(emntNonbonded) == nnbl);

#pragma omp parallel for schedule(static) num_threads(nnbl)
    for (int th = 0; th < nnbl; th++)
    {
        try
        {
            t_nblist *nbl = nbs->work[th].nbl_fep.get();

            /* Note that here we allocate for the total size, instead of
             * a per-thread estimate (which is hard to obtain).
             */
            if (nri_tot > nbl->maxnri)
            {
                nbl->maxnri = over_alloc_large(nri_tot);
                reallocate_nblist(nbl);
            }
            if (nri_tot > nbl->maxnri || nrj_tot > nbl->maxnrj)
            {
                nbl->maxnrj = over_alloc_small(nrj_tot);
                srenew(nbl->jjnr, nbl->maxnrj);
                srenew(nbl->excl_fep, nbl->maxnrj);
            }

            clear_pairlist_fep(nbl);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    /* Loop over the source lists and assign and copy i-entries */
    th_dest = 0;
    nbld    = nbs->work[th_dest].nbl_fep.get();
    for (int th = 0; th < nnbl; th++)
    {
        t_nblist *nbls;

        nbls = nbl_lists->nbl_fep[th];

        for (int i = 0; i < nbls->nri; i++)
        {
            int nrj;

            /* The number of pairs in this i-entry */
            nrj = nbls->jindex[i+1] - nbls->jindex[i];

            /* Decide if list th_dest is too large and we should proceed
             * to the next destination list.
             */
            if (th_dest+1 < nnbl && nbld->nrj > 0 &&
                nbld->nrj + nrj - nrj_target > nrj_target - nbld->nrj)
            {
                th_dest++;
                nbld = nbs->work[th_dest].nbl_fep.get();
            }

            nbld->iinr[nbld->nri]  = nbls->iinr[i];
            nbld->gid[nbld->nri]   = nbls->gid[i];
            nbld->shift[nbld->nri] = nbls->shift[i];

            for (int j = nbls->jindex[i]; j < nbls->jindex[i+1]; j++)
            {
                nbld->jjnr[nbld->nrj]     = nbls->jjnr[j];
                nbld->excl_fep[nbld->nrj] = nbls->excl_fep[j];
                nbld->nrj++;
            }
            nbld->nri++;
            nbld->jindex[nbld->nri] = nbld->nrj;
        }
    }

    /* Swap the list pointers */
    for (int th = 0; th < nnbl; th++)
    {
        t_nblist *nbl_tmp = nbs->work[th].nbl_fep.release();
        nbs->work[th].nbl_fep.reset(nbl_lists->nbl_fep[th]);
        nbl_lists->nbl_fep[th] = nbl_tmp;

        if (debug)
        {
            fprintf(debug, "nbl_fep[%d] nri %4d nrj %4d\n",
                    th,
                    nbl_lists->nbl_fep[th]->nri,
                    nbl_lists->nbl_fep[th]->nrj);
        }
    }
}
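
/* Worked example of the per-list target above (illustration only, assumed
 * numbers): with nrj_tot = 1003 pairs over nnbl = 4 lists, the ceiling
 * division (1003 + 4 - 1)/4 = 251 guarantees 4*251 >= 1003, so every pair
 * has a destination list.
 */
gmx_unused static int exampleFepNrjTarget()
{
    const int nrj_tot = 1003; /* assumed total pair count */
    const int nnbl    = 4;    /* assumed number of lists */

    return (nrj_tot + nnbl - 1)/nnbl;
}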
/* Returns the next ci to be processed by our thread */
static gmx_bool next_ci(const nbnxn_grid_t *grid,
                        int nth, int ci_block,
                        int *ci_x, int *ci_y,
                        int *ci_b, int *ci)
{
    (*ci_b)++;
    (*ci)++;

    if (*ci_b == ci_block)
    {
        /* Jump to the next block assigned to this task */
        *ci   += (nth - 1)*ci_block;
        *ci_b  = 0;
    }

    if (*ci >= grid->nc)
    {
        return FALSE;
    }

    while (*ci >= grid->cxy_ind[*ci_x*grid->numCells[YY] + *ci_y + 1])
    {
        *ci_y += 1;
        if (*ci_y == grid->numCells[YY])
        {
            *ci_x += 1;
            *ci_y  = 0;
        }
    }

    return TRUE;
}
/* Returns the distance^2 for which we put cell pairs in the list
 * without checking atom pair distances. This is usually < rlist^2.
 */
static float boundingbox_only_distance2(const nbnxn_grid_t *gridi,
                                        const nbnxn_grid_t *gridj,
                                        real rlist,
                                        gmx_bool simple)
{
    /* If the distance between two sub-cell bounding boxes is less
     * than this distance, do not check the distance between
     * all particle pairs in the sub-cell, since then it is likely
     * that the box pair has atom pairs within the cut-off.
     * We use the nblist cut-off minus 0.5 times the average x/y diagonal
     * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
     * Using more than 0.5 gains at most 0.5%.
     * If forces are calculated more than twice, the performance gain
     * in the force calculation outweighs the cost of checking.
     * Note that with sub-cell lists, the atom-pair distance check
     * is only performed when only 1 out of 8 sub-cells is within range,
     * this is because the GPU is much faster than the CPU.
     */
    real bbx, bby;
    real rbb2;

    bbx = 0.5*(gridi->cellSize[XX] + gridj->cellSize[XX]);
    bby = 0.5*(gridi->cellSize[YY] + gridj->cellSize[YY]);
    if (!simple)
    {
        bbx /= c_gpuNumClusterPerCellX;
        bby /= c_gpuNumClusterPerCellY;
    }

    rbb2 = std::max(0.0, rlist - 0.5*std::sqrt(bbx*bbx + bby*bby));
    rbb2 = rbb2 * rbb2;

    return (float)((1+GMX_FLOAT_EPS)*rbb2);
}
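
/* Worked example (illustration only, assumed inputs): with rlist = 1.0 nm
 * and average sub-cell x/y sizes of 0.3 nm, the bounding-box-only distance
 * is (1.0 - 0.5*sqrt(0.3^2 + 0.3^2))^2 ~= 0.62 nm^2, well below
 * rlist^2 = 1.0 nm^2, so only box pairs closer than this skip the
 * per-atom-pair distance check.
 */
gmx_unused static float exampleBoundingBoxOnlyDistance2()
{
    const real rlist = 1.0; /* nm, assumed cut-off including buffer */
    const real bbx   = 0.3; /* nm, assumed sub-cell x size */
    const real bby   = 0.3; /* nm, assumed sub-cell y size */

    real       rbb   = std::max(0.0, rlist - 0.5*std::sqrt(bbx*bbx + bby*bby));

    return (float)((1 + GMX_FLOAT_EPS)*rbb*rbb);
}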
static int get_ci_block_size(const nbnxn_grid_t *gridi,
                             gmx_bool bDomDec, int nth)
{
    const int ci_block_enum      = 5;
    const int ci_block_denom     = 11;
    const int ci_block_min_atoms = 16;
    int       ci_block;

    /* Here we decide how to distribute the blocks over the threads.
     * We use prime numbers to try to avoid that the grid size becomes
     * a multiple of the number of threads, which would lead to some
     * threads getting "inner" pairs and others getting boundary pairs,
     * which in turn will lead to load imbalance between threads.
     * Set the block size as 5/11/ntask times the average number of cells
     * in a y,z slab. This should ensure a quite uniform distribution
     * of the grid parts of the different thread along all three grid
     * zone boundaries with 3D domain decomposition. At the same time
     * the blocks will not become too small.
     */
    ci_block = (gridi->nc*ci_block_enum)/(ci_block_denom*gridi->numCells[XX]*nth);

    /* Ensure the blocks are not too small: avoids cache invalidation */
    if (ci_block*gridi->na_sc < ci_block_min_atoms)
    {
        ci_block = (ci_block_min_atoms + gridi->na_sc - 1)/gridi->na_sc;
    }

    /* Without domain decomposition
     * or with less than 3 blocks per task, divide in nth blocks.
     */
    if (!bDomDec || nth*3*ci_block > gridi->nc)
    {
        ci_block = (gridi->nc + nth - 1)/nth;
    }

    if (ci_block > 1 && (nth - 1)*ci_block >= gridi->nc)
    {
        /* Some threads have no work. Although reducing the block size
         * does not decrease the block count on the first few threads,
         * with GPUs better mixing of "upper" cells that have more empty
         * clusters results in a somewhat lower max load over all threads.
         * Without GPUs the regime of so few atoms per thread is less
         * performance relevant, but with 8-wide SIMD the same reasoning
         * applies, since the pair list uses 4 i-atom "sub-clusters".
         */
        ci_block /= 2;
    }

    return ci_block;
}
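
/* Worked example (illustration only; the grid dimensions are assumed
 * inputs): a grid of nc = 4400 cells with numCells[XX] = 20 and nth = 8
 * threads gives ci_block = (4400*5)/(11*20*8) = 12 cells per block, i.e.
 * each thread picks up a block of 12 consecutive cells, then skips the
 * 7*12 cells assigned to the other threads.
 */
gmx_unused static int exampleCiBlockSize()
{
    const int nc        = 4400; /* assumed cell count */
    const int numCellsX = 20;   /* assumed x dimension of the grid */
    const int nth       = 8;    /* assumed thread count */

    return (nc*5)/(11*numCellsX*nth);
}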
/* Returns the number of bits to right-shift a cluster index to obtain
 * the corresponding force buffer flag index.
 */
static int getBufferFlagShift(int numAtomsPerCluster)
{
    int bufferFlagShift = 0;
    while ((numAtomsPerCluster << bufferFlagShift) < NBNXN_BUFFERFLAG_SIZE)
    {
        bufferFlagShift++;
    }

    return bufferFlagShift;
}
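
/* Worked example (illustration only; the cluster size is an assumed input):
 * with 4 atoms per cluster, the loop doubles the covered atom count until
 * it reaches NBNXN_BUFFERFLAG_SIZE; for a flag block of, say, 16 atoms it
 * returns a shift of 2, so cluster indices 0-3 all map onto flag block 0.
 */
gmx_unused static int exampleBufferFlagShift()
{
    return getBufferFlagShift(4); /* 4 = assumed atoms per cluster */
}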
/* Generates the part of pair-list nbl assigned to our thread */
static void nbnxn_make_pairlist_part(const nbnxn_search_t nbs,
                                     const nbnxn_grid_t *gridi,
                                     const nbnxn_grid_t *gridj,
                                     nbnxn_search_work_t *work,
                                     const nbnxn_atomdata_t *nbat,
                                     const t_blocka &exclusions,
                                     real rlist,
                                     int nb_kernel_type,
                                     int ci_block,
                                     gmx_bool bFBufferFlag,
                                     int nsubpair_max,
                                     gmx_bool progBal,
                                     float nsubpair_tot_est,
                                     int th, int nth,
                                     nbnxn_pairlist_t *nbl,
                                     t_nblist *nbl_fep)
{
    matrix         box;
    ivec           shp;
    int            na_cj_2log;
    int            shift;
    real           shx, shy, shz;
    real           rlist2, rl_fep2 = 0;
    float          rbb2;
    int            ci_b, ci, ci_x, ci_y, ci_xy, cj;
    real           bx0, bx1, by0, by1, bz0, bz1;
    real           bz1_frac;
    real           d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
    int            cxf, cxl, cyf, cyf_x, cyl;
    int            numDistanceChecks;
    int            gridi_flag_shift = 0, gridj_flag_shift = 0;
    gmx_bitmask_t *gridj_flag       = nullptr;
    int            ncj_old_i, ncj_old_j;

    nbs_cycle_start(&work->cc[enbsCCsearch]);

    if (gridj->bSimple != nbl->bSimple)
    {
        gmx_incons("Grid incompatible with pair-list");
    }

    sync_work(nbl);
    nbl->na_sc = gridj->na_sc;
    nbl->na_ci = gridj->na_c;
    nbl->na_cj = nbnxn_kernel_to_cluster_j_size(nb_kernel_type);
    na_cj_2log = get_2log(nbl->na_cj);

    nbl->rlist = rlist;

    if (bFBufferFlag)
    {
        /* Determine conversion of clusters to flag blocks */
        gridi_flag_shift = getBufferFlagShift(nbl->na_ci);
        gridj_flag_shift = getBufferFlagShift(nbl->na_cj);

        gridj_flag       = work->buffer_flags.flag;
    }

    copy_mat(nbs->box, box);

    rlist2 = nbl->rlist*nbl->rlist;

    if (nbs->bFEP && !nbl->bSimple)
    {
        /* Determine an atom-pair list cut-off distance for FEP atom pairs.
         * We should not simply use rlist, since then we would not have
         * the small, effective buffering of the NxN lists.
         * The buffer is an overestimate, but the resulting cost for pairs
         * beyond rlist is negligible compared to the FEP pairs within rlist.
         */
        rl_fep2 = nbl->rlist + effective_buffer_1x1_vs_MxN(gridi, gridj);

        if (debug)
        {
            fprintf(debug, "nbl_fep atom-pair rlist %f\n", rl_fep2);
        }
        rl_fep2 = rl_fep2*rl_fep2;
    }

    rbb2 = boundingbox_only_distance2(gridi, gridj, nbl->rlist, nbl->bSimple);

    if (debug)
    {
        fprintf(debug, "nbl bounding box only distance %f\n", std::sqrt(rbb2));
    }

    /* Set the shift range */
    for (int d = 0; d < DIM; d++)
    {
        /* Check if we need periodicity shifts.
         * Without PBC or with domain decomposition we don't need them.
         */
        if (d >= ePBC2npbcdim(nbs->ePBC) || nbs->dd_dim[d])
        {
            shp[d] = 0;
        }
        else
        {
            if (d == XX &&
                box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < std::sqrt(rlist2))
            {
                shp[d] = 2;
            }
            else
            {
                shp[d] = 1;
            }
        }
    }

    gmx::ArrayRef<const nbnxn_bb_t> bb_i;
#if NBNXN_BBXXXX
    gmx::ArrayRef<const float>      pbb_i;
    if (gridi->bSimple)
    {
        bb_i  = gridi->bb;
    }
    else
    {
        pbb_i = gridi->pbb;
    }
#else
    /* We use the normal bounding box format for both grid types */
    bb_i  = gridi->bb;
#endif
    gmx::ArrayRef<const float> bbcz_i  = gridi->bbcz;
    gmx::ArrayRef<const int>   flags_i = gridi->flags;
    gmx::ArrayRef<const float> bbcz_j  = gridj->bbcz;
    int                        cell0_i = gridi->cell0;

    if (debug)
    {
        fprintf(debug, "nbl nc_i %d col.av. %.1f ci_block %d\n",
                gridi->nc, gridi->nc/(double)(gridi->numCells[XX]*gridi->numCells[YY]), ci_block);
    }

    numDistanceChecks = 0;

    /* Set ci_b and ci to 1 before where we want them to start,
     * as they will both be incremented in next_ci.
     */
    ci_b = -1;
    ci   = th*ci_block - 1;
    ci_x = 0;
    ci_y = 0;
    while (next_ci(gridi, nth, ci_block, &ci_x, &ci_y, &ci_b, &ci))
    {
        if (nbl->bSimple && flags_i[ci] == 0)
        {
            continue;
        }

        ncj_old_i = nbl->ncj;

        d2cx = 0;
        if (gridj != gridi && shp[XX] == 0)
        {
            if (nbl->bSimple)
            {
                bx1 = bb_i[ci].upper[BB_X];
            }
            else
            {
                bx1 = gridi->c0[XX] + (ci_x+1)*gridi->cellSize[XX];
            }
            if (bx1 < gridj->c0[XX])
            {
                d2cx = gmx::square(gridj->c0[XX] - bx1);

                if (d2cx >= rlist2)
                {
                    continue;
                }
            }
        }

        ci_xy = ci_x*gridi->numCells[YY] + ci_y;

        /* Loop over shift vectors in three dimensions */
        for (int tz = -shp[ZZ]; tz <= shp[ZZ]; tz++)
        {
            shz = tz*box[ZZ][ZZ];

            bz0 = bbcz_i[ci*NNBSBB_D  ] + shz;
            bz1 = bbcz_i[ci*NNBSBB_D+1] + shz;

            if (tz == 0)
            {
                d2z = 0;
            }
            else if (tz < 0)
            {
                d2z = gmx::square(bz1);
            }
            else
            {
                d2z = gmx::square(bz0 - box[ZZ][ZZ]);
            }

            d2z_cx = d2z + d2cx;

            if (d2z_cx >= rlist2)
            {
                continue;
            }

            bz1_frac = bz1/(gridi->cxy_ind[ci_xy+1] - gridi->cxy_ind[ci_xy]);
            if (bz1_frac < 0)
            {
                bz1_frac = 0;
            }
            /* The check with bz1_frac close to or larger than 1 comes later */

            for (int ty = -shp[YY]; ty <= shp[YY]; ty++)
            {
                shy = ty*box[YY][YY] + tz*box[ZZ][YY];

                if (nbl->bSimple)
                {
                    by0 = bb_i[ci].lower[BB_Y] + shy;
                    by1 = bb_i[ci].upper[BB_Y] + shy;
                }
                else
                {
                    by0 = gridi->c0[YY] + (ci_y  )*gridi->cellSize[YY] + shy;
                    by1 = gridi->c0[YY] + (ci_y+1)*gridi->cellSize[YY] + shy;
                }

                get_cell_range<YY>(by0, by1,
                                   *gridj,
                                   d2z_cx, rlist2,
                                   &cyf, &cyl);

                if (cyf > cyl)
                {
                    continue;
                }

                d2z_cy = d2z;
                if (by1 < gridj->c0[YY])
                {
                    d2z_cy += gmx::square(gridj->c0[YY] - by1);
                }
                else if (by0 > gridj->c1[YY])
                {
                    d2z_cy += gmx::square(by0 - gridj->c1[YY]);
                }

                for (int tx = -shp[XX]; tx <= shp[XX]; tx++)
                {
                    shift = XYZ2IS(tx, ty, tz);

                    if (c_pbcShiftBackward && gridi == gridj && shift > CENTRAL)
                    {
                        continue;
                    }

                    shx = tx*box[XX][XX] + ty*box[YY][XX] + tz*box[ZZ][XX];

                    if (nbl->bSimple)
                    {
                        bx0 = bb_i[ci].lower[BB_X] + shx;
                        bx1 = bb_i[ci].upper[BB_X] + shx;
                    }
                    else
                    {
                        bx0 = gridi->c0[XX] + (ci_x  )*gridi->cellSize[XX] + shx;
                        bx1 = gridi->c0[XX] + (ci_x+1)*gridi->cellSize[XX] + shx;
                    }

                    get_cell_range<XX>(bx0, bx1,
                                       *gridj,
                                       d2z_cy, rlist2,
                                       &cxf, &cxl);

                    if (cxf > cxl)
                    {
                        continue;
                    }

                    if (nbl->bSimple)
                    {
                        new_ci_entry(nbl, cell0_i+ci, shift, flags_i[ci]);
                    }
                    else
                    {
                        new_sci_entry(nbl, cell0_i+ci, shift);
                    }

                    if ((!c_pbcShiftBackward || (shift == CENTRAL &&
                                                 gridi == gridj)) &&
                        cxf < ci_x)
                    {
                        /* Leave the pairs with i > j.
                         * x is the major index, so skip half of it.
                         */
                        cxf = ci_x;
                    }

                    if (nbl->bSimple)
                    {
                        set_icell_bb_simple(bb_i, ci, shx, shy, shz,
                                            nbl->work->bb_ci);
                    }
                    else
                    {
#if NBNXN_BBXXXX
                        set_icell_bbxxxx_supersub(pbb_i, ci, shx, shy, shz,
                                                  nbl->work->pbb_ci);
#else
                        set_icell_bb_supersub(bb_i, ci, shx, shy, shz,
                                              nbl->work->bb_ci);
#endif
                    }

                    nbs->icell_set_x(cell0_i+ci, shx, shy, shz,
                                     nbat->xstride, nbat->x,
                                     nbl->work);

                    for (int cx = cxf; cx <= cxl; cx++)
                    {
                        d2zx = d2z;
                        if (gridj->c0[XX] + cx*gridj->cellSize[XX] > bx1)
                        {
                            d2zx += gmx::square(gridj->c0[XX] + cx*gridj->cellSize[XX] - bx1);
                        }
                        else if (gridj->c0[XX] + (cx+1)*gridj->cellSize[XX] < bx0)
                        {
                            d2zx += gmx::square(gridj->c0[XX] + (cx+1)*gridj->cellSize[XX] - bx0);
                        }

                        if (gridi == gridj &&
                            cx == ci_x &&
                            (!c_pbcShiftBackward || shift == CENTRAL) &&
                            cyf < ci_y)
                        {
                            /* Leave the pairs with i > j.
                             * Skip half of y when i and j have the same x.
                             */
                            cyf_x = ci_y;
                        }
                        else
                        {
                            cyf_x = cyf;
                        }

                        for (int cy = cyf_x; cy <= cyl; cy++)
                        {
                            const int columnStart = gridj->cxy_ind[cx*gridj->numCells[YY] + cy];
                            const int columnEnd   = gridj->cxy_ind[cx*gridj->numCells[YY] + cy + 1];

                            d2zxy = d2zx;
                            if (gridj->c0[YY] + cy*gridj->cellSize[YY] > by1)
                            {
                                d2zxy += gmx::square(gridj->c0[YY] + cy*gridj->cellSize[YY] - by1);
                            }
                            else if (gridj->c0[YY] + (cy+1)*gridj->cellSize[YY] < by0)
                            {
                                d2zxy += gmx::square(gridj->c0[YY] + (cy+1)*gridj->cellSize[YY] - by0);
                            }
                            if (columnStart < columnEnd && d2zxy < rlist2)
                            {
                                /* To improve efficiency in the common case
                                 * of a homogeneous particle distribution,
                                 * we estimate the index of the middle cell
                                 * in range (midCell). We search down and up
                                 * starting from this index.
                                 *
                                 * Note that the bbcz_j array contains bounds
                                 * for i-clusters, thus for clusters of 4 atoms.
                                 * For the common case where the j-cluster size
                                 * is 8, we could step with a stride of 2,
                                 * but we do not do this because it would
                                 * complicate this code even more.
                                 */
                                int midCell = columnStart + static_cast<int>(bz1_frac*(columnEnd - columnStart));
                                if (midCell >= columnEnd)
                                {
                                    midCell = columnEnd - 1;
                                }

                                d2xy = d2zxy - d2z;

                                /* Find the lowest cell that can possibly
                                 * be within range.
                                 * Check if we hit the bottom of the grid,
                                 * if the j-cell is below the i-cell and if so,
                                 * if it is within range.
                                 */
                                int downTestCell = midCell;
                                while (downTestCell >= columnStart &&
                                       (bbcz_j[downTestCell*NNBSBB_D + 1] >= bz0 ||
                                        d2xy + gmx::square(bbcz_j[downTestCell*NNBSBB_D + 1] - bz0) < rlist2))
                                {
                                    downTestCell--;
                                }
                                int firstCell = downTestCell + 1;

                                /* Find the highest cell that can possibly
                                 * be within range.
                                 * Check if we hit the top of the grid,
                                 * if the j-cell is above the i-cell and if so,
                                 * if it is within range.
                                 */
                                int upTestCell = midCell + 1;
                                while (upTestCell < columnEnd &&
                                       (bbcz_j[upTestCell*NNBSBB_D] <= bz1 ||
                                        d2xy + gmx::square(bbcz_j[upTestCell*NNBSBB_D] - bz1) < rlist2))
                                {
                                    upTestCell++;
                                }
                                int lastCell = upTestCell - 1;

#define NBNXN_REFCODE 0
#if NBNXN_REFCODE
                                {
                                    /* Simple reference code, for debugging,
                                     * overrides the more complex code above.
                                     */
                                    firstCell = columnEnd;
                                    lastCell  = -1;
                                    for (int k = columnStart; k < columnEnd; k++)
                                    {
                                        if (d2xy + gmx::square(bbcz_j[k*NNBSBB_D + 1] - bz0) < rlist2 &&
                                            k < firstCell)
                                        {
                                            firstCell = k;
                                        }
                                        if (d2xy + gmx::square(bbcz_j[k*NNBSBB_D] - bz1) < rlist2 &&
                                            k > lastCell)
                                        {
                                            lastCell = k;
                                        }
                                    }
                                }
#endif

                                if (gridi == gridj)
                                {
                                    /* We want each atom/cell pair only once,
                                     * only use cj >= ci.
                                     */
                                    if (!c_pbcShiftBackward || shift == CENTRAL)
                                    {
                                        firstCell = std::max(firstCell, ci);
                                    }
                                }

                                if (firstCell <= lastCell)
                                {
                                    GMX_ASSERT(firstCell >= columnStart && lastCell < columnEnd, "The range should reside within the current grid column");

                                    /* For f buffer flags with simple lists */
                                    ncj_old_j = nbl->ncj;

                                    if (nbl->bSimple)
                                    {
                                        /* We have a maximum of 2 j-clusters
                                         * per i-cluster sized cell.
                                         */
                                        check_cell_list_space_simple(nbl, 2*(lastCell - firstCell + 1));
                                    }
                                    else
                                    {
                                        check_cell_list_space_supersub(nbl, lastCell - firstCell + 1);
                                    }

                                    switch (nb_kernel_type)
                                    {
                                        case nbnxnk4x4_PlainC:
                                            makeClusterListSimple(gridj,
                                                                  nbl, ci, firstCell, lastCell,
                                                                  (gridi == gridj && shift == CENTRAL),
                                                                  nbat->x,
                                                                  rlist2, rbb2,
                                                                  &numDistanceChecks);
                                            break;
#ifdef GMX_NBNXN_SIMD_4XN
                                        case nbnxnk4xN_SIMD_4xN:
                                            makeClusterListSimd4xn(gridj,
                                                                   nbl, ci, firstCell, lastCell,
                                                                   (gridi == gridj && shift == CENTRAL),
                                                                   nbat->x,
                                                                   rlist2, rbb2,
                                                                   &numDistanceChecks);
                                            break;
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
                                        case nbnxnk4xN_SIMD_2xNN:
                                            makeClusterListSimd2xnn(gridj,
                                                                    nbl, ci, firstCell, lastCell,
                                                                    (gridi == gridj && shift == CENTRAL),
                                                                    nbat->x,
                                                                    rlist2, rbb2,
                                                                    &numDistanceChecks);
                                            break;
#endif
                                        case nbnxnk8x8x8_PlainC:
                                        case nbnxnk8x8x8_GPU:
                                            for (cj = firstCell; cj <= lastCell; cj++)
                                            {
                                                make_cluster_list_supersub(gridi, gridj,
                                                                           nbl, ci, cj,
                                                                           (gridi == gridj && shift == CENTRAL && ci == cj),
                                                                           nbat->xstride, nbat->x,
                                                                           rlist2, rbb2,
                                                                           &numDistanceChecks);
                                            }
                                            break;
                                    }

                                    if (bFBufferFlag && nbl->ncj > ncj_old_j)
                                    {
                                        int cbf = nbl->cj[ncj_old_j].cj >> gridj_flag_shift;
                                        int cbl = nbl->cj[nbl->ncj-1].cj >> gridj_flag_shift;
                                        for (int cb = cbf; cb <= cbl; cb++)
                                        {
                                            bitmask_init_bit(&gridj_flag[cb], th);
                                        }
                                    }

                                    nbl->ncjInUse += nbl->ncj - ncj_old_j;
                                }
                            }
                        }
                    }

                    /* Set the exclusions for this ci list */
                    if (nbl->bSimple)
                    {
                        setExclusionsForSimpleIentry(nbs,
                                                     nbl,
                                                     shift == CENTRAL && gridi == gridj,
                                                     na_cj_2log,
                                                     nbl->ci[nbl->nci],
                                                     exclusions);

                        if (nbs->bFEP)
                        {
                            make_fep_list(nbs, nbat, nbl,
                                          shift == CENTRAL && gridi == gridj,
                                          &(nbl->ci[nbl->nci]),
                                          gridi, gridj, nbl_fep);
                        }
                    }
                    else
                    {
                        setExclusionsForGpuIentry(nbs,
                                                  nbl,
                                                  shift == CENTRAL && gridi == gridj,
                                                  nbl->sci[nbl->nsci],
                                                  exclusions);

                        if (nbs->bFEP)
                        {
                            make_fep_list_supersub(nbs, nbat, nbl,
                                                   shift == CENTRAL && gridi == gridj,
                                                   &(nbl->sci[nbl->nsci]),
                                                   shx, shy, shz,
                                                   rl_fep2,
                                                   gridi, gridj, nbl_fep);
                        }
                    }

                    /* Close this ci list */
                    if (nbl->bSimple)
                    {
                        close_ci_entry_simple(nbl);
                    }
                    else
                    {
                        close_ci_entry_supersub(nbl,
                                                nsubpair_max,
                                                progBal, nsubpair_tot_est,
                                                th, nth);
                    }
                }
            }
        }

        if (bFBufferFlag && nbl->ncj > ncj_old_i)
        {
            bitmask_init_bit(&(work->buffer_flags.flag[(gridi->cell0+ci)>>gridi_flag_shift]), th);
        }
    }

    work->ndistc = numDistanceChecks;

    nbs_cycle_stop(&work->cc[enbsCCsearch]);

    GMX_ASSERT(nbl->ncjInUse == nbl->ncj || nbs->bFEP, "Without free-energy all cj pair-list entries should be in use. Note that subsequent code does not make use of the equality, this check is only here to catch bugs");

    if (debug)
    {
        fprintf(debug, "number of distance checks %d\n", numDistanceChecks);

        if (nbl->bSimple)
        {
            print_nblist_statistics_simple(debug, nbl, nbs, rlist);
        }
        else
        {
            print_nblist_statistics_supersub(debug, nbl, nbs, rlist);
        }

        if (nbs->bFEP)
        {
            fprintf(debug, "nbl FEP list pairs: %d\n", nbl_fep->nrj);
        }
    }
}
static void reduce_buffer_flags(const nbnxn_search_t nbs,
                                int nsrc,
                                const nbnxn_buffer_flags_t *dest)
{
    for (int s = 0; s < nsrc; s++)
    {
        gmx_bitmask_t * flag = nbs->work[s].buffer_flags.flag;

        for (int b = 0; b < dest->nflag; b++)
        {
            bitmask_union(&(dest->flag[b]), flag[b]);
        }
    }
}
*flags
, int nout
)
3832 int nelem
, nkeep
, ncopy
, nred
, out
;
3833 gmx_bitmask_t mask_0
;
3839 bitmask_init_bit(&mask_0
, 0);
3840 for (int b
= 0; b
< flags
->nflag
; b
++)
3842 if (bitmask_is_equal(flags
->flag
[b
], mask_0
))
3844 /* Only flag 0 is set, no copy of reduction required */
3848 else if (!bitmask_is_zero(flags
->flag
[b
]))
3851 for (out
= 0; out
< nout
; out
++)
3853 if (bitmask_is_set(flags
->flag
[b
], out
))
3870 fprintf(debug
, "nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
3872 nelem
/(double)(flags
->nflag
),
3873 nkeep
/(double)(flags
->nflag
),
3874 ncopy
/(double)(flags
->nflag
),
3875 nred
/(double)(flags
->nflag
));
/* Copies the list entries from src to dest when cjStart <= *cjGlobal < cjEnd.
 * *cjGlobal is updated with the cj count in src.
 * When setFlags==true, flag bit t is set in flag for all i and j clusters.
 */
template<bool setFlags>
static void copySelectedListRange(const nbnxn_ci_t * gmx_restrict srcCi,
                                  const nbnxn_pairlist_t * gmx_restrict src,
                                  nbnxn_pairlist_t * gmx_restrict dest,
                                  gmx_bitmask_t *flag,
                                  int iFlagShift, int jFlagShift, int t)
{
    int ncj = srcCi->cj_ind_end - srcCi->cj_ind_start;

    if (dest->nci + 1 >= dest->ci_nalloc)
    {
        nb_realloc_ci(dest, dest->nci + 1);
    }
    check_cell_list_space_simple(dest, ncj);

    dest->ci[dest->nci]              = *srcCi;
    dest->ci[dest->nci].cj_ind_start = dest->ncj;
    dest->ci[dest->nci].cj_ind_end   = dest->ncj + ncj;

    if (setFlags)
    {
        bitmask_init_bit(&flag[srcCi->ci >> iFlagShift], t);
    }

    for (int j = srcCi->cj_ind_start; j < srcCi->cj_ind_end; j++)
    {
        dest->cj[dest->ncj++] = src->cj[j];

        if (setFlags)
        {
            /* NOTE: This is relatively expensive, since this
             * operation is done for all elements in the list,
             * whereas at list generation this is done only
             * once for each flag entry.
             */
            bitmask_init_bit(&flag[src->cj[j].cj >> jFlagShift], t);
        }
    }

    dest->nci++;
}
3925 * sized. Only whole i-entries are moved between lists. These are moved
3926 * between the ends of the lists, such that the buffer reduction cost should
3927 * not change significantly.
3928 * Note that all original reduction flags are currently kept. This can lead
3929 * to reduction of parts of the force buffer that could be avoided. But since
3930 * the original lists are quite balanced, this will only give minor overhead.
3932 static void rebalanceSimpleLists(int numLists
,
3933 nbnxn_pairlist_t
* const * const srcSet
,
3934 nbnxn_pairlist_t
**destSet
,
3935 gmx::ArrayRef
<nbnxn_search_work_t
> searchWork
)
3938 for (int s
= 0; s
< numLists
; s
++)
3940 ncjTotal
+= srcSet
[s
]->ncjInUse
;
3942 int ncjTarget
= (ncjTotal
+ numLists
- 1)/numLists
;
3944 #pragma omp parallel num_threads(numLists)
3946 int t
= gmx_omp_get_thread_num();
3948 int cjStart
= ncjTarget
* t
;
3949 int cjEnd
= ncjTarget
*(t
+ 1);
3951 /* The destination pair-list for task/thread t */
3952 nbnxn_pairlist_t
*dest
= destSet
[t
];
3954 clear_pairlist(dest
);
3955 dest
->bSimple
= srcSet
[0]->bSimple
;
3956 dest
->na_ci
= srcSet
[0]->na_ci
;
3957 dest
->na_cj
= srcSet
[0]->na_cj
;
3959 /* Note that the flags in the work struct (still) contain flags
3960 * for all entries that are present in srcSet->nbl[t].
3962 gmx_bitmask_t
*flag
= searchWork
[t
].buffer_flags
.flag
;
3964 int iFlagShift
= getBufferFlagShift(dest
->na_ci
);
3965 int jFlagShift
= getBufferFlagShift(dest
->na_cj
);
3968 for (int s
= 0; s
< numLists
&& cjGlobal
< cjEnd
; s
++)
3970 const nbnxn_pairlist_t
*src
= srcSet
[s
];
3972 if (cjGlobal
+ src
->ncjInUse
> cjStart
)
3974 for (int i
= 0; i
< src
->nci
&& cjGlobal
< cjEnd
; i
++)
3976 const nbnxn_ci_t
*srcCi
= &src
->ci
[i
];
3977 int ncj
= srcCi
->cj_ind_end
- srcCi
->cj_ind_start
;
3978 if (cjGlobal
>= cjStart
)
3980 /* If the source list is not our own, we need to set
3981 * extra flags (the template bool parameter).
3985 copySelectedListRange
3988 flag
, iFlagShift
, jFlagShift
, t
);
3992 copySelectedListRange
3995 dest
, flag
, iFlagShift
, jFlagShift
, t
);
4003 cjGlobal
+= src
->ncjInUse
;
4007 dest
->ncjInUse
= dest
->ncj
;
4011 int ncjTotalNew
= 0;
4012 for (int s
= 0; s
< numLists
; s
++)
4014 ncjTotalNew
+= destSet
[s
]->ncjInUse
;
4016 GMX_RELEASE_ASSERT(ncjTotalNew
== ncjTotal
, "The total size of the lists before and after rebalancing should match");
/* Returns if the pairlists are so imbalanced that it is worth rebalancing. */
static bool checkRebalanceSimpleLists(const nbnxn_pairlist_set_t *listSet)
{
    int numLists = listSet->nnbl;
    int ncjMax   = 0;
    int ncjTotal = 0;
    for (int s = 0; s < numLists; s++)
    {
        ncjMax    = std::max(ncjMax, listSet->nbl[s]->ncjInUse);
        ncjTotal += listSet->nbl[s]->ncjInUse;
    }
    if (debug)
    {
        fprintf(debug, "Pair-list ncjMax %d ncjTotal %d\n", ncjMax, ncjTotal);
    }
    /* The rebalancing adds 3% extra time to the search. Heuristically we
     * determined that under common conditions the non-bonded kernel balance
     * improvement will outweigh this when the imbalance is more than 3%.
     * But this will, obviously, depend on search vs kernel time and nstlist.
     */
    const real rebalanceTolerance = 1.03;

    return numLists*ncjMax > ncjTotal*rebalanceTolerance;
}
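
/* Worked example of the imbalance criterion (illustration only, assumed
 * numbers): with 8 lists, ncjTotal = 80000 and ncjMax = 10500, we compare
 * 8*10500 = 84000 against 80000*1.03 = 82400; the largest list is more than
 * 3% above the average, so rebalancing is worthwhile.
 */
gmx_unused static bool exampleCheckRebalance()
{
    const int numLists = 8;     /* assumed list count */
    const int ncjMax   = 10500; /* assumed largest list size */
    const int ncjTotal = 80000; /* assumed total size */

    return numLists*ncjMax > ncjTotal*1.03;
}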
/* Perform a count (linear) sort to sort the smaller lists to the end.
 * This avoids load imbalance on the GPU, as large lists will be
 * scheduled and executed first and the smaller lists later.
 * Load balancing between multi-processors only happens at the end
 * and there smaller lists lead to more effective load balancing.
 * The sorting is done on the cj4 count, not on the actual pair counts.
 * Not only does this make the sort faster, but it also results in
 * better load balancing than using a list sorted on exact load.
 * This function swaps the pointer in the pair list to avoid a copy operation.
 */
static void sort_sci(nbnxn_pairlist_t *nbl)
{
    nbnxn_list_work_t *work;
    int                m, s0, s1;
    nbnxn_sci_t       *sci_sort;

    if (nbl->ncj4 <= nbl->nsci)
    {
        /* nsci = 0 or all sci have size 1, sorting won't change the order */
        return;
    }

    work = nbl->work;

    /* We will distinguish differences up to double the average */
    m = (2*nbl->ncj4)/nbl->nsci;

    if (m + 1 > work->sort_nalloc)
    {
        work->sort_nalloc = over_alloc_large(m + 1);
        srenew(work->sort, work->sort_nalloc);
    }

    if (work->sci_sort_nalloc != nbl->sci_nalloc)
    {
        work->sci_sort_nalloc = nbl->sci_nalloc;
        nbnxn_realloc_void((void **)&work->sci_sort,
                           0,
                           work->sci_sort_nalloc*sizeof(*work->sci_sort),
                           nbl->alloc, nbl->free);
    }

    /* Count the entries of each size */
    for (int i = 0; i <= m; i++)
    {
        work->sort[i] = 0;
    }
    for (int s = 0; s < nbl->nsci; s++)
    {
        int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
        work->sort[i]++;
    }
    /* Calculate the offset for each count */
    s0            = work->sort[m];
    work->sort[m] = 0;
    for (int i = m - 1; i >= 0; i--)
    {
        s1            = work->sort[i];
        work->sort[i] = work->sort[i + 1] + s0;
        s0            = s1;
    }

    /* Sort entries directly into place */
    sci_sort = work->sci_sort;
    for (int s = 0; s < nbl->nsci; s++)
    {
        int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
        sci_sort[work->sort[i]++] = nbl->sci[s];
    }

    /* Swap the sci pointers so we use the new, sorted list */
    work->sci_sort = nbl->sci;
    nbl->sci       = sci_sort;
}
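
/* A self-contained sketch of the descending counting sort used above,
 * operating on plain bucket indices instead of sci entries (illustration
 * only; assumes <vector> and <algorithm> are available). Values are clamped
 * to m, and the offsets are laid out so that the largest bucket comes first.
 */
gmx_unused static void exampleDescendingCountSort(std::vector<int> *values, int m)
{
    /* Count the entries of each (clamped) size */
    std::vector<int> count(m + 1, 0);
    for (int v : *values)
    {
        count[std::min(m, v)]++;
    }
    /* Offsets: bucket m (the largest entries) starts at position 0 */
    std::vector<int> offset(m + 1);
    int              pos = 0;
    for (int i = m; i >= 0; i--)
    {
        offset[i] = pos;
        pos      += count[i];
    }
    /* Place entries directly into their sorted positions */
    std::vector<int> sorted(values->size());
    for (int v : *values)
    {
        sorted[offset[std::min(m, v)]++] = v;
    }
    *values = sorted;
}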
/* Make a local or non-local pair-list, depending on iloc */
void nbnxn_make_pairlist(const nbnxn_search_t nbs,
                         nbnxn_atomdata_t *nbat,
                         const t_blocka *excl,
                         real rlist,
                         int min_ci_balanced,
                         nbnxn_pairlist_set_t *nbl_list,
                         int iloc,
                         int nb_kernel_type,
                         t_nrnb *nrnb)
{
    nbnxn_grid_t      *gridi, *gridj;
    int                nzi, zj0, zj1;
    int                nsubpair_target;
    float              nsubpair_tot_est;
    int                nnbl;
    nbnxn_pairlist_t **nbl;
    int                ci_block;
    gmx_bool           CombineNBLists;
    gmx_bool           progBal;
    int                np_tot, np_noq, np_hlj, nap;

    nnbl           = nbl_list->nnbl;
    nbl            = nbl_list->nbl;
    CombineNBLists = nbl_list->bCombined;

    if (debug)
    {
        fprintf(debug, "ns making %d nblists\n", nnbl);
    }

    nbat->bUseBufferFlags = (nbat->nout > 1);
    /* We should re-init the flags before making the first list */
    if (nbat->bUseBufferFlags && LOCAL_I(iloc))
    {
        init_buffer_flags(&nbat->buffer_flags, nbat->natoms);
    }

    if (nbl_list->bSimple)
    {
#if GMX_SIMD
        switch (nb_kernel_type)
        {
#ifdef GMX_NBNXN_SIMD_4XN
            case nbnxnk4xN_SIMD_4xN:
                nbs->icell_set_x = icell_set_x_simd_4xn;
                break;
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
            case nbnxnk4xN_SIMD_2xNN:
                nbs->icell_set_x = icell_set_x_simd_2xnn;
                break;
#endif
            default:
                nbs->icell_set_x = icell_set_x_simple;
                break;
        }
#else   /* GMX_SIMD */
        /* MSVC 2013 complains about switch statements without case */
        nbs->icell_set_x = icell_set_x_simple;
#endif  /* GMX_SIMD */
    }
    else
    {
        nbs->icell_set_x = icell_set_x_supersub;
    }

    if (LOCAL_I(iloc))
    {
        /* Only zone (grid) 0 vs 0 */
        nzi = 1;
        zj0 = 0;
        zj1 = 1;
    }
    else
    {
        nzi = nbs->zones->nizone;
    }

    if (!nbl_list->bSimple && min_ci_balanced > 0)
    {
        get_nsubpair_target(nbs, iloc, rlist, min_ci_balanced,
                            &nsubpair_target, &nsubpair_tot_est);
    }
    else
    {
        nsubpair_target  = 0;
        nsubpair_tot_est = 0;
    }

    /* Clear all pair-lists */
    for (int th = 0; th < nnbl; th++)
    {
        clear_pairlist(nbl[th]);

        if (nbs->bFEP)
        {
            clear_pairlist_fep(nbl_list->nbl_fep[th]);
        }
    }

    for (int zi = 0; zi < nzi; zi++)
    {
        gridi = &nbs->grid[zi];

        if (NONLOCAL_I(iloc))
        {
            zj0 = nbs->zones->izone[zi].j0;
            zj1 = nbs->zones->izone[zi].j1;
            if (zi == 0)
            {
                zj0++;
            }
        }
        for (int zj = zj0; zj < zj1; zj++)
        {
            gridj = &nbs->grid[zj];

            if (debug)
            {
                fprintf(debug, "ns search grid %d vs %d\n", zi, zj);
            }

            nbs_cycle_start(&nbs->cc[enbsCCsearch]);

            ci_block = get_ci_block_size(gridi, nbs->DomDec, nnbl);

            /* With GPU: generate progressively smaller lists for
             * load balancing for local only or non-local with 2 zones.
             */
            progBal = (LOCAL_I(iloc) || nbs->zones->n <= 2);

#pragma omp parallel for num_threads(nnbl) schedule(static)
            for (int th = 0; th < nnbl; th++)
            {
                try
                {
                    /* Re-init the thread-local work flag data before making
                     * the first list (not an elegant conditional).
                     */
                    if (nbat->bUseBufferFlags && ((zi == 0 && zj == 0)))
                    {
                        init_buffer_flags(&nbs->work[th].buffer_flags, nbat->natoms);
                    }

                    if (CombineNBLists && th > 0)
                    {
                        clear_pairlist(nbl[th]);
                    }

                    /* Divide the i super cells equally over the nblists */
                    nbnxn_make_pairlist_part(nbs, gridi, gridj,
                                             &nbs->work[th], nbat, *excl,
                                             rlist,
                                             nb_kernel_type,
                                             ci_block,
                                             nbat->bUseBufferFlags,
                                             nsubpair_target,
                                             progBal, nsubpair_tot_est,
                                             th, nnbl,
                                             nbl[th],
                                             nbl_list->nbl_fep[th]);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }
            nbs_cycle_stop(&nbs->cc[enbsCCsearch]);

            np_tot = 0;
            np_noq = 0;
            np_hlj = 0;
            for (int th = 0; th < nnbl; th++)
            {
                inc_nrnb(nrnb, eNR_NBNXN_DIST2, nbs->work[th].ndistc);

                if (nbl_list->bSimple)
                {
                    np_tot += nbl[th]->ncj;
                    np_noq += nbl[th]->work->ncj_noq;
                    np_hlj += nbl[th]->work->ncj_hlj;
                }
                else
                {
                    /* This count ignores potential subsequent pair pruning */
                    np_tot += nbl[th]->nci_tot;
                }
            }
            nap                   = nbl[0]->na_ci*nbl[0]->na_cj;
            nbl_list->natpair_ljq = (np_tot - np_noq)*nap - np_hlj*nap/2;
            nbl_list->natpair_lj  = np_noq*nap;
            nbl_list->natpair_q   = np_hlj*nap/2;

            if (CombineNBLists && nnbl > 1)
            {
                nbs_cycle_start(&nbs->cc[enbsCCcombine]);

                combine_nblists(nnbl-1, nbl+1, nbl[0]);

                nbs_cycle_stop(&nbs->cc[enbsCCcombine]);
            }
        }
    }

    if (nbl_list->bSimple)
    {
        if (nnbl > 1 && checkRebalanceSimpleLists(nbl_list))
        {
            rebalanceSimpleLists(nbl_list->nnbl, nbl_list->nbl, nbl_list->nbl_work, nbs->work);

            /* Swap the pointer of the sets of pair lists */
            nbnxn_pairlist_t **tmp = nbl_list->nbl;
            nbl_list->nbl          = nbl_list->nbl_work;
            nbl_list->nbl_work     = tmp;
        }
    }
    else
    {
        /* Sort the entries on size, large ones first */
        if (CombineNBLists || nnbl == 1)
        {
            sort_sci(nbl[0]);
        }
        else
        {
#pragma omp parallel for num_threads(nnbl) schedule(static)
            for (int th = 0; th < nnbl; th++)
            {
                try
                {
                    sort_sci(nbl[th]);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }
        }
    }

    if (nbat->bUseBufferFlags)
    {
        reduce_buffer_flags(nbs, nbl_list->nnbl, &nbat->buffer_flags);
    }

    if (nbs->bFEP)
    {
        /* Balance the free-energy lists over all the threads */
        balance_fep_lists(nbs, nbl_list);
    }

    /* This is a fresh list, so not pruned, stored using ci and nci.
     * ciOuter and nciOuter are invalid at this point.
     */
    GMX_ASSERT(nbl_list->nbl[0]->nciOuter == -1, "nciOuter should have been set to -1 to signal that it is invalid");

    /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
    if (LOCAL_I(iloc))
    {
        nbs->search_count++;
    }
    if (nbs->print_cycles &&
        (!nbs->DomDec || !LOCAL_I(iloc)) &&
        nbs->search_count % 100 == 0)
    {
        nbs_cycle_print(stderr, nbs);
    }

    /* If we have more than one list, they either got rebalancing (CPU)
     * or combined (GPU), so we should dump the final result to debug.
     */
    if (debug && nbl_list->nnbl > 1)
    {
        if (nbl_list->bSimple)
        {
            for (int t = 0; t < nbl_list->nnbl; t++)
            {
                print_nblist_statistics_simple(debug, nbl_list->nbl[t], nbs, rlist);
            }
        }
        else
        {
            print_nblist_statistics_supersub(debug, nbl_list->nbl[0], nbs, rlist);
        }
    }

    if (debug)
    {
        if (gmx_debug_at)
        {
            if (nbl_list->bSimple)
            {
                for (int t = 0; t < nbl_list->nnbl; t++)
                {
                    print_nblist_ci_cj(debug, nbl_list->nbl[t]);
                }
            }
            else
            {
                print_nblist_sci_cj(debug, nbl_list->nbl[0]);
            }
        }

        if (nbat->bUseBufferFlags)
        {
            print_reduction_cost(&nbat->buffer_flags, nbl_list->nnbl);
        }
    }
}
void nbnxnPrepareListForDynamicPruning(nbnxn_pairlist_set_t *listSet)
{
    /* TODO: Restructure the lists so we have actual outer and inner
     *       list objects so we can set a single pointer instead of
     *       swapping several pointers.
     */
    for (int i = 0; i < listSet->nnbl; i++)
    {
        /* The search produced a list in ci/cj.
         * Swap the list pointers so that the outer list is in ciOuter,cjOuter
         * and we can prune that to get an inner list in ci/cj.
         */
        nbnxn_pairlist_t *list = listSet->nbl[i];
        list->nciOuter = list->nci;

        nbnxn_ci_t *ciTmp = list->ciOuter;
        list->ciOuter     = list->ci;
        list->ci          = ciTmp;

        nbnxn_cj_t *cjTmp = list->cjOuter;
        list->cjOuter     = list->cj;
        list->cj          = cjTmp;

        /* Signal that this inner list is currently invalid */
        list->nci = 0;
    }
}