2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2016, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
38 #include "nbnxn_search.h"
49 #include "gromacs/domdec/domdec_struct.h"
50 #include "gromacs/gmxlib/nrnb.h"
51 #include "gromacs/math/functions.h"
52 #include "gromacs/math/utilities.h"
53 #include "gromacs/math/vec.h"
54 #include "gromacs/mdlib/gmx_omp_nthreads.h"
55 #include "gromacs/mdlib/nb_verlet.h"
56 #include "gromacs/mdlib/nbnxn_atomdata.h"
57 #include "gromacs/mdlib/nbnxn_consts.h"
58 #include "gromacs/mdlib/nbnxn_grid.h"
59 #include "gromacs/mdlib/nbnxn_internal.h"
60 #include "gromacs/mdlib/nbnxn_simd.h"
61 #include "gromacs/mdlib/nbnxn_util.h"
62 #include "gromacs/mdlib/ns.h"
63 #include "gromacs/mdtypes/group.h"
64 #include "gromacs/mdtypes/md_enums.h"
65 #include "gromacs/pbcutil/ishift.h"
66 #include "gromacs/pbcutil/pbc.h"
67 #include "gromacs/simd/simd.h"
68 #include "gromacs/simd/vector_operations.h"
69 #include "gromacs/utility/exceptions.h"
70 #include "gromacs/utility/fatalerror.h"
71 #include "gromacs/utility/smalloc.h"
73 using namespace gmx
; // TODO: Remove when this file is moved into gmx namespace
76 /* We shift the i-particles backward for PBC.
77 * This leads to more conditionals than shifting forward.
78 * We do this to get more balanced pair lists.
80 static const bool pbc_shift_backward
= true;
83 static void nbs_cycle_clear(nbnxn_cycle_t
*cc
)
85 for (int i
= 0; i
< enbsCCnr
; i
++)
92 static double Mcyc_av(const nbnxn_cycle_t
*cc
)
94 return (double)cc
->c
*1e-6/cc
->count
;
97 static void nbs_cycle_print(FILE *fp
, const nbnxn_search_t nbs
)
100 fprintf(fp
, "ns %4d grid %4.1f search %4.1f red.f %5.3f",
101 nbs
->cc
[enbsCCgrid
].count
,
102 Mcyc_av(&nbs
->cc
[enbsCCgrid
]),
103 Mcyc_av(&nbs
->cc
[enbsCCsearch
]),
104 Mcyc_av(&nbs
->cc
[enbsCCreducef
]));
106 if (nbs
->nthread_max
> 1)
108 if (nbs
->cc
[enbsCCcombine
].count
> 0)
110 fprintf(fp
, " comb %5.2f",
111 Mcyc_av(&nbs
->cc
[enbsCCcombine
]));
113 fprintf(fp
, " s. th");
114 for (int t
= 0; t
< nbs
->nthread_max
; t
++)
116 fprintf(fp
, " %4.1f",
117 Mcyc_av(&nbs
->work
[t
].cc
[enbsCCsearch
]));
123 static gmx_inline
int ci_to_cj(int ci
, int na_cj_2log
)
127 case 2: return ci
; break;
128 case 3: return (ci
>>1); break;
129 case 1: return (ci
<<1); break;
137 /* Returns the j-cluster index corresponding to the i-cluster index */
138 template<int cj_size
> static gmx_inline
int ci_to_cj(int ci
)
144 else if (cj_size
== 4)
148 else if (cj_size
== 8)
154 GMX_ASSERT(false, "Only j-cluster sizes 2, 4, 8 are implemented");
159 /* Returns the index in the coordinate array corresponding to the i-cluster index */
160 template<int cj_size
> static gmx_inline
int x_ind_ci(int ci
)
164 /* Coordinates are stored packed in groups of 4 */
167 else if (cj_size
== 8)
169 /* Coordinates packed in 8, i-cluster size is half the packing width */
170 return (ci
>> 1)*STRIDE_P8
+ (ci
& 1)*(c_packX8
>> 1);
174 GMX_ASSERT(false, "Only j-cluster sizes 2, 4, 8 are implemented");
179 /* Returns the index in the coordinate array corresponding to the j-cluster index */
180 template<int cj_size
> static gmx_inline
int x_ind_cj(int cj
)
184 /* Coordinates are stored packed in groups of 4 */
185 return (cj
>> 1)*STRIDE_P4
+ (cj
& 1)*(c_packX4
>> 1);
187 else if (cj_size
<= 4)
189 /* Coordinates are stored packed in groups of 4 */
192 else if (cj_size
== 8)
194 /* Coordinates are stored packed in groups of 8 */
199 GMX_ASSERT(false, "Only j-cluster sizes 2, 4, 8 are implemented");
204 /* The 6 functions below are only introduced to make the code more readable */
206 static gmx_inline
int ci_to_cj_simd_4xn(int ci
)
208 return ci_to_cj
<GMX_SIMD_REAL_WIDTH
>(ci
);
211 static gmx_inline
int x_ind_ci_simd_4xn(int ci
)
213 return x_ind_ci
<GMX_SIMD_REAL_WIDTH
>(ci
);
216 static gmx_inline
int x_ind_cj_simd_4xn(int cj
)
218 return x_ind_cj
<GMX_SIMD_REAL_WIDTH
>(cj
);
221 static gmx_inline
int ci_to_cj_simd_2xnn(int ci
)
223 return ci_to_cj
<GMX_SIMD_REAL_WIDTH
/2>(ci
);
226 static gmx_inline
int x_ind_ci_simd_2xnn(int ci
)
228 return x_ind_ci
<GMX_SIMD_REAL_WIDTH
/2>(ci
);
231 static gmx_inline
int x_ind_cj_simd_2xnn(int cj
)
233 return x_ind_cj
<GMX_SIMD_REAL_WIDTH
/2>(cj
);
238 gmx_bool
nbnxn_kernel_pairlist_simple(int nb_kernel_type
)
240 if (nb_kernel_type
== nbnxnkNotSet
)
242 gmx_fatal(FARGS
, "Non-bonded kernel type not set for Verlet-style pair-list.");
245 switch (nb_kernel_type
)
247 case nbnxnk8x8x8_GPU
:
248 case nbnxnk8x8x8_PlainC
:
251 case nbnxnk4x4_PlainC
:
252 case nbnxnk4xN_SIMD_4xN
:
253 case nbnxnk4xN_SIMD_2xNN
:
257 gmx_incons("Invalid nonbonded kernel type passed!");
262 /* Initializes a single nbnxn_pairlist_t data structure */
263 static void nbnxn_init_pairlist_fep(t_nblist
*nl
)
265 nl
->type
= GMX_NBLIST_INTERACTION_FREE_ENERGY
;
266 nl
->igeometry
= GMX_NBLIST_GEOMETRY_PARTICLE_PARTICLE
;
267 /* The interaction functions are set in the free energy kernel fuction */
286 void nbnxn_init_search(nbnxn_search_t
* nbs_ptr
,
288 struct gmx_domdec_zones_t
*zones
,
300 nbs
->DomDec
= (n_dd_cells
!= NULL
);
302 clear_ivec(nbs
->dd_dim
);
308 for (int d
= 0; d
< DIM
; d
++)
310 if ((*n_dd_cells
)[d
] > 1)
313 /* Each grid matches a DD zone */
319 nbnxn_grids_init(nbs
, ngrid
);
322 nbs
->cell_nalloc
= 0;
326 nbs
->nthread_max
= nthread_max
;
328 /* Initialize the work data structures for each thread */
329 snew(nbs
->work
, nbs
->nthread_max
);
330 for (int t
= 0; t
< nbs
->nthread_max
; t
++)
332 nbs
->work
[t
].cxy_na
= NULL
;
333 nbs
->work
[t
].cxy_na_nalloc
= 0;
334 nbs
->work
[t
].sort_work
= NULL
;
335 nbs
->work
[t
].sort_work_nalloc
= 0;
337 snew(nbs
->work
[t
].nbl_fep
, 1);
338 nbnxn_init_pairlist_fep(nbs
->work
[t
].nbl_fep
);
341 /* Initialize detailed nbsearch cycle counting */
342 nbs
->print_cycles
= (getenv("GMX_NBNXN_CYCLE") != 0);
343 nbs
->search_count
= 0;
344 nbs_cycle_clear(nbs
->cc
);
345 for (int t
= 0; t
< nbs
->nthread_max
; t
++)
347 nbs_cycle_clear(nbs
->work
[t
].cc
);
351 static void init_buffer_flags(nbnxn_buffer_flags_t
*flags
,
354 flags
->nflag
= (natoms
+ NBNXN_BUFFERFLAG_SIZE
- 1)/NBNXN_BUFFERFLAG_SIZE
;
355 if (flags
->nflag
> flags
->flag_nalloc
)
357 flags
->flag_nalloc
= over_alloc_large(flags
->nflag
);
358 srenew(flags
->flag
, flags
->flag_nalloc
);
360 for (int b
= 0; b
< flags
->nflag
; b
++)
362 bitmask_clear(&(flags
->flag
[b
]));
366 /* Determines the cell range along one dimension that
367 * the bounding box b0 - b1 sees.
369 static void get_cell_range(real b0
, real b1
,
370 int nc
, real c0
, real s
, real invs
,
371 real d2
, real r2
, int *cf
, int *cl
)
373 *cf
= std::max(static_cast<int>((b0
- c0
)*invs
), 0);
375 while (*cf
> 0 && d2
+ gmx::square((b0
- c0
) - (*cf
-1+1)*s
) < r2
)
380 *cl
= std::min(static_cast<int>((b1
- c0
)*invs
), nc
-1);
381 while (*cl
< nc
-1 && d2
+ gmx::square((*cl
+1)*s
- (b1
- c0
)) < r2
)
387 /* Reference code calculating the distance^2 between two bounding boxes */
388 static float box_dist2(float bx0
, float bx1
, float by0
,
389 float by1
, float bz0
, float bz1
,
390 const nbnxn_bb_t
*bb
)
393 float dl
, dh
, dm
, dm0
;
397 dl
= bx0
- bb
->upper
[BB_X
];
398 dh
= bb
->lower
[BB_X
] - bx1
;
399 dm
= std::max(dl
, dh
);
400 dm0
= std::max(dm
, 0.0f
);
403 dl
= by0
- bb
->upper
[BB_Y
];
404 dh
= bb
->lower
[BB_Y
] - by1
;
405 dm
= std::max(dl
, dh
);
406 dm0
= std::max(dm
, 0.0f
);
409 dl
= bz0
- bb
->upper
[BB_Z
];
410 dh
= bb
->lower
[BB_Z
] - bz1
;
411 dm
= std::max(dl
, dh
);
412 dm0
= std::max(dm
, 0.0f
);
418 /* Plain C code calculating the distance^2 between two bounding boxes */
419 static float subc_bb_dist2(int si
, const nbnxn_bb_t
*bb_i_ci
,
420 int csj
, const nbnxn_bb_t
*bb_j_all
)
422 const nbnxn_bb_t
*bb_i
, *bb_j
;
424 float dl
, dh
, dm
, dm0
;
427 bb_j
= bb_j_all
+ csj
;
431 dl
= bb_i
->lower
[BB_X
] - bb_j
->upper
[BB_X
];
432 dh
= bb_j
->lower
[BB_X
] - bb_i
->upper
[BB_X
];
433 dm
= std::max(dl
, dh
);
434 dm0
= std::max(dm
, 0.0f
);
437 dl
= bb_i
->lower
[BB_Y
] - bb_j
->upper
[BB_Y
];
438 dh
= bb_j
->lower
[BB_Y
] - bb_i
->upper
[BB_Y
];
439 dm
= std::max(dl
, dh
);
440 dm0
= std::max(dm
, 0.0f
);
443 dl
= bb_i
->lower
[BB_Z
] - bb_j
->upper
[BB_Z
];
444 dh
= bb_j
->lower
[BB_Z
] - bb_i
->upper
[BB_Z
];
445 dm
= std::max(dl
, dh
);
446 dm0
= std::max(dm
, 0.0f
);
452 #if NBNXN_SEARCH_BB_SIMD4
454 /* 4-wide SIMD code for bb distance for bb format xyz0 */
455 static float subc_bb_dist2_simd4(int si
, const nbnxn_bb_t
*bb_i_ci
,
456 int csj
, const nbnxn_bb_t
*bb_j_all
)
458 // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
461 Simd4Float bb_i_S0
, bb_i_S1
;
462 Simd4Float bb_j_S0
, bb_j_S1
;
468 bb_i_S0
= load4(&bb_i_ci
[si
].lower
[0]);
469 bb_i_S1
= load4(&bb_i_ci
[si
].upper
[0]);
470 bb_j_S0
= load4(&bb_j_all
[csj
].lower
[0]);
471 bb_j_S1
= load4(&bb_j_all
[csj
].upper
[0]);
473 dl_S
= bb_i_S0
- bb_j_S1
;
474 dh_S
= bb_j_S0
- bb_i_S1
;
476 dm_S
= max(dl_S
, dh_S
);
477 dm0_S
= max(dm_S
, simd4SetZeroF());
479 return dotProduct(dm0_S
, dm0_S
);
482 /* Calculate bb bounding distances of bb_i[si,...,si+3] and store them in d2 */
483 #define SUBC_BB_DIST2_SIMD4_XXXX_INNER(si, bb_i, d2) \
487 Simd4Float dx_0, dy_0, dz_0; \
488 Simd4Float dx_1, dy_1, dz_1; \
490 Simd4Float mx, my, mz; \
491 Simd4Float m0x, m0y, m0z; \
493 Simd4Float d2x, d2y, d2z; \
494 Simd4Float d2s, d2t; \
496 shi = si*NNBSBB_D*DIM; \
498 xi_l = load4(bb_i+shi+0*STRIDE_PBB); \
499 yi_l = load4(bb_i+shi+1*STRIDE_PBB); \
500 zi_l = load4(bb_i+shi+2*STRIDE_PBB); \
501 xi_h = load4(bb_i+shi+3*STRIDE_PBB); \
502 yi_h = load4(bb_i+shi+4*STRIDE_PBB); \
503 zi_h = load4(bb_i+shi+5*STRIDE_PBB); \
505 dx_0 = xi_l - xj_h; \
506 dy_0 = yi_l - yj_h; \
507 dz_0 = zi_l - zj_h; \
509 dx_1 = xj_l - xi_h; \
510 dy_1 = yj_l - yi_h; \
511 dz_1 = zj_l - zi_h; \
513 mx = max(dx_0, dx_1); \
514 my = max(dy_0, dy_1); \
515 mz = max(dz_0, dz_1); \
517 m0x = max(mx, zero); \
518 m0y = max(my, zero); \
519 m0z = max(mz, zero); \
528 store4(d2+si, d2t); \
531 /* 4-wide SIMD code for nsi bb distances for bb format xxxxyyyyzzzz */
532 static void subc_bb_dist2_simd4_xxxx(const float *bb_j
,
533 int nsi
, const float *bb_i
,
536 // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
539 Simd4Float xj_l
, yj_l
, zj_l
;
540 Simd4Float xj_h
, yj_h
, zj_h
;
541 Simd4Float xi_l
, yi_l
, zi_l
;
542 Simd4Float xi_h
, yi_h
, zi_h
;
548 xj_l
= Simd4Float(bb_j
[0*STRIDE_PBB
]);
549 yj_l
= Simd4Float(bb_j
[1*STRIDE_PBB
]);
550 zj_l
= Simd4Float(bb_j
[2*STRIDE_PBB
]);
551 xj_h
= Simd4Float(bb_j
[3*STRIDE_PBB
]);
552 yj_h
= Simd4Float(bb_j
[4*STRIDE_PBB
]);
553 zj_h
= Simd4Float(bb_j
[5*STRIDE_PBB
]);
555 /* Here we "loop" over si (0,STRIDE_PBB) from 0 to nsi with step STRIDE_PBB.
556 * But as we know the number of iterations is 1 or 2, we unroll manually.
558 SUBC_BB_DIST2_SIMD4_XXXX_INNER(0, bb_i
, d2
);
559 if (STRIDE_PBB
< nsi
)
561 SUBC_BB_DIST2_SIMD4_XXXX_INNER(STRIDE_PBB
, bb_i
, d2
);
565 #endif /* NBNXN_SEARCH_BB_SIMD4 */
568 /* Returns if any atom pair from two clusters is within distance sqrt(rl2) */
569 static gmx_inline gmx_bool
570 clusterpair_in_range(const nbnxn_list_work_t
*work
,
572 int csj
, int stride
, const real
*x_j
,
575 #if !GMX_SIMD4_HAVE_REAL
578 * All coordinates are stored as xyzxyz...
581 const real
*x_i
= work
->x_ci
;
583 for (int i
= 0; i
< c_nbnxnGpuClusterSize
; i
++)
585 int i0
= (si
*c_nbnxnGpuClusterSize
+ i
)*DIM
;
586 for (int j
= 0; j
< c_nbnxnGpuClusterSize
; j
++)
588 int j0
= (csj
*c_nbnxnGpuClusterSize
+ j
)*stride
;
590 real d2
= gmx::square(x_i
[i0
] - x_j
[j0
]) + gmx::square(x_i
[i0
+1] - x_j
[j0
+1]) + gmx::square(x_i
[i0
+2] - x_j
[j0
+2]);
601 #else /* !GMX_SIMD4_HAVE_REAL */
603 /* 4-wide SIMD version.
604 * A cluster is hard-coded to 8 atoms.
605 * The coordinates x_i are stored as xxxxyyyy..., x_j is stored xyzxyz...
606 * Using 8-wide AVX(2) is not faster on Intel Sandy Bridge and Haswell.
608 assert(c_nbnxnGpuClusterSize
== 8);
610 Simd4Real rc2_S
= Simd4Real(rl2
);
612 const real
*x_i
= work
->x_ci_simd
;
614 int dim_stride
= c_nbnxnGpuClusterSize
*DIM
;
615 Simd4Real ix_S0
= load4(x_i
+ si
*dim_stride
+ 0*GMX_SIMD4_WIDTH
);
616 Simd4Real iy_S0
= load4(x_i
+ si
*dim_stride
+ 1*GMX_SIMD4_WIDTH
);
617 Simd4Real iz_S0
= load4(x_i
+ si
*dim_stride
+ 2*GMX_SIMD4_WIDTH
);
618 Simd4Real ix_S1
= load4(x_i
+ si
*dim_stride
+ 3*GMX_SIMD4_WIDTH
);
619 Simd4Real iy_S1
= load4(x_i
+ si
*dim_stride
+ 4*GMX_SIMD4_WIDTH
);
620 Simd4Real iz_S1
= load4(x_i
+ si
*dim_stride
+ 5*GMX_SIMD4_WIDTH
);
622 /* We loop from the outer to the inner particles to maximize
623 * the chance that we find a pair in range quickly and return.
625 int j0
= csj
*c_nbnxnGpuClusterSize
;
626 int j1
= j0
+ c_nbnxnGpuClusterSize
- 1;
629 Simd4Real jx0_S
, jy0_S
, jz0_S
;
630 Simd4Real jx1_S
, jy1_S
, jz1_S
;
632 Simd4Real dx_S0
, dy_S0
, dz_S0
;
633 Simd4Real dx_S1
, dy_S1
, dz_S1
;
634 Simd4Real dx_S2
, dy_S2
, dz_S2
;
635 Simd4Real dx_S3
, dy_S3
, dz_S3
;
646 Simd4Bool wco_any_S01
, wco_any_S23
, wco_any_S
;
648 jx0_S
= Simd4Real(x_j
[j0
*stride
+0]);
649 jy0_S
= Simd4Real(x_j
[j0
*stride
+1]);
650 jz0_S
= Simd4Real(x_j
[j0
*stride
+2]);
652 jx1_S
= Simd4Real(x_j
[j1
*stride
+0]);
653 jy1_S
= Simd4Real(x_j
[j1
*stride
+1]);
654 jz1_S
= Simd4Real(x_j
[j1
*stride
+2]);
656 /* Calculate distance */
657 dx_S0
= ix_S0
- jx0_S
;
658 dy_S0
= iy_S0
- jy0_S
;
659 dz_S0
= iz_S0
- jz0_S
;
660 dx_S1
= ix_S1
- jx0_S
;
661 dy_S1
= iy_S1
- jy0_S
;
662 dz_S1
= iz_S1
- jz0_S
;
663 dx_S2
= ix_S0
- jx1_S
;
664 dy_S2
= iy_S0
- jy1_S
;
665 dz_S2
= iz_S0
- jz1_S
;
666 dx_S3
= ix_S1
- jx1_S
;
667 dy_S3
= iy_S1
- jy1_S
;
668 dz_S3
= iz_S1
- jz1_S
;
670 /* rsq = dx*dx+dy*dy+dz*dz */
671 rsq_S0
= norm2(dx_S0
, dy_S0
, dz_S0
);
672 rsq_S1
= norm2(dx_S1
, dy_S1
, dz_S1
);
673 rsq_S2
= norm2(dx_S2
, dy_S2
, dz_S2
);
674 rsq_S3
= norm2(dx_S3
, dy_S3
, dz_S3
);
676 wco_S0
= (rsq_S0
< rc2_S
);
677 wco_S1
= (rsq_S1
< rc2_S
);
678 wco_S2
= (rsq_S2
< rc2_S
);
679 wco_S3
= (rsq_S3
< rc2_S
);
681 wco_any_S01
= wco_S0
|| wco_S1
;
682 wco_any_S23
= wco_S2
|| wco_S3
;
683 wco_any_S
= wco_any_S01
|| wco_any_S23
;
685 if (anyTrue(wco_any_S
))
696 #endif /* !GMX_SIMD4_HAVE_REAL */
699 /* Returns the j sub-cell for index cj_ind */
700 static int nbl_cj(const nbnxn_pairlist_t
*nbl
, int cj_ind
)
702 return nbl
->cj4
[cj_ind
/c_nbnxnGpuJgroupSize
].cj
[cj_ind
& (c_nbnxnGpuJgroupSize
- 1)];
705 /* Returns the i-interaction mask of the j sub-cell for index cj_ind */
706 static unsigned int nbl_imask0(const nbnxn_pairlist_t
*nbl
, int cj_ind
)
708 return nbl
->cj4
[cj_ind
/c_nbnxnGpuJgroupSize
].imei
[0].imask
;
711 /* Ensures there is enough space for extra extra exclusion masks */
712 static void check_excl_space(nbnxn_pairlist_t
*nbl
, int extra
)
714 if (nbl
->nexcl
+extra
> nbl
->excl_nalloc
)
716 nbl
->excl_nalloc
= over_alloc_small(nbl
->nexcl
+extra
);
717 nbnxn_realloc_void((void **)&nbl
->excl
,
718 nbl
->nexcl
*sizeof(*nbl
->excl
),
719 nbl
->excl_nalloc
*sizeof(*nbl
->excl
),
720 nbl
->alloc
, nbl
->free
);
724 /* Ensures there is enough space for ncell extra j-cells in the list */
725 static void check_cell_list_space_simple(nbnxn_pairlist_t
*nbl
,
730 cj_max
= nbl
->ncj
+ ncell
;
732 if (cj_max
> nbl
->cj_nalloc
)
734 nbl
->cj_nalloc
= over_alloc_small(cj_max
);
735 nbnxn_realloc_void((void **)&nbl
->cj
,
736 nbl
->ncj
*sizeof(*nbl
->cj
),
737 nbl
->cj_nalloc
*sizeof(*nbl
->cj
),
738 nbl
->alloc
, nbl
->free
);
742 /* Ensures there is enough space for ncell extra j-clusters in the list */
743 static void check_cell_list_space_supersub(nbnxn_pairlist_t
*nbl
,
748 /* We can have maximally nsupercell*c_gpuNumClusterPerCell sj lists */
749 /* We can store 4 j-subcell - i-supercell pairs in one struct.
750 * since we round down, we need one extra entry.
752 ncj4_max
= ((nbl
->work
->cj_ind
+ ncell
*c_gpuNumClusterPerCell
+ c_nbnxnGpuJgroupSize
- 1)/c_nbnxnGpuJgroupSize
);
754 if (ncj4_max
> nbl
->cj4_nalloc
)
756 nbl
->cj4_nalloc
= over_alloc_small(ncj4_max
);
757 nbnxn_realloc_void((void **)&nbl
->cj4
,
758 nbl
->work
->cj4_init
*sizeof(*nbl
->cj4
),
759 nbl
->cj4_nalloc
*sizeof(*nbl
->cj4
),
760 nbl
->alloc
, nbl
->free
);
763 if (ncj4_max
> nbl
->work
->cj4_init
)
765 for (int j4
= nbl
->work
->cj4_init
; j4
< ncj4_max
; j4
++)
767 /* No i-subcells and no excl's in the list initially */
768 for (w
= 0; w
< c_nbnxnGpuClusterpairSplit
; w
++)
770 nbl
->cj4
[j4
].imei
[w
].imask
= 0U;
771 nbl
->cj4
[j4
].imei
[w
].excl_ind
= 0;
775 nbl
->work
->cj4_init
= ncj4_max
;
779 /* Set all excl masks for one GPU warp no exclusions */
780 static void set_no_excls(nbnxn_excl_t
*excl
)
782 for (int t
= 0; t
< c_nbnxnGpuExclSize
; t
++)
784 /* Turn all interaction bits on */
785 excl
->pair
[t
] = NBNXN_INTERACTION_MASK_ALL
;
789 /* Initializes a single nbnxn_pairlist_t data structure */
790 static void nbnxn_init_pairlist(nbnxn_pairlist_t
*nbl
,
792 nbnxn_alloc_t
*alloc
,
797 nbl
->alloc
= nbnxn_alloc_aligned
;
805 nbl
->free
= nbnxn_free_aligned
;
812 nbl
->bSimple
= bSimple
;
826 /* We need one element extra in sj, so alloc initially with 1 */
833 GMX_ASSERT(c_nbnxnGpuNumClusterPerSupercluster
== c_gpuNumClusterPerCell
, "The search code assumes that the a super-cluster matches a search grid cell");
835 GMX_ASSERT(sizeof(nbl
->cj4
[0].imei
[0].imask
)*8 >= c_nbnxnGpuJgroupSize
*c_gpuNumClusterPerCell
, "The i super-cluster cluster interaction mask does not contain a sufficient number of bits");
836 GMX_ASSERT(sizeof(nbl
->excl
[0])*8 >= c_nbnxnGpuJgroupSize
*c_gpuNumClusterPerCell
, "The GPU exclusion mask does not contain a sufficient number of bits");
839 nbl
->excl_nalloc
= 0;
841 check_excl_space(nbl
, 1);
843 set_no_excls(&nbl
->excl
[0]);
849 snew_aligned(nbl
->work
->bb_ci
, 1, NBNXN_SEARCH_BB_MEM_ALIGN
);
854 snew_aligned(nbl
->work
->pbb_ci
, c_gpuNumClusterPerCell
/STRIDE_PBB
*NNBSBB_XXXX
, NBNXN_SEARCH_BB_MEM_ALIGN
);
856 snew_aligned(nbl
->work
->bb_ci
, c_gpuNumClusterPerCell
, NBNXN_SEARCH_BB_MEM_ALIGN
);
859 int gpu_clusterpair_nc
= c_gpuNumClusterPerCell
*c_nbnxnGpuClusterSize
*DIM
;
860 snew(nbl
->work
->x_ci
, gpu_clusterpair_nc
);
862 snew_aligned(nbl
->work
->x_ci_simd
,
863 std::max(NBNXN_CPU_CLUSTER_I_SIZE
*DIM
*GMX_SIMD_REAL_WIDTH
,
865 GMX_SIMD_REAL_WIDTH
);
867 snew_aligned(nbl
->work
->d2
, c_gpuNumClusterPerCell
, NBNXN_SEARCH_BB_MEM_ALIGN
);
869 nbl
->work
->sort
= NULL
;
870 nbl
->work
->sort_nalloc
= 0;
871 nbl
->work
->sci_sort
= NULL
;
872 nbl
->work
->sci_sort_nalloc
= 0;
875 void nbnxn_init_pairlist_set(nbnxn_pairlist_set_t
*nbl_list
,
876 gmx_bool bSimple
, gmx_bool bCombined
,
877 nbnxn_alloc_t
*alloc
,
880 nbl_list
->bSimple
= bSimple
;
881 nbl_list
->bCombined
= bCombined
;
883 nbl_list
->nnbl
= gmx_omp_nthreads_get(emntNonbonded
);
885 if (!nbl_list
->bCombined
&&
886 nbl_list
->nnbl
> NBNXN_BUFFERFLAG_MAX_THREADS
)
888 gmx_fatal(FARGS
, "%d OpenMP threads were requested. Since the non-bonded force buffer reduction is prohibitively slow with more than %d threads, we do not allow this. Use %d or less OpenMP threads.",
889 nbl_list
->nnbl
, NBNXN_BUFFERFLAG_MAX_THREADS
, NBNXN_BUFFERFLAG_MAX_THREADS
);
892 snew(nbl_list
->nbl
, nbl_list
->nnbl
);
893 snew(nbl_list
->nbl_fep
, nbl_list
->nnbl
);
894 /* Execute in order to avoid memory interleaving between threads */
895 #pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
896 for (int i
= 0; i
< nbl_list
->nnbl
; i
++)
900 /* Allocate the nblist data structure locally on each thread
901 * to optimize memory access for NUMA architectures.
903 snew(nbl_list
->nbl
[i
], 1);
905 /* Only list 0 is used on the GPU, use normal allocation for i>0 */
908 nbnxn_init_pairlist(nbl_list
->nbl
[i
], nbl_list
->bSimple
, alloc
, free
);
912 nbnxn_init_pairlist(nbl_list
->nbl
[i
], nbl_list
->bSimple
, NULL
, NULL
);
915 snew(nbl_list
->nbl_fep
[i
], 1);
916 nbnxn_init_pairlist_fep(nbl_list
->nbl_fep
[i
]);
918 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
;
922 /* Print statistics of a pair list, used for debug output */
923 static void print_nblist_statistics_simple(FILE *fp
, const nbnxn_pairlist_t
*nbl
,
924 const nbnxn_search_t nbs
, real rl
)
926 const nbnxn_grid_t
*grid
;
930 /* This code only produces correct statistics with domain decomposition */
931 grid
= &nbs
->grid
[0];
933 fprintf(fp
, "nbl nci %d ncj %d\n",
935 fprintf(fp
, "nbl na_sc %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
936 nbl
->na_sc
, rl
, nbl
->ncj
, nbl
->ncj
/(double)grid
->nc
,
937 nbl
->ncj
/(double)grid
->nc
*grid
->na_sc
,
938 nbl
->ncj
/(double)grid
->nc
*grid
->na_sc
/(0.5*4.0/3.0*M_PI
*rl
*rl
*rl
*grid
->nc
*grid
->na_sc
/(grid
->size
[XX
]*grid
->size
[YY
]*grid
->size
[ZZ
])));
940 fprintf(fp
, "nbl average j cell list length %.1f\n",
941 0.25*nbl
->ncj
/(double)std::max(nbl
->nci
, 1));
943 for (int s
= 0; s
< SHIFTS
; s
++)
948 for (int i
= 0; i
< nbl
->nci
; i
++)
950 cs
[nbl
->ci
[i
].shift
& NBNXN_CI_SHIFT
] +=
951 nbl
->ci
[i
].cj_ind_end
- nbl
->ci
[i
].cj_ind_start
;
953 int j
= nbl
->ci
[i
].cj_ind_start
;
954 while (j
< nbl
->ci
[i
].cj_ind_end
&&
955 nbl
->cj
[j
].excl
!= NBNXN_INTERACTION_MASK_ALL
)
961 fprintf(fp
, "nbl cell pairs, total: %d excl: %d %.1f%%\n",
962 nbl
->ncj
, npexcl
, 100*npexcl
/(double)std::max(nbl
->ncj
, 1));
963 for (int s
= 0; s
< SHIFTS
; s
++)
967 fprintf(fp
, "nbl shift %2d ncj %3d\n", s
, cs
[s
]);
972 /* Print statistics of a pair lists, used for debug output */
973 static void print_nblist_statistics_supersub(FILE *fp
, const nbnxn_pairlist_t
*nbl
,
974 const nbnxn_search_t nbs
, real rl
)
976 const nbnxn_grid_t
*grid
;
978 int c
[c_gpuNumClusterPerCell
+ 1];
979 double sum_nsp
, sum_nsp2
;
982 /* This code only produces correct statistics with domain decomposition */
983 grid
= &nbs
->grid
[0];
985 fprintf(fp
, "nbl nsci %d ncj4 %d nsi %d excl4 %d\n",
986 nbl
->nsci
, nbl
->ncj4
, nbl
->nci_tot
, nbl
->nexcl
);
987 fprintf(fp
, "nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
988 nbl
->na_ci
, rl
, nbl
->nci_tot
, nbl
->nci_tot
/(double)grid
->nsubc_tot
,
989 nbl
->nci_tot
/(double)grid
->nsubc_tot
*grid
->na_c
,
990 nbl
->nci_tot
/(double)grid
->nsubc_tot
*grid
->na_c
/(0.5*4.0/3.0*M_PI
*rl
*rl
*rl
*grid
->nsubc_tot
*grid
->na_c
/(grid
->size
[XX
]*grid
->size
[YY
]*grid
->size
[ZZ
])));
995 for (int si
= 0; si
<= c_gpuNumClusterPerCell
; si
++)
999 for (int i
= 0; i
< nbl
->nsci
; i
++)
1004 for (int j4
= nbl
->sci
[i
].cj4_ind_start
; j4
< nbl
->sci
[i
].cj4_ind_end
; j4
++)
1006 for (int j
= 0; j
< c_nbnxnGpuJgroupSize
; j
++)
1009 for (int si
= 0; si
< c_gpuNumClusterPerCell
; si
++)
1011 if (nbl
->cj4
[j4
].imei
[0].imask
& (1U << (j
*c_gpuNumClusterPerCell
+ si
)))
1021 sum_nsp2
+= nsp
*nsp
;
1022 nsp_max
= std::max(nsp_max
, nsp
);
1026 sum_nsp
/= nbl
->nsci
;
1027 sum_nsp2
/= nbl
->nsci
;
1029 fprintf(fp
, "nbl #cluster-pairs: av %.1f stddev %.1f max %d\n",
1030 sum_nsp
, std::sqrt(sum_nsp2
- sum_nsp
*sum_nsp
), nsp_max
);
1034 for (b
= 0; b
<= c_gpuNumClusterPerCell
; b
++)
1036 fprintf(fp
, "nbl j-list #i-subcell %d %7d %4.1f\n",
1038 100.0*c
[b
]/(double)(nbl
->ncj4
*c_nbnxnGpuJgroupSize
));
1043 /* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp */
1044 static void low_get_nbl_exclusions(nbnxn_pairlist_t
*nbl
, int cj4
,
1045 int warp
, nbnxn_excl_t
**excl
)
1047 if (nbl
->cj4
[cj4
].imei
[warp
].excl_ind
== 0)
1049 /* No exclusions set, make a new list entry */
1050 nbl
->cj4
[cj4
].imei
[warp
].excl_ind
= nbl
->nexcl
;
1052 *excl
= &nbl
->excl
[nbl
->cj4
[cj4
].imei
[warp
].excl_ind
];
1053 set_no_excls(*excl
);
1057 /* We already have some exclusions, new ones can be added to the list */
1058 *excl
= &nbl
->excl
[nbl
->cj4
[cj4
].imei
[warp
].excl_ind
];
1062 /* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp,
1063 * generates a new element and allocates extra memory, if necessary.
1065 static void get_nbl_exclusions_1(nbnxn_pairlist_t
*nbl
, int cj4
,
1066 int warp
, nbnxn_excl_t
**excl
)
1068 if (nbl
->cj4
[cj4
].imei
[warp
].excl_ind
== 0)
1070 /* We need to make a new list entry, check if we have space */
1071 check_excl_space(nbl
, 1);
1073 low_get_nbl_exclusions(nbl
, cj4
, warp
, excl
);
1076 /* Returns pointers to the exclusion masks for cj4-unit cj4 for both warps,
1077 * generates a new element and allocates extra memory, if necessary.
1079 static void get_nbl_exclusions_2(nbnxn_pairlist_t
*nbl
, int cj4
,
1080 nbnxn_excl_t
**excl_w0
,
1081 nbnxn_excl_t
**excl_w1
)
1083 /* Check for space we might need */
1084 check_excl_space(nbl
, 2);
1086 low_get_nbl_exclusions(nbl
, cj4
, 0, excl_w0
);
1087 low_get_nbl_exclusions(nbl
, cj4
, 1, excl_w1
);
1090 /* Sets the self exclusions i=j and pair exclusions i>j */
1091 static void set_self_and_newton_excls_supersub(nbnxn_pairlist_t
*nbl
,
1092 int cj4_ind
, int sj_offset
,
1093 int i_cluster_in_cell
)
1095 nbnxn_excl_t
*excl
[c_nbnxnGpuClusterpairSplit
];
1097 /* Here we only set the set self and double pair exclusions */
1099 assert(c_nbnxnGpuClusterpairSplit
== 2);
1101 get_nbl_exclusions_2(nbl
, cj4_ind
, &excl
[0], &excl
[1]);
1103 /* Only minor < major bits set */
1104 for (int ej
= 0; ej
< nbl
->na_ci
; ej
++)
1107 for (int ei
= ej
; ei
< nbl
->na_ci
; ei
++)
1109 excl
[w
]->pair
[(ej
& (c_nbnxnGpuJgroupSize
-1))*nbl
->na_ci
+ ei
] &=
1110 ~(1U << (sj_offset
*c_gpuNumClusterPerCell
+ i_cluster_in_cell
));
1115 /* Returns a diagonal or off-diagonal interaction mask for plain C lists */
1116 static unsigned int get_imask(gmx_bool rdiag
, int ci
, int cj
)
1118 return (rdiag
&& ci
== cj
? NBNXN_INTERACTION_MASK_DIAG
: NBNXN_INTERACTION_MASK_ALL
);
1121 /* Returns a diagonal or off-diagonal interaction mask for cj-size=2 */
1122 static unsigned int get_imask_simd_j2(gmx_bool rdiag
, int ci
, int cj
)
1124 return (rdiag
&& ci
*2 == cj
? NBNXN_INTERACTION_MASK_DIAG_J2_0
:
1125 (rdiag
&& ci
*2+1 == cj
? NBNXN_INTERACTION_MASK_DIAG_J2_1
:
1126 NBNXN_INTERACTION_MASK_ALL
));
1129 /* Returns a diagonal or off-diagonal interaction mask for cj-size=4 */
1130 static unsigned int get_imask_simd_j4(gmx_bool rdiag
, int ci
, int cj
)
1132 return (rdiag
&& ci
== cj
? NBNXN_INTERACTION_MASK_DIAG
: NBNXN_INTERACTION_MASK_ALL
);
1135 /* Returns a diagonal or off-diagonal interaction mask for cj-size=8 */
1136 static unsigned int get_imask_simd_j8(gmx_bool rdiag
, int ci
, int cj
)
1138 return (rdiag
&& ci
== cj
*2 ? NBNXN_INTERACTION_MASK_DIAG_J8_0
:
1139 (rdiag
&& ci
== cj
*2+1 ? NBNXN_INTERACTION_MASK_DIAG_J8_1
:
1140 NBNXN_INTERACTION_MASK_ALL
));
1144 #if GMX_SIMD_REAL_WIDTH == 2
1145 #define get_imask_simd_4xn get_imask_simd_j2
1147 #if GMX_SIMD_REAL_WIDTH == 4
1148 #define get_imask_simd_4xn get_imask_simd_j4
1150 #if GMX_SIMD_REAL_WIDTH == 8
1151 #define get_imask_simd_4xn get_imask_simd_j8
1152 #define get_imask_simd_2xnn get_imask_simd_j4
1154 #if GMX_SIMD_REAL_WIDTH == 16
1155 #define get_imask_simd_2xnn get_imask_simd_j8
1159 /* Plain C code for making a pair list of cell ci vs cell cjf-cjl.
1160 * Checks bounding box distances and possibly atom pair distances.
1162 static void make_cluster_list_simple(const nbnxn_grid_t
*gridj
,
1163 nbnxn_pairlist_t
*nbl
,
1164 int ci
, int cjf
, int cjl
,
1165 gmx_bool remove_sub_diag
,
1167 real rl2
, float rbb2
,
1170 const nbnxn_bb_t
*bb_ci
;
1177 bb_ci
= nbl
->work
->bb_ci
;
1178 x_ci
= nbl
->work
->x_ci
;
1181 while (!InRange
&& cjf
<= cjl
)
1183 d2
= subc_bb_dist2(0, bb_ci
, cjf
, gridj
->bb
);
1186 /* Check if the distance is within the distance where
1187 * we use only the bounding box distance rbb,
1188 * or within the cut-off and there is at least one atom pair
1189 * within the cut-off.
1197 cjf_gl
= gridj
->cell0
+ cjf
;
1198 for (int i
= 0; i
< NBNXN_CPU_CLUSTER_I_SIZE
&& !InRange
; i
++)
1200 for (int j
= 0; j
< NBNXN_CPU_CLUSTER_I_SIZE
; j
++)
1202 InRange
= InRange
||
1203 (gmx::square(x_ci
[i
*STRIDE_XYZ
+XX
] - x_j
[(cjf_gl
*NBNXN_CPU_CLUSTER_I_SIZE
+j
)*STRIDE_XYZ
+XX
]) +
1204 gmx::square(x_ci
[i
*STRIDE_XYZ
+YY
] - x_j
[(cjf_gl
*NBNXN_CPU_CLUSTER_I_SIZE
+j
)*STRIDE_XYZ
+YY
]) +
1205 gmx::square(x_ci
[i
*STRIDE_XYZ
+ZZ
] - x_j
[(cjf_gl
*NBNXN_CPU_CLUSTER_I_SIZE
+j
)*STRIDE_XYZ
+ZZ
]) < rl2
);
1208 *ndistc
+= NBNXN_CPU_CLUSTER_I_SIZE
*NBNXN_CPU_CLUSTER_I_SIZE
;
1221 while (!InRange
&& cjl
> cjf
)
1223 d2
= subc_bb_dist2(0, bb_ci
, cjl
, gridj
->bb
);
1226 /* Check if the distance is within the distance where
1227 * we use only the bounding box distance rbb,
1228 * or within the cut-off and there is at least one atom pair
1229 * within the cut-off.
1237 cjl_gl
= gridj
->cell0
+ cjl
;
1238 for (int i
= 0; i
< NBNXN_CPU_CLUSTER_I_SIZE
&& !InRange
; i
++)
1240 for (int j
= 0; j
< NBNXN_CPU_CLUSTER_I_SIZE
; j
++)
1242 InRange
= InRange
||
1243 (gmx::square(x_ci
[i
*STRIDE_XYZ
+XX
] - x_j
[(cjl_gl
*NBNXN_CPU_CLUSTER_I_SIZE
+j
)*STRIDE_XYZ
+XX
]) +
1244 gmx::square(x_ci
[i
*STRIDE_XYZ
+YY
] - x_j
[(cjl_gl
*NBNXN_CPU_CLUSTER_I_SIZE
+j
)*STRIDE_XYZ
+YY
]) +
1245 gmx::square(x_ci
[i
*STRIDE_XYZ
+ZZ
] - x_j
[(cjl_gl
*NBNXN_CPU_CLUSTER_I_SIZE
+j
)*STRIDE_XYZ
+ZZ
]) < rl2
);
1248 *ndistc
+= NBNXN_CPU_CLUSTER_I_SIZE
*NBNXN_CPU_CLUSTER_I_SIZE
;
1258 for (int cj
= cjf
; cj
<= cjl
; cj
++)
1260 /* Store cj and the interaction mask */
1261 nbl
->cj
[nbl
->ncj
].cj
= gridj
->cell0
+ cj
;
1262 nbl
->cj
[nbl
->ncj
].excl
= get_imask(remove_sub_diag
, ci
, cj
);
1265 /* Increase the closing index in i super-cell list */
1266 nbl
->ci
[nbl
->nci
].cj_ind_end
= nbl
->ncj
;
1270 #ifdef GMX_NBNXN_SIMD_4XN
1271 #include "gromacs/mdlib/nbnxn_search_simd_4xn.h"
1273 #ifdef GMX_NBNXN_SIMD_2XNN
1274 #include "gromacs/mdlib/nbnxn_search_simd_2xnn.h"
1277 /* Plain C or SIMD4 code for making a pair list of super-cell sci vs scj.
1278 * Checks bounding box distances and possibly atom pair distances.
1280 static void make_cluster_list_supersub(const nbnxn_grid_t
*gridi
,
1281 const nbnxn_grid_t
*gridj
,
1282 nbnxn_pairlist_t
*nbl
,
1284 gmx_bool sci_equals_scj
,
1285 int stride
, const real
*x
,
1286 real rl2
, float rbb2
,
1289 nbnxn_list_work_t
*work
= nbl
->work
;
1292 const float *pbb_ci
= work
->pbb_ci
;
1294 const nbnxn_bb_t
*bb_ci
= work
->bb_ci
;
1297 assert(c_nbnxnGpuClusterSize
== gridi
->na_c
);
1298 assert(c_nbnxnGpuClusterSize
== gridj
->na_c
);
1300 /* We generate the pairlist mainly based on bounding-box distances
1301 * and do atom pair distance based pruning on the GPU.
1302 * Only if a j-group contains a single cluster-pair, we try to prune
1303 * that pair based on atom distances on the CPU to avoid empty j-groups.
1305 #define PRUNE_LIST_CPU_ONE 1
1306 #define PRUNE_LIST_CPU_ALL 0
1308 #if PRUNE_LIST_CPU_ONE
1312 float *d2l
= work
->d2
;
1314 for (int subc
= 0; subc
< gridj
->nsubc
[scj
]; subc
++)
1316 int cj4_ind
= nbl
->work
->cj_ind
/c_nbnxnGpuJgroupSize
;
1317 int cj_offset
= nbl
->work
->cj_ind
- cj4_ind
*c_nbnxnGpuJgroupSize
;
1318 nbnxn_cj4_t
*cj4
= &nbl
->cj4
[cj4_ind
];
1320 int cj
= scj
*c_gpuNumClusterPerCell
+ subc
;
1322 int cj_gl
= gridj
->cell0
*c_gpuNumClusterPerCell
+ cj
;
1324 /* Initialize this j-subcell i-subcell list */
1325 cj4
->cj
[cj_offset
] = cj_gl
;
1334 ci1
= gridi
->nsubc
[sci
];
1338 /* Determine all ci1 bb distances in one call with SIMD4 */
1339 subc_bb_dist2_simd4_xxxx(gridj
->pbb
+(cj
>>STRIDE_PBB_2LOG
)*NNBSBB_XXXX
+(cj
& (STRIDE_PBB
-1)),
1341 *ndistc
+= c_nbnxnGpuClusterSize
*2;
1345 unsigned int imask
= 0;
1346 /* We use a fixed upper-bound instead of ci1 to help optimization */
1347 for (int ci
= 0; ci
< c_gpuNumClusterPerCell
; ci
++)
1355 /* Determine the bb distance between ci and cj */
1356 d2l
[ci
] = subc_bb_dist2(ci
, bb_ci
, cj
, gridj
->bb
);
1361 #if PRUNE_LIST_CPU_ALL
1362 /* Check if the distance is within the distance where
1363 * we use only the bounding box distance rbb,
1364 * or within the cut-off and there is at least one atom pair
1365 * within the cut-off. This check is very costly.
1367 *ndistc
+= c_nbnxnGpuClusterSize
*c_nbnxnGpuClusterSize
;
1370 clusterpair_in_range(work
, ci
, cj_gl
, stride
, x
, rl2
)))
1372 /* Check if the distance between the two bounding boxes
1373 * in within the pair-list cut-off.
1378 /* Flag this i-subcell to be taken into account */
1379 imask
|= (1U << (cj_offset
*c_gpuNumClusterPerCell
+ ci
));
1381 #if PRUNE_LIST_CPU_ONE
1389 #if PRUNE_LIST_CPU_ONE
1390 /* If we only found 1 pair, check if any atoms are actually
1391 * within the cut-off, so we could get rid of it.
1393 if (npair
== 1 && d2l
[ci_last
] >= rbb2
&&
1394 !clusterpair_in_range(work
, ci_last
, cj_gl
, stride
, x
, rl2
))
1396 imask
&= ~(1U << (cj_offset
*c_gpuNumClusterPerCell
+ ci_last
));
1403 /* We have a useful sj entry, close it now */
1405 /* Set the exclucions for the ci== sj entry.
1406 * Here we don't bother to check if this entry is actually flagged,
1407 * as it will nearly always be in the list.
1411 set_self_and_newton_excls_supersub(nbl
, cj4_ind
, cj_offset
, subc
);
1414 /* Copy the cluster interaction mask to the list */
1415 for (int w
= 0; w
< c_nbnxnGpuClusterpairSplit
; w
++)
1417 cj4
->imei
[w
].imask
|= imask
;
1420 nbl
->work
->cj_ind
++;
1422 /* Keep the count */
1423 nbl
->nci_tot
+= npair
;
1425 /* Increase the closing index in i super-cell list */
1426 nbl
->sci
[nbl
->nsci
].cj4_ind_end
=
1427 (nbl
->work
->cj_ind
+ c_nbnxnGpuJgroupSize
- 1)/c_nbnxnGpuJgroupSize
;
1432 /* Set all atom-pair exclusions from the topology stored in excl
1433 * as masks in the pair-list for simple list i-entry nbl_ci
1435 static void set_ci_top_excls(const nbnxn_search_t nbs
,
1436 nbnxn_pairlist_t
*nbl
,
1437 gmx_bool diagRemoved
,
1440 const nbnxn_ci_t
*nbl_ci
,
1441 const t_blocka
*excl
)
1445 int cj_ind_first
, cj_ind_last
;
1446 int cj_first
, cj_last
;
1448 int ai
, aj
, si
, ge
, se
;
1449 int found
, cj_ind_0
, cj_ind_1
, cj_ind_m
;
1451 int inner_i
, inner_e
;
1455 if (nbl_ci
->cj_ind_end
== nbl_ci
->cj_ind_start
)
1463 cj_ind_first
= nbl_ci
->cj_ind_start
;
1464 cj_ind_last
= nbl
->ncj
- 1;
1466 cj_first
= nbl
->cj
[cj_ind_first
].cj
;
1467 cj_last
= nbl
->cj
[cj_ind_last
].cj
;
1469 /* Determine how many contiguous j-cells we have starting
1470 * from the first i-cell. This number can be used to directly
1471 * calculate j-cell indices for excluded atoms.
1474 if (na_ci_2log
== na_cj_2log
)
1476 while (cj_ind_first
+ ndirect
<= cj_ind_last
&&
1477 nbl
->cj
[cj_ind_first
+ndirect
].cj
== ci
+ ndirect
)
1482 #if NBNXN_SEARCH_BB_SIMD4
1485 while (cj_ind_first
+ ndirect
<= cj_ind_last
&&
1486 nbl
->cj
[cj_ind_first
+ndirect
].cj
== ci_to_cj(ci
, na_cj_2log
) + ndirect
)
1493 /* Loop over the atoms in the i super-cell */
1494 for (int i
= 0; i
< nbl
->na_sc
; i
++)
1496 ai
= nbs
->a
[ci
*nbl
->na_sc
+i
];
1499 si
= (i
>>na_ci_2log
);
1501 /* Loop over the topology-based exclusions for this i-atom */
1502 for (int eind
= excl
->index
[ai
]; eind
< excl
->index
[ai
+1]; eind
++)
1508 /* The self exclusion are already set, save some time */
1514 /* Without shifts we only calculate interactions j>i
1515 * for one-way pair-lists.
1517 if (diagRemoved
&& ge
<= ci
*nbl
->na_sc
+ i
)
1522 se
= (ge
>> na_cj_2log
);
1524 /* Could the cluster se be in our list? */
1525 if (se
>= cj_first
&& se
<= cj_last
)
1527 if (se
< cj_first
+ ndirect
)
1529 /* We can calculate cj_ind directly from se */
1530 found
= cj_ind_first
+ se
- cj_first
;
1534 /* Search for se using bisection */
1536 cj_ind_0
= cj_ind_first
+ ndirect
;
1537 cj_ind_1
= cj_ind_last
+ 1;
1538 while (found
== -1 && cj_ind_0
< cj_ind_1
)
1540 cj_ind_m
= (cj_ind_0
+ cj_ind_1
)>>1;
1542 cj_m
= nbl
->cj
[cj_ind_m
].cj
;
1550 cj_ind_1
= cj_ind_m
;
1554 cj_ind_0
= cj_ind_m
+ 1;
1561 inner_i
= i
- (si
<< na_ci_2log
);
1562 inner_e
= ge
- (se
<< na_cj_2log
);
1564 nbl
->cj
[found
].excl
&= ~(1U<<((inner_i
<<na_cj_2log
) + inner_e
));
1572 /* Add a new i-entry to the FEP list and copy the i-properties */
1573 static gmx_inline
void fep_list_new_nri_copy(t_nblist
*nlist
)
1575 /* Add a new i-entry */
1578 assert(nlist
->nri
< nlist
->maxnri
);
1580 /* Duplicate the last i-entry, except for jindex, which continues */
1581 nlist
->iinr
[nlist
->nri
] = nlist
->iinr
[nlist
->nri
-1];
1582 nlist
->shift
[nlist
->nri
] = nlist
->shift
[nlist
->nri
-1];
1583 nlist
->gid
[nlist
->nri
] = nlist
->gid
[nlist
->nri
-1];
1584 nlist
->jindex
[nlist
->nri
] = nlist
->nrj
;
1587 /* For load balancing of the free-energy lists over threads, we set
1588 * the maximum nrj size of an i-entry to 40. This leads to good
1589 * load balancing in the worst case scenario of a single perturbed
1590 * particle on 16 threads, while not introducing significant overhead.
1591 * Note that half of the perturbed pairs will anyhow end up in very small lists,
1592 * since non perturbed i-particles will see few perturbed j-particles).
1594 const int max_nrj_fep
= 40;
1596 /* Exclude the perturbed pairs from the Verlet list. This is only done to avoid
1597 * singularities for overlapping particles (0/0), since the charges and
1598 * LJ parameters have been zeroed in the nbnxn data structure.
1599 * Simultaneously make a group pair list for the perturbed pairs.
1601 static void make_fep_list(const nbnxn_search_t nbs
,
1602 const nbnxn_atomdata_t
*nbat
,
1603 nbnxn_pairlist_t
*nbl
,
1604 gmx_bool bDiagRemoved
,
1606 const nbnxn_grid_t
*gridi
,
1607 const nbnxn_grid_t
*gridj
,
1610 int ci
, cj_ind_start
, cj_ind_end
, cja
, cjr
;
1612 int ngid
, gid_i
= 0, gid_j
, gid
;
1613 int egp_shift
, egp_mask
;
1615 int ind_i
, ind_j
, ai
, aj
;
1617 gmx_bool bFEP_i
, bFEP_i_all
;
1619 if (nbl_ci
->cj_ind_end
== nbl_ci
->cj_ind_start
)
1627 cj_ind_start
= nbl_ci
->cj_ind_start
;
1628 cj_ind_end
= nbl_ci
->cj_ind_end
;
1630 /* In worst case we have alternating energy groups
1631 * and create #atom-pair lists, which means we need the size
1632 * of a cluster pair (na_ci*na_cj) times the number of cj's.
1634 nri_max
= nbl
->na_ci
*nbl
->na_cj
*(cj_ind_end
- cj_ind_start
);
1635 if (nlist
->nri
+ nri_max
> nlist
->maxnri
)
1637 nlist
->maxnri
= over_alloc_large(nlist
->nri
+ nri_max
);
1638 reallocate_nblist(nlist
);
1641 ngid
= nbat
->nenergrp
;
1643 if (static_cast<std::size_t>(ngid
*gridj
->na_cj
) > sizeof(gid_cj
)*8)
1645 gmx_fatal(FARGS
, "The Verlet scheme with %dx%d kernels and free-energy only supports up to %d energy groups",
1646 gridi
->na_c
, gridj
->na_cj
, (sizeof(gid_cj
)*8)/gridj
->na_cj
);
1649 egp_shift
= nbat
->neg_2log
;
1650 egp_mask
= (1<<nbat
->neg_2log
) - 1;
1652 /* Loop over the atoms in the i sub-cell */
1654 for (int i
= 0; i
< nbl
->na_ci
; i
++)
1656 ind_i
= ci
*nbl
->na_ci
+ i
;
1661 nlist
->jindex
[nri
+1] = nlist
->jindex
[nri
];
1662 nlist
->iinr
[nri
] = ai
;
1663 /* The actual energy group pair index is set later */
1664 nlist
->gid
[nri
] = 0;
1665 nlist
->shift
[nri
] = nbl_ci
->shift
& NBNXN_CI_SHIFT
;
1667 bFEP_i
= gridi
->fep
[ci
- gridi
->cell0
] & (1 << i
);
1669 bFEP_i_all
= bFEP_i_all
&& bFEP_i
;
1671 if ((nlist
->nrj
+ cj_ind_end
- cj_ind_start
)*nbl
->na_cj
> nlist
->maxnrj
)
1673 nlist
->maxnrj
= over_alloc_small((nlist
->nrj
+ cj_ind_end
- cj_ind_start
)*nbl
->na_cj
);
1674 srenew(nlist
->jjnr
, nlist
->maxnrj
);
1675 srenew(nlist
->excl_fep
, nlist
->maxnrj
);
1680 gid_i
= (nbat
->energrp
[ci
] >> (egp_shift
*i
)) & egp_mask
;
1683 for (int cj_ind
= cj_ind_start
; cj_ind
< cj_ind_end
; cj_ind
++)
1685 unsigned int fep_cj
;
1687 cja
= nbl
->cj
[cj_ind
].cj
;
1689 if (gridj
->na_cj
== gridj
->na_c
)
1691 cjr
= cja
- gridj
->cell0
;
1692 fep_cj
= gridj
->fep
[cjr
];
1695 gid_cj
= nbat
->energrp
[cja
];
1698 else if (2*gridj
->na_cj
== gridj
->na_c
)
1700 cjr
= cja
- gridj
->cell0
*2;
1701 /* Extract half of the ci fep/energrp mask */
1702 fep_cj
= (gridj
->fep
[cjr
>>1] >> ((cjr
&1)*gridj
->na_cj
)) & ((1<<gridj
->na_cj
) - 1);
1705 gid_cj
= nbat
->energrp
[cja
>>1] >> ((cja
&1)*gridj
->na_cj
*egp_shift
) & ((1<<(gridj
->na_cj
*egp_shift
)) - 1);
1710 cjr
= cja
- (gridj
->cell0
>>1);
1711 /* Combine two ci fep masks/energrp */
1712 fep_cj
= gridj
->fep
[cjr
*2] + (gridj
->fep
[cjr
*2+1] << gridj
->na_c
);
1715 gid_cj
= nbat
->energrp
[cja
*2] + (nbat
->energrp
[cja
*2+1] << (gridj
->na_c
*egp_shift
));
1719 if (bFEP_i
|| fep_cj
!= 0)
1721 for (int j
= 0; j
< nbl
->na_cj
; j
++)
1723 /* Is this interaction perturbed and not excluded? */
1724 ind_j
= cja
*nbl
->na_cj
+ j
;
1727 (bFEP_i
|| (fep_cj
& (1 << j
))) &&
1728 (!bDiagRemoved
|| ind_j
>= ind_i
))
1732 gid_j
= (gid_cj
>> (j
*egp_shift
)) & egp_mask
;
1733 gid
= GID(gid_i
, gid_j
, ngid
);
1735 if (nlist
->nrj
> nlist
->jindex
[nri
] &&
1736 nlist
->gid
[nri
] != gid
)
1738 /* Energy group pair changed: new list */
1739 fep_list_new_nri_copy(nlist
);
1742 nlist
->gid
[nri
] = gid
;
1745 if (nlist
->nrj
- nlist
->jindex
[nri
] >= max_nrj_fep
)
1747 fep_list_new_nri_copy(nlist
);
1751 /* Add it to the FEP list */
1752 nlist
->jjnr
[nlist
->nrj
] = aj
;
1753 nlist
->excl_fep
[nlist
->nrj
] = (nbl
->cj
[cj_ind
].excl
>> (i
*nbl
->na_cj
+ j
)) & 1;
1756 /* Exclude it from the normal list.
1757 * Note that the charge has been set to zero,
1758 * but we need to avoid 0/0, as perturbed atoms
1759 * can be on top of each other.
1761 nbl
->cj
[cj_ind
].excl
&= ~(1U << (i
*nbl
->na_cj
+ j
));
1767 if (nlist
->nrj
> nlist
->jindex
[nri
])
1769 /* Actually add this new, non-empty, list */
1771 nlist
->jindex
[nlist
->nri
] = nlist
->nrj
;
1778 /* All interactions are perturbed, we can skip this entry */
1779 nbl_ci
->cj_ind_end
= cj_ind_start
;
1783 /* Return the index of atom a within a cluster */
1784 static gmx_inline
int cj_mod_cj4(int cj
)
1786 return cj
& (c_nbnxnGpuJgroupSize
- 1);
1789 /* Convert a j-cluster to a cj4 group */
1790 static gmx_inline
int cj_to_cj4(int cj
)
1792 return cj
/c_nbnxnGpuJgroupSize
;
1795 /* Return the index of an j-atom within a warp */
1796 static gmx_inline
int a_mod_wj(int a
)
1798 return a
& (c_nbnxnGpuClusterSize
/c_nbnxnGpuClusterpairSplit
- 1);
1801 /* As make_fep_list above, but for super/sub lists. */
1802 static void make_fep_list_supersub(const nbnxn_search_t nbs
,
1803 const nbnxn_atomdata_t
*nbat
,
1804 nbnxn_pairlist_t
*nbl
,
1805 gmx_bool bDiagRemoved
,
1806 const nbnxn_sci_t
*nbl_sci
,
1811 const nbnxn_grid_t
*gridi
,
1812 const nbnxn_grid_t
*gridj
,
1815 int sci
, cj4_ind_start
, cj4_ind_end
, cjr
;
1818 int ind_i
, ind_j
, ai
, aj
;
1822 const nbnxn_cj4_t
*cj4
;
1824 if (nbl_sci
->cj4_ind_end
== nbl_sci
->cj4_ind_start
)
1832 cj4_ind_start
= nbl_sci
->cj4_ind_start
;
1833 cj4_ind_end
= nbl_sci
->cj4_ind_end
;
1835 /* Here we process one super-cell, max #atoms na_sc, versus a list
1836 * cj4 entries, each with max c_nbnxnGpuJgroupSize cj's, each
1837 * of size na_cj atoms.
1838 * On the GPU we don't support energy groups (yet).
1839 * So for each of the na_sc i-atoms, we need max one FEP list
1840 * for each max_nrj_fep j-atoms.
1842 nri_max
= nbl
->na_sc
*nbl
->na_cj
*(1 + ((cj4_ind_end
- cj4_ind_start
)*c_nbnxnGpuJgroupSize
)/max_nrj_fep
);
1843 if (nlist
->nri
+ nri_max
> nlist
->maxnri
)
1845 nlist
->maxnri
= over_alloc_large(nlist
->nri
+ nri_max
);
1846 reallocate_nblist(nlist
);
1849 /* Loop over the atoms in the i super-cluster */
1850 for (int c
= 0; c
< c_gpuNumClusterPerCell
; c
++)
1852 c_abs
= sci
*c_gpuNumClusterPerCell
+ c
;
1854 for (int i
= 0; i
< nbl
->na_ci
; i
++)
1856 ind_i
= c_abs
*nbl
->na_ci
+ i
;
1861 nlist
->jindex
[nri
+1] = nlist
->jindex
[nri
];
1862 nlist
->iinr
[nri
] = ai
;
1863 /* With GPUs, energy groups are not supported */
1864 nlist
->gid
[nri
] = 0;
1865 nlist
->shift
[nri
] = nbl_sci
->shift
& NBNXN_CI_SHIFT
;
1867 bFEP_i
= (gridi
->fep
[c_abs
- gridi
->cell0
*c_gpuNumClusterPerCell
] & (1 << i
));
1869 xi
= nbat
->x
[ind_i
*nbat
->xstride
+XX
] + shx
;
1870 yi
= nbat
->x
[ind_i
*nbat
->xstride
+YY
] + shy
;
1871 zi
= nbat
->x
[ind_i
*nbat
->xstride
+ZZ
] + shz
;
1873 if ((nlist
->nrj
+ cj4_ind_end
- cj4_ind_start
)*c_nbnxnGpuJgroupSize
*nbl
->na_cj
> nlist
->maxnrj
)
1875 nlist
->maxnrj
= over_alloc_small((nlist
->nrj
+ cj4_ind_end
- cj4_ind_start
)*c_nbnxnGpuJgroupSize
*nbl
->na_cj
);
1876 srenew(nlist
->jjnr
, nlist
->maxnrj
);
1877 srenew(nlist
->excl_fep
, nlist
->maxnrj
);
1880 for (int cj4_ind
= cj4_ind_start
; cj4_ind
< cj4_ind_end
; cj4_ind
++)
1882 cj4
= &nbl
->cj4
[cj4_ind
];
1884 for (int gcj
= 0; gcj
< c_nbnxnGpuJgroupSize
; gcj
++)
1886 unsigned int fep_cj
;
1888 if ((cj4
->imei
[0].imask
& (1U << (gcj
*c_gpuNumClusterPerCell
+ c
))) == 0)
1890 /* Skip this ci for this cj */
1894 cjr
= cj4
->cj
[gcj
] - gridj
->cell0
*c_gpuNumClusterPerCell
;
1896 fep_cj
= gridj
->fep
[cjr
];
1898 if (bFEP_i
|| fep_cj
!= 0)
1900 for (int j
= 0; j
< nbl
->na_cj
; j
++)
1902 /* Is this interaction perturbed and not excluded? */
1903 ind_j
= (gridj
->cell0
*c_gpuNumClusterPerCell
+ cjr
)*nbl
->na_cj
+ j
;
1906 (bFEP_i
|| (fep_cj
& (1 << j
))) &&
1907 (!bDiagRemoved
|| ind_j
>= ind_i
))
1911 unsigned int excl_bit
;
1914 get_nbl_exclusions_1(nbl
, cj4_ind
, j
>>2, &excl
);
1916 excl_pair
= a_mod_wj(j
)*nbl
->na_ci
+ i
;
1917 excl_bit
= (1U << (gcj
*c_gpuNumClusterPerCell
+ c
));
1919 dx
= nbat
->x
[ind_j
*nbat
->xstride
+XX
] - xi
;
1920 dy
= nbat
->x
[ind_j
*nbat
->xstride
+YY
] - yi
;
1921 dz
= nbat
->x
[ind_j
*nbat
->xstride
+ZZ
] - zi
;
1923 /* The unpruned GPU list has more than 2/3
1924 * of the atom pairs beyond rlist. Using
1925 * this list will cause a lot of overhead
1926 * in the CPU FEP kernels, especially
1927 * relative to the fast GPU kernels.
1928 * So we prune the FEP list here.
1930 if (dx
*dx
+ dy
*dy
+ dz
*dz
< rlist_fep2
)
1932 if (nlist
->nrj
- nlist
->jindex
[nri
] >= max_nrj_fep
)
1934 fep_list_new_nri_copy(nlist
);
1938 /* Add it to the FEP list */
1939 nlist
->jjnr
[nlist
->nrj
] = aj
;
1940 nlist
->excl_fep
[nlist
->nrj
] = (excl
->pair
[excl_pair
] & excl_bit
) ? 1 : 0;
1944 /* Exclude it from the normal list.
1945 * Note that the charge and LJ parameters have
1946 * been set to zero, but we need to avoid 0/0,
1947 * as perturbed atoms can be on top of each other.
1949 excl
->pair
[excl_pair
] &= ~excl_bit
;
1953 /* Note that we could mask out this pair in imask
1954 * if all i- and/or all j-particles are perturbed.
1955 * But since the perturbed pairs on the CPU will
1956 * take an order of magnitude more time, the GPU
1957 * will finish before the CPU and there is no gain.
1963 if (nlist
->nrj
> nlist
->jindex
[nri
])
1965 /* Actually add this new, non-empty, list */
1967 nlist
->jindex
[nlist
->nri
] = nlist
->nrj
;
1974 /* Set all atom-pair exclusions from the topology stored in excl
1975 * as masks in the pair-list for i-super-cell entry nbl_sci
1977 static void set_sci_top_excls(const nbnxn_search_t nbs
,
1978 nbnxn_pairlist_t
*nbl
,
1979 gmx_bool diagRemoved
,
1981 const nbnxn_sci_t
*nbl_sci
,
1982 const t_blocka
*excl
)
1987 int cj_ind_first
, cj_ind_last
;
1988 int cj_first
, cj_last
;
1990 int ai
, aj
, si
, ge
, se
;
1991 int found
, cj_ind_0
, cj_ind_1
, cj_ind_m
;
1993 nbnxn_excl_t
*nbl_excl
;
1994 int inner_i
, inner_e
, w
;
2000 if (nbl_sci
->cj4_ind_end
== nbl_sci
->cj4_ind_start
)
2008 cj_ind_first
= nbl_sci
->cj4_ind_start
*c_nbnxnGpuJgroupSize
;
2009 cj_ind_last
= nbl
->work
->cj_ind
- 1;
2011 cj_first
= nbl
->cj4
[nbl_sci
->cj4_ind_start
].cj
[0];
2012 cj_last
= nbl_cj(nbl
, cj_ind_last
);
2014 /* Determine how many contiguous j-clusters we have starting
2015 * from the first i-cluster. This number can be used to directly
2016 * calculate j-cluster indices for excluded atoms.
2019 while (cj_ind_first
+ ndirect
<= cj_ind_last
&&
2020 nbl_cj(nbl
, cj_ind_first
+ndirect
) == sci
*c_gpuNumClusterPerCell
+ ndirect
)
2025 /* Loop over the atoms in the i super-cell */
2026 for (int i
= 0; i
< nbl
->na_sc
; i
++)
2028 ai
= nbs
->a
[sci
*nbl
->na_sc
+i
];
2031 si
= (i
>>na_c_2log
);
2033 /* Loop over the topology-based exclusions for this i-atom */
2034 for (int eind
= excl
->index
[ai
]; eind
< excl
->index
[ai
+1]; eind
++)
2040 /* The self exclusion are already set, save some time */
2046 /* Without shifts we only calculate interactions j>i
2047 * for one-way pair-lists.
2049 if (diagRemoved
&& ge
<= sci
*nbl
->na_sc
+ i
)
2055 /* Could the cluster se be in our list? */
2056 if (se
>= cj_first
&& se
<= cj_last
)
2058 if (se
< cj_first
+ ndirect
)
2060 /* We can calculate cj_ind directly from se */
2061 found
= cj_ind_first
+ se
- cj_first
;
2065 /* Search for se using bisection */
2067 cj_ind_0
= cj_ind_first
+ ndirect
;
2068 cj_ind_1
= cj_ind_last
+ 1;
2069 while (found
== -1 && cj_ind_0
< cj_ind_1
)
2071 cj_ind_m
= (cj_ind_0
+ cj_ind_1
)>>1;
2073 cj_m
= nbl_cj(nbl
, cj_ind_m
);
2081 cj_ind_1
= cj_ind_m
;
2085 cj_ind_0
= cj_ind_m
+ 1;
2092 inner_i
= i
- si
*na_c
;
2093 inner_e
= ge
- se
*na_c
;
2095 if (nbl_imask0(nbl
, found
) & (1U << (cj_mod_cj4(found
)*c_gpuNumClusterPerCell
+ si
)))
2099 get_nbl_exclusions_1(nbl
, cj_to_cj4(found
), w
, &nbl_excl
);
2101 nbl_excl
->pair
[a_mod_wj(inner_e
)*nbl
->na_ci
+inner_i
] &=
2102 ~(1U << (cj_mod_cj4(found
)*c_gpuNumClusterPerCell
+ si
));
2111 /* Reallocate the simple ci list for at least n entries */
2112 static void nb_realloc_ci(nbnxn_pairlist_t
*nbl
, int n
)
2114 nbl
->ci_nalloc
= over_alloc_small(n
);
2115 nbnxn_realloc_void((void **)&nbl
->ci
,
2116 nbl
->nci
*sizeof(*nbl
->ci
),
2117 nbl
->ci_nalloc
*sizeof(*nbl
->ci
),
2118 nbl
->alloc
, nbl
->free
);
2121 /* Reallocate the super-cell sci list for at least n entries */
2122 static void nb_realloc_sci(nbnxn_pairlist_t
*nbl
, int n
)
2124 nbl
->sci_nalloc
= over_alloc_small(n
);
2125 nbnxn_realloc_void((void **)&nbl
->sci
,
2126 nbl
->nsci
*sizeof(*nbl
->sci
),
2127 nbl
->sci_nalloc
*sizeof(*nbl
->sci
),
2128 nbl
->alloc
, nbl
->free
);
2131 /* Make a new ci entry at index nbl->nci */
2132 static void new_ci_entry(nbnxn_pairlist_t
*nbl
, int ci
, int shift
, int flags
)
2134 if (nbl
->nci
+ 1 > nbl
->ci_nalloc
)
2136 nb_realloc_ci(nbl
, nbl
->nci
+1);
2138 nbl
->ci
[nbl
->nci
].ci
= ci
;
2139 nbl
->ci
[nbl
->nci
].shift
= shift
;
2140 /* Store the interaction flags along with the shift */
2141 nbl
->ci
[nbl
->nci
].shift
|= flags
;
2142 nbl
->ci
[nbl
->nci
].cj_ind_start
= nbl
->ncj
;
2143 nbl
->ci
[nbl
->nci
].cj_ind_end
= nbl
->ncj
;
2146 /* Make a new sci entry at index nbl->nsci */
2147 static void new_sci_entry(nbnxn_pairlist_t
*nbl
, int sci
, int shift
)
2149 if (nbl
->nsci
+ 1 > nbl
->sci_nalloc
)
2151 nb_realloc_sci(nbl
, nbl
->nsci
+1);
2153 nbl
->sci
[nbl
->nsci
].sci
= sci
;
2154 nbl
->sci
[nbl
->nsci
].shift
= shift
;
2155 nbl
->sci
[nbl
->nsci
].cj4_ind_start
= nbl
->ncj4
;
2156 nbl
->sci
[nbl
->nsci
].cj4_ind_end
= nbl
->ncj4
;
2159 /* Sort the simple j-list cj on exclusions.
2160 * Entries with exclusions will all be sorted to the beginning of the list.
2162 static void sort_cj_excl(nbnxn_cj_t
*cj
, int ncj
,
2163 nbnxn_list_work_t
*work
)
2167 if (ncj
> work
->cj_nalloc
)
2169 work
->cj_nalloc
= over_alloc_large(ncj
);
2170 srenew(work
->cj
, work
->cj_nalloc
);
2173 /* Make a list of the j-cells involving exclusions */
2175 for (int j
= 0; j
< ncj
; j
++)
2177 if (cj
[j
].excl
!= NBNXN_INTERACTION_MASK_ALL
)
2179 work
->cj
[jnew
++] = cj
[j
];
2182 /* Check if there are exclusions at all or not just the first entry */
2183 if (!((jnew
== 0) ||
2184 (jnew
== 1 && cj
[0].excl
!= NBNXN_INTERACTION_MASK_ALL
)))
2186 for (int j
= 0; j
< ncj
; j
++)
2188 if (cj
[j
].excl
== NBNXN_INTERACTION_MASK_ALL
)
2190 work
->cj
[jnew
++] = cj
[j
];
2193 for (int j
= 0; j
< ncj
; j
++)
2195 cj
[j
] = work
->cj
[j
];
2200 /* Close this simple list i entry */
2201 static void close_ci_entry_simple(nbnxn_pairlist_t
*nbl
)
2205 /* All content of the new ci entry have already been filled correctly,
2206 * we only need to increase the count here (for non empty lists).
2208 jlen
= nbl
->ci
[nbl
->nci
].cj_ind_end
- nbl
->ci
[nbl
->nci
].cj_ind_start
;
2211 sort_cj_excl(nbl
->cj
+nbl
->ci
[nbl
->nci
].cj_ind_start
, jlen
, nbl
->work
);
2213 /* The counts below are used for non-bonded pair/flop counts
2214 * and should therefore match the available kernel setups.
2216 if (!(nbl
->ci
[nbl
->nci
].shift
& NBNXN_CI_DO_COUL(0)))
2218 nbl
->work
->ncj_noq
+= jlen
;
2220 else if ((nbl
->ci
[nbl
->nci
].shift
& NBNXN_CI_HALF_LJ(0)) ||
2221 !(nbl
->ci
[nbl
->nci
].shift
& NBNXN_CI_DO_LJ(0)))
2223 nbl
->work
->ncj_hlj
+= jlen
;
/* Split sci entry for load balancing on the GPU.
 * Splitting ensures we have enough lists to fully utilize the whole GPU.
 * With progBal we generate progressively smaller lists, which improves
 * load balancing. As we only know the current count on our own thread,
 * we will need to estimate the current total amount of i-entries.
 * As the lists get concatenated later, this estimate depends
 * both on nthread and our own thread index.
 */
static void split_sci_entry(nbnxn_pairlist_t *nbl,
                            int nsp_target_av,
                            gmx_bool progBal, float nsp_tot_est,
                            int thread, int nthread)
{
    int nsp_max;
    int cj4_start, cj4_end, j4len;
    int sci;
    int nsp, nsp_sci, nsp_cj4, nsp_cj4_e, nsp_cj4_p;

    if (progBal)
    {
        float nsp_est;

        /* Estimate the total number of ci's of the nblist combined
         * over all threads using the target number of ci's.
         */
        nsp_est = (nsp_tot_est*thread)/nthread + nbl->nci_tot;

        /* The first ci blocks should be larger, to avoid overhead.
         * The last ci blocks should be smaller, to improve load balancing.
         * The factor 3/2 makes the first block 3/2 times the target average
         * and ensures that the total number of blocks ends up equal to
         * that of equally sized blocks of size nsp_target_av.
         */
        nsp_max = static_cast<int>(nsp_target_av*(nsp_tot_est*1.5/(nsp_est + nsp_tot_est)));
    }
    else
    {
        nsp_max = nsp_target_av;
    }

    /* Since nsp_max is a maximum/cut-off (this avoids high outliers,
     * which lead to load imbalance), not an average, we add half the
     * number of pairs in a cj4 block to get the average about right.
     */
    nsp_max += c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize/2;

    cj4_start = nbl->sci[nbl->nsci-1].cj4_ind_start;
    cj4_end   = nbl->sci[nbl->nsci-1].cj4_ind_end;
    j4len     = cj4_end - cj4_start;

    if (j4len > 1 && j4len*c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize > nsp_max)
    {
        /* Remove the last ci entry and process the cj4's again */
        nbl->nsci -= 1;

        sci       = nbl->nsci;
        nsp       = 0;
        nsp_sci   = 0;
        nsp_cj4_e = 0;
        nsp_cj4   = 0;
        for (int cj4 = cj4_start; cj4 < cj4_end; cj4++)
        {
            nsp_cj4_p = nsp_cj4;
            /* Count the number of cluster pairs in this cj4 group */
            nsp_cj4   = 0;
            for (int p = 0; p < c_gpuNumClusterPerCell*c_nbnxnGpuJgroupSize; p++)
            {
                nsp_cj4 += (nbl->cj4[cj4].imei[0].imask >> p) & 1;
            }

            /* Check if we should split at this cj4 to get a list of size nsp */
            if (nsp > 0 && nsp + nsp_cj4 > nsp_max)
            {
                /* Split the list at cj4 */
                nbl->sci[sci].cj4_ind_end = cj4;
                /* Create a new sci entry */
                sci++;
                nbl->nsci++;
                if (nbl->nsci+1 > nbl->sci_nalloc)
                {
                    nb_realloc_sci(nbl, nbl->nsci+1);
                }
                nbl->sci[sci].sci           = nbl->sci[nbl->nsci-1].sci;
                nbl->sci[sci].shift         = nbl->sci[nbl->nsci-1].shift;
                nbl->sci[sci].cj4_ind_start = cj4;
                nsp_sci                     = nsp;
                nsp_cj4_e                   = nsp_cj4_p;
                nsp                         = 0;
            }
            nsp += nsp_cj4;
        }

        /* Put the remaining cj4's in the last sci entry */
        nbl->sci[sci].cj4_ind_end = cj4_end;

        /* Possibly balance out the last two sci's
         * by moving the last cj4 of the second last sci.
         */
        if (nsp_sci - nsp_cj4_e >= nsp + nsp_cj4_e)
        {
            nbl->sci[sci-1].cj4_ind_end--;
            nbl->sci[sci].cj4_ind_start--;
        }

        nbl->nsci++;
    }
}
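/* Illustration of the progressive balancing above, with assumed numbers
 * (not taken from any actual run): with nsp_target_av = 36 and
 * nsp_tot_est = 10000, a thread whose running estimate nsp_est is still
 * near 0 gets nsp_max ~ 36*1.5 = 54, while a thread whose estimate has
 * reached nsp_est = nsp_tot_est gets nsp_max ~ 36*1.5/2 = 27. Blocks thus
 * shrink from 3/2 to 3/4 of the target as the lists fill, which averages
 * out to the target block size.
 */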
/* Close this super/sub list i entry */
static void close_ci_entry_supersub(nbnxn_pairlist_t *nbl,
                                    int nsp_max_av,
                                    gmx_bool progBal, float nsp_tot_est,
                                    int thread, int nthread)
{
    /* All content of the new ci entry has already been filled correctly,
     * we only need to increase the count here (for non-empty lists).
     */
    int j4len = nbl->sci[nbl->nsci].cj4_ind_end - nbl->sci[nbl->nsci].cj4_ind_start;
    if (j4len > 0)
    {
        /* We can only have complete blocks of 4 j-entries in a list,
         * so round the count up before closing.
         */
        nbl->ncj4         = (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize;
        nbl->work->cj_ind = nbl->ncj4*c_nbnxnGpuJgroupSize;

        nbl->nsci++;

        if (nsp_max_av > 0)
        {
            /* Measure the size of the new entry and potentially split it */
            split_sci_entry(nbl, nsp_max_av, progBal, nsp_tot_est,
                            thread, nthread);
        }
    }
}
/* Syncs the working array before adding another grid pair to the list */
static void sync_work(nbnxn_pairlist_t *nbl)
{
    if (!nbl->bSimple)
    {
        nbl->work->cj_ind   = nbl->ncj4*c_nbnxnGpuJgroupSize;
        nbl->work->cj4_init = nbl->ncj4;
    }
}
/* Clears an nbnxn_pairlist_t data structure */
static void clear_pairlist(nbnxn_pairlist_t *nbl)
{
    nbl->nci           = 0;
    nbl->nsci          = 0;
    nbl->ncj           = 0;
    nbl->ncj4          = 0;
    nbl->nci_tot       = 0;
    nbl->nexcl         = 1;

    nbl->work->ncj_noq = 0;
    nbl->work->ncj_hlj = 0;
}
/* Clears a group scheme pair list */
static void clear_pairlist_fep(t_nblist *nl)
{
    nl->nri = 0;
    nl->nrj = 0;
    if (nl->jindex == NULL)
    {
        snew(nl->jindex, 1);
    }
    nl->jindex[0] = 0;
}
/* Sets a simple list i-cell bounding box, including PBC shift */
static gmx_inline void set_icell_bb_simple(const nbnxn_bb_t *bb, int ci,
                                           real shx, real shy, real shz,
                                           nbnxn_bb_t *bb_ci)
{
    bb_ci->lower[BB_X] = bb[ci].lower[BB_X] + shx;
    bb_ci->lower[BB_Y] = bb[ci].lower[BB_Y] + shy;
    bb_ci->lower[BB_Z] = bb[ci].lower[BB_Z] + shz;
    bb_ci->upper[BB_X] = bb[ci].upper[BB_X] + shx;
    bb_ci->upper[BB_Y] = bb[ci].upper[BB_Y] + shy;
    bb_ci->upper[BB_Z] = bb[ci].upper[BB_Z] + shz;
}
/* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
static void set_icell_bbxxxx_supersub(const float *bb, int ci,
                                      real shx, real shy, real shz,
                                      float *bb_ci)
{
    int ia = ci*(c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX;
    for (int m = 0; m < (c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX; m += NNBSBB_XXXX)
    {
        for (int i = 0; i < STRIDE_PBB; i++)
        {
            bb_ci[m+0*STRIDE_PBB+i] = bb[ia+m+0*STRIDE_PBB+i] + shx;
            bb_ci[m+1*STRIDE_PBB+i] = bb[ia+m+1*STRIDE_PBB+i] + shy;
            bb_ci[m+2*STRIDE_PBB+i] = bb[ia+m+2*STRIDE_PBB+i] + shz;
            bb_ci[m+3*STRIDE_PBB+i] = bb[ia+m+3*STRIDE_PBB+i] + shx;
            bb_ci[m+4*STRIDE_PBB+i] = bb[ia+m+4*STRIDE_PBB+i] + shy;
            bb_ci[m+5*STRIDE_PBB+i] = bb[ia+m+5*STRIDE_PBB+i] + shz;
        }
    }
}
/* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
static void set_icell_bb_supersub(const nbnxn_bb_t *bb, int ci,
                                  real shx, real shy, real shz,
                                  nbnxn_bb_t *bb_ci)
{
    for (int i = 0; i < c_gpuNumClusterPerCell; i++)
    {
        set_icell_bb_simple(bb, ci*c_gpuNumClusterPerCell+i,
                            shx, shy, shz,
                            &bb_ci[i]);
    }
}
/* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
static void icell_set_x_simple(int ci,
                               real shx, real shy, real shz,
                               int stride, const real *x,
                               nbnxn_list_work_t *work)
{
    int ia = ci*NBNXN_CPU_CLUSTER_I_SIZE;

    for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE; i++)
    {
        work->x_ci[i*STRIDE_XYZ+XX] = x[(ia+i)*stride+XX] + shx;
        work->x_ci[i*STRIDE_XYZ+YY] = x[(ia+i)*stride+YY] + shy;
        work->x_ci[i*STRIDE_XYZ+ZZ] = x[(ia+i)*stride+ZZ] + shz;
    }
}
/* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
static void icell_set_x_supersub(int ci,
                                 real shx, real shy, real shz,
                                 int stride, const real *x,
                                 nbnxn_list_work_t *work)
{
#if !GMX_SIMD4_HAVE_REAL

    real * x_ci = work->x_ci;

    int    ia = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize;
    for (int i = 0; i < c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize; i++)
    {
        x_ci[i*DIM + XX] = x[(ia+i)*stride + XX] + shx;
        x_ci[i*DIM + YY] = x[(ia+i)*stride + YY] + shy;
        x_ci[i*DIM + ZZ] = x[(ia+i)*stride + ZZ] + shz;
    }

#else /* !GMX_SIMD4_HAVE_REAL */

    real * x_ci = work->x_ci_simd;

    for (int si = 0; si < c_gpuNumClusterPerCell; si++)
    {
        for (int i = 0; i < c_nbnxnGpuClusterSize; i += GMX_SIMD4_WIDTH)
        {
            int io = si*c_nbnxnGpuClusterSize + i;
            int ia = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize + io;
            for (int j = 0; j < GMX_SIMD4_WIDTH; j++)
            {
                x_ci[io*DIM + j + XX*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + XX] + shx;
                x_ci[io*DIM + j + YY*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + YY] + shy;
                x_ci[io*DIM + j + ZZ*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + ZZ] + shz;
            }
        }
    }

#endif /* !GMX_SIMD4_HAVE_REAL */
}
static real minimum_subgrid_size_xy(const nbnxn_grid_t *grid)
{
    if (grid->bSimple)
    {
        return std::min(grid->sx, grid->sy);
    }
    else
    {
        return std::min(grid->sx/c_gpuNumClusterPerCellX,
                        grid->sy/c_gpuNumClusterPerCellY);
    }
}
static real effective_buffer_1x1_vs_MxN(const nbnxn_grid_t *gridi,
                                        const nbnxn_grid_t *gridj)
{
    const real eff_1x1_buffer_fac_overest = 0.1;

    /* Determine an atom-pair list cut-off buffer size for atom pairs,
     * to be added to rlist (including buffer) used for MxN.
     * This is for converting an MxN list to a 1x1 list. This means we can't
     * use the normal buffer estimate, as we have an MxN list in which
     * some atom pairs beyond rlist are missing. We want to capture
     * the beneficial effect of buffering by extra pairs just outside rlist,
     * while removing the useless pairs that are further away from rlist.
     * (Also the buffer could have been set manually not using the estimate.)
     * This buffer size is an overestimate.
     * We add 10% of the smallest grid sub-cell dimensions.
     * Note that the z-size differs per cell and we don't use this,
     * so we overestimate.
     * With PME, the 10% value gives a buffer that is somewhat larger
     * than the effective buffer with a tolerance of 0.005 kJ/mol/ps.
     * Smaller tolerances or using RF lead to a smaller effective buffer,
     * so 10% gives a safe overestimate.
     */
    return eff_1x1_buffer_fac_overest*(minimum_subgrid_size_xy(gridi) +
                                       minimum_subgrid_size_xy(gridj));
}
/* Clusters at the cut-off only increase rlist by 60% of their size */
static real nbnxn_rlist_inc_outside_fac = 0.6;

/* Due to the cluster size the effective pair-list is longer than
 * that of a simple atom pair-list. This function gives the extra distance.
 */
real nbnxn_get_rlist_effective_inc(int cluster_size_j, real atom_density)
{
    int  cluster_size_i;
    real vol_inc_i, vol_inc_j;

    /* We should get this from the setup, but currently it's the same for
     * all setups, including GPUs.
     */
    cluster_size_i = NBNXN_CPU_CLUSTER_I_SIZE;

    vol_inc_i = (cluster_size_i - 1)/atom_density;
    vol_inc_j = (cluster_size_j - 1)/atom_density;

    return nbnxn_rlist_inc_outside_fac*std::cbrt(vol_inc_i + vol_inc_j);
}
/* Estimates the interaction volume^2 for non-local interactions */
static real nonlocal_vol2(const struct gmx_domdec_zones_t *zones, rvec ls, real r)
{
    real cl, ca, za;
    real vold_est;
    real vol2_est_tot;

    vol2_est_tot = 0;

    /* Here we simply add up the non-home interaction volume^2 contributions
     * of the 1, 2 or 3 1D decomposition zones. As these volumes are not
     * additive, this is an overestimate, but it would only be significant
     * in the limit of small cells, where we anyhow need to split the lists
     * into as small parts as possible.
     */
    for (int z = 0; z < zones->n; z++)
    {
        if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
        {
            cl = 0;
            ca = 1;
            za = 1;
            for (int d = 0; d < DIM; d++)
            {
                if (zones->shift[z][d] == 0)
                {
                    cl += 0.5*ls[d];
                    ca *= ls[d];
                    za *= zones->size[z].x1[d] - zones->size[z].x0[d];
                }
            }

            /* 4 octants of a sphere */
            vold_est  = 0.25*M_PI*r*r*r*r;
            /* 4 quarter pie slices on the edges */
            vold_est += 4*cl*M_PI/6.0*r*r*r;
            /* One rectangular volume on a face */
            vold_est += ca*0.5*r*r;

            vol2_est_tot += vold_est*za;
        }
    }

    return vol2_est_tot;
}
/* Estimates the average size of a full j-list for super/sub setup */
static void get_nsubpair_target(const nbnxn_search_t  nbs,
                                int                   iloc,
                                real                  rlist,
                                int                   min_ci_balanced,
                                int                  *nsubpair_target,
                                float                *nsubpair_tot_est)
{
    /* The target value of 36 seems to be the optimum for Kepler.
     * Maxwell is less sensitive to the exact value.
     */
    const int           nsubpair_target_min = 36;
    const nbnxn_grid_t *grid;
    rvec                ls;
    real                xy_diag2, r_eff_sup, vol_est, nsp_est, nsp_est_nl;

    grid = &nbs->grid[0];

    /* We don't need to balance list sizes if:
     * - We didn't request balancing.
     * - The number of grid cells >= the number of lists requested,
     *   since we will always generate at least #cells lists.
     * - We don't have any cells, since then there won't be any lists.
     */
    if (min_ci_balanced <= 0 || grid->nc >= min_ci_balanced || grid->nc == 0)
    {
        /* nsubpair_target==0 signals no balancing */
        *nsubpair_target  = 0;
        *nsubpair_tot_est = 0;

        return;
    }

    ls[XX] = (grid->c1[XX] - grid->c0[XX])/(grid->ncx*c_gpuNumClusterPerCellX);
    ls[YY] = (grid->c1[YY] - grid->c0[YY])/(grid->ncy*c_gpuNumClusterPerCellY);
    ls[ZZ] = grid->na_c/(grid->atom_density*ls[XX]*ls[YY]);

    /* The average squared length of the diagonal of a sub cell */
    xy_diag2 = ls[XX]*ls[XX] + ls[YY]*ls[YY] + ls[ZZ]*ls[ZZ];

    /* The formulas below are a heuristic estimate of the average nsj per si */
    r_eff_sup = rlist + nbnxn_rlist_inc_outside_fac*gmx::square((grid->na_c - 1.0)/grid->na_c)*std::sqrt(xy_diag2/3);

    if (!nbs->DomDec || nbs->zones->n == 1)
    {
        nsp_est_nl = 0;
    }
    else
    {
        nsp_est_nl =
            gmx::square(grid->atom_density/grid->na_c)*
            nonlocal_vol2(nbs->zones, ls, r_eff_sup);
    }

    if (LOCAL_I(iloc))
    {
        /* Sub-cell interacts with itself */
        vol_est  = ls[XX]*ls[YY]*ls[ZZ];
        /* 6/2 rectangular volume on the faces */
        vol_est += (ls[XX]*ls[YY] + ls[XX]*ls[ZZ] + ls[YY]*ls[ZZ])*r_eff_sup;
        /* 12/2 quarter pie slices on the edges */
        vol_est += 2*(ls[XX] + ls[YY] + ls[ZZ])*0.25*M_PI*gmx::square(r_eff_sup);
        /* 4 octants of a sphere */
        vol_est += 0.5*4.0/3.0*M_PI*gmx::power3(r_eff_sup);

        /* Estimate the number of cluster pairs as the local number of
         * clusters times the volume they interact with times the density.
         */
        nsp_est = grid->nsubc_tot*vol_est*grid->atom_density/grid->na_c;

        /* Subtract the non-local pair count */
        nsp_est -= nsp_est_nl;

        /* For small cut-offs nsp_est will be an underestimate.
         * With DD nsp_est_nl is an overestimate so nsp_est can get negative.
         * So to avoid too small or negative nsp_est we set a minimum of
         * all cells interacting with all 3^3 direct neighbors (3^3-1)/2+1=14.
         * This might be a slight overestimate for small non-periodic groups of
         * atoms as will occur for a local domain with DD, but for small
         * groups of atoms we'll anyhow be limited by nsubpair_target_min,
         * so this overestimation will not matter.
         */
        nsp_est = std::max(nsp_est, grid->nsubc_tot*static_cast<real>(14));

        if (debug)
        {
            fprintf(debug, "nsp_est local %5.1f non-local %5.1f\n",
                    nsp_est, nsp_est_nl);
        }
    }
    else
    {
        nsp_est = nsp_est_nl;
    }

    /* Thus the (average) maximum j-list size should be as follows.
     * Since there is overhead, we shouldn't make the lists too small
     * (and we can't chop up j-groups) so we use a minimum target size of 36.
     */
    *nsubpair_target  = std::max(nsubpair_target_min,
                                 static_cast<int>(nsp_est/min_ci_balanced + 0.5));
    *nsubpair_tot_est = static_cast<int>(nsp_est);

    if (debug)
    {
        fprintf(debug, "nbl nsp estimate %.1f, nsubpair_target %d\n",
                nsp_est, *nsubpair_target);
    }
}
/* Debug list print function */
static void print_nblist_ci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
{
    for (int i = 0; i < nbl->nci; i++)
    {
        fprintf(fp, "ci %4d  shift %2d  ncj %3d\n",
                nbl->ci[i].ci, nbl->ci[i].shift,
                nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start);

        for (int j = nbl->ci[i].cj_ind_start; j < nbl->ci[i].cj_ind_end; j++)
        {
            fprintf(fp, "  cj %5d  imask %x\n",
                    nbl->cj[j].cj,
                    nbl->cj[j].excl);
        }
    }
}
/* Debug list print function */
static void print_nblist_sci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
{
    for (int i = 0; i < nbl->nsci; i++)
    {
        fprintf(fp, "ci %4d  shift %2d  ncj4 %2d\n",
                nbl->sci[i].sci, nbl->sci[i].shift,
                nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start);

        int ncp = 0;
        for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
        {
            for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
            {
                fprintf(fp, "  sj %5d  imask %x\n",
                        nbl->cj4[j4].cj[j],
                        nbl->cj4[j4].imei[0].imask);
                for (int si = 0; si < c_gpuNumClusterPerCell; si++)
                {
                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))
                    {
                        ncp++;
                    }
                }
            }
        }
        fprintf(fp, "ci %4d  shift %2d  ncj4 %2d ncp %3d\n",
                nbl->sci[i].sci, nbl->sci[i].shift,
                nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start,
                ncp);
    }
}
/* Combine pair lists *nbl generated on multiple threads nblc */
static void combine_nblists(int nnbl, nbnxn_pairlist_t **nbl,
                            nbnxn_pairlist_t *nblc)
{
    int nsci, ncj4, nexcl;

    if (nblc->bSimple)
    {
        gmx_incons("combine_nblists does not support simple lists");
    }

    nsci  = nblc->nsci;
    ncj4  = nblc->ncj4;
    nexcl = nblc->nexcl;
    for (int i = 0; i < nnbl; i++)
    {
        nsci  += nbl[i]->nsci;
        ncj4  += nbl[i]->ncj4;
        nexcl += nbl[i]->nexcl;
    }

    if (nsci > nblc->sci_nalloc)
    {
        nb_realloc_sci(nblc, nsci);
    }
    if (ncj4 > nblc->cj4_nalloc)
    {
        nblc->cj4_nalloc = over_alloc_small(ncj4);
        nbnxn_realloc_void((void **)&nblc->cj4,
                           nblc->ncj4*sizeof(*nblc->cj4),
                           nblc->cj4_nalloc*sizeof(*nblc->cj4),
                           nblc->alloc, nblc->free);
    }
    if (nexcl > nblc->excl_nalloc)
    {
        nblc->excl_nalloc = over_alloc_small(nexcl);
        nbnxn_realloc_void((void **)&nblc->excl,
                           nblc->nexcl*sizeof(*nblc->excl),
                           nblc->excl_nalloc*sizeof(*nblc->excl),
                           nblc->alloc, nblc->free);
    }

    /* Each thread should copy its own data to the combined arrays,
     * as otherwise data will go back and forth between different caches.
     */
#if GMX_OPENMP && !(defined __clang_analyzer__)
    // cppcheck-suppress unreadVariable
    int nthreads = gmx_omp_nthreads_get(emntPairsearch);
#endif

#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int n = 0; n < nnbl; n++)
    {
        try
        {
            int                     sci_offset;
            int                     cj4_offset;
            int                     excl_offset;
            const nbnxn_pairlist_t *nbli;

            /* Determine the offset in the combined data for our thread */
            sci_offset  = nblc->nsci;
            cj4_offset  = nblc->ncj4;
            excl_offset = nblc->nexcl;

            for (int i = 0; i < n; i++)
            {
                sci_offset  += nbl[i]->nsci;
                cj4_offset  += nbl[i]->ncj4;
                excl_offset += nbl[i]->nexcl;
            }

            nbli = nbl[n];

            for (int i = 0; i < nbli->nsci; i++)
            {
                nblc->sci[sci_offset+i]                = nbli->sci[i];
                nblc->sci[sci_offset+i].cj4_ind_start += cj4_offset;
                nblc->sci[sci_offset+i].cj4_ind_end   += cj4_offset;
            }

            for (int j4 = 0; j4 < nbli->ncj4; j4++)
            {
                nblc->cj4[cj4_offset+j4]                   = nbli->cj4[j4];
                nblc->cj4[cj4_offset+j4].imei[0].excl_ind += excl_offset;
                nblc->cj4[cj4_offset+j4].imei[1].excl_ind += excl_offset;
            }

            for (int j4 = 0; j4 < nbli->nexcl; j4++)
            {
                nblc->excl[excl_offset+j4] = nbli->excl[j4];
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    for (int n = 0; n < nnbl; n++)
    {
        nblc->nsci    += nbl[n]->nsci;
        nblc->ncj4    += nbl[n]->ncj4;
        nblc->nci_tot += nbl[n]->nci_tot;
        nblc->nexcl   += nbl[n]->nexcl;
    }
}
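/* The offsets above form an exclusive prefix sum over the thread-local
 * list sizes. For illustration (assumed sizes): with per-thread nsci of
 * {10, 7, 12}, thread 0 writes its sci entries at offset nblc->nsci,
 * thread 1 at nblc->nsci + 10 and thread 2 at nblc->nsci + 17, so all
 * threads can copy concurrently without overlapping writes.
 */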
static void balance_fep_lists(const nbnxn_search_t  nbs,
                              nbnxn_pairlist_set_t *nbl_lists)
{
    int       nnbl;
    int       nri_tot, nrj_tot, nrj_target;
    int       th_dest;
    t_nblist *nbld;

    nnbl = nbl_lists->nnbl;

    if (nnbl == 1)
    {
        /* Nothing to balance */
        return;
    }

    /* Count the total i-lists and pairs */
    nri_tot = 0;
    nrj_tot = 0;
    for (int th = 0; th < nnbl; th++)
    {
        nri_tot += nbl_lists->nbl_fep[th]->nri;
        nrj_tot += nbl_lists->nbl_fep[th]->nrj;
    }

    nrj_target = (nrj_tot + nnbl - 1)/nnbl;

    assert(gmx_omp_nthreads_get(emntNonbonded) == nnbl);

#pragma omp parallel for schedule(static) num_threads(nnbl)
    for (int th = 0; th < nnbl; th++)
    {
        try
        {
            t_nblist *nbl;

            nbl = nbs->work[th].nbl_fep;

            /* Note that here we allocate for the total size, instead of
             * a per-thread estimate (which is hard to obtain).
             */
            if (nri_tot > nbl->maxnri)
            {
                nbl->maxnri = over_alloc_large(nri_tot);
                reallocate_nblist(nbl);
            }
            if (nri_tot > nbl->maxnri || nrj_tot > nbl->maxnrj)
            {
                nbl->maxnrj = over_alloc_small(nrj_tot);
                srenew(nbl->jjnr, nbl->maxnrj);
                srenew(nbl->excl_fep, nbl->maxnrj);
            }

            clear_pairlist_fep(nbl);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    /* Loop over the source lists and assign and copy i-entries */
    th_dest = 0;
    nbld    = nbs->work[th_dest].nbl_fep;
    for (int th = 0; th < nnbl; th++)
    {
        t_nblist *nbls;

        nbls = nbl_lists->nbl_fep[th];

        for (int i = 0; i < nbls->nri; i++)
        {
            int nrj;

            /* The number of pairs in this i-entry */
            nrj = nbls->jindex[i+1] - nbls->jindex[i];

            /* Decide if list th_dest is too large and we should proceed
             * to the next destination list.
             */
            if (th_dest+1 < nnbl && nbld->nrj > 0 &&
                nbld->nrj + nrj - nrj_target > nrj_target - nbld->nrj)
            {
                th_dest++;
                nbld = nbs->work[th_dest].nbl_fep;
            }

            nbld->iinr[nbld->nri]  = nbls->iinr[i];
            nbld->gid[nbld->nri]   = nbls->gid[i];
            nbld->shift[nbld->nri] = nbls->shift[i];

            for (int j = nbls->jindex[i]; j < nbls->jindex[i+1]; j++)
            {
                nbld->jjnr[nbld->nrj]     = nbls->jjnr[j];
                nbld->excl_fep[nbld->nrj] = nbls->excl_fep[j];
                nbld->nrj++;
            }
            nbld->nri++;
            nbld->jindex[nbld->nri] = nbld->nrj;
        }
    }

    /* Swap the list pointers */
    for (int th = 0; th < nnbl; th++)
    {
        t_nblist *nbl_tmp;

        nbl_tmp                = nbl_lists->nbl_fep[th];
        nbl_lists->nbl_fep[th] = nbs->work[th].nbl_fep;
        nbs->work[th].nbl_fep  = nbl_tmp;

        if (debug)
        {
            fprintf(debug, "nbl_fep[%d] nri %4d nrj %4d\n",
                    th,
                    nbl_lists->nbl_fep[th]->nri,
                    nbl_lists->nbl_fep[th]->nrj);
        }
    }
}
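/* Illustration of the greedy assignment above (assumed counts): with
 * nrj_tot = 1000 pairs over nnbl = 4 lists, nrj_target = 250. An i-entry
 * is appended to the current destination list until adding its nrj would
 * overshoot the target by more than the current undershoot, at which
 * point filling proceeds with the next destination list.
 */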
/* Returns the next ci to be processed by our thread */
static gmx_bool next_ci(const nbnxn_grid_t *grid,
                        int conv,
                        int nth, int ci_block,
                        int *ci_x, int *ci_y,
                        int *ci_b, int *ci)
{
    (*ci_b)++;
    (*ci)++;

    if (*ci_b == ci_block)
    {
        /* Jump to the next block assigned to this task */
        *ci   += (nth - 1)*ci_block;
        *ci_b  = 0;
    }

    if (*ci >= grid->nc*conv)
    {
        return FALSE;
    }

    while (*ci >= grid->cxy_ind[*ci_x*grid->ncy + *ci_y + 1]*conv)
    {
        *ci_y += 1;
        if (*ci_y == grid->ncy)
        {
            *ci_x += 1;
            *ci_y  = 0;
        }
    }

    return TRUE;
}
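/* Illustration of the block cycling above (assumed parameters): with
 * nth = 4 threads and ci_block = 2, thread 0 visits cells 0,1, 8,9, 16,17,...
 * and thread 1 visits 2,3, 10,11, 18,19,..., i.e. each thread processes
 * blocks of ci_block cells with a stride of nth*ci_block cells.
 */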
/* Returns the distance^2 for which we put cell pairs in the list
 * without checking atom pair distances. This is usually < rlist^2.
 */
static float boundingbox_only_distance2(const nbnxn_grid_t *gridi,
                                        const nbnxn_grid_t *gridj,
                                        real                rlist,
                                        gmx_bool            simple)
{
    /* If the distance between two sub-cell bounding boxes is less
     * than this distance, do not check the distance between
     * all particle pairs in the sub-cell, since then it is likely
     * that the box pair has atom pairs within the cut-off.
     * We use the nblist cut-off minus 0.5 times the average x/y diagonal
     * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
     * Using more than 0.5 gains at most 0.5%.
     * If forces are calculated more than twice, the performance gain
     * in the force calculation outweighs the cost of checking.
     * Note that with sub-cell lists, the atom-pair distance check
     * is only performed when only 1 out of 8 sub-cells is within range,
     * because the GPU is much faster than the CPU.
     */
    real bbx, bby;
    real rbb2;

    bbx = 0.5*(gridi->sx + gridj->sx);
    bby = 0.5*(gridi->sy + gridj->sy);
    if (!simple)
    {
        bbx /= c_gpuNumClusterPerCellX;
        bby /= c_gpuNumClusterPerCellY;
    }

    rbb2 = std::max(0.0, rlist - 0.5*std::sqrt(bbx*bbx + bby*bby));
    rbb2 = rbb2 * rbb2;

#if !GMX_DOUBLE
    return rbb2;
#else
    return (float)((1 + GMX_FLOAT_EPS)*rbb2);
#endif
}
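/* Worked example with assumed grid spacings (not from a real system):
 * for rlist = 1.0 nm and average sub-cell sizes bbx = bby = 0.3 nm,
 * rbb = 1.0 - 0.5*sqrt(0.09 + 0.09) ~ 0.79 nm, so rbb2 ~ 0.62 nm^2,
 * clearly below rlist^2 = 1.0 nm^2.
 */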
static int get_ci_block_size(const nbnxn_grid_t *gridi,
                             gmx_bool bDomDec, int nth)
{
    const int ci_block_enum      = 5;
    const int ci_block_denom     = 11;
    const int ci_block_min_atoms = 16;
    int       ci_block;

    /* Here we decide how to distribute the blocks over the threads.
     * We use prime numbers to try to avoid that the grid size becomes
     * a multiple of the number of threads, which would lead to some
     * threads getting "inner" pairs and others getting boundary pairs,
     * which in turn will lead to load imbalance between threads.
     * Set the block size as 5/11/ntask times the average number of cells
     * in a y,z slab. This should ensure a quite uniform distribution
     * of the grid parts of the different threads along all three grid
     * zone boundaries with 3D domain decomposition. At the same time
     * the blocks will not become too small.
     */
    ci_block = (gridi->nc*ci_block_enum)/(ci_block_denom*gridi->ncx*nth);

    /* Ensure the blocks are not too small: avoids cache invalidation */
    if (ci_block*gridi->na_sc < ci_block_min_atoms)
    {
        ci_block = (ci_block_min_atoms + gridi->na_sc - 1)/gridi->na_sc;
    }

    /* Without domain decomposition
     * or with less than 3 blocks per task, divide in nth blocks.
     */
    if (!bDomDec || nth*3*ci_block > gridi->nc)
    {
        ci_block = (gridi->nc + nth - 1)/nth;
    }

    if (ci_block > 1 && (nth - 1)*ci_block >= gridi->nc)
    {
        /* Some threads have no work. Although reducing the block size
         * does not decrease the block count on the first few threads,
         * with GPUs better mixing of "upper" cells that have more empty
         * clusters results in a somewhat lower max load over all threads.
         * Without GPUs the regime of so few atoms per thread is less
         * performance relevant, but with 8-wide SIMD the same reasoning
         * applies, since the pair list uses 4 i-atom "sub-clusters".
         */
        ci_block--;
    }

    return ci_block;
}
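/* Worked example with assumed grid dimensions: for gridi->nc = 1000 cells,
 * gridi->ncx = 10 columns and nth = 8 threads,
 * ci_block = (1000*5)/(11*10*8) = 5 cells per block, i.e. roughly 5/11 of
 * the average y,z slab of 100 cells divided over the 8 threads.
 */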
/* Generates the part of pair-list nbl assigned to our thread */
static void nbnxn_make_pairlist_part(const nbnxn_search_t nbs,
                                     const nbnxn_grid_t *gridi,
                                     const nbnxn_grid_t *gridj,
                                     nbnxn_search_work_t *work,
                                     const nbnxn_atomdata_t *nbat,
                                     const t_blocka *excl,
                                     real rlist,
                                     int nb_kernel_type,
                                     int ci_block,
                                     gmx_bool bFBufferFlag,
                                     int nsubpair_max,
                                     gmx_bool progBal,
                                     float nsubpair_tot_est,
                                     int th, int nth,
                                     nbnxn_pairlist_t *nbl,
                                     t_nblist *nbl_fep)
{
    int               na_cj_2log;
    matrix            box;
    real              rl2, rl_fep2 = 0;
    float             rbb2;
    int               ci_b, ci, ci_x, ci_y, ci_xy, cj;
    ivec              shp;
    int               shift;
    real              shx, shy, shz;
    int               conv_i, cell0_i;
    const nbnxn_bb_t *bb_i = NULL;
#if NBNXN_BBXXXX
    const float      *pbb_i = NULL;
#endif
    const float      *bbcz_i, *bbcz_j;
    const int        *flags_i;
    real              bx0, bx1, by0, by1, bz0, bz1;
    real              bz1_frac;
    real              d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
    int               cxf, cxl, cyf, cyf_x, cyl;
    int               c0, c1, cs, cf, cl;
    int               ndistc;
    int               ncpcheck;
    int               gridi_flag_shift = 0, gridj_flag_shift = 0;
    gmx_bitmask_t    *gridj_flag       = NULL;
    int               ncj_old_i, ncj_old_j;

    nbs_cycle_start(&work->cc[enbsCCsearch]);

    if (gridj->bSimple != nbl->bSimple)
    {
        gmx_incons("Grid incompatible with pair-list");
    }

    sync_work(nbl);
    nbl->na_sc = gridj->na_sc;
    nbl->na_ci = gridj->na_c;
    nbl->na_cj = nbnxn_kernel_to_cluster_j_size(nb_kernel_type);
    na_cj_2log = get_2log(nbl->na_cj);

    nbl->rlist = rlist;

    if (bFBufferFlag)
    {
        /* Determine conversion of clusters to flag blocks */
        gridi_flag_shift = 0;
        while ((nbl->na_ci << gridi_flag_shift) < NBNXN_BUFFERFLAG_SIZE)
        {
            gridi_flag_shift++;
        }
        gridj_flag_shift = 0;
        while ((nbl->na_cj << gridj_flag_shift) < NBNXN_BUFFERFLAG_SIZE)
        {
            gridj_flag_shift++;
        }

        gridj_flag = work->buffer_flags.flag;
    }

    copy_mat(nbs->box, box);

    rl2 = nbl->rlist*nbl->rlist;

    if (nbs->bFEP && !nbl->bSimple)
    {
        /* Determine an atom-pair list cut-off distance for FEP atom pairs.
         * We should not simply use rlist, since then we would not have
         * the small, effective buffering of the NxN lists.
         * The buffer is an overestimate, but the resulting cost for pairs
         * beyond rlist is negligible compared to the FEP pairs within rlist.
         */
        rl_fep2 = nbl->rlist + effective_buffer_1x1_vs_MxN(gridi, gridj);

        if (debug)
        {
            fprintf(debug, "nbl_fep atom-pair rlist %f\n", rl_fep2);
        }
        rl_fep2 = rl_fep2*rl_fep2;
    }

    rbb2 = boundingbox_only_distance2(gridi, gridj, nbl->rlist, nbl->bSimple);

    if (debug)
    {
        fprintf(debug, "nbl bounding box only distance %f\n", std::sqrt(rbb2));
    }

    /* Set the shift range */
    for (int d = 0; d < DIM; d++)
    {
        /* Check if we need periodicity shifts.
         * Without PBC or with domain decomposition we don't need them.
         */
        if (d >= ePBC2npbcdim(nbs->ePBC) || nbs->dd_dim[d])
        {
            shp[d] = 0;
        }
        else
        {
            if (d == XX &&
                box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < std::sqrt(rl2))
            {
                shp[d] = 2;
            }
            else
            {
                shp[d] = 1;
            }
        }
    }

    if (nbl->bSimple && !gridi->bSimple)
    {
        conv_i  = gridi->na_sc/gridj->na_sc;
        bb_i    = gridi->bb_simple;
        bbcz_i  = gridi->bbcz_simple;
        flags_i = gridi->flags_simple;
    }
    else
    {
        conv_i  = 1;
#if NBNXN_BBXXXX
        if (gridi->bSimple)
        {
            bb_i = gridi->bb;
        }
        else
        {
            pbb_i = gridi->pbb;
        }
#else
        /* We use the normal bounding box format for both grid types */
        bb_i  = gridi->bb;
#endif
        bbcz_i  = gridi->bbcz;
        flags_i = gridi->flags;
    }
    cell0_i = gridi->cell0*conv_i;

    bbcz_j = gridj->bbcz;

    if (conv_i != 1)
    {
        /* Blocks of the conversion factor - 1 give a large repeat count
         * combined with a small block size. This should result in good
         * load balancing for both small and large domains.
         */
        ci_block = conv_i - 1;
    }
    if (debug)
    {
        fprintf(debug, "nbl nc_i %d col.av. %.1f ci_block %d\n",
                gridi->nc, gridi->nc/(double)(gridi->ncx*gridi->ncy), ci_block);
    }

    ndistc   = 0;
    ncpcheck = 0;

    /* Initialize ci_b and ci to 1 before where we want them to start,
     * as they will both be incremented in next_ci.
     */
    ci_b = -1;
    ci   = th*ci_block - 1;
    ci_x = 0;
    ci_y = 0;
    while (next_ci(gridi, conv_i, nth, ci_block, &ci_x, &ci_y, &ci_b, &ci))
    {
        if (nbl->bSimple && flags_i[ci] == 0)
        {
            continue;
        }

        ncj_old_i = nbl->ncj;

        d2cx = 0;
        if (gridj != gridi && shp[XX] == 0)
        {
            if (nbl->bSimple)
            {
                bx1 = bb_i[ci].upper[BB_X];
            }
            else
            {
                bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx;
            }
            if (bx1 < gridj->c0[XX])
            {
                d2cx = gmx::square(gridj->c0[XX] - bx1);

                if (d2cx >= rl2)
                {
                    continue;
                }
            }
        }

        ci_xy = ci_x*gridi->ncy + ci_y;

        /* Loop over shift vectors in three dimensions */
        for (int tz = -shp[ZZ]; tz <= shp[ZZ]; tz++)
        {
            shz = tz*box[ZZ][ZZ];

            bz0 = bbcz_i[ci*NNBSBB_D  ] + shz;
            bz1 = bbcz_i[ci*NNBSBB_D+1] + shz;

            if (tz == 0)
            {
                d2z = 0;
            }
            else if (tz < 0)
            {
                d2z = gmx::square(bz1);
            }
            else
            {
                d2z = gmx::square(bz0 - box[ZZ][ZZ]);
            }

            d2z_cx = d2z + d2cx;

            if (d2z_cx >= rl2)
            {
                continue;
            }

            bz1_frac = bz1/(gridi->cxy_ind[ci_xy+1] - gridi->cxy_ind[ci_xy]);
            if (bz1_frac < 0)
            {
                bz1_frac = 0;
            }
            /* The check with bz1_frac close to or larger than 1 comes later */

            for (int ty = -shp[YY]; ty <= shp[YY]; ty++)
            {
                shy = ty*box[YY][YY] + tz*box[ZZ][YY];

                if (nbl->bSimple)
                {
                    by0 = bb_i[ci].lower[BB_Y] + shy;
                    by1 = bb_i[ci].upper[BB_Y] + shy;
                }
                else
                {
                    by0 = gridi->c0[YY] + (ci_y  )*gridi->sy + shy;
                    by1 = gridi->c0[YY] + (ci_y+1)*gridi->sy + shy;
                }

                get_cell_range(by0, by1,
                               gridj->ncy, gridj->c0[YY], gridj->sy, gridj->inv_sy,
                               d2z_cx, rl2,
                               &cyf, &cyl);

                if (cyf > cyl)
                {
                    continue;
                }

                d2z_cy = d2z;
                if (by1 < gridj->c0[YY])
                {
                    d2z_cy += gmx::square(gridj->c0[YY] - by1);
                }
                else if (by0 > gridj->c1[YY])
                {
                    d2z_cy += gmx::square(by0 - gridj->c1[YY]);
                }

                for (int tx = -shp[XX]; tx <= shp[XX]; tx++)
                {
                    shift = XYZ2IS(tx, ty, tz);

                    if (pbc_shift_backward && gridi == gridj && shift > CENTRAL)
                    {
                        continue;
                    }

                    shx = tx*box[XX][XX] + ty*box[YY][XX] + tz*box[ZZ][XX];

                    if (nbl->bSimple)
                    {
                        bx0 = bb_i[ci].lower[BB_X] + shx;
                        bx1 = bb_i[ci].upper[BB_X] + shx;
                    }
                    else
                    {
                        bx0 = gridi->c0[XX] + (ci_x  )*gridi->sx + shx;
                        bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx + shx;
                    }

                    get_cell_range(bx0, bx1,
                                   gridj->ncx, gridj->c0[XX], gridj->sx, gridj->inv_sx,
                                   d2z_cy, rl2,
                                   &cxf, &cxl);

                    if (cxf > cxl)
                    {
                        continue;
                    }

                    if (nbl->bSimple)
                    {
                        new_ci_entry(nbl, cell0_i+ci, shift, flags_i[ci]);
                    }
                    else
                    {
                        new_sci_entry(nbl, cell0_i+ci, shift);
                    }

                    if ((!pbc_shift_backward || (shift == CENTRAL &&
                                                 gridi == gridj)) &&
                        cxf < ci_x)
                    {
                        /* Leave the pairs with i > j.
                         * x is the major index, so skip half of it.
                         */
                        cxf = ci_x;
                    }

                    if (nbl->bSimple)
                    {
                        set_icell_bb_simple(bb_i, ci, shx, shy, shz,
                                            nbl->work->bb_ci);
                    }
                    else
                    {
#if NBNXN_BBXXXX
                        set_icell_bbxxxx_supersub(pbb_i, ci, shx, shy, shz,
                                                  nbl->work->pbb_ci);
#else
                        set_icell_bb_supersub(bb_i, ci, shx, shy, shz,
                                              nbl->work->bb_ci);
#endif
                    }

                    nbs->icell_set_x(cell0_i+ci, shx, shy, shz,
                                     nbat->xstride, nbat->x,
                                     nbl->work);

                    for (int cx = cxf; cx <= cxl; cx++)
                    {
                        d2zx = d2z;
                        if (gridj->c0[XX] + cx*gridj->sx > bx1)
                        {
                            d2zx += gmx::square(gridj->c0[XX] + cx*gridj->sx - bx1);
                        }
                        else if (gridj->c0[XX] + (cx+1)*gridj->sx < bx0)
                        {
                            d2zx += gmx::square(gridj->c0[XX] + (cx+1)*gridj->sx - bx0);
                        }

                        if (gridi == gridj &&
                            cx == ci_x &&
                            (!pbc_shift_backward || shift == CENTRAL) &&
                            cyf < ci_y)
                        {
                            /* Leave the pairs with i > j.
                             * Skip half of y when i and j have the same x.
                             */
                            cyf_x = ci_y;
                        }
                        else
                        {
                            cyf_x = cyf;
                        }

                        for (int cy = cyf_x; cy <= cyl; cy++)
                        {
                            c0 = gridj->cxy_ind[cx*gridj->ncy+cy];
                            c1 = gridj->cxy_ind[cx*gridj->ncy+cy+1];

                            if (pbc_shift_backward &&
                                gridi == gridj &&
                                shift == CENTRAL && c0 < ci)
                            {
                                c0 = ci;
                            }

                            d2zxy = d2zx;
                            if (gridj->c0[YY] + cy*gridj->sy > by1)
                            {
                                d2zxy += gmx::square(gridj->c0[YY] + cy*gridj->sy - by1);
                            }
                            else if (gridj->c0[YY] + (cy+1)*gridj->sy < by0)
                            {
                                d2zxy += gmx::square(gridj->c0[YY] + (cy+1)*gridj->sy - by0);
                            }
                            if (c1 > c0 && d2zxy < rl2)
                            {
                                cs = c0 + static_cast<int>(bz1_frac*(c1 - c0));
                                if (cs >= c1)
                                {
                                    cs = c1 - 1;
                                }

                                d2xy = d2zxy - d2z;

                                /* Find the lowest cell that can possibly
                                 * be within range.
                                 */
                                cf = cs;
                                while (cf > c0 &&
                                       (bbcz_j[cf*NNBSBB_D+1] >= bz0 ||
                                        d2xy + gmx::square(bbcz_j[cf*NNBSBB_D+1] - bz0) < rl2))
                                {
                                    cf--;
                                }

                                /* Find the highest cell that can possibly
                                 * be within range.
                                 */
                                cl = cs;
                                while (cl < c1-1 &&
                                       (bbcz_j[cl*NNBSBB_D] <= bz1 ||
                                        d2xy + gmx::square(bbcz_j[cl*NNBSBB_D] - bz1) < rl2))
                                {
                                    cl++;
                                }

#ifdef NBNXN_REFCODE
                                {
                                    /* Simple reference code, for debugging,
                                     * overrides the more complex code above.
                                     */
                                    cf = c1;
                                    cl = -1;
                                    for (int k = c0; k < c1; k++)
                                    {
                                        if (box_dist2(bx0, bx1, by0, by1, bz0, bz1, bb+k) < rl2 &&
                                            k < cf)
                                        {
                                            cf = k;
                                        }
                                        if (box_dist2(bx0, bx1, by0, by1, bz0, bz1, bb+k) < rl2 &&
                                            k > cl)
                                        {
                                            cl = k;
                                        }
                                    }
                                }
#endif

                                if (gridi == gridj)
                                {
                                    /* We want each atom/cell pair only once,
                                     * only use cj >= ci.
                                     */
                                    if (!pbc_shift_backward || shift == CENTRAL)
                                    {
                                        cf = std::max(cf, ci);
                                    }
                                }

                                if (cf <= cl)
                                {
                                    /* For f buffer flags with simple lists */
                                    ncj_old_j = nbl->ncj;

                                    switch (nb_kernel_type)
                                    {
                                        case nbnxnk4x4_PlainC:
                                            check_cell_list_space_simple(nbl, cl-cf+1);

                                            make_cluster_list_simple(gridj,
                                                                     nbl, ci, cf, cl,
                                                                     (gridi == gridj && shift == CENTRAL),
                                                                     nbat->x,
                                                                     rl2, rbb2,
                                                                     &ndistc);
                                            break;
#ifdef GMX_NBNXN_SIMD_4XN
                                        case nbnxnk4xN_SIMD_4xN:
                                            check_cell_list_space_simple(nbl, ci_to_cj_simd_4xn(cl - cf) + 2);
                                            make_cluster_list_simd_4xn(gridj,
                                                                       nbl, ci, cf, cl,
                                                                       (gridi == gridj && shift == CENTRAL),
                                                                       nbat->x,
                                                                       rl2, rbb2,
                                                                       &ndistc);
                                            break;
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
                                        case nbnxnk4xN_SIMD_2xNN:
                                            check_cell_list_space_simple(nbl, ci_to_cj_simd_2xnn(cl - cf) + 2);
                                            make_cluster_list_simd_2xnn(gridj,
                                                                        nbl, ci, cf, cl,
                                                                        (gridi == gridj && shift == CENTRAL),
                                                                        nbat->x,
                                                                        rl2, rbb2,
                                                                        &ndistc);
                                            break;
#endif
                                        case nbnxnk8x8x8_PlainC:
                                        case nbnxnk8x8x8_GPU:
                                            check_cell_list_space_supersub(nbl, cl-cf+1);
                                            for (cj = cf; cj <= cl; cj++)
                                            {
                                                make_cluster_list_supersub(gridi, gridj,
                                                                           nbl, ci, cj,
                                                                           (gridi == gridj && shift == CENTRAL && ci == cj),
                                                                           nbat->xstride, nbat->x,
                                                                           rl2, rbb2,
                                                                           &ndistc);
                                            }
                                            break;
                                    }
                                    ncpcheck += cl - cf + 1;

                                    if (bFBufferFlag && nbl->ncj > ncj_old_j)
                                    {
                                        int cbf = nbl->cj[ncj_old_j].cj >> gridj_flag_shift;
                                        int cbl = nbl->cj[nbl->ncj-1].cj >> gridj_flag_shift;
                                        for (int cb = cbf; cb <= cbl; cb++)
                                        {
                                            bitmask_init_bit(&gridj_flag[cb], th);
                                        }
                                    }
                                }
                            }
                        }
                    }

                    /* Set the exclusions for this ci list */
                    if (nbl->bSimple)
                    {
                        set_ci_top_excls(nbs,
                                         nbl,
                                         shift == CENTRAL && gridi == gridj,
                                         na_cj_2log,
                                         &(nbl->ci[nbl->nci]),
                                         excl);

                        if (nbs->bFEP)
                        {
                            make_fep_list(nbs, nbat, nbl,
                                          shift == CENTRAL && gridi == gridj,
                                          &(nbl->ci[nbl->nci]),
                                          gridi, gridj, nbl_fep);
                        }
                    }
                    else
                    {
                        set_sci_top_excls(nbs,
                                          nbl,
                                          shift == CENTRAL && gridi == gridj,
                                          gridj->na_c_2log,
                                          &(nbl->sci[nbl->nsci]),
                                          excl);

                        if (nbs->bFEP)
                        {
                            make_fep_list_supersub(nbs, nbat, nbl,
                                                   shift == CENTRAL && gridi == gridj,
                                                   &(nbl->sci[nbl->nsci]),
                                                   shx, shy, shz,
                                                   rl_fep2,
                                                   gridi, gridj, nbl_fep);
                        }
                    }

                    /* Close this ci list */
                    if (nbl->bSimple)
                    {
                        close_ci_entry_simple(nbl);
                    }
                    else
                    {
                        close_ci_entry_supersub(nbl,
                                                nsubpair_max,
                                                progBal, nsubpair_tot_est,
                                                th, nth);
                    }
                }
            }
        }

        if (bFBufferFlag && nbl->ncj > ncj_old_i)
        {
            bitmask_init_bit(&(work->buffer_flags.flag[(gridi->cell0+ci) >> gridi_flag_shift]), th);
        }
    }

    work->ndistc = ndistc;

    nbs_cycle_stop(&work->cc[enbsCCsearch]);

    if (debug)
    {
        fprintf(debug, "number of distance checks %d\n", ndistc);
        fprintf(debug, "ncpcheck %s %d\n", gridi == gridj ? "local" : "non-local",
                ncpcheck);

        if (nbl->bSimple)
        {
            print_nblist_statistics_simple(debug, nbl, nbs, rlist);
        }
        else
        {
            print_nblist_statistics_supersub(debug, nbl, nbs, rlist);
        }

        if (nbs->bFEP)
        {
            fprintf(debug, "nbl FEP list pairs: %d\n", nbl_fep->nrj);
        }
    }
}
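/* Note on the search above: the triple shift loop (tz, ty, tx) accumulates
 * squared distance contributions (d2z, d2z_cx, d2z_cy, d2zx, d2zxy) so that
 * whole shift vectors, grid columns and cell ranges can be discarded against
 * rl2 before any cluster-pair distance is computed; only the surviving cj
 * ranges [cf,cl] reach the kernel-specific cluster-list builders.
 */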
static void reduce_buffer_flags(const nbnxn_search_t        nbs,
                                int                         nsrc,
                                const nbnxn_buffer_flags_t *dest)
{
    for (int s = 0; s < nsrc; s++)
    {
        gmx_bitmask_t * flag = nbs->work[s].buffer_flags.flag;

        for (int b = 0; b < dest->nflag; b++)
        {
            bitmask_union(&(dest->flag[b]), flag[b]);
        }
    }
}
*flags
, int nout
)
3770 int nelem
, nkeep
, ncopy
, nred
, out
;
3771 gmx_bitmask_t mask_0
;
3777 bitmask_init_bit(&mask_0
, 0);
3778 for (int b
= 0; b
< flags
->nflag
; b
++)
3780 if (bitmask_is_equal(flags
->flag
[b
], mask_0
))
3782 /* Only flag 0 is set, no copy of reduction required */
3786 else if (!bitmask_is_zero(flags
->flag
[b
]))
3789 for (out
= 0; out
< nout
; out
++)
3791 if (bitmask_is_set(flags
->flag
[b
], out
))
3808 fprintf(debug
, "nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
3810 nelem
/(double)(flags
->nflag
),
3811 nkeep
/(double)(flags
->nflag
),
3812 ncopy
/(double)(flags
->nflag
),
3813 nred
/(double)(flags
->nflag
));
/* Perform a count (linear) sort to sort the smaller lists to the end.
 * This avoids load imbalance on the GPU, as large lists will be
 * scheduled and executed first and the smaller lists later.
 * Load balancing between multi-processors only happens at the end
 * and there smaller lists lead to more effective load balancing.
 * The sorting is done on the cj4 count, not on the actual pair counts.
 * Not only does this make the sort faster, but it also results in
 * better load balancing than using a list sorted on exact load.
 * This function swaps the pointer in the pair list to avoid a copy operation.
 */
static void sort_sci(nbnxn_pairlist_t *nbl)
{
    nbnxn_list_work_t *work;
    int                m, s0, s1;
    nbnxn_sci_t       *sci_sort;

    if (nbl->ncj4 <= nbl->nsci)
    {
        /* nsci = 0 or all sci have size 1, sorting won't change the order */
        return;
    }

    work = nbl->work;

    /* We will distinguish differences up to double the average */
    m = (2*nbl->ncj4)/nbl->nsci;

    if (m + 1 > work->sort_nalloc)
    {
        work->sort_nalloc = over_alloc_large(m + 1);
        srenew(work->sort, work->sort_nalloc);
    }

    if (work->sci_sort_nalloc != nbl->sci_nalloc)
    {
        work->sci_sort_nalloc = nbl->sci_nalloc;
        nbnxn_realloc_void((void **)&work->sci_sort,
                           0,
                           work->sci_sort_nalloc*sizeof(*work->sci_sort),
                           nbl->alloc, nbl->free);
    }

    /* Count the entries of each size */
    for (int i = 0; i <= m; i++)
    {
        work->sort[i] = 0;
    }
    for (int s = 0; s < nbl->nsci; s++)
    {
        int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
        work->sort[i]++;
    }
    /* Calculate the offset for each count */
    s0            = work->sort[m];
    work->sort[m] = 0;
    for (int i = m - 1; i >= 0; i--)
    {
        s1            = work->sort[i];
        work->sort[i] = work->sort[i + 1] + s0;
        s0            = s1;
    }

    /* Sort entries directly into place */
    sci_sort = work->sci_sort;
    for (int s = 0; s < nbl->nsci; s++)
    {
        int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
        sci_sort[work->sort[i]++] = nbl->sci[s];
    }

    /* Swap the sci pointers so we use the new, sorted list */
    work->sci_sort = nbl->sci;
    nbl->sci       = sci_sort;
}
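/* Illustration of the counting sort above (assumed list sizes): with
 * nsci = 4 entries of cj4 lengths {3, 1, 3, 2} and m = 2*9/4 = 4,
 * work->sort first holds the per-size counts and is then turned into
 * descending-size offsets, so the two size-3 entries are placed first,
 * then the size-2 entry, then the size-1 entry, without any swaps.
 */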
/* Make a local or non-local pair-list, depending on iloc */
void nbnxn_make_pairlist(const nbnxn_search_t  nbs,
                         nbnxn_atomdata_t     *nbat,
                         const t_blocka       *excl,
                         real                  rlist,
                         int                   min_ci_balanced,
                         nbnxn_pairlist_set_t *nbl_list,
                         int                   iloc,
                         int                   nb_kernel_type,
                         t_nrnb               *nrnb)
{
    nbnxn_grid_t      *gridi, *gridj;
    gmx_bool           bGPUCPU;
    int                nzi, zj0, zj1;
    int                nsubpair_target;
    float              nsubpair_tot_est;
    int                nnbl;
    nbnxn_pairlist_t **nbl;
    int                ci_block;
    gmx_bool           CombineNBLists;
    gmx_bool           progBal;
    int                np_tot, np_noq, np_hlj, nap;

    /* Check if we are running hybrid GPU + CPU nbnxn mode */
    bGPUCPU = (!nbs->grid[0].bSimple && nbl_list->bSimple);

    nnbl           = nbl_list->nnbl;
    nbl            = nbl_list->nbl;
    CombineNBLists = nbl_list->bCombined;

    if (debug)
    {
        fprintf(debug, "ns making %d nblists\n", nnbl);
    }

    nbat->bUseBufferFlags = (nbat->nout > 1);
    /* We should re-init the flags before making the first list */
    if (nbat->bUseBufferFlags && (LOCAL_I(iloc) || bGPUCPU))
    {
        init_buffer_flags(&nbat->buffer_flags, nbat->natoms);
    }

    if (nbl_list->bSimple)
    {
#if GMX_SIMD
        switch (nb_kernel_type)
        {
#ifdef GMX_NBNXN_SIMD_4XN
            case nbnxnk4xN_SIMD_4xN:
                nbs->icell_set_x = icell_set_x_simd_4xn;
                break;
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
            case nbnxnk4xN_SIMD_2xNN:
                nbs->icell_set_x = icell_set_x_simd_2xnn;
                break;
#endif
            default:
                nbs->icell_set_x = icell_set_x_simple;
                break;
        }
#else   /* GMX_SIMD */
        /* MSVC 2013 complains about switch statements without case */
        nbs->icell_set_x = icell_set_x_simple;
#endif  /* GMX_SIMD */
    }
    else
    {
        nbs->icell_set_x = icell_set_x_supersub;
    }

    if (LOCAL_I(iloc))
    {
        /* Only zone (grid) 0 vs 0 */
        nzi = 1;
        zj0 = 0;
        zj1 = 1;
    }
    else
    {
        nzi = nbs->zones->nizone;
    }

    if (!nbl_list->bSimple && min_ci_balanced > 0)
    {
        get_nsubpair_target(nbs, iloc, rlist, min_ci_balanced,
                            &nsubpair_target, &nsubpair_tot_est);
    }
    else
    {
        nsubpair_target  = 0;
        nsubpair_tot_est = 0;
    }

    /* Clear all pair-lists */
    for (int th = 0; th < nnbl; th++)
    {
        clear_pairlist(nbl[th]);

        if (nbs->bFEP)
        {
            clear_pairlist_fep(nbl_list->nbl_fep[th]);
        }
    }

    for (int zi = 0; zi < nzi; zi++)
    {
        gridi = &nbs->grid[zi];

        if (NONLOCAL_I(iloc))
        {
            zj0 = nbs->zones->izone[zi].j0;
            zj1 = nbs->zones->izone[zi].j1;
            if (zi == 0)
            {
                zj0++;
            }
        }
        for (int zj = zj0; zj < zj1; zj++)
        {
            gridj = &nbs->grid[zj];

            if (debug)
            {
                fprintf(debug, "ns search grid %d vs %d\n", zi, zj);
            }

            nbs_cycle_start(&nbs->cc[enbsCCsearch]);

            if (nbl[0]->bSimple && !gridi->bSimple)
            {
                /* Hybrid list, determine blocking later */
                ci_block = 0;
            }
            else
            {
                ci_block = get_ci_block_size(gridi, nbs->DomDec, nnbl);
            }

            /* With GPU: generate progressively smaller lists for
             * load balancing for local only or non-local with 2 zones.
             */
            progBal = (LOCAL_I(iloc) || nbs->zones->n <= 2);

#pragma omp parallel for num_threads(nnbl) schedule(static)
            for (int th = 0; th < nnbl; th++)
            {
                try
                {
                    /* Re-init the thread-local work flag data before making
                     * the first list (not an elegant conditional).
                     */
                    if (nbat->bUseBufferFlags && ((zi == 0 && zj == 0) ||
                                                  (bGPUCPU && zi == 0 && zj == 1)))
                    {
                        init_buffer_flags(&nbs->work[th].buffer_flags, nbat->natoms);
                    }

                    if (CombineNBLists && th > 0)
                    {
                        clear_pairlist(nbl[th]);
                    }

                    /* Divide the i super cells equally over the nblists */
                    nbnxn_make_pairlist_part(nbs, gridi, gridj,
                                             &nbs->work[th], nbat, excl,
                                             rlist,
                                             nb_kernel_type,
                                             ci_block,
                                             nbat->bUseBufferFlags,
                                             nsubpair_target,
                                             progBal, nsubpair_tot_est,
                                             th, nnbl,
                                             nbl[th],
                                             nbl_list->nbl_fep[th]);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }
            nbs_cycle_stop(&nbs->cc[enbsCCsearch]);

            np_tot = 0;
            np_noq = 0;
            np_hlj = 0;
            for (int th = 0; th < nnbl; th++)
            {
                inc_nrnb(nrnb, eNR_NBNXN_DIST2, nbs->work[th].ndistc);

                if (nbl_list->bSimple)
                {
                    np_tot += nbl[th]->ncj;
                    np_noq += nbl[th]->work->ncj_noq;
                    np_hlj += nbl[th]->work->ncj_hlj;
                }
                else
                {
                    /* This count ignores potential subsequent pair pruning */
                    np_tot += nbl[th]->nci_tot;
                }
            }
            nap                   = nbl[0]->na_ci*nbl[0]->na_cj;
            nbl_list->natpair_ljq = (np_tot - np_noq)*nap - np_hlj*nap/2;
            nbl_list->natpair_lj  = np_noq*nap;
            nbl_list->natpair_q   = np_hlj*nap/2;

            if (CombineNBLists && nnbl > 1)
            {
                nbs_cycle_start(&nbs->cc[enbsCCcombine]);

                combine_nblists(nnbl-1, nbl+1, nbl[0]);

                nbs_cycle_stop(&nbs->cc[enbsCCcombine]);
            }
        }
    }

    if (!nbl_list->bSimple)
    {
        /* Sort the entries on size, large ones first */
        if (CombineNBLists || nnbl == 1)
        {
            sort_sci(nbl[0]);
        }
        else
        {
#pragma omp parallel for num_threads(nnbl) schedule(static)
            for (int th = 0; th < nnbl; th++)
            {
                try
                {
                    sort_sci(nbl[th]);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }
        }
    }

    if (nbat->bUseBufferFlags)
    {
        reduce_buffer_flags(nbs, nnbl, &nbat->buffer_flags);
    }

    if (nbs->bFEP)
    {
        /* Balance the free-energy lists over all the threads */
        balance_fep_lists(nbs, nbl_list);
    }

    /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
    if (LOCAL_I(iloc))
    {
        nbs->search_count++;
    }
    if (nbs->print_cycles &&
        (!nbs->DomDec || !LOCAL_I(iloc)) &&
        nbs->search_count % 100 == 0)
    {
        nbs_cycle_print(stderr, nbs);
    }

    if (debug && (CombineNBLists && nnbl > 1))
    {
        if (nbl[0]->bSimple)
        {
            print_nblist_statistics_simple(debug, nbl[0], nbs, rlist);
        }
        else
        {
            print_nblist_statistics_supersub(debug, nbl[0], nbs, rlist);
        }
    }

    if (debug)
    {
        if (gmx_debug_at)
        {
            if (nbl[0]->bSimple)
            {
                print_nblist_ci_cj(debug, nbl[0]);
            }
            else
            {
                print_nblist_sci_cj(debug, nbl[0]);
            }
        }

        if (nbat->bUseBufferFlags)
        {
            print_reduction_cost(&nbat->buffer_flags, nnbl);
        }
    }
}