/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "nbnxn_search.h"

#include "gromacs/legacyheaders/gmx_omp_nthreads.h"
#include "gromacs/legacyheaders/nrnb.h"
#include "gromacs/legacyheaders/ns.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/legacyheaders/types/group.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nbnxn_internal.h"
#include "gromacs/mdlib/nbnxn_simd.h"
#include "gromacs/mdlib/nbnxn_util.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"
/* The functions below are macros as they are performance sensitive */

/* 4x4 list, pack=4: no complex conversion required */
/* i-cluster to j-cluster conversion */
#define CI_TO_CJ_J4(ci)   (ci)
/* cluster index to coordinate array index conversion */
#define X_IND_CI_J4(ci)  ((ci)*STRIDE_P4)
#define X_IND_CJ_J4(cj)  ((cj)*STRIDE_P4)

/* 4x2 list, pack=4: j-cluster size is half the packing width */
/* i-cluster to j-cluster conversion */
#define CI_TO_CJ_J2(ci)  ((ci)<<1)
/* cluster index to coordinate array index conversion */
#define X_IND_CI_J2(ci)  ((ci)*STRIDE_P4)
#define X_IND_CJ_J2(cj)  (((cj)>>1)*STRIDE_P4 + ((cj) & 1)*(PACK_X4>>1))

/* 4x8 list, pack=8: i-cluster size is half the packing width */
/* i-cluster to j-cluster conversion */
#define CI_TO_CJ_J8(ci)  ((ci)>>1)
/* cluster index to coordinate array index conversion */
#define X_IND_CI_J8(ci)  (((ci)>>1)*STRIDE_P8 + ((ci) & 1)*(PACK_X8>>1))
#define X_IND_CJ_J8(cj)  ((cj)*STRIDE_P8)
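/* Illustrative example of the index arithmetic above (not used by the
 * build; assumes the usual packing widths PACK_X4 = 4 and PACK_X8 = 8):
 * with 4x2 lists, j-clusters 2 and 3 share packed coordinate block 1, so
 *   X_IND_CJ_J2(3) = (3>>1)*STRIDE_P4 + (3 & 1)*(PACK_X4>>1)
 *                  = 1*STRIDE_P4 + 2,
 * i.e. the second half of the atoms packed in block 1. The J8 macros do
 * the mirror-image mapping: two i-clusters share one pack-8 block.
 */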
#ifdef GMX_NBNXN_SIMD
/* The j-cluster size is matched to the SIMD width */
#if GMX_SIMD_REAL_WIDTH == 2
#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J2(ci)
#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J2(ci)
#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J2(cj)
#else
#if GMX_SIMD_REAL_WIDTH == 4
#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J4(ci)
#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J4(ci)
#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J4(cj)
#else
#if GMX_SIMD_REAL_WIDTH == 8
#define CI_TO_CJ_SIMD_4XN(ci)  CI_TO_CJ_J8(ci)
#define X_IND_CI_SIMD_4XN(ci)  X_IND_CI_J8(ci)
#define X_IND_CJ_SIMD_4XN(cj)  X_IND_CJ_J8(cj)
/* Half-SIMD (2xNN) kernels: the j-cluster size is half the SIMD width */
#define CI_TO_CJ_SIMD_2XNN(ci) CI_TO_CJ_J4(ci)
#define X_IND_CI_SIMD_2XNN(ci) X_IND_CI_J4(ci)
#define X_IND_CJ_SIMD_2XNN(cj) X_IND_CJ_J4(cj)
#else
#if GMX_SIMD_REAL_WIDTH == 16
#define CI_TO_CJ_SIMD_2XNN(ci) CI_TO_CJ_J8(ci)
#define X_IND_CI_SIMD_2XNN(ci) X_IND_CI_J8(ci)
#define X_IND_CJ_SIMD_2XNN(cj) X_IND_CJ_J8(cj)
#else
#error "unsupported GMX_SIMD_REAL_WIDTH"
#endif
#endif
#endif
#endif

#endif /* GMX_NBNXN_SIMD */
/* We shift the i-particles backward for PBC.
 * This leads to more conditionals than shifting forward.
 * We do this to get more balanced pair lists.
 */
#define NBNXN_SHIFT_BACKWARD
static void nbs_cycle_clear(nbnxn_cycle_t *cc)
{
    for (int i = 0; i < enbsCCnr; i++)
    {
        cc[i].count = 0;
        cc[i].c     = 0;
    }
}

static double Mcyc_av(const nbnxn_cycle_t *cc)
{
    return (double)cc->c*1e-6/cc->count;
}

static void nbs_cycle_print(FILE *fp, const nbnxn_search_t nbs)
{
    fprintf(fp, "\n");
    fprintf(fp, "ns %4d grid %4.1f search %4.1f red.f %5.3f",
            nbs->cc[enbsCCgrid].count,
            Mcyc_av(&nbs->cc[enbsCCgrid]),
            Mcyc_av(&nbs->cc[enbsCCsearch]),
            Mcyc_av(&nbs->cc[enbsCCreducef]));

    if (nbs->nthread_max > 1)
    {
        if (nbs->cc[enbsCCcombine].count > 0)
        {
            fprintf(fp, " comb %5.2f",
                    Mcyc_av(&nbs->cc[enbsCCcombine]));
        }
        fprintf(fp, " s. th");
        for (int t = 0; t < nbs->nthread_max; t++)
        {
            fprintf(fp, " %4.1f",
                    Mcyc_av(&nbs->work[t].cc[enbsCCsearch]));
        }
    }
    fprintf(fp, "\n");
}
/* Returns the j-cluster index for i-cluster ci, given a j-cluster
 * with 2^na_cj_2log atoms.
 */
static gmx_inline int ci_to_cj(int na_cj_2log, int ci)
{
    switch (na_cj_2log)
    {
        case 2: return ci;      break;
        case 1: return (ci<<1); break;
        case 3: return (ci>>1); break;
    }

    return 0;
}

gmx_bool nbnxn_kernel_pairlist_simple(int nb_kernel_type)
{
    if (nb_kernel_type == nbnxnkNotSet)
    {
        gmx_fatal(FARGS, "Non-bonded kernel type not set for Verlet-style pair-list.");
    }

    switch (nb_kernel_type)
    {
        case nbnxnk8x8x8_GPU:
        case nbnxnk8x8x8_PlainC:
            return FALSE;

        case nbnxnk4x4_PlainC:
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:
            return TRUE;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
            return FALSE;
    }
}
/* Initializes a single t_nblist data structure for the free-energy list */
static void nbnxn_init_pairlist_fep(t_nblist *nl)
{
    nl->type      = GMX_NBLIST_INTERACTION_FREE_ENERGY;
    nl->igeometry = GMX_NBLIST_GEOMETRY_PARTICLE_PARTICLE;
    /* The interaction functions are set in the free energy kernel function */
}
void nbnxn_init_search(nbnxn_search_t            *nbs_ptr,
                       ivec                      *n_dd_cells,
                       struct gmx_domdec_zones_t *zones,
                       gmx_bool                   bFEP,
                       int                        nthread_max)
{
    nbnxn_search_t nbs;
    int            ngrid;

    snew(nbs, 1);
    *nbs_ptr = nbs;

    nbs->bFEP   = bFEP;

    nbs->DomDec = (n_dd_cells != NULL);

    clear_ivec(nbs->dd_dim);
    ngrid = 1;
    if (nbs->DomDec)
    {
        nbs->zones = zones;

        for (int d = 0; d < DIM; d++)
        {
            if ((*n_dd_cells)[d] > 1)
            {
                nbs->dd_dim[d] = 1;
                /* Each grid matches a DD zone */
                ngrid *= 2;
            }
        }
    }

    nbnxn_grids_init(nbs, ngrid);

    nbs->cell        = NULL;
    nbs->cell_nalloc = 0;
    nbs->a           = NULL;
    nbs->a_nalloc    = 0;

    nbs->nthread_max = nthread_max;

    /* Initialize the work data structures for each thread */
    snew(nbs->work, nbs->nthread_max);
    for (int t = 0; t < nbs->nthread_max; t++)
    {
        nbs->work[t].cxy_na           = NULL;
        nbs->work[t].cxy_na_nalloc    = 0;
        nbs->work[t].sort_work        = NULL;
        nbs->work[t].sort_work_nalloc = 0;

        snew(nbs->work[t].nbl_fep, 1);
        nbnxn_init_pairlist_fep(nbs->work[t].nbl_fep);
    }

    /* Initialize detailed nbsearch cycle counting */
    nbs->print_cycles = (getenv("GMX_NBNXN_CYCLE") != 0);
    nbs->search_count = 0;
    nbs_cycle_clear(nbs->cc);
    for (int t = 0; t < nbs->nthread_max; t++)
    {
        nbs_cycle_clear(nbs->work[t].cc);
    }
}
static void init_buffer_flags(nbnxn_buffer_flags_t *flags,
                              int                   natoms)
{
    flags->nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE;
    if (flags->nflag > flags->flag_nalloc)
    {
        flags->flag_nalloc = over_alloc_large(flags->nflag);
        srenew(flags->flag, flags->flag_nalloc);
    }
    for (int b = 0; b < flags->nflag; b++)
    {
        bitmask_clear(&(flags->flag[b]));
    }
}
/* Determines the cell range along one dimension that
 * the bounding box b0 - b1 sees.
 */
static void get_cell_range(real b0, real b1,
                           int nc, real c0, real s, real invs,
                           real d2, real r2, int *cf, int *cl)
{
    *cf = std::max(static_cast<int>((b0 - c0)*invs), 0);

    while (*cf > 0 && d2 + sqr((b0 - c0) - (*cf)*s) < r2)
    {
        (*cf)--;
    }

    *cl = std::min(static_cast<int>((b1 - c0)*invs), nc-1);
    while (*cl < nc-1 && d2 + sqr((*cl+1)*s - (b1 - c0)) < r2)
    {
        (*cl)++;
    }
}
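/* Illustrative numbers for the range extension above (not part of the
 * build): with c0 = 0, cell size s = 2 and a box starting at b0 = 5.0,
 * *cf starts at floor(5.0/2) = 2; cell 1 is then also included when
 * d2 + (5.0 - 2*2.0)^2 = d2 + 1.0 is still below the cut-off r2, since
 * an atom near the upper edge of cell 1 could be within the cut-off.
 */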
/* Reference code calculating the distance^2 between two bounding boxes */
static float box_dist2(float bx0, float bx1, float by0,
                       float by1, float bz0, float bz1,
                       const nbnxn_bb_t *bb)
{
    float d2;
    float dl, dh, dm, dm0;

    d2 = 0;

    dl  = bx0 - bb->upper[BB_X];
    dh  = bb->lower[BB_X] - bx1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = by0 - bb->upper[BB_Y];
    dh  = bb->lower[BB_Y] - by1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bz0 - bb->upper[BB_Z];
    dh  = bb->lower[BB_Z] - bz1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
}
/* Plain C code calculating the distance^2 between two bounding boxes */
static float subc_bb_dist2(int si, const nbnxn_bb_t *bb_i_ci,
                           int csj, const nbnxn_bb_t *bb_j_all)
{
    const nbnxn_bb_t *bb_i, *bb_j;
    float             d2;
    float             dl, dh, dm, dm0;

    bb_i = bb_i_ci  + si;
    bb_j = bb_j_all + csj;

    d2 = 0;

    dl  = bb_i->lower[BB_X] - bb_j->upper[BB_X];
    dh  = bb_j->lower[BB_X] - bb_i->upper[BB_X];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Y] - bb_j->upper[BB_Y];
    dh  = bb_j->lower[BB_Y] - bb_i->upper[BB_Y];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Z] - bb_j->upper[BB_Z];
    dh  = bb_j->lower[BB_Z] - bb_i->upper[BB_Z];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
}
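/* The per-dimension trick above: for intervals [lo_i,hi_i] and
 * [lo_j,hi_j] the gap is max(lo_i - hi_j, lo_j - hi_i, 0).
 * Illustrative numbers: with i = [0,1] and j = [3,4] along x,
 * dl = 0-4 = -4 and dh = 3-1 = 2, so dm0 = 2 and 4 is added to d2;
 * for overlapping intervals both dl and dh are negative and the
 * contribution is 0, as it should be.
 */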
#ifdef NBNXN_SEARCH_BB_SIMD4

/* 4-wide SIMD code for bb distance for bb format xyz0 */
static float subc_bb_dist2_simd4(int si, const nbnxn_bb_t *bb_i_ci,
                                 int csj, const nbnxn_bb_t *bb_j_all)
{
    gmx_simd4_float_t bb_i_S0, bb_i_S1;
    gmx_simd4_float_t bb_j_S0, bb_j_S1;
    gmx_simd4_float_t dl_S;
    gmx_simd4_float_t dh_S;
    gmx_simd4_float_t dm_S;
    gmx_simd4_float_t dm0_S;

    bb_i_S0 = gmx_simd4_load_f(&bb_i_ci[si].lower[0]);
    bb_i_S1 = gmx_simd4_load_f(&bb_i_ci[si].upper[0]);
    bb_j_S0 = gmx_simd4_load_f(&bb_j_all[csj].lower[0]);
    bb_j_S1 = gmx_simd4_load_f(&bb_j_all[csj].upper[0]);

    dl_S  = gmx_simd4_sub_f(bb_i_S0, bb_j_S1);
    dh_S  = gmx_simd4_sub_f(bb_j_S0, bb_i_S1);

    dm_S  = gmx_simd4_max_f(dl_S, dh_S);
    dm0_S = gmx_simd4_max_f(dm_S, gmx_simd4_setzero_f());

    return gmx_simd4_dotproduct3_f(dm0_S, dm0_S);
}
/* Calculate bb bounding distances of bb_i[si,...,si+3] and store them in d2 */
#define SUBC_BB_DIST2_SIMD4_XXXX_INNER(si, bb_i, d2) \
    {                                                \
        int               shi;                       \
                                                     \
        gmx_simd4_float_t dx_0, dy_0, dz_0;          \
        gmx_simd4_float_t dx_1, dy_1, dz_1;          \
                                                     \
        gmx_simd4_float_t mx, my, mz;                \
        gmx_simd4_float_t m0x, m0y, m0z;             \
                                                     \
        gmx_simd4_float_t d2x, d2y, d2z;             \
        gmx_simd4_float_t d2s, d2t;                  \
                                                     \
        shi = si*NNBSBB_D*DIM;                       \
                                                     \
        xi_l = gmx_simd4_load_f(bb_i+shi+0*STRIDE_PBB); \
        yi_l = gmx_simd4_load_f(bb_i+shi+1*STRIDE_PBB); \
        zi_l = gmx_simd4_load_f(bb_i+shi+2*STRIDE_PBB); \
        xi_h = gmx_simd4_load_f(bb_i+shi+3*STRIDE_PBB); \
        yi_h = gmx_simd4_load_f(bb_i+shi+4*STRIDE_PBB); \
        zi_h = gmx_simd4_load_f(bb_i+shi+5*STRIDE_PBB); \
                                                     \
        dx_0 = gmx_simd4_sub_f(xi_l, xj_h);          \
        dy_0 = gmx_simd4_sub_f(yi_l, yj_h);          \
        dz_0 = gmx_simd4_sub_f(zi_l, zj_h);          \
                                                     \
        dx_1 = gmx_simd4_sub_f(xj_l, xi_h);          \
        dy_1 = gmx_simd4_sub_f(yj_l, yi_h);          \
        dz_1 = gmx_simd4_sub_f(zj_l, zi_h);          \
                                                     \
        mx   = gmx_simd4_max_f(dx_0, dx_1);          \
        my   = gmx_simd4_max_f(dy_0, dy_1);          \
        mz   = gmx_simd4_max_f(dz_0, dz_1);          \
                                                     \
        m0x  = gmx_simd4_max_f(mx, zero);            \
        m0y  = gmx_simd4_max_f(my, zero);            \
        m0z  = gmx_simd4_max_f(mz, zero);            \
                                                     \
        d2x  = gmx_simd4_mul_f(m0x, m0x);            \
        d2y  = gmx_simd4_mul_f(m0y, m0y);            \
        d2z  = gmx_simd4_mul_f(m0z, m0z);            \
                                                     \
        d2s  = gmx_simd4_add_f(d2x, d2y);            \
        d2t  = gmx_simd4_add_f(d2s, d2z);            \
                                                     \
        gmx_simd4_store_f(d2+si, d2t);               \
    }
/* 4-wide SIMD code for nsi bb distances for bb format xxxxyyyyzzzz */
static void subc_bb_dist2_simd4_xxxx(const float *bb_j,
                                     int nsi, const float *bb_i,
                                     float *d2)
{
    gmx_simd4_float_t xj_l, yj_l, zj_l;
    gmx_simd4_float_t xj_h, yj_h, zj_h;
    gmx_simd4_float_t xi_l, yi_l, zi_l;
    gmx_simd4_float_t xi_h, yi_h, zi_h;

    gmx_simd4_float_t zero;

    zero = gmx_simd4_setzero_f();

    xj_l = gmx_simd4_set1_f(bb_j[0*STRIDE_PBB]);
    yj_l = gmx_simd4_set1_f(bb_j[1*STRIDE_PBB]);
    zj_l = gmx_simd4_set1_f(bb_j[2*STRIDE_PBB]);
    xj_h = gmx_simd4_set1_f(bb_j[3*STRIDE_PBB]);
    yj_h = gmx_simd4_set1_f(bb_j[4*STRIDE_PBB]);
    zj_h = gmx_simd4_set1_f(bb_j[5*STRIDE_PBB]);

    /* Here we "loop" over si (0,STRIDE_PBB) from 0 to nsi with step STRIDE_PBB.
     * But as we know the number of iterations is 1 or 2, we unroll manually.
     */
    SUBC_BB_DIST2_SIMD4_XXXX_INNER(0, bb_i, d2);
    if (STRIDE_PBB < nsi)
    {
        SUBC_BB_DIST2_SIMD4_XXXX_INNER(STRIDE_PBB, bb_i, d2);
    }
}

#endif /* NBNXN_SEARCH_BB_SIMD4 */
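/* Layout note for the xxxxyyyyzzzz ("pbb") format used above: the
 * bounding boxes of STRIDE_PBB (typically 4) i-subcells are stored
 * transposed, i.e. 4 lower-x values, then 4 lower-y, 4 lower-z,
 * 4 upper-x, 4 upper-y, 4 upper-z. A single gmx_simd4_load_f thus
 * fetches the same box corner component of 4 subcells, so the inner
 * macro computes 4 box-box distances at once instead of one distance
 * with 4-wide loads.
 */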
/* Plain C function which determines if any atom pair between two cells
 * is within distance sqrt(rl2).
 */
static gmx_bool subc_in_range_x(int na_c,
                                int si, const real *x_i,
                                int csj, int stride, const real *x_j,
                                real rl2)
{
    for (int i = 0; i < na_c; i++)
    {
        int i0 = (si*na_c + i)*DIM;
        for (int j = 0; j < na_c; j++)
        {
            int j0 = (csj*na_c + j)*stride;

            real d2 = sqr(x_i[i0  ] - x_j[j0  ]) +
                      sqr(x_i[i0+1] - x_j[j0+1]) +
                      sqr(x_i[i0+2] - x_j[j0+2]);

            if (d2 < rl2)
            {
                return TRUE;
            }
        }
    }

    return FALSE;
}
#ifdef NBNXN_SEARCH_SIMD4_FLOAT_X_BB

/* 4-wide SIMD function which determines if any atom pair between two cells,
 * both with 8 atoms, is within distance sqrt(rl2).
 * Using 8-wide AVX is not faster on Intel Sandy Bridge.
 */
static gmx_bool subc_in_range_simd4(int na_c,
                                    int si, const real *x_i,
                                    int csj, int stride, const real *x_j,
                                    real rl2)
{
    gmx_simd4_real_t ix_S0, iy_S0, iz_S0;
    gmx_simd4_real_t ix_S1, iy_S1, iz_S1;

    gmx_simd4_real_t rc2_S;

    int              dim_stride;
    int              j0, j1;

    rc2_S = gmx_simd4_set1_r(rl2);

    dim_stride = NBNXN_GPU_CLUSTER_SIZE/STRIDE_PBB*DIM;
    ix_S0      = gmx_simd4_load_r(x_i+(si*dim_stride+0)*STRIDE_PBB);
    iy_S0      = gmx_simd4_load_r(x_i+(si*dim_stride+1)*STRIDE_PBB);
    iz_S0      = gmx_simd4_load_r(x_i+(si*dim_stride+2)*STRIDE_PBB);
    ix_S1      = gmx_simd4_load_r(x_i+(si*dim_stride+3)*STRIDE_PBB);
    iy_S1      = gmx_simd4_load_r(x_i+(si*dim_stride+4)*STRIDE_PBB);
    iz_S1      = gmx_simd4_load_r(x_i+(si*dim_stride+5)*STRIDE_PBB);

    /* We loop from the outer to the inner particles to maximize
     * the chance that we find a pair in range quickly and return.
     */
    j0 = csj*na_c;
    j1 = j0 + na_c - 1;
    while (j0 < j1)
    {
        gmx_simd4_real_t jx0_S, jy0_S, jz0_S;
        gmx_simd4_real_t jx1_S, jy1_S, jz1_S;

        gmx_simd4_real_t dx_S0, dy_S0, dz_S0;
        gmx_simd4_real_t dx_S1, dy_S1, dz_S1;
        gmx_simd4_real_t dx_S2, dy_S2, dz_S2;
        gmx_simd4_real_t dx_S3, dy_S3, dz_S3;

        gmx_simd4_real_t rsq_S0;
        gmx_simd4_real_t rsq_S1;
        gmx_simd4_real_t rsq_S2;
        gmx_simd4_real_t rsq_S3;

        gmx_simd4_bool_t wco_S0;
        gmx_simd4_bool_t wco_S1;
        gmx_simd4_bool_t wco_S2;
        gmx_simd4_bool_t wco_S3;
        gmx_simd4_bool_t wco_any_S01, wco_any_S23, wco_any_S;

        jx0_S = gmx_simd4_set1_r(x_j[j0*stride+0]);
        jy0_S = gmx_simd4_set1_r(x_j[j0*stride+1]);
        jz0_S = gmx_simd4_set1_r(x_j[j0*stride+2]);

        jx1_S = gmx_simd4_set1_r(x_j[j1*stride+0]);
        jy1_S = gmx_simd4_set1_r(x_j[j1*stride+1]);
        jz1_S = gmx_simd4_set1_r(x_j[j1*stride+2]);

        /* Calculate distance */
        dx_S0 = gmx_simd4_sub_r(ix_S0, jx0_S);
        dy_S0 = gmx_simd4_sub_r(iy_S0, jy0_S);
        dz_S0 = gmx_simd4_sub_r(iz_S0, jz0_S);
        dx_S1 = gmx_simd4_sub_r(ix_S1, jx0_S);
        dy_S1 = gmx_simd4_sub_r(iy_S1, jy0_S);
        dz_S1 = gmx_simd4_sub_r(iz_S1, jz0_S);
        dx_S2 = gmx_simd4_sub_r(ix_S0, jx1_S);
        dy_S2 = gmx_simd4_sub_r(iy_S0, jy1_S);
        dz_S2 = gmx_simd4_sub_r(iz_S0, jz1_S);
        dx_S3 = gmx_simd4_sub_r(ix_S1, jx1_S);
        dy_S3 = gmx_simd4_sub_r(iy_S1, jy1_S);
        dz_S3 = gmx_simd4_sub_r(iz_S1, jz1_S);

        /* rsq = dx*dx+dy*dy+dz*dz */
        rsq_S0 = gmx_simd4_calc_rsq_r(dx_S0, dy_S0, dz_S0);
        rsq_S1 = gmx_simd4_calc_rsq_r(dx_S1, dy_S1, dz_S1);
        rsq_S2 = gmx_simd4_calc_rsq_r(dx_S2, dy_S2, dz_S2);
        rsq_S3 = gmx_simd4_calc_rsq_r(dx_S3, dy_S3, dz_S3);

        wco_S0 = gmx_simd4_cmplt_r(rsq_S0, rc2_S);
        wco_S1 = gmx_simd4_cmplt_r(rsq_S1, rc2_S);
        wco_S2 = gmx_simd4_cmplt_r(rsq_S2, rc2_S);
        wco_S3 = gmx_simd4_cmplt_r(rsq_S3, rc2_S);

        wco_any_S01 = gmx_simd4_or_b(wco_S0, wco_S1);
        wco_any_S23 = gmx_simd4_or_b(wco_S2, wco_S3);
        wco_any_S   = gmx_simd4_or_b(wco_any_S01, wco_any_S23);

        if (gmx_simd4_anytrue_b(wco_any_S))
        {
            return TRUE;
        }

        j0++;
        j1--;
    }

    return FALSE;
}

#endif /* NBNXN_SEARCH_SIMD4_FLOAT_X_BB */
/* Returns the j sub-cell for index cj_ind */
static int nbl_cj(const nbnxn_pairlist_t *nbl, int cj_ind)
{
    return nbl->cj4[cj_ind >> NBNXN_GPU_JGROUP_SIZE_2LOG].cj[cj_ind & (NBNXN_GPU_JGROUP_SIZE - 1)];
}

/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
static unsigned int nbl_imask0(const nbnxn_pairlist_t *nbl, int cj_ind)
{
    return nbl->cj4[cj_ind >> NBNXN_GPU_JGROUP_SIZE_2LOG].imei[0].imask;
}
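/* Index arithmetic example for the cj4 packing used above (with
 * NBNXN_GPU_JGROUP_SIZE = 4, i.e. NBNXN_GPU_JGROUP_SIZE_2LOG = 2):
 * j-cluster list index cj_ind = 10 lives in cj4 group 10>>2 = 2,
 * at slot 10 & 3 = 2 within that group.
 */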
/* Ensures there is enough space for extra exclusion masks */
static void check_excl_space(nbnxn_pairlist_t *nbl, int extra)
{
    if (nbl->nexcl+extra > nbl->excl_nalloc)
    {
        nbl->excl_nalloc = over_alloc_small(nbl->nexcl+extra);
        nbnxn_realloc_void((void **)&nbl->excl,
                           nbl->nexcl*sizeof(*nbl->excl),
                           nbl->excl_nalloc*sizeof(*nbl->excl),
                           nbl->alloc, nbl->free);
    }
}
/* Ensures there is enough space for ncell extra j-cells in the list */
static void check_subcell_list_space_simple(nbnxn_pairlist_t *nbl,
                                            int               ncell)
{
    int cj_max;

    cj_max = nbl->ncj + ncell;

    if (cj_max > nbl->cj_nalloc)
    {
        nbl->cj_nalloc = over_alloc_small(cj_max);
        nbnxn_realloc_void((void **)&nbl->cj,
                           nbl->ncj*sizeof(*nbl->cj),
                           nbl->cj_nalloc*sizeof(*nbl->cj),
                           nbl->alloc, nbl->free);
    }
}
/* Ensures there is enough space for ncell extra j-subcells in the list */
static void check_subcell_list_space_supersub(nbnxn_pairlist_t *nbl,
                                              int               nsupercell)
{
    int ncj4_max, w;

    /* We can have maximally nsupercell*GPU_NSUBCELL sj lists */
    /* We can store 4 j-subcell - i-supercell pairs in one struct.
     * Since we round down, we need one extra entry.
     */
    ncj4_max = ((nbl->work->cj_ind + nsupercell*GPU_NSUBCELL + NBNXN_GPU_JGROUP_SIZE - 1) >> NBNXN_GPU_JGROUP_SIZE_2LOG);

    if (ncj4_max > nbl->cj4_nalloc)
    {
        nbl->cj4_nalloc = over_alloc_small(ncj4_max);
        nbnxn_realloc_void((void **)&nbl->cj4,
                           nbl->work->cj4_init*sizeof(*nbl->cj4),
                           nbl->cj4_nalloc*sizeof(*nbl->cj4),
                           nbl->alloc, nbl->free);
    }

    if (ncj4_max > nbl->work->cj4_init)
    {
        for (int j4 = nbl->work->cj4_init; j4 < ncj4_max; j4++)
        {
            /* No i-subcells and no excl's in the list initially */
            for (w = 0; w < NWARP; w++)
            {
                nbl->cj4[j4].imei[w].imask    = 0U;
                nbl->cj4[j4].imei[w].excl_ind = 0;
            }
        }
        nbl->work->cj4_init = ncj4_max;
    }
}
/* Set all excl masks for one GPU warp to no exclusions */
static void set_no_excls(nbnxn_excl_t *excl)
{
    for (int t = 0; t < WARP_SIZE; t++)
    {
        /* Turn all interaction bits on */
        excl->pair[t] = NBNXN_INTERACTION_MASK_ALL;
    }
}
/* Initializes a single nbnxn_pairlist_t data structure */
static void nbnxn_init_pairlist(nbnxn_pairlist_t *nbl,
                                gmx_bool          bSimple,
                                nbnxn_alloc_t    *alloc,
                                nbnxn_free_t     *free)
{
    if (alloc == NULL)
    {
        nbl->alloc = nbnxn_alloc_aligned;
    }
    else
    {
        nbl->alloc = alloc;
    }
    if (free == NULL)
    {
        nbl->free = nbnxn_free_aligned;
    }
    else
    {
        nbl->free = free;
    }

    nbl->bSimple     = bSimple;
    nbl->na_sc       = 0;
    nbl->na_ci       = 0;
    nbl->na_cj       = 0;
    nbl->nci         = 0;
    nbl->ci          = NULL;
    nbl->ci_nalloc   = 0;
    nbl->nsci        = 0;
    nbl->sci         = NULL;
    nbl->sci_nalloc  = 0;
    nbl->ncj         = 0;
    nbl->cj          = NULL;
    nbl->cj_nalloc   = 0;
    nbl->ncj4        = 0;
    /* We need one element extra in sj, so alloc initially with 1 */
    nbl->cj4_nalloc  = 0;
    nbl->cj4         = NULL;
    nbl->nci_tot     = 0;

    nbl->nexcl       = 0;
    nbl->excl        = NULL;
    nbl->excl_nalloc = 0;
    check_excl_space(nbl, 1);
    nbl->nexcl       = 1;
    set_no_excls(&nbl->excl[0]);

    snew(nbl->work, 1);
    if (nbl->bSimple)
    {
        snew_aligned(nbl->work->bb_ci, 1, NBNXN_SEARCH_BB_MEM_ALIGN);
    }
    else
    {
#ifdef NBNXN_BBXXXX
        snew_aligned(nbl->work->pbb_ci, GPU_NSUBCELL/STRIDE_PBB*NNBSBB_XXXX, NBNXN_SEARCH_BB_MEM_ALIGN);
#else
        snew_aligned(nbl->work->bb_ci, GPU_NSUBCELL, NBNXN_SEARCH_BB_MEM_ALIGN);
#endif
    }
    snew_aligned(nbl->work->x_ci, NBNXN_NA_SC_MAX*DIM, NBNXN_SEARCH_BB_MEM_ALIGN);
#ifdef GMX_NBNXN_SIMD
    snew_aligned(nbl->work->x_ci_simd_4xn, 1, NBNXN_MEM_ALIGN);
    snew_aligned(nbl->work->x_ci_simd_2xnn, 1, NBNXN_MEM_ALIGN);
#endif
    snew_aligned(nbl->work->d2, GPU_NSUBCELL, NBNXN_SEARCH_BB_MEM_ALIGN);

    nbl->work->sort            = NULL;
    nbl->work->sort_nalloc     = 0;
    nbl->work->sci_sort        = NULL;
    nbl->work->sci_sort_nalloc = 0;
}
void nbnxn_init_pairlist_set(nbnxn_pairlist_set_t *nbl_list,
                             gmx_bool bSimple, gmx_bool bCombined,
                             nbnxn_alloc_t *alloc,
                             nbnxn_free_t  *free)
{
    nbl_list->bSimple   = bSimple;
    nbl_list->bCombined = bCombined;

    nbl_list->nnbl = gmx_omp_nthreads_get(emntNonbonded);

    if (!nbl_list->bCombined &&
        nbl_list->nnbl > NBNXN_BUFFERFLAG_MAX_THREADS)
    {
        gmx_fatal(FARGS, "%d OpenMP threads were requested. Since the non-bonded force buffer reduction is prohibitively slow with more than %d threads, we do not allow this. Use %d or less OpenMP threads.",
                  nbl_list->nnbl, NBNXN_BUFFERFLAG_MAX_THREADS, NBNXN_BUFFERFLAG_MAX_THREADS);
    }

    snew(nbl_list->nbl, nbl_list->nnbl);
    snew(nbl_list->nbl_fep, nbl_list->nnbl);
    /* Execute in order to avoid memory interleaving between threads */
#pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
    for (int i = 0; i < nbl_list->nnbl; i++)
    {
        /* Allocate the nblist data structure locally on each thread
         * to optimize memory access for NUMA architectures.
         */
        snew(nbl_list->nbl[i], 1);

        /* Only list 0 is used on the GPU, use normal allocation for i>0 */
        if (i == 0)
        {
            nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, alloc, free);
        }
        else
        {
            nbnxn_init_pairlist(nbl_list->nbl[i], nbl_list->bSimple, NULL, NULL);
        }

        snew(nbl_list->nbl_fep[i], 1);
        nbnxn_init_pairlist_fep(nbl_list->nbl_fep[i]);
    }
}
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics_simple(FILE *fp, const nbnxn_pairlist_t *nbl,
                                           const nbnxn_search_t nbs, real rl)
{
    const nbnxn_grid_t *grid;
    int                 cs[SHIFTS];
    int                 npexcl;

    /* This code only produces correct statistics with domain decomposition */
    grid = &nbs->grid[0];

    fprintf(fp, "nbl nci %d ncj %d\n",
            nbl->nci, nbl->ncj);
    fprintf(fp, "nbl na_sc %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_sc, rl, nbl->ncj, nbl->ncj/(double)grid->nc,
            nbl->ncj/(double)grid->nc*grid->na_sc,
            nbl->ncj/(double)grid->nc*grid->na_sc/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nc*grid->na_sc/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    fprintf(fp, "nbl average j cell list length %.1f\n",
            0.25*nbl->ncj/(double)std::max(nbl->nci, 1));

    for (int s = 0; s < SHIFTS; s++)
    {
        cs[s] = 0;
    }
    npexcl = 0;
    for (int i = 0; i < nbl->nci; i++)
    {
        cs[nbl->ci[i].shift & NBNXN_CI_SHIFT] +=
            nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start;

        int j = nbl->ci[i].cj_ind_start;
        while (j < nbl->ci[i].cj_ind_end &&
               nbl->cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
        {
            npexcl++;
            j++;
        }
    }
    fprintf(fp, "nbl cell pairs, total: %d excl: %d %.1f%%\n",
            nbl->ncj, npexcl, 100*npexcl/(double)std::max(nbl->ncj, 1));
    for (int s = 0; s < SHIFTS; s++)
    {
        if (cs[s] > 0)
        {
            fprintf(fp, "nbl shift %2d ncj %3d\n", s, cs[s]);
        }
    }
}
/* Print statistics of a pair list, used for debug output */
static void print_nblist_statistics_supersub(FILE *fp, const nbnxn_pairlist_t *nbl,
                                             const nbnxn_search_t nbs, real rl)
{
    const nbnxn_grid_t *grid;
    int                 b;
    int                 c[GPU_NSUBCELL+1];
    double              sum_nsp, sum_nsp2;
    int                 nsp_max;

    /* This code only produces correct statistics with domain decomposition */
    grid = &nbs->grid[0];

    fprintf(fp, "nbl nsci %d ncj4 %d nsi %d excl4 %d\n",
            nbl->nsci, nbl->ncj4, nbl->nci_tot, nbl->nexcl);
    fprintf(fp, "nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
            nbl->na_ci, rl, nbl->nci_tot, nbl->nci_tot/(double)grid->nsubc_tot,
            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c,
            nbl->nci_tot/(double)grid->nsubc_tot*grid->na_c/(0.5*4.0/3.0*M_PI*rl*rl*rl*grid->nsubc_tot*grid->na_c/(grid->size[XX]*grid->size[YY]*grid->size[ZZ])));

    sum_nsp  = 0;
    sum_nsp2 = 0;
    nsp_max  = 0;
    for (int si = 0; si <= GPU_NSUBCELL; si++)
    {
        c[si] = 0;
    }
    for (int i = 0; i < nbl->nsci; i++)
    {
        int nsp;

        nsp = 0;
        for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
        {
            for (int j = 0; j < NBNXN_GPU_JGROUP_SIZE; j++)
            {
                b = 0;
                for (int si = 0; si < GPU_NSUBCELL; si++)
                {
                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*GPU_NSUBCELL + si)))
                    {
                        b++;
                    }
                }
                nsp += b;
                c[b]++;
            }
        }
        sum_nsp  += nsp;
        sum_nsp2 += nsp*nsp;
        nsp_max   = std::max(nsp_max, nsp);
    }
    if (nbl->nsci > 0)
    {
        sum_nsp  /= nbl->nsci;
        sum_nsp2 /= nbl->nsci;
    }
    fprintf(fp, "nbl #cluster-pairs: av %.1f stddev %.1f max %d\n",
            sum_nsp, std::sqrt(sum_nsp2 - sum_nsp*sum_nsp), nsp_max);

    if (nbl->ncj4 > 0)
    {
        for (b = 0; b <= GPU_NSUBCELL; b++)
        {
            fprintf(fp, "nbl j-list #i-subcell %d %7d %4.1f\n",
                    b, c[b],
                    100.0*c[b]/(double)(nbl->ncj4*NBNXN_GPU_JGROUP_SIZE));
        }
    }
}
/* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp */
static void low_get_nbl_exclusions(nbnxn_pairlist_t *nbl, int cj4,
                                   int warp, nbnxn_excl_t **excl)
{
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
    {
        /* No exclusions set, make a new list entry */
        nbl->cj4[cj4].imei[warp].excl_ind = nbl->nexcl;
        nbl->nexcl++;
        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
        set_no_excls(*excl);
    }
    else
    {
        /* We already have some exclusions, new ones can be added to the list */
        *excl = &nbl->excl[nbl->cj4[cj4].imei[warp].excl_ind];
    }
}
/* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp,
 * generates a new element and allocates extra memory, if necessary.
 */
static void get_nbl_exclusions_1(nbnxn_pairlist_t *nbl, int cj4,
                                 int warp, nbnxn_excl_t **excl)
{
    if (nbl->cj4[cj4].imei[warp].excl_ind == 0)
    {
        /* We need to make a new list entry, check if we have space */
        check_excl_space(nbl, 1);
    }
    low_get_nbl_exclusions(nbl, cj4, warp, excl);
}
/* Returns pointers to the exclusion masks for cj4-unit cj4 for both warps,
 * generates new elements and allocates extra memory, if necessary.
 */
static void get_nbl_exclusions_2(nbnxn_pairlist_t *nbl, int cj4,
                                 nbnxn_excl_t **excl_w0,
                                 nbnxn_excl_t **excl_w1)
{
    /* Check for space we might need */
    check_excl_space(nbl, 2);

    low_get_nbl_exclusions(nbl, cj4, 0, excl_w0);
    low_get_nbl_exclusions(nbl, cj4, 1, excl_w1);
}
/* Sets the self exclusions i=j and pair exclusions i>j */
static void set_self_and_newton_excls_supersub(nbnxn_pairlist_t *nbl,
                                               int cj4_ind, int sj_offset,
                                               int si)
{
    nbnxn_excl_t *excl[2];

    /* Here we only set the self and double pair exclusions */

    get_nbl_exclusions_2(nbl, cj4_ind, &excl[0], &excl[1]);

    /* Only minor < major bits set */
    for (int ej = 0; ej < nbl->na_ci; ej++)
    {
        int w = (ej >> 2);
        for (int ei = ej; ei < nbl->na_ci; ei++)
        {
            excl[w]->pair[(ej & (NBNXN_GPU_JGROUP_SIZE-1))*nbl->na_ci + ei] &=
                ~(1U << (sj_offset*GPU_NSUBCELL + si));
        }
    }
}
/* Returns a diagonal or off-diagonal interaction mask for plain C lists */
static unsigned int get_imask(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=2 */
static unsigned int get_imask_simd_j2(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci*2 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_0 :
            (rdiag && ci*2+1 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_1 :
             NBNXN_INTERACTION_MASK_ALL));
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=4 */
static unsigned int get_imask_simd_j4(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=8 */
static unsigned int get_imask_simd_j8(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj*2 ? NBNXN_INTERACTION_MASK_DIAG_J8_0 :
            (rdiag && ci == cj*2+1 ? NBNXN_INTERACTION_MASK_DIAG_J8_1 :
             NBNXN_INTERACTION_MASK_ALL));
}
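/* Example of the diagonal masks above: with cj-size 8 each j-cluster
 * spans two i-clusters (2cj and 2cj+1), so the self-interaction
 * diagonal of i-cluster ci falls either in the lower half
 * (ci == cj*2, mask ..._J8_0) or the upper half (ci == cj*2+1,
 * mask ..._J8_1) of the interaction bits; all off-diagonal cluster
 * pairs simply get the full mask.
 */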
#ifdef GMX_NBNXN_SIMD
#if GMX_SIMD_REAL_WIDTH == 2
#define get_imask_simd_4xn  get_imask_simd_j2
#endif
#if GMX_SIMD_REAL_WIDTH == 4
#define get_imask_simd_4xn  get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 8
#define get_imask_simd_4xn  get_imask_simd_j8
#define get_imask_simd_2xnn get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 16
#define get_imask_simd_2xnn get_imask_simd_j8
#endif
#endif
/* Plain C code for making a pair list of cell ci vs cell cjf-cjl.
 * Checks bounding box distances and possibly atom pair distances.
 */
static void make_cluster_list_simple(const nbnxn_grid_t *gridj,
                                     nbnxn_pairlist_t *nbl,
                                     int ci, int cjf, int cjl,
                                     gmx_bool remove_sub_diag,
                                     const real *x_j,
                                     real rl2, float rbb2,
                                     int *ndistc)
{
    const nbnxn_bb_t *bb_ci;
    const real       *x_ci;
    gmx_bool          InRange;
    real              d2;
    int               cjf_gl, cjl_gl;

    bb_ci = nbl->work->bb_ci;
    x_ci  = nbl->work->x_ci;

    InRange = FALSE;
    while (!InRange && cjf <= cjl)
    {
        d2       = subc_bb_dist2(0, bb_ci, cjf, gridj->bb);
        *ndistc += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rl2)
        {
            cjf_gl = gridj->cell0 + cjf;
            for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
            {
                for (int j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
                {
                    InRange = InRange ||
                        (sqr(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
                         sqr(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
                         sqr(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjf_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rl2);
                }
            }
            *ndistc += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
        }
        if (!InRange)
        {
            cjf++;
        }
    }
    if (!InRange)
    {
        return;
    }

    InRange = FALSE;
    while (!InRange && cjl > cjf)
    {
        d2       = subc_bb_dist2(0, bb_ci, cjl, gridj->bb);
        *ndistc += 2;

        /* Check if the distance is within the distance where
         * we use only the bounding box distance rbb,
         * or within the cut-off and there is at least one atom pair
         * within the cut-off.
         */
        if (d2 < rbb2)
        {
            InRange = TRUE;
        }
        else if (d2 < rl2)
        {
            cjl_gl = gridj->cell0 + cjl;
            for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE && !InRange; i++)
            {
                for (int j = 0; j < NBNXN_CPU_CLUSTER_I_SIZE; j++)
                {
                    InRange = InRange ||
                        (sqr(x_ci[i*STRIDE_XYZ+XX] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+XX]) +
                         sqr(x_ci[i*STRIDE_XYZ+YY] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+YY]) +
                         sqr(x_ci[i*STRIDE_XYZ+ZZ] - x_j[(cjl_gl*NBNXN_CPU_CLUSTER_I_SIZE+j)*STRIDE_XYZ+ZZ]) < rl2);
                }
            }
            *ndistc += NBNXN_CPU_CLUSTER_I_SIZE*NBNXN_CPU_CLUSTER_I_SIZE;
        }
        if (!InRange)
        {
            cjl--;
        }
    }

    if (cjf <= cjl)
    {
        for (int cj = cjf; cj <= cjl; cj++)
        {
            /* Store cj and the interaction mask */
            nbl->cj[nbl->ncj].cj   = gridj->cell0 + cj;
            nbl->cj[nbl->ncj].excl = get_imask(remove_sub_diag, ci, cj);
            nbl->ncj++;
        }
        /* Increase the closing index in i super-cell list */
        nbl->ci[nbl->nci].cj_ind_end = nbl->ncj;
    }
}
#ifdef GMX_NBNXN_SIMD_4XN
#include "gromacs/mdlib/nbnxn_search_simd_4xn.h"
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
#include "gromacs/mdlib/nbnxn_search_simd_2xnn.h"
#endif
/* Plain C or SIMD4 code for making a pair list of super-cell sci vs scj.
 * Checks bounding box distances and possibly atom pair distances.
 */
static void make_cluster_list_supersub(const nbnxn_grid_t *gridi,
                                       const nbnxn_grid_t *gridj,
                                       nbnxn_pairlist_t *nbl,
                                       int sci, int scj,
                                       gmx_bool sci_equals_scj,
                                       int stride, const real *x,
                                       real rl2, float rbb2,
                                       int *ndistc)
{
    int               na_c;
    int               npair;
    int               ci1, cj, cj_gl;
    int               cj4_ind, cj_offset;
    unsigned int      imask;
    nbnxn_cj4_t      *cj4;
#ifdef NBNXN_BBXXXX
    const float      *pbb_ci;
#else
    const nbnxn_bb_t *bb_ci;
#endif
    const real       *x_ci;
    float            *d2l, d2;
    int               w;
#define PRUNE_LIST_CPU_ONE
#ifdef PRUNE_LIST_CPU_ONE
    int               ci_last = -1;
#endif

    d2l = nbl->work->d2;

#ifdef NBNXN_BBXXXX
    pbb_ci = nbl->work->pbb_ci;
#else
    bb_ci  = nbl->work->bb_ci;
#endif
    x_ci   = nbl->work->x_ci;

    na_c = gridj->na_c;

    for (int cjo = 0; cjo < gridj->nsubc[scj]; cjo++)
    {
        cj4_ind   = (nbl->work->cj_ind >> NBNXN_GPU_JGROUP_SIZE_2LOG);
        cj_offset = nbl->work->cj_ind - cj4_ind*NBNXN_GPU_JGROUP_SIZE;
        cj4       = &nbl->cj4[cj4_ind];

        cj = scj*GPU_NSUBCELL + cjo;

        cj_gl = gridj->cell0*GPU_NSUBCELL + cj;

        /* Initialize this j-subcell i-subcell list */
        cj4->cj[cj_offset] = cj_gl;
        imask              = 0;

        if (sci_equals_scj)
        {
            ci1 = cjo + 1;
        }
        else
        {
            ci1 = gridi->nsubc[sci];
        }

#ifdef NBNXN_BBXXXX
        /* Determine all ci1 bb distances in one call with SIMD4 */
        subc_bb_dist2_simd4_xxxx(gridj->pbb+(cj>>STRIDE_PBB_2LOG)*NNBSBB_XXXX+(cj & (STRIDE_PBB-1)),
                                 ci1, pbb_ci, d2l);
        *ndistc += na_c*2;
#endif

        npair = 0;
        /* We use a fixed upper-bound instead of ci1 to help optimization */
        for (int ci = 0; ci < GPU_NSUBCELL; ci++)
        {
            if (ci == ci1)
            {
                break;
            }

#ifndef NBNXN_BBXXXX
            /* Determine the bb distance between ci and cj */
            d2l[ci]  = subc_bb_dist2(ci, bb_ci, cj, gridj->bb);
            *ndistc += 2;
#endif
            d2 = d2l[ci];

#ifdef PRUNE_LIST_CPU_ALL
            /* Check if the distance is within the distance where
             * we use only the bounding box distance rbb,
             * or within the cut-off and there is at least one atom pair
             * within the cut-off. This check is very costly.
             */
            *ndistc += na_c*na_c;
            if (d2 < rbb2 ||
                (d2 < rl2 &&
#ifdef NBNXN_PBB_SIMD4
                 subc_in_range_simd4
#else
                 subc_in_range_x
#endif
                     (na_c, ci, x_ci, cj_gl, stride, x, rl2)))
#else
            /* Check if the distance between the two bounding boxes
             * is within the pair-list cut-off.
             */
            if (d2 < rl2)
#endif
            {
                /* Flag this i-subcell to be taken into account */
                imask |= (1U << (cj_offset*GPU_NSUBCELL+ci));

#ifdef PRUNE_LIST_CPU_ONE
                ci_last = ci;
#endif

                npair++;
            }
        }

#ifdef PRUNE_LIST_CPU_ONE
        /* If we only found 1 pair, check if any atoms are actually
         * within the cut-off, so we could get rid of it.
         */
        if (npair == 1 && d2l[ci_last] >= rbb2)
        {
            /* Avoid using function pointers here, as it's slower */
            if (
#ifdef NBNXN_PBB_SIMD4
                !subc_in_range_simd4
#else
                !subc_in_range_x
#endif
                    (na_c, ci_last, x_ci, cj_gl, stride, x, rl2))
            {
                imask &= ~(1U << (cj_offset*GPU_NSUBCELL+ci_last));
                npair--;
            }
        }
#endif

        if (npair > 0)
        {
            /* We have a useful sj entry, close it now */

            /* Set the exclusions for the ci == sj entry.
             * Here we don't bother to check if this entry is actually flagged,
             * as it will nearly always be in the list.
             */
            if (sci_equals_scj)
            {
                set_self_and_newton_excls_supersub(nbl, cj4_ind, cj_offset, cjo);
            }

            /* Copy the cluster interaction mask to the list */
            for (w = 0; w < NWARP; w++)
            {
                cj4->imei[w].imask |= imask;
            }

            nbl->work->cj_ind++;

            /* Keep the count */
            nbl->nci_tot += npair;

            /* Increase the closing index in i super-cell list */
            nbl->sci[nbl->nsci].cj4_ind_end =
                ((nbl->work->cj_ind+NBNXN_GPU_JGROUP_SIZE-1) >> NBNXN_GPU_JGROUP_SIZE_2LOG);
        }
    }
}
/* Set all atom-pair exclusions from the topology stored in excl
 * as masks in the pair-list for simple list i-entry nbl_ci
 */
static void set_ci_top_excls(const nbnxn_search_t nbs,
                             nbnxn_pairlist_t    *nbl,
                             gmx_bool             diagRemoved,
                             int                  na_ci_2log,
                             int                  na_cj_2log,
                             const nbnxn_ci_t    *nbl_ci,
                             const t_blocka      *excl)
{
    const int *cell;
    int        ci;
    int        cj_ind_first, cj_ind_last;
    int        cj_first, cj_last;
    int        ndirect;
    int        ai, aj, si, ge, se;
    int        found, cj_ind_0, cj_ind_1, cj_ind_m;
    int        cj_m;
    int        inner_i, inner_e;

    cell = nbs->cell;

    if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
    {
        /* Empty list */
        return;
    }

    ci = nbl_ci->ci;

    cj_ind_first = nbl_ci->cj_ind_start;
    cj_ind_last  = nbl->ncj - 1;

    cj_first = nbl->cj[cj_ind_first].cj;
    cj_last  = nbl->cj[cj_ind_last].cj;

    /* Determine how many contiguous j-cells we have starting
     * from the first i-cell. This number can be used to directly
     * calculate j-cell indices for excluded atoms.
     */
    ndirect = 0;
    if (na_ci_2log == na_cj_2log)
    {
        while (cj_ind_first + ndirect <= cj_ind_last &&
               nbl->cj[cj_ind_first+ndirect].cj == ci + ndirect)
        {
            ndirect++;
        }
    }
#ifdef NBNXN_SEARCH_BB_SIMD4
    else
    {
        while (cj_ind_first + ndirect <= cj_ind_last &&
               nbl->cj[cj_ind_first+ndirect].cj == ci_to_cj(na_cj_2log, ci) + ndirect)
        {
            ndirect++;
        }
    }
#endif

    /* Loop over the atoms in the i super-cell */
    for (int i = 0; i < nbl->na_sc; i++)
    {
        ai = nbs->a[ci*nbl->na_sc+i];
        if (ai >= 0)
        {
            si = (i>>na_ci_2log);

            /* Loop over the topology-based exclusions for this i-atom */
            for (int eind = excl->index[ai]; eind < excl->index[ai+1]; eind++)
            {
                aj = excl->a[eind];

                if (aj == ai)
                {
                    /* The self exclusions are already set, save some time */
                    continue;
                }

                ge = cell[aj];

                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                if (diagRemoved && ge <= ci*nbl->na_sc + i)
                {
                    continue;
                }

                se = (ge >> na_cj_2log);

                /* Could the cluster se be in our list? */
                if (se >= cj_first && se <= cj_last)
                {
                    if (se < cj_first + ndirect)
                    {
                        /* We can calculate cj_ind directly from se */
                        found = cj_ind_first + se - cj_first;
                    }
                    else
                    {
                        /* Search for se using bisection */
                        found    = -1;
                        cj_ind_0 = cj_ind_first + ndirect;
                        cj_ind_1 = cj_ind_last + 1;
                        while (found == -1 && cj_ind_0 < cj_ind_1)
                        {
                            cj_ind_m = (cj_ind_0 + cj_ind_1)>>1;

                            cj_m = nbl->cj[cj_ind_m].cj;

                            if (se == cj_m)
                            {
                                found = cj_ind_m;
                            }
                            else if (se < cj_m)
                            {
                                cj_ind_1 = cj_ind_m;
                            }
                            else
                            {
                                cj_ind_0 = cj_ind_m + 1;
                            }
                        }
                    }

                    if (found >= 0)
                    {
                        inner_i = i  - (si << na_ci_2log);
                        inner_e = ge - (se << na_cj_2log);

                        nbl->cj[found].excl &= ~(1U<<((inner_i<<na_cj_2log) + inner_e));
                    }
                }
            }
        }
    }
}
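/* Note on the lookup above: j-cells within a ci entry are stored in
 * increasing cj order, so after the first ndirect directly indexable
 * cells a standard bisection on nbl->cj[].cj locates se in
 * O(log(ncj)) steps; found stays -1 when se has no list entry, in
 * which case there is no interaction bit to clear.
 */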
/* Add a new i-entry to the FEP list and copy the i-properties */
static gmx_inline void fep_list_new_nri_copy(t_nblist *nlist)
{
    /* Add a new i-entry */
    nlist->nri++;

    assert(nlist->nri < nlist->maxnri);

    /* Duplicate the last i-entry, except for jindex, which continues */
    nlist->iinr[nlist->nri]   = nlist->iinr[nlist->nri-1];
    nlist->shift[nlist->nri]  = nlist->shift[nlist->nri-1];
    nlist->gid[nlist->nri]    = nlist->gid[nlist->nri-1];
    nlist->jindex[nlist->nri] = nlist->nrj;
}

/* For load balancing of the free-energy lists over threads, we set
 * the maximum nrj size of an i-entry to 40. This leads to good
 * load balancing in the worst case scenario of a single perturbed
 * particle on 16 threads, while not introducing significant overhead.
 * Note that half of the perturbed pairs will anyhow end up in very small
 * lists, since non-perturbed i-particles will see few perturbed j-particles.
 */
const int max_nrj_fep = 40;
/* Exclude the perturbed pairs from the Verlet list. This is only done to avoid
 * singularities for overlapping particles (0/0), since the charges and
 * LJ parameters have been zeroed in the nbnxn data structure.
 * Simultaneously make a group pair list for the perturbed pairs.
 */
static void make_fep_list(const nbnxn_search_t    nbs,
                          const nbnxn_atomdata_t *nbat,
                          nbnxn_pairlist_t       *nbl,
                          gmx_bool                bDiagRemoved,
                          nbnxn_ci_t             *nbl_ci,
                          const nbnxn_grid_t     *gridi,
                          const nbnxn_grid_t     *gridj,
                          t_nblist               *nlist)
{
    int      ci, cj_ind_start, cj_ind_end, cja, cjr;
    int      nri_max;
    int      ngid, gid_i = 0, gid_j, gid;
    int      egp_shift, egp_mask;
    int      gid_cj = 0;
    int      ind_i, ind_j, ai, aj;
    int      nri;
    gmx_bool bFEP_i, bFEP_i_all;

    if (nbl_ci->cj_ind_end == nbl_ci->cj_ind_start)
    {
        /* Empty list */
        return;
    }

    ci = nbl_ci->ci;

    cj_ind_start = nbl_ci->cj_ind_start;
    cj_ind_end   = nbl_ci->cj_ind_end;

    /* In worst case we have alternating energy groups
     * and create #atom-pair lists, which means we need the size
     * of a cluster pair (na_ci*na_cj) times the number of cj's.
     */
    nri_max = nbl->na_ci*nbl->na_cj*(cj_ind_end - cj_ind_start);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }

    ngid = nbat->nenergrp;

    if (static_cast<std::size_t>(ngid*gridj->na_cj) > sizeof(gid_cj)*8)
    {
        gmx_fatal(FARGS, "The Verlet scheme with %dx%d kernels and free-energy only supports up to %d energy groups",
                  gridi->na_c, gridj->na_cj, (sizeof(gid_cj)*8)/gridj->na_cj);
    }

    egp_shift = nbat->neg_2log;
    egp_mask  = (1<<nbat->neg_2log) - 1;

    /* Loop over the atoms in the i sub-cell */
    bFEP_i_all = TRUE;
    for (int i = 0; i < nbl->na_ci; i++)
    {
        ind_i = ci*nbl->na_ci + i;
        ai    = nbs->a[ind_i];
        if (ai >= 0)
        {
            nri                  = nlist->nri;
            nlist->jindex[nri+1] = nlist->jindex[nri];
            nlist->iinr[nri]     = ai;
            /* The actual energy group pair index is set later */
            nlist->gid[nri]      = 0;
            nlist->shift[nri]    = nbl_ci->shift & NBNXN_CI_SHIFT;

            bFEP_i = gridi->fep[ci - gridi->cell0] & (1 << i);

            bFEP_i_all = bFEP_i_all && bFEP_i;

            if ((nlist->nrj + cj_ind_end - cj_ind_start)*nbl->na_cj > nlist->maxnrj)
            {
                nlist->maxnrj = over_alloc_small((nlist->nrj + cj_ind_end - cj_ind_start)*nbl->na_cj);
                srenew(nlist->jjnr,     nlist->maxnrj);
                srenew(nlist->excl_fep, nlist->maxnrj);
            }

            if (ngid > 1)
            {
                gid_i = (nbat->energrp[ci] >> (egp_shift*i)) & egp_mask;
            }

            for (int cj_ind = cj_ind_start; cj_ind < cj_ind_end; cj_ind++)
            {
                unsigned int fep_cj;

                cja = nbl->cj[cj_ind].cj;

                if (gridj->na_cj == gridj->na_c)
                {
                    cjr    = cja - gridj->cell0;
                    fep_cj = gridj->fep[cjr];
                    if (ngid > 1)
                    {
                        gid_cj = nbat->energrp[cja];
                    }
                }
                else if (2*gridj->na_cj == gridj->na_c)
                {
                    cjr    = cja - gridj->cell0*2;
                    /* Extract half of the ci fep/energrp mask */
                    fep_cj = (gridj->fep[cjr>>1] >> ((cjr&1)*gridj->na_cj)) & ((1<<gridj->na_cj) - 1);
                    if (ngid > 1)
                    {
                        gid_cj = nbat->energrp[cja>>1] >> ((cja&1)*gridj->na_cj*egp_shift) & ((1<<(gridj->na_cj*egp_shift)) - 1);
                    }
                }
                else
                {
                    cjr    = cja - (gridj->cell0>>1);
                    /* Combine two ci fep masks/energrp */
                    fep_cj = gridj->fep[cjr*2] + (gridj->fep[cjr*2+1] << gridj->na_c);
                    if (ngid > 1)
                    {
                        gid_cj = nbat->energrp[cja*2] + (nbat->energrp[cja*2+1] << (gridj->na_c*egp_shift));
                    }
                }

                if (bFEP_i || fep_cj != 0)
                {
                    for (int j = 0; j < nbl->na_cj; j++)
                    {
                        /* Is this interaction perturbed and not excluded? */
                        ind_j = cja*nbl->na_cj + j;
                        aj    = nbs->a[ind_j];
                        if (aj >= 0 &&
                            (bFEP_i || (fep_cj & (1 << j))) &&
                            (!bDiagRemoved || ind_j >= ind_i))
                        {
                            if (ngid > 1)
                            {
                                gid_j = (gid_cj >> (j*egp_shift)) & egp_mask;
                                gid   = GID(gid_i, gid_j, ngid);

                                if (nlist->nrj > nlist->jindex[nri] &&
                                    nlist->gid[nri] != gid)
                                {
                                    /* Energy group pair changed: new list */
                                    fep_list_new_nri_copy(nlist);
                                    nri = nlist->nri;
                                }
                                nlist->gid[nri] = gid;
                            }

                            if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                            {
                                fep_list_new_nri_copy(nlist);
                                nri = nlist->nri;
                            }

                            /* Add it to the FEP list */
                            nlist->jjnr[nlist->nrj]     = aj;
                            nlist->excl_fep[nlist->nrj] = (nbl->cj[cj_ind].excl >> (i*nbl->na_cj + j)) & 1;
                            nlist->nrj++;

                            /* Exclude it from the normal list.
                             * Note that the charge has been set to zero,
                             * but we need to avoid 0/0, as perturbed atoms
                             * can be on top of each other.
                             */
                            nbl->cj[cj_ind].excl &= ~(1U << (i*nbl->na_cj + j));
                        }
                    }
                }
            }

            if (nlist->nrj > nlist->jindex[nri])
            {
                /* Actually add this new, non-empty, list */
                nlist->nri++;
                nlist->jindex[nlist->nri] = nlist->nrj;
            }
        }
    }

    if (bFEP_i_all)
    {
        /* All interactions are perturbed, we can skip this entry */
        nbl_ci->cj_ind_end = cj_ind_start;
    }
}
/* Returns the index of a j-cluster within its cj4 group */
static gmx_inline int cj_mod_cj4(int cj)
{
    return cj & (NBNXN_GPU_JGROUP_SIZE - 1);
}

/* Convert a j-cluster index to a cj4 group index */
static gmx_inline int cj_to_cj4(int cj)
{
    return cj >> NBNXN_GPU_JGROUP_SIZE_2LOG;
}

/* Return the index of a j-atom within a warp */
static gmx_inline int a_mod_wj(int a)
{
    return a & (NBNXN_GPU_CLUSTER_SIZE/2 - 1);
}
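/* Example of the helpers above (with NBNXN_GPU_CLUSTER_SIZE = 8, so a
 * warp covers half a cluster pair): j-atom a = 5 maps to slot
 * a_mod_wj(5) = 5 & 3 = 1 in the exclusion masks of the second warp,
 * while j-cluster 7 sits at slot cj_mod_cj4(7) = 3 of cj4 group
 * cj_to_cj4(7) = 1.
 */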
/* As make_fep_list above, but for super/sub lists. */
static void make_fep_list_supersub(const nbnxn_search_t    nbs,
                                   const nbnxn_atomdata_t *nbat,
                                   nbnxn_pairlist_t       *nbl,
                                   gmx_bool                bDiagRemoved,
                                   const nbnxn_sci_t      *nbl_sci,
                                   real                    shx,
                                   real                    shy,
                                   real                    shz,
                                   real                    rlist_fep2,
                                   const nbnxn_grid_t     *gridi,
                                   const nbnxn_grid_t     *gridj,
                                   t_nblist               *nlist)
{
    int                sci, cj4_ind_start, cj4_ind_end, cjr;
    int                nri_max;
    int                c_abs;
    int                ind_i, ind_j, ai, aj;
    int                nri;
    gmx_bool           bFEP_i;
    real               xi, yi, zi;
    const nbnxn_cj4_t *cj4;

    if (nbl_sci->cj4_ind_end == nbl_sci->cj4_ind_start)
    {
        /* Empty list */
        return;
    }

    sci = nbl_sci->sci;

    cj4_ind_start = nbl_sci->cj4_ind_start;
    cj4_ind_end   = nbl_sci->cj4_ind_end;

    /* Here we process one super-cell, max #atoms na_sc, versus a list
     * cj4 entries, each with max NBNXN_GPU_JGROUP_SIZE cj's, each
     * of size na_cj atoms.
     * On the GPU we don't support energy groups (yet).
     * So for each of the na_sc i-atoms, we need max one FEP list
     * for each max_nrj_fep j-atoms.
     */
    nri_max = nbl->na_sc*nbl->na_cj*(1 + ((cj4_ind_end - cj4_ind_start)*NBNXN_GPU_JGROUP_SIZE)/max_nrj_fep);
    if (nlist->nri + nri_max > nlist->maxnri)
    {
        nlist->maxnri = over_alloc_large(nlist->nri + nri_max);
        reallocate_nblist(nlist);
    }

    /* Loop over the atoms in the i super-cluster */
    for (int c = 0; c < GPU_NSUBCELL; c++)
    {
        c_abs = sci*GPU_NSUBCELL + c;

        for (int i = 0; i < nbl->na_ci; i++)
        {
            ind_i = c_abs*nbl->na_ci + i;
            ai    = nbs->a[ind_i];
            if (ai >= 0)
            {
                nri                  = nlist->nri;
                nlist->jindex[nri+1] = nlist->jindex[nri];
                nlist->iinr[nri]     = ai;
                /* With GPUs, energy groups are not supported */
                nlist->gid[nri]      = 0;
                nlist->shift[nri]    = nbl_sci->shift & NBNXN_CI_SHIFT;

                bFEP_i = (gridi->fep[c_abs - gridi->cell0*GPU_NSUBCELL] & (1 << i));

                xi = nbat->x[ind_i*nbat->xstride+XX] + shx;
                yi = nbat->x[ind_i*nbat->xstride+YY] + shy;
                zi = nbat->x[ind_i*nbat->xstride+ZZ] + shz;

                if ((nlist->nrj + cj4_ind_end - cj4_ind_start)*NBNXN_GPU_JGROUP_SIZE*nbl->na_cj > nlist->maxnrj)
                {
                    nlist->maxnrj = over_alloc_small((nlist->nrj + cj4_ind_end - cj4_ind_start)*NBNXN_GPU_JGROUP_SIZE*nbl->na_cj);
                    srenew(nlist->jjnr,     nlist->maxnrj);
                    srenew(nlist->excl_fep, nlist->maxnrj);
                }

                for (int cj4_ind = cj4_ind_start; cj4_ind < cj4_ind_end; cj4_ind++)
                {
                    cj4 = &nbl->cj4[cj4_ind];

                    for (int gcj = 0; gcj < NBNXN_GPU_JGROUP_SIZE; gcj++)
                    {
                        unsigned int fep_cj;

                        if ((cj4->imei[0].imask & (1U << (gcj*GPU_NSUBCELL + c))) == 0)
                        {
                            /* Skip this ci for this cj */
                            continue;
                        }

                        cjr = cj4->cj[gcj] - gridj->cell0*GPU_NSUBCELL;

                        fep_cj = gridj->fep[cjr];

                        if (bFEP_i || fep_cj != 0)
                        {
                            for (int j = 0; j < nbl->na_cj; j++)
                            {
                                /* Is this interaction perturbed and not excluded? */
                                ind_j = (gridj->cell0*GPU_NSUBCELL + cjr)*nbl->na_cj + j;
                                aj    = nbs->a[ind_j];
                                if (aj >= 0 &&
                                    (bFEP_i || (fep_cj & (1 << j))) &&
                                    (!bDiagRemoved || ind_j >= ind_i))
                                {
                                    nbnxn_excl_t *excl;
                                    int           excl_pair;
                                    unsigned int  excl_bit;
                                    real          dx, dy, dz;

                                    get_nbl_exclusions_1(nbl, cj4_ind, j>>2, &excl);

                                    excl_pair = a_mod_wj(j)*nbl->na_ci + i;
                                    excl_bit  = (1U << (gcj*GPU_NSUBCELL + c));

                                    dx = nbat->x[ind_j*nbat->xstride+XX] - xi;
                                    dy = nbat->x[ind_j*nbat->xstride+YY] - yi;
                                    dz = nbat->x[ind_j*nbat->xstride+ZZ] - zi;

                                    /* The unpruned GPU list has more than 2/3
                                     * of the atom pairs beyond rlist. Using
                                     * this list will cause a lot of overhead
                                     * in the CPU FEP kernels, especially
                                     * relative to the fast GPU kernels.
                                     * So we prune the FEP list here.
                                     */
                                    if (dx*dx + dy*dy + dz*dz < rlist_fep2)
                                    {
                                        if (nlist->nrj - nlist->jindex[nri] >= max_nrj_fep)
                                        {
                                            fep_list_new_nri_copy(nlist);
                                            nri = nlist->nri;
                                        }

                                        /* Add it to the FEP list */
                                        nlist->jjnr[nlist->nrj]     = aj;
                                        nlist->excl_fep[nlist->nrj] = (excl->pair[excl_pair] & excl_bit) ? 1 : 0;
                                        nlist->nrj++;
                                    }

                                    /* Exclude it from the normal list.
                                     * Note that the charge and LJ parameters have
                                     * been set to zero, but we need to avoid 0/0,
                                     * as perturbed atoms can be on top of each other.
                                     */
                                    excl->pair[excl_pair] &= ~excl_bit;
                                }
                            }

                            /* Note that we could mask out this pair in imask
                             * if all i- and/or all j-particles are perturbed.
                             * But since the perturbed pairs on the CPU will
                             * take an order of magnitude more time, the GPU
                             * will finish before the CPU and there is no gain.
                             */
                        }
                    }
                }

                if (nlist->nrj > nlist->jindex[nri])
                {
                    /* Actually add this new, non-empty, list */
                    nlist->nri++;
                    nlist->jindex[nlist->nri] = nlist->nrj;
                }
            }
        }
    }
}
/* Set all atom-pair exclusions from the topology stored in excl
 * as masks in the pair-list for i-super-cell entry nbl_sci
 */
static void set_sci_top_excls(const nbnxn_search_t nbs,
                              nbnxn_pairlist_t    *nbl,
                              gmx_bool             diagRemoved,
                              int                  na_c_2log,
                              const nbnxn_sci_t   *nbl_sci,
                              const t_blocka      *excl)
{
    const int    *cell;
    int           na_c;
    int           sci;
    int           cj_ind_first, cj_ind_last;
    int           cj_first, cj_last;
    int           ndirect;
    int           ai, aj, si, ge, se;
    int           found, cj_ind_0, cj_ind_1, cj_ind_m;
    int           cj_m;
    nbnxn_excl_t *nbl_excl;
    int           inner_i, inner_e, w;

    cell = nbs->cell;

    na_c = nbl->na_ci;

    if (nbl_sci->cj4_ind_end == nbl_sci->cj4_ind_start)
    {
        /* Empty list */
        return;
    }

    sci = nbl_sci->sci;

    cj_ind_first = nbl_sci->cj4_ind_start*NBNXN_GPU_JGROUP_SIZE;
    cj_ind_last  = nbl->work->cj_ind - 1;

    cj_first = nbl->cj4[nbl_sci->cj4_ind_start].cj[0];
    cj_last  = nbl_cj(nbl, cj_ind_last);

    /* Determine how many contiguous j-clusters we have starting
     * from the first i-cluster. This number can be used to directly
     * calculate j-cluster indices for excluded atoms.
     */
    ndirect = 0;
    while (cj_ind_first + ndirect <= cj_ind_last &&
           nbl_cj(nbl, cj_ind_first+ndirect) == sci*GPU_NSUBCELL + ndirect)
    {
        ndirect++;
    }

    /* Loop over the atoms in the i super-cell */
    for (int i = 0; i < nbl->na_sc; i++)
    {
        ai = nbs->a[sci*nbl->na_sc+i];
        if (ai >= 0)
        {
            si = (i>>na_c_2log);

            /* Loop over the topology-based exclusions for this i-atom */
            for (int eind = excl->index[ai]; eind < excl->index[ai+1]; eind++)
            {
                aj = excl->a[eind];

                if (aj == ai)
                {
                    /* The self exclusions are already set, save some time */
                    continue;
                }

                ge = cell[aj];

                /* Without shifts we only calculate interactions j>i
                 * for one-way pair-lists.
                 */
                if (diagRemoved && ge <= sci*nbl->na_sc + i)
                {
                    continue;
                }

                se = (ge >> na_c_2log);

                /* Could the cluster se be in our list? */
                if (se >= cj_first && se <= cj_last)
                {
                    if (se < cj_first + ndirect)
                    {
                        /* We can calculate cj_ind directly from se */
                        found = cj_ind_first + se - cj_first;
                    }
                    else
                    {
                        /* Search for se using bisection */
                        found    = -1;
                        cj_ind_0 = cj_ind_first + ndirect;
                        cj_ind_1 = cj_ind_last + 1;
                        while (found == -1 && cj_ind_0 < cj_ind_1)
                        {
                            cj_ind_m = (cj_ind_0 + cj_ind_1)>>1;

                            cj_m = nbl_cj(nbl, cj_ind_m);

                            if (se == cj_m)
                            {
                                found = cj_ind_m;
                            }
                            else if (se < cj_m)
                            {
                                cj_ind_1 = cj_ind_m;
                            }
                            else
                            {
                                cj_ind_0 = cj_ind_m + 1;
                            }
                        }
                    }

                    if (found >= 0)
                    {
                        inner_i = i  - si*na_c;
                        inner_e = ge - se*na_c;

                        if (nbl_imask0(nbl, found) & (1U << (cj_mod_cj4(found)*GPU_NSUBCELL + si)))
                        {
                            w = (inner_e >> 2);

                            get_nbl_exclusions_1(nbl, cj_to_cj4(found), w, &nbl_excl);

                            nbl_excl->pair[a_mod_wj(inner_e)*nbl->na_ci+inner_i] &=
                                ~(1U << (cj_mod_cj4(found)*GPU_NSUBCELL + si));
                        }
                    }
                }
            }
        }
    }
}
/* Reallocate the simple ci list for at least n entries */
static void nb_realloc_ci(nbnxn_pairlist_t *nbl, int n)
{
    nbl->ci_nalloc = over_alloc_small(n);
    nbnxn_realloc_void((void **)&nbl->ci,
                       nbl->nci*sizeof(*nbl->ci),
                       nbl->ci_nalloc*sizeof(*nbl->ci),
                       nbl->alloc, nbl->free);
}

/* Reallocate the super-cell sci list for at least n entries */
static void nb_realloc_sci(nbnxn_pairlist_t *nbl, int n)
{
    nbl->sci_nalloc = over_alloc_small(n);
    nbnxn_realloc_void((void **)&nbl->sci,
                       nbl->nsci*sizeof(*nbl->sci),
                       nbl->sci_nalloc*sizeof(*nbl->sci),
                       nbl->alloc, nbl->free);
}
/* Make a new ci entry at index nbl->nci */
static void new_ci_entry(nbnxn_pairlist_t *nbl, int ci, int shift, int flags)
{
    if (nbl->nci + 1 > nbl->ci_nalloc)
    {
        nb_realloc_ci(nbl, nbl->nci+1);
    }
    nbl->ci[nbl->nci].ci           = ci;
    nbl->ci[nbl->nci].shift        = shift;
    /* Store the interaction flags along with the shift */
    nbl->ci[nbl->nci].shift       |= flags;
    nbl->ci[nbl->nci].cj_ind_start = nbl->ncj;
    nbl->ci[nbl->nci].cj_ind_end   = nbl->ncj;
}

/* Make a new sci entry at index nbl->nsci */
static void new_sci_entry(nbnxn_pairlist_t *nbl, int sci, int shift)
{
    if (nbl->nsci + 1 > nbl->sci_nalloc)
    {
        nb_realloc_sci(nbl, nbl->nsci+1);
    }
    nbl->sci[nbl->nsci].sci           = sci;
    nbl->sci[nbl->nsci].shift         = shift;
    nbl->sci[nbl->nsci].cj4_ind_start = nbl->ncj4;
    nbl->sci[nbl->nsci].cj4_ind_end   = nbl->ncj4;
}
/* Sort the simple j-list cj on exclusions.
 * Entries with exclusions will all be sorted to the beginning of the list.
 */
static void sort_cj_excl(nbnxn_cj_t *cj, int ncj,
                         nbnxn_list_work_t *work)
{
    int jnew;

    if (ncj > work->cj_nalloc)
    {
        work->cj_nalloc = over_alloc_large(ncj);
        srenew(work->cj, work->cj_nalloc);
    }

    /* Make a list of the j-cells involving exclusions */
    jnew = 0;
    for (int j = 0; j < ncj; j++)
    {
        if (cj[j].excl != NBNXN_INTERACTION_MASK_ALL)
        {
            work->cj[jnew++] = cj[j];
        }
    }
    /* Check if there are exclusions at all or not just the first entry */
    if (!((jnew == 0) ||
          (jnew == 1 && cj[0].excl != NBNXN_INTERACTION_MASK_ALL)))
    {
        for (int j = 0; j < ncj; j++)
        {
            if (cj[j].excl == NBNXN_INTERACTION_MASK_ALL)
            {
                work->cj[jnew++] = cj[j];
            }
        }
        for (int j = 0; j < ncj; j++)
        {
            cj[j] = work->cj[j];
        }
    }
}
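/* Design note: this is a stable two-pass partition through the work
 * buffer, not a general sort. With the (rare) entries carrying
 * exclusion masks grouped at the front, a kernel only needs to apply
 * per-pair exclusion handling to a prefix of the j-list and can run
 * the remainder without exclusion checks.
 */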
/* Close this simple list i entry */
static void close_ci_entry_simple(nbnxn_pairlist_t *nbl)
{
    int jlen;

    /* All content of the new ci entry has already been filled correctly,
     * we only need to increase the count here (for non-empty lists).
     */
    jlen = nbl->ci[nbl->nci].cj_ind_end - nbl->ci[nbl->nci].cj_ind_start;
    if (jlen > 0)
    {
        sort_cj_excl(nbl->cj+nbl->ci[nbl->nci].cj_ind_start, jlen, nbl->work);

        /* The counts below are used for non-bonded pair/flop counts
         * and should therefore match the available kernel setups.
         */
        if (!(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_COUL(0)))
        {
            nbl->work->ncj_noq += jlen;
        }
        else if ((nbl->ci[nbl->nci].shift & NBNXN_CI_HALF_LJ(0)) ||
                 !(nbl->ci[nbl->nci].shift & NBNXN_CI_DO_LJ(0)))
        {
            nbl->work->ncj_hlj += jlen;
        }

        nbl->nci++;
    }
}
/* Split sci entry for load balancing on the GPU.
 * Splitting ensures we have enough lists to fully utilize the whole GPU.
 * With progBal we generate progressively smaller lists, which improves
 * load balancing. As we only know the current count on our own thread,
 * we will need to estimate the current total amount of i-entries.
 * As the lists get concatenated later, this estimate depends
 * both on nthread and our own thread index.
 */
static void split_sci_entry(nbnxn_pairlist_t *nbl,
                            int nsp_target_av,
                            gmx_bool progBal, int nsp_tot_est,
                            int thread, int nthread)
{
    int nsp_max;
    int cj4_start, cj4_end, j4len;
    int sci;
    int nsp, nsp_sci, nsp_cj4, nsp_cj4_e, nsp_cj4_p;

    if (progBal)
    {
        int nsp_est;

        /* Estimate the total numbers of ci's of the nblist combined
         * over all threads using the target number of ci's.
         */
        nsp_est = (nsp_tot_est*thread)/nthread + nbl->nci_tot;

        /* The first ci blocks should be larger, to avoid overhead.
         * The last ci blocks should be smaller, to improve load balancing.
         * The factor 3/2 makes the first block 3/2 times the target average
         * and ensures that the total number of blocks ends up equal to
         * that of equally sized blocks of size nsp_target_av.
         */
        nsp_max = nsp_target_av*nsp_tot_est*3/(2*(nsp_est + nsp_tot_est));
    }
    else
    {
        nsp_max = nsp_target_av;
    }

    /* Since nsp_max is a maximum/cut-off (this avoids high outliers,
     * which lead to load imbalance), not an average, we add half the
     * number of pairs in a cj4 block to get the average about right.
     */
    nsp_max += GPU_NSUBCELL*NBNXN_GPU_JGROUP_SIZE/2;

    cj4_start = nbl->sci[nbl->nsci-1].cj4_ind_start;
    cj4_end   = nbl->sci[nbl->nsci-1].cj4_ind_end;
    j4len     = cj4_end - cj4_start;

    if (j4len > 1 && j4len*GPU_NSUBCELL*NBNXN_GPU_JGROUP_SIZE > nsp_max)
    {
        /* Remove the last ci entry and process the cj4's again */
        nbl->nsci -= 1;

        sci       = nbl->nsci;
        nsp       = 0;
        nsp_sci   = 0;
        nsp_cj4_e = 0;
        nsp_cj4   = 0;
        for (int cj4 = cj4_start; cj4 < cj4_end; cj4++)
        {
            nsp_cj4_p = nsp_cj4;
            /* Count the number of cluster pairs in this cj4 group */
            nsp_cj4   = 0;
            for (int p = 0; p < GPU_NSUBCELL*NBNXN_GPU_JGROUP_SIZE; p++)
            {
                nsp_cj4 += (nbl->cj4[cj4].imei[0].imask >> p) & 1;
            }

            /* Check if we should split at this cj4 to get a list of size nsp */
            if (nsp > 0 && nsp + nsp_cj4 > nsp_max)
            {
                /* Split the list at cj4 */
                nbl->sci[sci].cj4_ind_end = cj4;
                /* Create a new sci entry */
                sci++;
                nbl->nsci++;
                if (nbl->nsci+1 > nbl->sci_nalloc)
                {
                    nb_realloc_sci(nbl, nbl->nsci+1);
                }
                nbl->sci[sci].sci           = nbl->sci[nbl->nsci-1].sci;
                nbl->sci[sci].shift         = nbl->sci[nbl->nsci-1].shift;
                nbl->sci[sci].cj4_ind_start = cj4;
                nsp_sci                     = nsp;
                nsp_cj4_e                   = nsp_cj4_p;
                nsp                         = 0;
            }
            nsp += nsp_cj4;
        }

        /* Put the remaining cj4's in the last sci entry */
        nbl->sci[sci].cj4_ind_end = cj4_end;

        /* Possibly balance out the last two sci's
         * by moving the last cj4 of the second last sci.
         */
        if (nsp_sci - nsp_cj4_e >= nsp + nsp_cj4_e)
        {
            nbl->sci[sci-1].cj4_ind_end--;
            nbl->sci[sci].cj4_ind_start--;
        }

        nbl->nsci++;
    }
}
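/* Worked example of the nsp_max formula above (illustrative numbers):
 * with nsp_target_av = 100, nsp_tot_est = 1000 and a thread that has
 * not generated any pairs yet (nsp_est ~= 0), the first list gets
 * nsp_max ~= 100*1000*3/(2*1000) = 150, i.e. 3/2 times the target,
 * while late lists (nsp_est -> nsp_tot_est) shrink towards
 * 100*3/4 = 75, before the half-cj4 correction is added.
 */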
/* Close this super/sub list i entry */
static void close_ci_entry_supersub(nbnxn_pairlist_t *nbl,
                                    int nsp_max_av,
                                    gmx_bool progBal, int nsp_tot_est,
                                    int thread, int nthread)
{
    /* All content of the new ci entry has already been filled correctly,
     * we only need to increase the count here (for non-empty lists).
     */
    int j4len = nbl->sci[nbl->nsci].cj4_ind_end - nbl->sci[nbl->nsci].cj4_ind_start;
    if (j4len > 0)
    {
        /* We can only have complete blocks of 4 j-entries in a list,
         * so round the count up before closing.
         */
        nbl->ncj4         = ((nbl->work->cj_ind + NBNXN_GPU_JGROUP_SIZE - 1) >> NBNXN_GPU_JGROUP_SIZE_2LOG);
        nbl->work->cj_ind = nbl->ncj4*NBNXN_GPU_JGROUP_SIZE;

        nbl->nsci++;

        if (nsp_max_av > 0)
        {
            /* Measure the size of the new entry and potentially split it */
            split_sci_entry(nbl, nsp_max_av, progBal, nsp_tot_est,
                            thread, nthread);
        }
    }
}
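/* Illustrative numbers only: with NBNXN_GPU_JGROUP_SIZE = 4
 * (NBNXN_GPU_JGROUP_SIZE_2LOG = 2), a work->cj_ind of 13 rounds up to
 * ncj4 = (13 + 3) >> 2 = 4 complete cj4 groups, and cj_ind is then reset
 * to 4*4 = 16, the first j-entry slot of the next group.
 */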
/* Syncs the working array before adding another grid pair to the list */
static void sync_work(nbnxn_pairlist_t *nbl)
{
    if (!nbl->bSimple)
    {
        nbl->work->cj_ind   = nbl->ncj4*NBNXN_GPU_JGROUP_SIZE;
        nbl->work->cj4_init = nbl->ncj4;
    }
}
/* Clears an nbnxn_pairlist_t data structure */
static void clear_pairlist(nbnxn_pairlist_t *nbl)
{
    nbl->nci           = 0;
    nbl->nsci          = 0;
    nbl->ncj           = 0;
    nbl->ncj4          = 0;
    nbl->nci_tot       = 0;
    nbl->nexcl         = 1;

    nbl->work->ncj_noq = 0;
    nbl->work->ncj_hlj = 0;
}
/* Clears a group scheme pair list */
static void clear_pairlist_fep(t_nblist *nl)
{
    nl->nri = 0;
    nl->nrj = 0;
    if (nl->jindex == NULL)
    {
        snew(nl->jindex, 1);
    }
    nl->jindex[0] = 0;
}
/* Sets a simple list i-cell bounding box, including PBC shift */
static gmx_inline void set_icell_bb_simple(const nbnxn_bb_t *bb, int ci,
                                           real shx, real shy, real shz,
                                           nbnxn_bb_t *bb_ci)
{
    bb_ci->lower[BB_X] = bb[ci].lower[BB_X] + shx;
    bb_ci->lower[BB_Y] = bb[ci].lower[BB_Y] + shy;
    bb_ci->lower[BB_Z] = bb[ci].lower[BB_Z] + shz;
    bb_ci->upper[BB_X] = bb[ci].upper[BB_X] + shx;
    bb_ci->upper[BB_Y] = bb[ci].upper[BB_Y] + shy;
    bb_ci->upper[BB_Z] = bb[ci].upper[BB_Z] + shz;
}
/* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
static void set_icell_bbxxxx_supersub(const float *bb, int ci,
                                      real shx, real shy, real shz,
                                      float *bb_ci)
{
    int ia = ci*(GPU_NSUBCELL>>STRIDE_PBB_2LOG)*NNBSBB_XXXX;
    for (int m = 0; m < (GPU_NSUBCELL>>STRIDE_PBB_2LOG)*NNBSBB_XXXX; m += NNBSBB_XXXX)
    {
        for (int i = 0; i < STRIDE_PBB; i++)
        {
            bb_ci[m+0*STRIDE_PBB+i] = bb[ia+m+0*STRIDE_PBB+i] + shx;
            bb_ci[m+1*STRIDE_PBB+i] = bb[ia+m+1*STRIDE_PBB+i] + shy;
            bb_ci[m+2*STRIDE_PBB+i] = bb[ia+m+2*STRIDE_PBB+i] + shz;
            bb_ci[m+3*STRIDE_PBB+i] = bb[ia+m+3*STRIDE_PBB+i] + shx;
            bb_ci[m+4*STRIDE_PBB+i] = bb[ia+m+4*STRIDE_PBB+i] + shy;
            bb_ci[m+5*STRIDE_PBB+i] = bb[ia+m+5*STRIDE_PBB+i] + shz;
        }
    }
}
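/* A sketch of the packed bounding-box layout assumed above, for the common
 * case STRIDE_PBB = 4 (so NNBSBB_XXXX = 2*DIM*STRIDE_PBB = 24): per group
 * of 4 sub-cells the corner coordinates are stored transposed,
 *     xxxx yyyy zzzz (lower corners) xxxx yyyy zzzz (upper corners)
 * so element m+2*STRIDE_PBB+i is the lower z bound of sub-cell i. That is
 * why rows 2 and 5 get shz and the rows before them get shx and shy.
 */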
/* Sets the super-cell and sub-cell bounding boxes, including the PBC shift */
static void set_icell_bb_supersub(const nbnxn_bb_t *bb, int ci,
                                  real shx, real shy, real shz,
                                  nbnxn_bb_t *bb_ci)
{
    for (int i = 0; i < GPU_NSUBCELL; i++)
    {
        set_icell_bb_simple(bb, ci*GPU_NSUBCELL+i,
                            shx, shy, shz,
                            &bb_ci[i]);
    }
}
/* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
static void icell_set_x_simple(int ci,
                               real shx, real shy, real shz,
                               int gmx_unused na_c,
                               int stride, const real *x,
                               nbnxn_list_work_t *work)
{
    int ia = ci*NBNXN_CPU_CLUSTER_I_SIZE;

    for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE; i++)
    {
        work->x_ci[i*STRIDE_XYZ+XX] = x[(ia+i)*stride+XX] + shx;
        work->x_ci[i*STRIDE_XYZ+YY] = x[(ia+i)*stride+YY] + shy;
        work->x_ci[i*STRIDE_XYZ+ZZ] = x[(ia+i)*stride+ZZ] + shz;
    }
}
/* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
static void icell_set_x_supersub(int ci,
                                 real shx, real shy, real shz,
                                 int na_c,
                                 int stride, const real *x,
                                 nbnxn_list_work_t *work)
{
    real *x_ci = work->x_ci;

    int   ia   = ci*GPU_NSUBCELL*na_c;
    for (int i = 0; i < GPU_NSUBCELL*na_c; i++)
    {
        x_ci[i*DIM + XX] = x[(ia+i)*stride + XX] + shx;
        x_ci[i*DIM + YY] = x[(ia+i)*stride + YY] + shy;
        x_ci[i*DIM + ZZ] = x[(ia+i)*stride + ZZ] + shz;
    }
}
#ifdef NBNXN_SEARCH_BB_SIMD4
/* Copies PBC shifted super-cell packed atom coordinates to working array */
static void icell_set_x_supersub_simd4(int ci,
                                       real shx, real shy, real shz,
                                       int na_c,
                                       int stride, const real *x,
                                       nbnxn_list_work_t *work)
{
    real *x_ci = work->x_ci;

    for (int si = 0; si < GPU_NSUBCELL; si++)
    {
        for (int i = 0; i < na_c; i += STRIDE_PBB)
        {
            int io = si*na_c + i;
            int ia = ci*GPU_NSUBCELL*na_c + io;
            for (int j = 0; j < STRIDE_PBB; j++)
            {
                x_ci[io*DIM + j + XX*STRIDE_PBB] = x[(ia+j)*stride+XX] + shx;
                x_ci[io*DIM + j + YY*STRIDE_PBB] = x[(ia+j)*stride+YY] + shy;
                x_ci[io*DIM + j + ZZ*STRIDE_PBB] = x[(ia+j)*stride+ZZ] + shz;
            }
        }
    }
}
#endif /* NBNXN_SEARCH_BB_SIMD4 */
static real minimum_subgrid_size_xy(const nbnxn_grid_t *grid)
{
    if (grid->bSimple)
    {
        return std::min(grid->sx, grid->sy);
    }
    else
    {
        return std::min(grid->sx/GPU_NSUBCELL_X, grid->sy/GPU_NSUBCELL_Y);
    }
}
static real effective_buffer_1x1_vs_MxN(const nbnxn_grid_t *gridi,
                                        const nbnxn_grid_t *gridj)
{
    const real eff_1x1_buffer_fac_overest = 0.1;

    /* Determine an atom-pair list cut-off buffer size for atom pairs,
     * to be added to rlist (including buffer) used for MxN.
     * This is for converting an MxN list to a 1x1 list. This means we can't
     * use the normal buffer estimate, as we have an MxN list in which
     * some atom pairs beyond rlist are missing. We want to capture
     * the beneficial effect of buffering by extra pairs just outside rlist,
     * while removing the useless pairs that are further away from rlist.
     * (Also, the buffer could have been set manually, without the estimate.)
     * This buffer size is an overestimate.
     * We add 10% of the smallest grid sub-cell dimensions.
     * Note that the z-size differs per cell and we don't use this,
     * so we overestimate.
     * With PME, the 10% value gives a buffer that is somewhat larger
     * than the effective buffer with a tolerance of 0.005 kJ/mol/ps.
     * Smaller tolerances or using RF lead to a smaller effective buffer,
     * so 10% gives a safe overestimate.
     */
    return eff_1x1_buffer_fac_overest*(minimum_subgrid_size_xy(gridi) +
                                       minimum_subgrid_size_xy(gridj));
}
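/* Illustrative numbers only: with sub-cell x/y sizes of about 0.5 nm on
 * both grids, minimum_subgrid_size_xy() returns 0.5 for each grid and the
 * extra 1x1 buffer becomes 0.1*(0.5 + 0.5) = 0.1 nm on top of rlist.
 */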
/* Clusters at the cut-off only increase rlist by 60% of their size */
static real nbnxn_rlist_inc_outside_fac = 0.6;

/* Due to the cluster size the effective pair-list is longer than
 * that of a simple atom pair-list. This function gives the extra distance.
 */
real nbnxn_get_rlist_effective_inc(int cluster_size_j, real atom_density)
{
    int  cluster_size_i;
    real vol_inc_i, vol_inc_j;

    /* We should get this from the setup, but currently it's the same for
     * all setups, including GPUs.
     */
    cluster_size_i = NBNXN_CPU_CLUSTER_I_SIZE;

    vol_inc_i = (cluster_size_i - 1)/atom_density;
    vol_inc_j = (cluster_size_j - 1)/atom_density;

    return nbnxn_rlist_inc_outside_fac*std::pow(static_cast<real>(vol_inc_i + vol_inc_j), static_cast<real>(1.0/3.0));
}
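/* Illustrative numbers only: for a j-cluster size of 4 and a typical water
 * density of about 100 atoms/nm^3, vol_inc_i = vol_inc_j = 3/100 = 0.03 nm^3,
 * so the returned increment is 0.6*(0.06)^(1/3), roughly 0.23 nm added to
 * the effective list radius.
 */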
/* Estimates the interaction volume^2 for non-local interactions */
static real nonlocal_vol2(const struct gmx_domdec_zones_t *zones, rvec ls, real r)
{
    real cl, ca, za;
    real vold_est;
    real vol2_est_tot;

    vol2_est_tot = 0;

    /* Here we simply add up the non-home interaction volume^2 of the one,
     * two or three zones of a 1D decomposition. As these volumes are not
     * additive, this is an overestimate, but it would only be significant
     * in the limit of small cells, where we anyhow need to split the lists
     * into as small parts as possible.
     */
    for (int z = 0; z < zones->n; z++)
    {
        if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
        {
            cl = 0;
            ca = 1;
            za = 1;
            for (int d = 0; d < DIM; d++)
            {
                if (zones->shift[z][d] == 0)
                {
                    cl += 0.5*ls[d];
                    ca *= ls[d];
                    za *= zones->size[z].x1[d] - zones->size[z].x0[d];
                }
            }

            /* 4 octants of a sphere */
            vold_est  = 0.25*M_PI*r*r*r*r;
            /* 4 quarter pie slices on the edges */
            vold_est += 4*cl*M_PI/6.0*r*r*r;
            /* One rectangular volume on a face */
            vold_est += ca*0.5*r*r;

            vol2_est_tot += vold_est*za;
        }
    }

    return vol2_est_tot;
}
/* Estimates the average size of a full j-list for super/sub setup */
static void get_nsubpair_target(const nbnxn_search_t nbs,
                                int                  iloc,
                                real                 rlist,
                                int                  min_ci_balanced,
                                int                 *nsubpair_target,
                                int                 *nsubpair_tot_est)
{
    /* The target value of 36 seems to be the optimum for Kepler.
     * Maxwell is less sensitive to the exact value.
     */
    const int           nsubpair_target_min = 36;
    const nbnxn_grid_t *grid;
    rvec                ls;
    real                xy_diag2, r_eff_sup, vol_est, nsp_est, nsp_est_nl;

    grid = &nbs->grid[0];

    if (min_ci_balanced <= 0 || grid->nc >= min_ci_balanced || grid->nc == 0)
    {
        /* We don't need to balance the list sizes */
        *nsubpair_target  = 0;
        *nsubpair_tot_est = 0;

        return;
    }

    ls[XX] = (grid->c1[XX] - grid->c0[XX])/(grid->ncx*GPU_NSUBCELL_X);
    ls[YY] = (grid->c1[YY] - grid->c0[YY])/(grid->ncy*GPU_NSUBCELL_Y);
    ls[ZZ] = grid->na_c/(grid->atom_density*ls[XX]*ls[YY]);

    /* The average squared length of the diagonal of a sub cell */
    xy_diag2 = ls[XX]*ls[XX] + ls[YY]*ls[YY] + ls[ZZ]*ls[ZZ];

    /* The formulas below are a heuristic estimate of the average nsj per si */
    r_eff_sup = rlist + nbnxn_rlist_inc_outside_fac*sqr((grid->na_c - 1.0)/grid->na_c)*std::sqrt(xy_diag2/3);

    if (!nbs->DomDec || nbs->zones->n == 1)
    {
        nsp_est_nl = 0;
    }
    else
    {
        nsp_est_nl =
            sqr(grid->atom_density/grid->na_c)*
            nonlocal_vol2(nbs->zones, ls, r_eff_sup);
    }

    if (LOCAL_I(iloc))
    {
        /* Sub-cell interacts with itself */
        vol_est  = ls[XX]*ls[YY]*ls[ZZ];
        /* 6/2 rectangular volume on the faces */
        vol_est += (ls[XX]*ls[YY] + ls[XX]*ls[ZZ] + ls[YY]*ls[ZZ])*r_eff_sup;
        /* 12/2 quarter pie slices on the edges */
        vol_est += 2*(ls[XX] + ls[YY] + ls[ZZ])*0.25*M_PI*sqr(r_eff_sup);
        /* 4 octants of a sphere */
        vol_est += 0.5*4.0/3.0*M_PI*std::pow(r_eff_sup, static_cast<real>(3));

        /* Estimate the number of cluster pairs as the local number of
         * clusters times the volume they interact with times the density.
         */
        nsp_est = grid->nsubc_tot*vol_est*grid->atom_density/grid->na_c;

        /* Subtract the non-local pair count */
        nsp_est -= nsp_est_nl;

        /* For small cut-offs nsp_est will be an underestimate.
         * With DD nsp_est_nl is an overestimate so nsp_est can get negative.
         * So to avoid too small or negative nsp_est we set a minimum of
         * all cells interacting with all 3^3 direct neighbors (3^3-1)/2+1=14.
         * This might be a slight overestimate for small non-periodic groups of
         * atoms as will occur for a local domain with DD, but for small
         * groups of atoms we'll anyhow be limited by nsubpair_target_min,
         * so this overestimation will not matter.
         */
        nsp_est = std::max(nsp_est, grid->nsubc_tot*static_cast<real>(14));

        if (debug)
        {
            fprintf(debug, "nsp_est local %5.1f non-local %5.1f\n",
                    nsp_est, nsp_est_nl);
        }
    }
    else
    {
        nsp_est = nsp_est_nl;
    }

    /* Thus the (average) maximum j-list size should be as follows.
     * Since there is overhead, we shouldn't make the lists too small
     * (and we can't chop up j-groups) so we use a minimum target size of 36.
     */
    *nsubpair_target  = std::max(nsubpair_target_min,
                                 static_cast<int>(nsp_est/min_ci_balanced + 0.5));
    *nsubpair_tot_est = static_cast<int>(nsp_est);

    if (debug)
    {
        fprintf(debug, "nbl nsp estimate %.1f, nsubpair_target %d\n",
                nsp_est, *nsubpair_target);
    }
}
/* Debug list print function */
static void print_nblist_ci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
{
    for (int i = 0; i < nbl->nci; i++)
    {
        fprintf(fp, "ci %4d shift %2d ncj %3d\n",
                nbl->ci[i].ci, nbl->ci[i].shift,
                nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start);

        for (int j = nbl->ci[i].cj_ind_start; j < nbl->ci[i].cj_ind_end; j++)
        {
            fprintf(fp, " cj %5d imask %x\n",
                    nbl->cj[j].cj,
                    nbl->cj[j].excl);
        }
    }
}
/* Debug list print function */
static void print_nblist_sci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
{
    for (int i = 0; i < nbl->nsci; i++)
    {
        fprintf(fp, "ci %4d shift %2d ncj4 %2d\n",
                nbl->sci[i].sci, nbl->sci[i].shift,
                nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start);

        int ncp = 0;
        for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
        {
            for (int j = 0; j < NBNXN_GPU_JGROUP_SIZE; j++)
            {
                fprintf(fp, " sj %5d imask %x\n",
                        nbl->cj4[j4].cj[j],
                        nbl->cj4[j4].imei[0].imask);
                for (int si = 0; si < GPU_NSUBCELL; si++)
                {
                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*GPU_NSUBCELL + si)))
                    {
                        ncp++;
                    }
                }
            }
        }
        fprintf(fp, "ci %4d shift %2d ncj4 %2d ncp %3d\n",
                nbl->sci[i].sci, nbl->sci[i].shift,
                nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start,
                ncp);
    }
}
/* Combine pair lists *nbl generated on multiple threads nblc */
static void combine_nblists(int nnbl, nbnxn_pairlist_t **nbl,
                            nbnxn_pairlist_t *nblc)
{
    int nsci, ncj4, nexcl;

    if (nblc->bSimple)
    {
        gmx_incons("combine_nblists does not support simple lists");
    }

    nsci  = nblc->nsci;
    ncj4  = nblc->ncj4;
    nexcl = nblc->nexcl;
    for (int i = 0; i < nnbl; i++)
    {
        nsci  += nbl[i]->nsci;
        ncj4  += nbl[i]->ncj4;
        nexcl += nbl[i]->nexcl;
    }

    if (nsci > nblc->sci_nalloc)
    {
        nb_realloc_sci(nblc, nsci);
    }
    if (ncj4 > nblc->cj4_nalloc)
    {
        nblc->cj4_nalloc = over_alloc_small(ncj4);
        nbnxn_realloc_void((void **)&nblc->cj4,
                           nblc->ncj4*sizeof(*nblc->cj4),
                           nblc->cj4_nalloc*sizeof(*nblc->cj4),
                           nblc->alloc, nblc->free);
    }
    if (nexcl > nblc->excl_nalloc)
    {
        nblc->excl_nalloc = over_alloc_small(nexcl);
        nbnxn_realloc_void((void **)&nblc->excl,
                           nblc->nexcl*sizeof(*nblc->excl),
                           nblc->excl_nalloc*sizeof(*nblc->excl),
                           nblc->alloc, nblc->free);
    }

    /* Each thread should copy its own data to the combined arrays,
     * as otherwise data will go back and forth between different caches.
     */
#if (defined GMX_OPENMP) && !(defined __clang_analyzer__)
    // cppcheck-suppress unreadVariable
    int nthreads = gmx_omp_nthreads_get(emntPairsearch);
#endif
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int n = 0; n < nnbl; n++)
    {
        int                     sci_offset;
        int                     cj4_offset;
        int                     excl_offset;
        const nbnxn_pairlist_t *nbli;

        /* Determine the offset in the combined data for our thread */
        sci_offset  = nblc->nsci;
        cj4_offset  = nblc->ncj4;
        excl_offset = nblc->nexcl;

        for (int i = 0; i < n; i++)
        {
            sci_offset  += nbl[i]->nsci;
            cj4_offset  += nbl[i]->ncj4;
            excl_offset += nbl[i]->nexcl;
        }

        nbli = nbl[n];

        for (int i = 0; i < nbli->nsci; i++)
        {
            nblc->sci[sci_offset+i]                = nbli->sci[i];
            nblc->sci[sci_offset+i].cj4_ind_start += cj4_offset;
            nblc->sci[sci_offset+i].cj4_ind_end   += cj4_offset;
        }

        for (int j4 = 0; j4 < nbli->ncj4; j4++)
        {
            nblc->cj4[cj4_offset+j4]                   = nbli->cj4[j4];
            nblc->cj4[cj4_offset+j4].imei[0].excl_ind += excl_offset;
            nblc->cj4[cj4_offset+j4].imei[1].excl_ind += excl_offset;
        }

        for (int j4 = 0; j4 < nbli->nexcl; j4++)
        {
            nblc->excl[excl_offset+j4] = nbli->excl[j4];
        }
    }

    for (int n = 0; n < nnbl; n++)
    {
        nblc->nsci    += nbl[n]->nsci;
        nblc->ncj4    += nbl[n]->ncj4;
        nblc->nci_tot += nbl[n]->nci_tot;
        nblc->nexcl   += nbl[n]->nexcl;
    }
}
static void balance_fep_lists(const nbnxn_search_t  nbs,
                              nbnxn_pairlist_set_t *nbl_lists)
{
    int       nnbl;
    int       nri_tot, nrj_tot, nrj_target;
    int       th_dest;
    t_nblist *nbld;

    nnbl = nbl_lists->nnbl;

    if (nnbl == 1)
    {
        /* Nothing to balance */
        return;
    }

    /* Count the total i-lists and pairs */
    nri_tot = 0;
    nrj_tot = 0;
    for (int th = 0; th < nnbl; th++)
    {
        nri_tot += nbl_lists->nbl_fep[th]->nri;
        nrj_tot += nbl_lists->nbl_fep[th]->nrj;
    }

    nrj_target = (nrj_tot + nnbl - 1)/nnbl;

    assert(gmx_omp_nthreads_get(emntNonbonded) == nnbl);

#pragma omp parallel for schedule(static) num_threads(nnbl)
    for (int th = 0; th < nnbl; th++)
    {
        t_nblist *nbl;

        nbl = nbs->work[th].nbl_fep;

        /* Note that here we allocate for the total size, instead of
         * a per-thread estimate (which is hard to obtain).
         */
        if (nri_tot > nbl->maxnri)
        {
            nbl->maxnri = over_alloc_large(nri_tot);
            reallocate_nblist(nbl);
        }
        if (nri_tot > nbl->maxnri || nrj_tot > nbl->maxnrj)
        {
            nbl->maxnrj = over_alloc_small(nrj_tot);
            srenew(nbl->jjnr, nbl->maxnrj);
            srenew(nbl->excl_fep, nbl->maxnrj);
        }

        clear_pairlist_fep(nbl);
    }

    /* Loop over the source lists and assign and copy i-entries */
    th_dest = 0;
    nbld    = nbs->work[th_dest].nbl_fep;
    for (int th = 0; th < nnbl; th++)
    {
        t_nblist *nbls;

        nbls = nbl_lists->nbl_fep[th];

        for (int i = 0; i < nbls->nri; i++)
        {
            int nrj;

            /* The number of pairs in this i-entry */
            nrj = nbls->jindex[i+1] - nbls->jindex[i];

            /* Decide if list th_dest is too large and we should proceed
             * to the next destination list.
             */
            if (th_dest+1 < nnbl && nbld->nrj > 0 &&
                nbld->nrj + nrj - nrj_target > nrj_target - nbld->nrj)
            {
                th_dest++;
                nbld = nbs->work[th_dest].nbl_fep;
            }

            nbld->iinr[nbld->nri]  = nbls->iinr[i];
            nbld->gid[nbld->nri]   = nbls->gid[i];
            nbld->shift[nbld->nri] = nbls->shift[i];

            for (int j = nbls->jindex[i]; j < nbls->jindex[i+1]; j++)
            {
                nbld->jjnr[nbld->nrj]     = nbls->jjnr[j];
                nbld->excl_fep[nbld->nrj] = nbls->excl_fep[j];
                nbld->nrj++;
            }
            nbld->nri++;
            nbld->jindex[nbld->nri] = nbld->nrj;
        }
    }

    /* Swap the list pointers */
    for (int th = 0; th < nnbl; th++)
    {
        t_nblist *nbl_tmp;

        nbl_tmp                = nbl_lists->nbl_fep[th];
        nbl_lists->nbl_fep[th] = nbs->work[th].nbl_fep;
        nbs->work[th].nbl_fep  = nbl_tmp;

        if (debug)
        {
            fprintf(debug, "nbl_fep[%d] nri %4d nrj %4d\n",
                    th,
                    nbl_lists->nbl_fep[th]->nri,
                    nbl_lists->nbl_fep[th]->nrj);
        }
    }
}
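/* Illustrative numbers only: with nrj_tot = 1000 pairs spread over nnbl = 8
 * lists, nrj_target = (1000 + 7)/8 = 125. An i-entry is moved to the next
 * destination list as soon as keeping it would overshoot 125 by more than
 * dropping it would undershoot, so the final list sizes stay within about
 * one i-entry of balanced.
 */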
/* Returns the next ci to be processed by our thread */
static gmx_bool next_ci(const nbnxn_grid_t *grid,
                        int conv,
                        int nth, int ci_block,
                        int *ci_x, int *ci_y,
                        int *ci_b, int *ci)
{
    (*ci_b)++;
    (*ci)++;

    if (*ci_b == ci_block)
    {
        /* Jump to the next block assigned to this task */
        *ci   += (nth - 1)*ci_block;
        *ci_b  = 0;
    }

    if (*ci >= grid->nc*conv)
    {
        return FALSE;
    }

    while (*ci >= grid->cxy_ind[*ci_x*grid->ncy + *ci_y + 1]*conv)
    {
        *ci_y += 1;
        if (*ci_y == grid->ncy)
        {
            *ci_x += 1;
            *ci_y  = 0;
        }
    }

    return TRUE;
}
/* Returns the distance^2 for which we put cell pairs in the list
 * without checking atom pair distances. This is usually < rlist^2.
 */
static float boundingbox_only_distance2(const nbnxn_grid_t *gridi,
                                        const nbnxn_grid_t *gridj,
                                        real                rlist,
                                        gmx_bool            simple)
{
    /* If the distance between two sub-cell bounding boxes is less
     * than this distance, do not check the distance between
     * all particle pairs in the sub-cell, since then it is likely
     * that the box pair has atom pairs within the cut-off.
     * We use the nblist cut-off minus 0.5 times the average x/y diagonal
     * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
     * Using more than 0.5 gains at most 0.5%.
     * If forces are calculated more than twice, the performance gain
     * in the force calculation outweighs the cost of checking.
     * Note that with sub-cell lists, the atom-pair distance check
     * is only performed when only 1 out of 8 sub-cells is within range;
     * this is because the GPU is much faster than the CPU.
     */
    real bbx, bby;
    real rbb2;

    bbx = 0.5*(gridi->sx + gridj->sx);
    bby = 0.5*(gridi->sy + gridj->sy);
    if (!simple)
    {
        bbx /= GPU_NSUBCELL_X;
        bby /= GPU_NSUBCELL_Y;
    }

    rbb2 = std::max(0.0, rlist - 0.5*std::sqrt(bbx*bbx + bby*bby));
    rbb2 = rbb2*rbb2;

#ifndef GMX_DOUBLE
    return rbb2;
#else
    return (float)((1+GMX_FLOAT_EPS)*rbb2);
#endif
}
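/* Illustrative numbers only: for rlist = 1.0 nm and average sub-cell x/y
 * sizes bbx = bby = 0.25 nm, the threshold is
 *     (1.0 - 0.5*sqrt(0.25^2 + 0.25^2))^2 = (1.0 - 0.177)^2, about 0.68 nm^2,
 * so bounding-box pairs closer than sqrt(0.68), about 0.82 nm, are accepted
 * without any per-atom distance checks.
 */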
static int get_ci_block_size(const nbnxn_grid_t *gridi,
                             gmx_bool bDomDec, int nth)
{
    const int ci_block_enum      = 5;
    const int ci_block_denom     = 11;
    const int ci_block_min_atoms = 16;
    int       ci_block;

    /* Here we decide how to distribute the blocks over the threads.
     * We use prime numbers to try to avoid that the grid size becomes
     * a multiple of the number of threads, which would lead to some
     * threads getting "inner" pairs and others getting boundary pairs,
     * which in turn will lead to load imbalance between threads.
     * Set the block size as 5/11/ntask times the average number of cells
     * in a y,z slab. This should ensure a quite uniform distribution
     * of the grid parts over the different threads along all three grid
     * zone boundaries with 3D domain decomposition. At the same time
     * the blocks will not become too small.
     */
    ci_block = (gridi->nc*ci_block_enum)/(ci_block_denom*gridi->ncx*nth);

    /* Ensure the blocks are not too small: avoids cache invalidation */
    if (ci_block*gridi->na_sc < ci_block_min_atoms)
    {
        ci_block = (ci_block_min_atoms + gridi->na_sc - 1)/gridi->na_sc;
    }

    /* Without domain decomposition
     * or with less than 3 blocks per task, divide in nth blocks.
     */
    if (!bDomDec || nth*3*ci_block > gridi->nc)
    {
        ci_block = (gridi->nc + nth - 1)/nth;
    }

    if (ci_block > 1 && (nth - 1)*ci_block >= gridi->nc)
    {
        /* Some threads have no work. Although reducing the block size
         * does not decrease the block count on the first few threads,
         * with GPUs better mixing of "upper" cells that have more empty
         * clusters results in a somewhat lower max load over all threads.
         * Without GPUs the regime of so few atoms per thread is less
         * performance relevant, but with 8-wide SIMD the same reasoning
         * applies, since the pair list uses 4 i-atom "sub-clusters".
         */
        ci_block--;
    }

    return ci_block;
}
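/* Illustrative numbers only: for a local grid with nc = 4000 cells,
 * ncx = 20 columns and nth = 8 threads, the prime-based formula gives
 *     ci_block = 4000*5/(11*20*8) = 11 cells per block,
 * well below nc/nth = 500, so each thread visits many scattered blocks
 * and boundary work is spread evenly over the threads.
 */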
/* Generates the part of pair-list nbl assigned to our thread */
static void nbnxn_make_pairlist_part(const nbnxn_search_t nbs,
                                     const nbnxn_grid_t *gridi,
                                     const nbnxn_grid_t *gridj,
                                     nbnxn_search_work_t *work,
                                     const nbnxn_atomdata_t *nbat,
                                     const t_blocka *excl,
                                     real rlist,
                                     int nb_kernel_type,
                                     int ci_block,
                                     gmx_bool bFBufferFlag,
                                     int nsubpair_max,
                                     gmx_bool progBal,
                                     int nsubpair_tot_est,
                                     int th, int nth,
                                     nbnxn_pairlist_t *nbl,
                                     t_nblist *nbl_fep)
{
    matrix            box;
    real              rl2, rl_fep2 = 0;
    float             rbb2;
    int               ci_b, ci, ci_x, ci_y, ci_xy, cj;
    ivec              shp;
    int               shift;
    real              shx, shy, shz;
    int               conv_i, cell0_i;
    const nbnxn_bb_t *bb_i = NULL;
#ifdef NBNXN_BBXXXX
    const float      *pbb_i = NULL;
#endif
    const float      *bbcz_i, *bbcz_j;
    const int        *flags_i;
    real              bx0, bx1, by0, by1, bz0, bz1;
    real              bz1_frac;
    real              d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
    int               cxf, cxl, cyf, cyf_x, cyl;
    int               c0, c1, cs, cf, cl;
    int               na_cj_2log;
    int               ndistc;
    int               ncpcheck;
    int               gridi_flag_shift = 0, gridj_flag_shift = 0;
    gmx_bitmask_t    *gridj_flag       = NULL;
    int               ncj_old_i, ncj_old_j;

    nbs_cycle_start(&work->cc[enbsCCsearch]);

    if (gridj->bSimple != nbl->bSimple)
    {
        gmx_incons("Grid incompatible with pair-list");
    }

    sync_work(nbl);
    nbl->na_sc = gridj->na_sc;
    nbl->na_ci = gridj->na_c;
    nbl->na_cj = nbnxn_kernel_to_cluster_j_size(nb_kernel_type);
    na_cj_2log = get_2log(nbl->na_cj);

    nbl->rlist = rlist;

    if (bFBufferFlag)
    {
        /* Determine conversion of clusters to flag blocks */
        gridi_flag_shift = 0;
        while ((nbl->na_ci<<gridi_flag_shift) < NBNXN_BUFFERFLAG_SIZE)
        {
            gridi_flag_shift++;
        }
        gridj_flag_shift = 0;
        while ((nbl->na_cj<<gridj_flag_shift) < NBNXN_BUFFERFLAG_SIZE)
        {
            gridj_flag_shift++;
        }

        gridj_flag = work->buffer_flags.flag;
    }

    copy_mat(nbs->box, box);

    rl2 = nbl->rlist*nbl->rlist;

    if (nbs->bFEP && !nbl->bSimple)
    {
        /* Determine an atom-pair list cut-off distance for FEP atom pairs.
         * We should not simply use rlist, since then we would not have
         * the small, effective buffering of the NxN lists.
         * The buffer is an overestimate, but the resulting cost for pairs
         * beyond rlist is negligible compared to the FEP pairs within rlist.
         */
        rl_fep2 = nbl->rlist + effective_buffer_1x1_vs_MxN(gridi, gridj);

        if (debug)
        {
            fprintf(debug, "nbl_fep atom-pair rlist %f\n", rl_fep2);
        }
        rl_fep2 = rl_fep2*rl_fep2;
    }

    rbb2 = boundingbox_only_distance2(gridi, gridj, nbl->rlist, nbl->bSimple);

    if (debug)
    {
        fprintf(debug, "nbl bounding box only distance %f\n", std::sqrt(rbb2));
    }

    /* Set the shift range */
    for (int d = 0; d < DIM; d++)
    {
        /* Check if we need periodicity shifts.
         * Without PBC or with domain decomposition we don't need them.
         */
        if (d >= ePBC2npbcdim(nbs->ePBC) || nbs->dd_dim[d])
        {
            shp[d] = 0;
        }
        else
        {
            if (d == XX &&
                box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < std::sqrt(rl2))
            {
                shp[d] = 2;
            }
            else
            {
                shp[d] = 1;
            }
        }
    }

    if (nbl->bSimple && !gridi->bSimple)
    {
        conv_i  = gridi->na_sc/gridj->na_sc;
        bb_i    = gridi->bb_simple;
        bbcz_i  = gridi->bbcz_simple;
        flags_i = gridi->flags_simple;
    }
    else
    {
        conv_i  = 1;
#ifdef NBNXN_BBXXXX
        if (gridi->bSimple)
        {
            bb_i = gridi->bb;
        }
        else
        {
            pbb_i = gridi->pbb;
        }
#else
        /* We use the normal bounding box format for both grid types */
        bb_i  = gridi->bb;
#endif
        bbcz_i  = gridi->bbcz;
        flags_i = gridi->flags;
    }
    cell0_i = gridi->cell0*conv_i;

    bbcz_j = gridj->bbcz;

    if (conv_i != 1)
    {
        /* Blocks of the conversion factor - 1 give a large repeat count
         * combined with a small block size. This should result in good
         * load balancing for both small and large domains.
         */
        ci_block = conv_i - 1;
    }
    if (debug)
    {
        fprintf(debug, "nbl nc_i %d col.av. %.1f ci_block %d\n",
                gridi->nc, gridi->nc/(double)(gridi->ncx*gridi->ncy), ci_block);
    }

    ndistc   = 0;
    ncpcheck = 0;

    /* Initially set ci_b and ci to 1 before where we want them to start,
     * as they will both be incremented in next_ci.
     */
    ci_b = -1;
    ci   = th*ci_block - 1;
    ci_x = 0;
    ci_y = 0;
    while (next_ci(gridi, conv_i, nth, ci_block, &ci_x, &ci_y, &ci_b, &ci))
    {
        if (nbl->bSimple && flags_i[ci] == 0)
        {
            continue;
        }

        ncj_old_i = nbl->ncj;

        d2cx = 0;
        if (gridj != gridi && shp[XX] == 0)
        {
            if (nbl->bSimple)
            {
                bx1 = bb_i[ci].upper[BB_X];
            }
            else
            {
                bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx;
            }
            if (bx1 < gridj->c0[XX])
            {
                d2cx = sqr(gridj->c0[XX] - bx1);

                if (d2cx >= rl2)
                {
                    continue;
                }
            }
        }

        ci_xy = ci_x*gridi->ncy + ci_y;

        /* Loop over shift vectors in three dimensions */
        for (int tz = -shp[ZZ]; tz <= shp[ZZ]; tz++)
        {
            shz = tz*box[ZZ][ZZ];

            bz0 = bbcz_i[ci*NNBSBB_D  ] + shz;
            bz1 = bbcz_i[ci*NNBSBB_D+1] + shz;

            if (tz == 0)
            {
                d2z = 0;
            }
            else if (tz < 0)
            {
                d2z = sqr(bz1);
            }
            else
            {
                d2z = sqr(bz0 - box[ZZ][ZZ]);
            }

            d2z_cx = d2z + d2cx;

            if (d2z_cx >= rl2)
            {
                continue;
            }

            bz1_frac = bz1/(gridi->cxy_ind[ci_xy+1] - gridi->cxy_ind[ci_xy]);
            if (bz1_frac < 0)
            {
                bz1_frac = 0;
            }
            /* The check with bz1_frac close to or larger than 1 comes later */

            for (int ty = -shp[YY]; ty <= shp[YY]; ty++)
            {
                shy = ty*box[YY][YY] + tz*box[ZZ][YY];

                if (nbl->bSimple)
                {
                    by0 = bb_i[ci].lower[BB_Y] + shy;
                    by1 = bb_i[ci].upper[BB_Y] + shy;
                }
                else
                {
                    by0 = gridi->c0[YY] + (ci_y  )*gridi->sy + shy;
                    by1 = gridi->c0[YY] + (ci_y+1)*gridi->sy + shy;
                }

                get_cell_range(by0, by1,
                               gridj->ncy, gridj->c0[YY], gridj->sy, gridj->inv_sy,
                               d2z_cx, rl2,
                               &cyf, &cyl);

                if (cyf > cyl)
                {
                    continue;
                }

                d2z_cy = d2z;
                if (by1 < gridj->c0[YY])
                {
                    d2z_cy += sqr(gridj->c0[YY] - by1);
                }
                else if (by0 > gridj->c1[YY])
                {
                    d2z_cy += sqr(by0 - gridj->c1[YY]);
                }

                for (int tx = -shp[XX]; tx <= shp[XX]; tx++)
                {
                    shift = XYZ2IS(tx, ty, tz);

#ifdef NBNXN_SHIFT_BACKWARD
                    if (gridi == gridj && shift > CENTRAL)
                    {
                        continue;
                    }
#endif

                    shx = tx*box[XX][XX] + ty*box[YY][XX] + tz*box[ZZ][XX];

                    if (nbl->bSimple)
                    {
                        bx0 = bb_i[ci].lower[BB_X] + shx;
                        bx1 = bb_i[ci].upper[BB_X] + shx;
                    }
                    else
                    {
                        bx0 = gridi->c0[XX] + (ci_x  )*gridi->sx + shx;
                        bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx + shx;
                    }

                    get_cell_range(bx0, bx1,
                                   gridj->ncx, gridj->c0[XX], gridj->sx, gridj->inv_sx,
                                   d2z_cy, rl2,
                                   &cxf, &cxl);

                    if (cxf > cxl)
                    {
                        continue;
                    }

                    if (nbl->bSimple)
                    {
                        new_ci_entry(nbl, cell0_i+ci, shift, flags_i[ci]);
                    }
                    else
                    {
                        new_sci_entry(nbl, cell0_i+ci, shift);
                    }

#ifndef NBNXN_SHIFT_BACKWARD
                    if (cxf < ci_x)
#else
                    if (shift == CENTRAL && gridi == gridj &&
                        cxf < ci_x)
#endif
                    {
                        /* Leave the pairs with i > j.
                         * x is the major index, so skip half of it.
                         */
                        cxf = ci_x;
                    }

                    if (nbl->bSimple)
                    {
                        set_icell_bb_simple(bb_i, ci, shx, shy, shz,
                                            nbl->work->bb_ci);
                    }
                    else
                    {
#ifdef NBNXN_BBXXXX
                        set_icell_bbxxxx_supersub(pbb_i, ci, shx, shy, shz,
                                                  nbl->work->pbb_ci);
#else
                        set_icell_bb_supersub(bb_i, ci, shx, shy, shz,
                                              nbl->work->bb_ci);
#endif
                    }

                    nbs->icell_set_x(cell0_i+ci, shx, shy, shz,
                                     gridi->na_c, nbat->xstride, nbat->x,
                                     nbl->work);

                    for (int cx = cxf; cx <= cxl; cx++)
                    {
                        d2zx = d2z;
                        if (gridj->c0[XX] + cx*gridj->sx > bx1)
                        {
                            d2zx += sqr(gridj->c0[XX] + cx*gridj->sx - bx1);
                        }
                        else if (gridj->c0[XX] + (cx+1)*gridj->sx < bx0)
                        {
                            d2zx += sqr(gridj->c0[XX] + (cx+1)*gridj->sx - bx0);
                        }

#ifndef NBNXN_SHIFT_BACKWARD
                        if (gridi == gridj &&
                            cx == 0 && cyf < ci_y)
#else
                        if (gridi == gridj &&
                            cx == 0 && shift == CENTRAL && cyf < ci_y)
#endif
                        {
                            /* Leave the pairs with i > j.
                             * Skip half of y when i and j have the same x.
                             */
                            cyf_x = ci_y;
                        }
                        else
                        {
                            cyf_x = cyf;
                        }

                        for (int cy = cyf_x; cy <= cyl; cy++)
                        {
                            c0 = gridj->cxy_ind[cx*gridj->ncy+cy];
                            c1 = gridj->cxy_ind[cx*gridj->ncy+cy+1];
#ifdef NBNXN_SHIFT_BACKWARD
                            if (gridi == gridj &&
                                shift == CENTRAL && c0 < ci)
                            {
                                c0 = ci;
                            }
#endif

                            d2zxy = d2zx;
                            if (gridj->c0[YY] + cy*gridj->sy > by1)
                            {
                                d2zxy += sqr(gridj->c0[YY] + cy*gridj->sy - by1);
                            }
                            else if (gridj->c0[YY] + (cy+1)*gridj->sy < by0)
                            {
                                d2zxy += sqr(gridj->c0[YY] + (cy+1)*gridj->sy - by0);
                            }
                            if (c1 > c0 && d2zxy < rl2)
                            {
                                cs = c0 + static_cast<int>(bz1_frac*(c1 - c0));
                                if (cs >= c1)
                                {
                                    cs = c1 - 1;
                                }

                                d2xy = d2zxy - d2z;

                                /* Find the lowest cell that can possibly
                                 * be within range.
                                 */
                                cf = cs;
                                while (cf > c0 &&
                                       (bbcz_j[cf*NNBSBB_D+1] >= bz0 ||
                                        d2xy + sqr(bbcz_j[cf*NNBSBB_D+1] - bz0) < rl2))
                                {
                                    cf--;
                                }

                                /* Find the highest cell that can possibly
                                 * be within range.
                                 */
                                cl = cs;
                                while (cl < c1-1 &&
                                       (bbcz_j[cl*NNBSBB_D] <= bz1 ||
                                        d2xy + sqr(bbcz_j[cl*NNBSBB_D] - bz1) < rl2))
                                {
                                    cl++;
                                }

#ifdef NBNXN_REFCODE
                                {
                                    /* Simple reference code, for debugging,
                                     * overrides the more complex code above.
                                     */
                                    cf = c1;
                                    cl = -1;
                                    for (int k = c0; k < c1; k++)
                                    {
                                        if (box_dist2(bx0, bx1, by0, by1, bz0, bz1, bb+k) < rl2 &&
                                            k < cf)
                                        {
                                            cf = k;
                                        }
                                        if (box_dist2(bx0, bx1, by0, by1, bz0, bz1, bb+k) < rl2 &&
                                            k > cl)
                                        {
                                            cl = k;
                                        }
                                    }
                                }
#endif

                                if (gridi == gridj)
                                {
                                    /* We want each atom/cell pair only once,
                                     * only use cj >= ci.
                                     */
#ifndef NBNXN_SHIFT_BACKWARD
                                    cf = std::max(cf, ci);
#else
                                    if (shift == CENTRAL)
                                    {
                                        cf = std::max(cf, ci);
                                    }
#endif
                                }

                                if (cf <= cl)
                                {
                                    /* For f buffer flags with simple lists */
                                    ncj_old_j = nbl->ncj;

                                    switch (nb_kernel_type)
                                    {
                                        case nbnxnk4x4_PlainC:
                                            check_subcell_list_space_simple(nbl, cl-cf+1);

                                            make_cluster_list_simple(gridj,
                                                                     nbl, ci, cf, cl,
                                                                     (gridi == gridj && shift == CENTRAL),
                                                                     nbat->x,
                                                                     rl2, rbb2,
                                                                     &ndistc);
                                            break;
#ifdef GMX_NBNXN_SIMD_4XN
                                        case nbnxnk4xN_SIMD_4xN:
                                            check_subcell_list_space_simple(nbl, ci_to_cj(na_cj_2log, cl-cf)+2);
                                            make_cluster_list_simd_4xn(gridj,
                                                                       nbl, ci, cf, cl,
                                                                       (gridi == gridj && shift == CENTRAL),
                                                                       nbat->x,
                                                                       rl2, rbb2,
                                                                       &ndistc);
                                            break;
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
                                        case nbnxnk4xN_SIMD_2xNN:
                                            check_subcell_list_space_simple(nbl, ci_to_cj(na_cj_2log, cl-cf)+2);
                                            make_cluster_list_simd_2xnn(gridj,
                                                                        nbl, ci, cf, cl,
                                                                        (gridi == gridj && shift == CENTRAL),
                                                                        nbat->x,
                                                                        rl2, rbb2,
                                                                        &ndistc);
                                            break;
#endif
                                        case nbnxnk8x8x8_PlainC:
                                        case nbnxnk8x8x8_GPU:
                                            check_subcell_list_space_supersub(nbl, cl-cf+1);
                                            for (cj = cf; cj <= cl; cj++)
                                            {
                                                make_cluster_list_supersub(gridi, gridj,
                                                                           nbl, ci, cj,
                                                                           (gridi == gridj && shift == CENTRAL && ci == cj),
                                                                           nbat->xstride, nbat->x,
                                                                           rl2, rbb2,
                                                                           &ndistc);
                                            }
                                            break;
                                    }
                                    ncpcheck += cl - cf + 1;

                                    if (bFBufferFlag && nbl->ncj > ncj_old_j)
                                    {
                                        int cbf = nbl->cj[ncj_old_j].cj >> gridj_flag_shift;
                                        int cbl = nbl->cj[nbl->ncj-1].cj >> gridj_flag_shift;
                                        for (int cb = cbf; cb <= cbl; cb++)
                                        {
                                            bitmask_init_bit(&gridj_flag[cb], th);
                                        }
                                    }
                                }
                            }
                        }
                    }

                    /* Set the exclusions for this ci list */
                    if (nbl->bSimple)
                    {
                        set_ci_top_excls(nbs,
                                         nbl,
                                         shift == CENTRAL && gridi == gridj,
                                         na_cj_2log,
                                         &(nbl->ci[nbl->nci]),
                                         excl);

                        if (nbs->bFEP)
                        {
                            make_fep_list(nbs, nbat, nbl,
                                          shift == CENTRAL && gridi == gridj,
                                          &(nbl->ci[nbl->nci]),
                                          gridi, gridj, nbl_fep);
                        }
                    }
                    else
                    {
                        set_sci_top_excls(nbs,
                                          nbl,
                                          shift == CENTRAL && gridi == gridj,
                                          gridj->na_c_2log,
                                          &(nbl->sci[nbl->nsci]),
                                          excl);

                        if (nbs->bFEP)
                        {
                            make_fep_list_supersub(nbs, nbat, nbl,
                                                   shift == CENTRAL && gridi == gridj,
                                                   &(nbl->sci[nbl->nsci]),
                                                   shx, shy, shz,
                                                   rl_fep2,
                                                   gridi, gridj, nbl_fep);
                        }
                    }

                    /* Close this ci list */
                    if (nbl->bSimple)
                    {
                        close_ci_entry_simple(nbl);
                    }
                    else
                    {
                        close_ci_entry_supersub(nbl,
                                                nsubpair_max,
                                                progBal, nsubpair_tot_est,
                                                th, nth);
                    }
                }
            }
        }

        if (bFBufferFlag && nbl->ncj > ncj_old_i)
        {
            bitmask_init_bit(&(work->buffer_flags.flag[(gridi->cell0+ci)>>gridi_flag_shift]), th);
        }
    }

    work->ndistc = ndistc;

    nbs_cycle_stop(&work->cc[enbsCCsearch]);

    if (debug)
    {
        fprintf(debug, "number of distance checks %d\n", ndistc);
        fprintf(debug, "ncpcheck %s %d\n", gridi == gridj ? "local" : "non-local",
                ncpcheck);

        if (nbl->bSimple)
        {
            print_nblist_statistics_simple(debug, nbl, nbs, rlist);
        }
        else
        {
            print_nblist_statistics_supersub(debug, nbl, nbs, rlist);
        }

        if (nbs->bFEP)
        {
            fprintf(debug, "nbl FEP list pairs: %d\n", nbl_fep->nrj);
        }
    }
}
static void reduce_buffer_flags(const nbnxn_search_t        nbs,
                                int                         nsrc,
                                const nbnxn_buffer_flags_t *dest)
{
    for (int s = 0; s < nsrc; s++)
    {
        gmx_bitmask_t *flag = nbs->work[s].buffer_flags.flag;

        for (int b = 0; b < dest->nflag; b++)
        {
            bitmask_union(&(dest->flag[b]), flag[b]);
        }
    }
}
static void print_reduction_cost(const nbnxn_buffer_flags_t *flags, int nout)
{
    int           nelem, nkeep, ncopy, nred, out;
    gmx_bitmask_t mask_0;

    nelem = 0;
    nkeep = 0;
    ncopy = 0;
    nred  = 0;
    bitmask_init_bit(&mask_0, 0);
    for (int b = 0; b < flags->nflag; b++)
    {
        if (bitmask_is_equal(flags->flag[b], mask_0))
        {
            /* Only flag 0 is set, no copy or reduction required */
            nelem++;
            nkeep++;
        }
        else if (!bitmask_is_zero(flags->flag[b]))
        {
            int c = 0;
            for (out = 0; out < nout; out++)
            {
                if (bitmask_is_set(flags->flag[b], out))
                {
                    c++;
                }
            }
            nelem += c;
            if (c == 1)
            {
                ncopy++;
            }
            else
            {
                nred += c;
            }
        }
    }

    fprintf(debug, "nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
            flags->nflag, nout,
            nelem/(double)(flags->nflag),
            nkeep/(double)(flags->nflag),
            ncopy/(double)(flags->nflag),
            nred/(double)(flags->nflag));
}
/* Perform a count (linear) sort to sort the smaller lists to the end.
 * This avoids load imbalance on the GPU, as large lists will be
 * scheduled and executed first and the smaller lists later.
 * Load balancing between multi-processors only happens at the end
 * and there smaller lists lead to more effective load balancing.
 * The sorting is done on the cj4 count, not on the actual pair counts.
 * Not only does this make the sort faster, but it also results in
 * better load balancing than using a list sorted on exact load.
 * This function swaps the pointer in the pair list to avoid a copy operation.
 */
static void sort_sci(nbnxn_pairlist_t *nbl)
{
    nbnxn_list_work_t *work;
    int                m, s0, s1;
    nbnxn_sci_t       *sci_sort;

    if (nbl->ncj4 <= nbl->nsci)
    {
        /* nsci = 0 or all sci have size 1, sorting won't change the order */
        return;
    }

    work = nbl->work;

    /* We will distinguish differences up to double the average */
    m = (2*nbl->ncj4)/nbl->nsci;

    if (m + 1 > work->sort_nalloc)
    {
        work->sort_nalloc = over_alloc_large(m + 1);
        srenew(work->sort, work->sort_nalloc);
    }

    if (work->sci_sort_nalloc != nbl->sci_nalloc)
    {
        work->sci_sort_nalloc = nbl->sci_nalloc;
        nbnxn_realloc_void((void **)&work->sci_sort,
                           0,
                           work->sci_sort_nalloc*sizeof(*work->sci_sort),
                           nbl->alloc, nbl->free);
    }

    /* Count the entries of each size */
    for (int i = 0; i <= m; i++)
    {
        work->sort[i] = 0;
    }
    for (int s = 0; s < nbl->nsci; s++)
    {
        int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
        work->sort[i]++;
    }
    /* Calculate the offset for each count */
    s0            = work->sort[m];
    work->sort[m] = 0;
    for (int i = m - 1; i >= 0; i--)
    {
        s1            = work->sort[i];
        work->sort[i] = work->sort[i + 1] + s0;
        s0            = s1;
    }

    /* Sort entries directly into place */
    sci_sort = work->sci_sort;
    for (int s = 0; s < nbl->nsci; s++)
    {
        int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
        sci_sort[work->sort[i]++] = nbl->sci[s];
    }

    /* Swap the sci pointers so we use the new, sorted list */
    work->sci_sort = nbl->sci;
    nbl->sci       = sci_sort;
}
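/* Illustrative numbers only: for nsci = 4 entries with cj4 counts
 * 1, 3, 2, 2 (ncj4 = 8), m = 2*8/4 = 4 and the count histogram over sizes
 * 0..4 is [0,1,2,1,0]. Accumulating offsets from the largest size down
 * gives sort[3] = 0, sort[2] = 1 and sort[1] = 3, so the size-3 entry is
 * placed first and the size-1 entry last, which is exactly the
 * large-lists-first order the GPU scheduler wants.
 */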
/* Make a local or non-local pair-list, depending on iloc */
void nbnxn_make_pairlist(const nbnxn_search_t  nbs,
                         nbnxn_atomdata_t     *nbat,
                         const t_blocka       *excl,
                         real                  rlist,
                         int                   min_ci_balanced,
                         nbnxn_pairlist_set_t *nbl_list,
                         int                   iloc,
                         int                   nb_kernel_type,
                         t_nrnb               *nrnb)
{
    nbnxn_grid_t      *gridi, *gridj;
    gmx_bool           bGPUCPU;
    int                nzi, zj0 = 0, zj1 = 0;
    int                nsubpair_target, nsubpair_tot_est;
    int                nnbl;
    nbnxn_pairlist_t **nbl;
    int                ci_block;
    gmx_bool           CombineNBLists;
    gmx_bool           progBal;
    int                np_tot, np_noq, np_hlj, nap;

    /* Check if we are running hybrid GPU + CPU nbnxn mode */
    bGPUCPU = (!nbs->grid[0].bSimple && nbl_list->bSimple);

    nnbl           = nbl_list->nnbl;
    nbl            = nbl_list->nbl;
    CombineNBLists = nbl_list->bCombined;

    if (debug)
    {
        fprintf(debug, "ns making %d nblists\n", nnbl);
    }

    nbat->bUseBufferFlags = (nbat->nout > 1);
    /* We should re-init the flags before making the first list */
    if (nbat->bUseBufferFlags && (LOCAL_I(iloc) || bGPUCPU))
    {
        init_buffer_flags(&nbat->buffer_flags, nbat->natoms);
    }

    if (nbl_list->bSimple)
    {
#ifdef GMX_NBNXN_SIMD
        switch (nb_kernel_type)
        {
#ifdef GMX_NBNXN_SIMD_4XN
            case nbnxnk4xN_SIMD_4xN:
                nbs->icell_set_x = icell_set_x_simd_4xn;
                break;
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
            case nbnxnk4xN_SIMD_2xNN:
                nbs->icell_set_x = icell_set_x_simd_2xnn;
                break;
#endif
            default:
                nbs->icell_set_x = icell_set_x_simple;
                break;
        }
#else   /* GMX_NBNXN_SIMD */
        /* MSVC 2013 complains about switch statements without case */
        nbs->icell_set_x = icell_set_x_simple;
#endif  /* GMX_NBNXN_SIMD */
    }
    else
    {
#ifdef NBNXN_SEARCH_BB_SIMD4
        nbs->icell_set_x = icell_set_x_supersub_simd4;
#else
        nbs->icell_set_x = icell_set_x_supersub;
#endif
    }

    if (LOCAL_I(iloc))
    {
        /* Only zone (grid) 0 vs 0 */
        nzi = 1;
        zj0 = 0;
        zj1 = 1;
    }
    else
    {
        nzi = nbs->zones->nizone;
    }

    if (!nbl_list->bSimple && min_ci_balanced > 0)
    {
        get_nsubpair_target(nbs, iloc, rlist, min_ci_balanced,
                            &nsubpair_target, &nsubpair_tot_est);
    }
    else
    {
        nsubpair_target  = 0;
        nsubpair_tot_est = 0;
    }

    /* Clear all pair-lists */
    for (int th = 0; th < nnbl; th++)
    {
        clear_pairlist(nbl[th]);

        if (nbs->bFEP)
        {
            clear_pairlist_fep(nbl_list->nbl_fep[th]);
        }
    }

    for (int zi = 0; zi < nzi; zi++)
    {
        gridi = &nbs->grid[zi];

        if (NONLOCAL_I(iloc))
        {
            zj0 = nbs->zones->izone[zi].j0;
            zj1 = nbs->zones->izone[zi].j1;
            if (zi == 0)
            {
                zj0++;
            }
        }
        for (int zj = zj0; zj < zj1; zj++)
        {
            gridj = &nbs->grid[zj];

            if (debug)
            {
                fprintf(debug, "ns search grid %d vs %d\n", zi, zj);
            }

            nbs_cycle_start(&nbs->cc[enbsCCsearch]);

            if (nbl[0]->bSimple && !gridi->bSimple)
            {
                /* Hybrid list, determine blocking later */
                ci_block = 0;
            }
            else
            {
                ci_block = get_ci_block_size(gridi, nbs->DomDec, nnbl);
            }

            /* With GPU: generate progressively smaller lists for
             * load balancing for local only or non-local with 2 zones.
             */
            progBal = (LOCAL_I(iloc) || nbs->zones->n <= 2);

#pragma omp parallel for num_threads(nnbl) schedule(static)
            for (int th = 0; th < nnbl; th++)
            {
                /* Re-init the thread-local work flag data before making
                 * the first list (not an elegant conditional).
                 */
                if (nbat->bUseBufferFlags && ((zi == 0 && zj == 0) ||
                                              (bGPUCPU && zi == 0 && zj == 1)))
                {
                    init_buffer_flags(&nbs->work[th].buffer_flags, nbat->natoms);
                }

                if (CombineNBLists && th > 0)
                {
                    clear_pairlist(nbl[th]);
                }

                /* Divide the i super cells equally over the nblists */
                nbnxn_make_pairlist_part(nbs, gridi, gridj,
                                         &nbs->work[th], nbat, excl,
                                         rlist,
                                         nb_kernel_type,
                                         ci_block,
                                         nbat->bUseBufferFlags,
                                         nsubpair_target,
                                         progBal, nsubpair_tot_est,
                                         th, nnbl,
                                         nbl[th],
                                         nbl_list->nbl_fep[th]);
            }
            nbs_cycle_stop(&nbs->cc[enbsCCsearch]);

            np_tot = 0;
            np_noq = 0;
            np_hlj = 0;
            for (int th = 0; th < nnbl; th++)
            {
                inc_nrnb(nrnb, eNR_NBNXN_DIST2, nbs->work[th].ndistc);

                if (nbl_list->bSimple)
                {
                    np_tot += nbl[th]->ncj;
                    np_noq += nbl[th]->work->ncj_noq;
                    np_hlj += nbl[th]->work->ncj_hlj;
                }
                else
                {
                    /* This count ignores potential subsequent pair pruning */
                    np_tot += nbl[th]->nci_tot;
                }
            }
            nap                   = nbl[0]->na_ci*nbl[0]->na_cj;
            nbl_list->natpair_ljq = (np_tot - np_noq)*nap - np_hlj*nap/2;
            nbl_list->natpair_lj  = np_noq*nap;
            nbl_list->natpair_q   = np_hlj*nap/2;

            if (CombineNBLists && nnbl > 1)
            {
                nbs_cycle_start(&nbs->cc[enbsCCcombine]);

                combine_nblists(nnbl-1, nbl+1, nbl[0]);

                nbs_cycle_stop(&nbs->cc[enbsCCcombine]);
            }
        }
    }

    if (!nbl_list->bSimple)
    {
        /* Sort the entries on size, large ones first */
        if (CombineNBLists || nnbl == 1)
        {
            sort_sci(nbl[0]);
        }
        else
        {
#pragma omp parallel for num_threads(nnbl) schedule(static)
            for (int th = 0; th < nnbl; th++)
            {
                sort_sci(nbl[th]);
            }
        }
    }

    if (nbat->bUseBufferFlags)
    {
        reduce_buffer_flags(nbs, nnbl, &nbat->buffer_flags);
    }

    if (nbs->bFEP)
    {
        /* Balance the free-energy lists over all the threads */
        balance_fep_lists(nbs, nbl_list);
    }

    /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
    if (LOCAL_I(iloc))
    {
        nbs->search_count++;
    }
    if (nbs->print_cycles &&
        (!nbs->DomDec || (nbs->DomDec && !LOCAL_I(iloc))) &&
        nbs->search_count % 100 == 0)
    {
        nbs_cycle_print(stderr, nbs);
    }

    if (debug && (CombineNBLists && nnbl > 1))
    {
        if (nbl[0]->bSimple)
        {
            print_nblist_statistics_simple(debug, nbl[0], nbs, rlist);
        }
        else
        {
            print_nblist_statistics_supersub(debug, nbl[0], nbs, rlist);
        }
    }

    if (debug)
    {
        if (gmx_debug_at)
        {
            if (nbl[0]->bSimple)
            {
                print_nblist_ci_cj(debug, nbl[0]);
            }
            else
            {
                print_nblist_sci_cj(debug, nbl[0]);
            }
        }

        if (nbat->bUseBufferFlags)
        {
            print_reduction_cost(&nbat->buffer_flags, nnbl);
        }
    }
}