/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief
 * This file defines functions for managing threading of listed
 * interactions.
 *
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_listed-forces
 */
#include "gmxpre.h"

#include "manage-threading.h"

#include <cassert>
#include <climits>
#include <cstdio>
#include <cstdlib>

#include <string>

#include "gromacs/listed-forces/listed-forces.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/topology/ifunc.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"

#include "listed-internal.h"

/*! \brief struct for passing all data required for a function type */
typedef struct {
    int      ftype; /**< the function type index */
    t_ilist *il;    /**< pointer to t_ilist entry corresponding to ftype */
    int      nat;   /**< nr of atoms involved in a single ftype interaction */
} ilist_data_t;

/*! \brief Divides listed interactions over threads
 *
 * This routine attempts to divide all interactions of the ntype bondeds
 * types stored in ild over the threads such that each thread has roughly
 * equal load and different threads avoid touching the same atoms as much
 * as possible.
 */
static void divide_bondeds_by_locality(int                 ntype,
                                       const ilist_data_t *ild,
                                       int                 nthread,
                                       t_idef             *idef)
{
    int nat_tot, nat_sum;
    int ind[F_NRE];    /* index into the ild[].il->iatoms */
    int at_ind[F_NRE]; /* index of the first atom of the interaction at ind */
    int f, t;

    assert(ntype <= F_NRE);

    nat_tot = 0;
    nat_sum = 0;
    for (f = 0; f < ntype; f++)
    {
        /* Sum #bondeds*#atoms_per_bond over all bonded types */
        nat_tot  += ild[f].il->nr/(ild[f].nat + 1)*ild[f].nat;
        /* The start bound for thread 0 is 0 for all interactions */
        ind[f]    = 0;
        /* Initialize the next atom index array */
        assert(ild[f].il->nr > 0);
        at_ind[f] = ild[f].il->iatoms[1];
    }

    /* Loop over the end bounds of the nthread threads to determine
     * which interactions threads 0 to nthread shall calculate.
     *
     * NOTE: The cost of these combined loops is #interactions*ntype.
     * This code is running single threaded (difficult to parallelize
     * over threads). So the relative cost of this function increases
     * linearly with the number of threads. Since the inner-most loop
     * is cheap and this is done only at DD repartitioning, the cost should
     * be negligible. At high thread count many other parts of the code
     * scale the same way, so it's (currently) not worth improving this.
     */
    for (t = 1; t <= nthread; t++)
    {
        int nat_thread;

        /* Here we assume that the computational cost is proportional
         * to the number of atoms in the interaction. This is a rough
         * measure, but roughly correct. Usually there are very few
         * interactions anyhow and they are distributed relatively
         * uniformly. Proper and RB dihedrals are often distributed
         * non-uniformly, but their cost is roughly equal.
         */
        nat_thread = (nat_tot*t)/nthread;
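        /* Illustrative example (values not from this code): with
         * nat_tot = 100 interaction atoms and nthread = 4, the cumulative
         * targets are 25, 50, 75 and 100, so each thread ends up with
         * roughly a quarter of the bonded atom work. */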

        while (nat_sum < nat_thread)
        {
            /* To divide bonds based on atom order, we compare
             * the index of the first atom in the bonded interaction.
             * This works well, since the domain decomposition generates
             * bondeds in order of the atoms by looking up interactions
             * which are linked to the first atom in each interaction.
             * It usually also works well without DD, since then the atoms
             * in bonded interactions are usually in increasing order.
             * If they are not assigned in increasing order, the balancing
             * is still good, but the memory access and reduction cost will
             * be higher.
             */
            int f_min;

            /* Find out which of the types has the lowest atom index */
            f_min = 0;
            for (f = 1; f < ntype; f++)
            {
                if (at_ind[f] < at_ind[f_min])
                {
                    f_min = f;
                }
            }
            assert(f_min >= 0 && f_min < ntype);

            /* Assign the interaction with the lowest atom index (of type
             * index f_min) to thread t-1 by increasing ind.
             */
            ind[f_min] += ild[f_min].nat + 1;
            nat_sum    += ild[f_min].nat;

            /* Update the first unassigned atom index for this type */
            if (ind[f_min] < ild[f_min].il->nr)
            {
                at_ind[f_min] = ild[f_min].il->iatoms[ind[f_min] + 1];
            }
            else
            {
                /* We have assigned all interactions of this type.
                 * Setting at_ind to INT_MAX ensures this type will not be
                 * chosen in the for loop above during next iterations.
                 */
                at_ind[f_min] = INT_MAX;
            }
        }

        /* Store the bonded end boundaries (at index t) for thread t-1 */
        for (f = 0; f < ntype; f++)
        {
            idef->il_thread_division[ild[f].ftype*(nthread + 1) + t] = ind[f];
        }
    }
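
    /* il_thread_division is indexed as a flattened [F_NRE][nthread + 1] array:
     * entry ftype*(nthread + 1) + t holds the end index (in iatoms units) of
     * the work of thread t-1, which is also the start index of thread t.
     * E.g. with nthread = 2 and entries 0, 12, 24 for some ftype, thread 0
     * processes iatoms[0..12) and thread 1 processes iatoms[12..24). */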

    for (f = 0; f < ntype; f++)
    {
        assert(ind[f] == ild[f].il->nr);
    }
}

//! Divides bonded interactions over threads
static void divide_bondeds_over_threads(t_idef *idef,
                                        int     nthread,
                                        int     max_nthread_uniform,
                                        bool   *haveBondeds)
{
    ilist_data_t ild[F_NRE];
    int          ntype;
    int          f;

    assert(nthread > 0);

    idef->nthreads = nthread;

    if (F_NRE*(nthread + 1) > idef->il_thread_division_nalloc)
    {
        idef->il_thread_division_nalloc = F_NRE*(nthread + 1);
        srenew(idef->il_thread_division, idef->il_thread_division_nalloc);
    }

    *haveBondeds = false;
    ntype        = 0;
    for (f = 0; f < F_NRE; f++)
    {
        if (!ftype_is_bonded_potential(f))
        {
            continue;
        }

        if (idef->il[f].nr > 0)
        {
            *haveBondeds = true;
        }

        if (idef->il[f].nr == 0)
        {
            /* No interactions, avoid all the integer math below */
            for (int t = 0; t <= nthread; t++)
            {
                idef->il_thread_division[f*(nthread + 1) + t] = 0;
            }
        }
        else if (nthread <= max_nthread_uniform || f == F_DISRES)
        {
            /* On up to 4 threads, load balancing the bonded work
             * is more important than minimizing the reduction cost.
             */

            /* nat1 = 1 + #atoms(ftype), which is the stride used for iatoms */
            int nat1 = 1 + interaction_function[f].nratoms;

            for (int t = 0; t <= nthread; t++)
            {
                /* Divide equally over the threads */
                int nr_t = (((idef->il[f].nr/nat1)*t)/nthread)*nat1;
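                /* E.g. 12 angle interactions (nat1 = 4, so il->nr = 48) on
                 * 3 threads give boundaries 0, 16, 32 and 48 in iatoms units,
                 * i.e. 4 interactions per thread. */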

                if (f == F_DISRES)
                {
                    /* Ensure that distance restraint pairs with the same label
                     * end up on the same thread.
                     */
                    while (nr_t > 0 && nr_t < idef->il[f].nr &&
                           idef->iparams[idef->il[f].iatoms[nr_t]].disres.label ==
                           idef->iparams[idef->il[f].iatoms[nr_t - nat1]].disres.label)
                    {
                        nr_t += nat1;
                    }
                }

                idef->il_thread_division[f*(nthread + 1) + t] = nr_t;
            }
        }
        else
        {
            /* Add this ftype to the list to be distributed */
            int nat = interaction_function[f].nratoms;

            ild[ntype].ftype = f;
            ild[ntype].il    = &idef->il[f];
            ild[ntype].nat   = nat;

            /* The first index for the thread division is always 0 */
            idef->il_thread_division[f*(nthread + 1)] = 0;

            ntype++;
        }
    }

    if (ntype > 0)
    {
        divide_bondeds_by_locality(ntype, ild, nthread, idef);
    }

    if (debug)
    {
        fprintf(debug, "Division of bondeds over threads:\n");
        for (f = 0; f < F_NRE; f++)
        {
            if (ftype_is_bonded_potential(f) && idef->il[f].nr > 0)
            {
                fprintf(debug, "%16s", interaction_function[f].name);
                for (int t = 0; t < nthread; t++)
                {
                    fprintf(debug, " %4d",
                            (idef->il_thread_division[f*(nthread + 1) + t + 1] -
                             idef->il_thread_division[f*(nthread + 1) + t])/
                            (1 + interaction_function[f].nratoms));
                }
                fprintf(debug, "\n");
            }
        }
    }
}

//! Construct a reduction mask for which parts (blocks) of the force array are touched on which thread task
static void
calc_bonded_reduction_mask(int           natoms,
                           f_thread_t   *f_thread,
                           const t_idef *idef,
                           int thread, int nthread)
{
    static_assert(BITMASK_SIZE == GMX_OPENMP_MAX_THREADS, "For the error message below we assume these two are equal.");

    if (nthread > BITMASK_SIZE)
    {
        gmx_fatal(FARGS, "You are using %d OpenMP threads, which is larger than GMX_OPENMP_MAX_THREADS (%d). Decrease the number of OpenMP threads or rebuild GROMACS with a larger value for GMX_OPENMP_MAX_THREADS.",
                  nthread, GMX_OPENMP_MAX_THREADS);
    }
    GMX_ASSERT(nthread <= BITMASK_SIZE, "We need at least nthread bits in the mask");

    int nblock = (natoms + reduction_block_size - 1) >> reduction_block_bits;
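    /* E.g. with 32-atom reduction blocks (reduction_block_bits = 5),
     * natoms = 100 gives nblock = (100 + 31) >> 5 = 4. */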

    if (nblock > f_thread->block_nalloc)
    {
        f_thread->block_nalloc = over_alloc_large(nblock);
        srenew(f_thread->mask, f_thread->block_nalloc);
        srenew(f_thread->block_index, f_thread->block_nalloc);
        sfree_aligned(f_thread->f);
        snew_aligned(f_thread->f, f_thread->block_nalloc*reduction_block_size, 128);
    }
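
    /* The per-thread force buffer is sized in whole reduction blocks and
     * aligned to 128 bytes, so clearing and reduction can operate on
     * fixed-size, aligned chunks. */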

    gmx_bitmask_t *mask = f_thread->mask;

    for (int b = 0; b < nblock; b++)
    {
        bitmask_clear(&mask[b]);
    }

    for (int ftype = 0; ftype < F_NRE; ftype++)
    {
        if (ftype_is_bonded_potential(ftype))
        {
            int nb = idef->il[ftype].nr;
            if (nb > 0)
            {
                int nat1 = interaction_function[ftype].nratoms + 1;

                int nb0 = idef->il_thread_division[ftype*(nthread + 1) + thread];
                int nb1 = idef->il_thread_division[ftype*(nthread + 1) + thread + 1];

                for (int i = nb0; i < nb1; i += nat1)
                {
                    for (int a = 1; a < nat1; a++)
                    {
                        bitmask_set_bit(&mask[idef->il[ftype].iatoms[i + a] >> reduction_block_bits], thread);
                    }
                }
            }
        }
    }
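
    /* At this point mask[b] has bit 'thread' set whenever this thread's share
     * of the listed interactions writes forces to any atom in block b, i.e.
     * to any atom with index in [b*reduction_block_size, (b + 1)*reduction_block_size). */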

    /* Make an index of the blocks our thread touches, so we can do fast
     * force buffer clearing.
     */
    f_thread->nblock_used = 0;
    for (int b = 0; b < nblock; b++)
    {
        if (bitmask_is_set(mask[b], thread))
        {
            f_thread->block_index[f_thread->nblock_used++] = b;
        }
    }
}

void setup_bonded_threading(t_forcerec *fr, t_idef *idef)
{
    bonded_threading_t *bt = fr->bonded_threading;

    assert(bt->nthreads >= 1);

    /* Divide the bonded interactions over the threads */
    divide_bondeds_over_threads(idef,
                                bt->nthreads,
                                bt->bonded_max_nthread_uniform,
                                &bt->haveBondeds);

    if (!bt->haveBondeds)
    {
        /* We don't have bondeds, so there is nothing to reduce */
        return;
    }

    /* Determine to which blocks each thread's bonded force calculation
     * contributes. Store this as a mask for each thread.
     */
#pragma omp parallel for num_threads(bt->nthreads) schedule(static)
    for (int t = 0; t < bt->nthreads; t++)
    {
        try
        {
            calc_bonded_reduction_mask(fr->natoms_force, &bt->f_t[t],
                                       idef, t, bt->nthreads);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    /* Reduce the masks over the threads and determine which blocks
     * we need to reduce over.
     */
    int nblock_tot = (fr->natoms_force + reduction_block_size - 1) >> reduction_block_bits;
    if (nblock_tot > bt->block_nalloc)
    {
        bt->block_nalloc = over_alloc_large(nblock_tot);
        srenew(bt->block_index, bt->block_nalloc);
        srenew(bt->mask, bt->block_nalloc);
    }

    bt->nblock_used = 0;
    int ctot        = 0;
    for (int b = 0; b < nblock_tot; b++)
    {
        gmx_bitmask_t *mask = &bt->mask[b];

        /* Generate the union over the threads of the bitmask */
        bitmask_clear(mask);
        for (int t = 0; t < bt->nthreads; t++)
        {
            bitmask_union(mask, bt->f_t[t].mask[b]);
        }
        if (!bitmask_is_zero(*mask))
        {
            bt->block_index[bt->nblock_used++] = b;
        }

        if (debug)
        {
            int c = 0;
            for (int t = 0; t < bt->nthreads; t++)
            {
                if (bitmask_is_set(*mask, t))
                {
                    c++;
                }
            }
            ctot += c;

#if BITMASK_SIZE <= 64 //move into bitmask when it is C++
            std::string flags = gmx::formatString("%x", *mask);
#else
            std::string flags = gmx::formatAndJoin(*mask,
                                                   "", gmx::StringFormatter("%x"));
#endif
            fprintf(debug, "block %d flags %s count %d\n",
                    b, flags.c_str(), c);
        }
    }

    if (debug)
    {
        fprintf(debug, "Number of %d atom blocks to reduce: %d\n",
                reduction_block_size, bt->nblock_used);
        fprintf(debug, "Reduction density %.2f for touched blocks only %.2f\n",
                ctot*reduction_block_size/(double)fr->natoms_force,
                ctot/(double)bt->nblock_used);
    }
}

void tear_down_bonded_threading(bonded_threading_t *bt,
                                t_idef             *idef)
{
    for (int th = 0; th < bt->nthreads; th++)
    {
        sfree(bt->f_t[th].fshift);
        for (int i = 0; i < egNR; i++)
        {
            sfree(bt->f_t[th].grpp.ener[i]);
        }
    }
    sfree(bt->f_t);
    sfree(bt);

    sfree(idef->il_thread_division);
}
496 void init_bonded_threading(FILE *fplog
, int nenergrp
,
497 struct bonded_threading_t
**bt_ptr
)
499 bonded_threading_t
*bt
;
503 /* These thread local data structures are used for bondeds only.
505 * Note that we also use there structures when running single-threaded.
506 * This is because the bonded force buffer uses type rvec4, whereas
507 * the normal force buffer is uses type rvec. This leads to a little
508 * reduction overhead, but the speed gain in the bonded calculations
509 * of doing transposeScatterIncr/DecrU with aligment 4 instead of 3
510 * is much larger than the reduction overhead.
512 bt
->nthreads
= gmx_omp_nthreads_get(emntBonded
);
514 snew(bt
->f_t
, bt
->nthreads
);
#pragma omp parallel for num_threads(bt->nthreads) schedule(static)
    for (int t = 0; t < bt->nthreads; t++)
    {
        try
        {
            /* Note that thread 0 uses the global fshift and energy arrays,
             * but to keep the code simple, we initialize all data here.
             */
            bt->f_t[t].f        = nullptr;
            bt->f_t[t].f_nalloc = 0;
            snew(bt->f_t[t].fshift, SHIFTS);
            bt->f_t[t].grpp.nener = nenergrp*nenergrp;
            for (int i = 0; i < egNR; i++)
            {
                snew(bt->f_t[t].grpp.ener[i], bt->f_t[t].grpp.nener);
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    bt->nblock_used  = 0;
    bt->block_index  = nullptr;
    bt->mask         = nullptr;
    bt->block_nalloc = 0;

    /* The optimal value after which to switch from uniform to localized
     * bonded interaction distribution is 3, 4 or 5 depending on the system
     * and hardware.
     */
    const int max_nthread_uniform = 4;
    char     *ptr;

    if ((ptr = getenv("GMX_BONDED_NTHREAD_UNIFORM")) != nullptr)
    {
        sscanf(ptr, "%d", &bt->bonded_max_nthread_uniform);
        if (fplog != nullptr)
        {
            fprintf(fplog, "\nMax threads for uniform bonded distribution set to %d by env.var.\n",
                    bt->bonded_max_nthread_uniform);
        }
    }
    else
    {
        bt->bonded_max_nthread_uniform = max_nthread_uniform;
    }

    *bt_ptr = bt;
}