Split off the NMR related analyses from gmx energy.
[gromacs.git] / src / gromacs / mdrunutility / threadaffinity.cpp
blob a4370e9450063b546ac8c56626cccc82d6c0cf74
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "threadaffinity.h"

#include "config.h"

#include <cerrno>
#include <cstdio>
#include <cstring>

#if HAVE_SCHED_AFFINITY
#    include <sched.h>
#    include <sys/syscall.h>
#endif

#include "thread_mpi/threads.h"

#include "gromacs/hardware/hardwaretopology.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/utility/basenetwork.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/programcontext.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/unique_cptr.h"

namespace
{

class DefaultThreadAffinityAccess : public gmx::IThreadAffinityAccess
{
    public:
        virtual bool isThreadAffinitySupported() const
        {
            return tMPI_Thread_setaffinity_support() == TMPI_SETAFFINITY_SUPPORT_YES;
        }
        virtual int physicalNodeId() const
        {
            return gmx_physicalnode_id_hash();
        }
        virtual bool setCurrentThreadAffinityToCore(int core)
        {
            const int ret = tMPI_Thread_setaffinity_single(tMPI_Thread_self(), core);
            return ret == 0;
        }
};

//! Global instance of DefaultThreadAffinityAccess
DefaultThreadAffinityAccess g_defaultAffinityAccess;

} // namespace
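
/* The affinityAccess argument of gmx_set_thread_affinity() below falls back to
 * g_defaultAffinityAccess when nullptr is passed, so a caller can supply its own
 * gmx::IThreadAffinityAccess implementation (for example, a test double that only
 * records the requested cores) instead of the tMPI-based one. This is an
 * observation about the interface, not a statement of intended usage. */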

gmx::IThreadAffinityAccess::~IThreadAffinityAccess()
{
}

static bool invalidWithinSimulation(const t_commrec *cr, bool invalidLocally)
{
#if GMX_MPI
    if (cr->nnodes > 1)
    {
        int value = invalidLocally ? 1 : 0;
        int globalValue;
        MPI_Reduce(&value, &globalValue, 1, MPI_INT, MPI_LOR, MASTERRANK(cr),
                   cr->mpi_comm_mysim);
        return SIMMASTER(cr) ? (globalValue != 0) : invalidLocally;
    }
#else
    GMX_UNUSED_VALUE(cr);
#endif
    return invalidLocally;
}
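
/* Illustration of the reduction above (hypothetical values): with four ranks
 * where only rank 2 has invalidLocally == true, the MPI_LOR reduction yields
 * true on the master rank, while each non-master rank keeps returning its own
 * local value. Without MPI, or with a single rank, the local value is returned
 * unchanged. */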

static bool
get_thread_affinity_layout(const gmx::MDLogger         &mdlog,
                           const t_commrec             *cr,
                           const gmx::HardwareTopology &hwTop,
                           int                          threads,
                           bool                         automatic,
                           int pin_offset, int * pin_stride,
                           int **localityOrder)
{
    int  hwThreads;
    int  hwThreadsPerCore = 0;
    bool bPickPinStride;
    bool haveTopology;
    bool invalidValue;

    haveTopology = (hwTop.supportLevel() >= gmx::HardwareTopology::SupportLevel::Basic);

    if (pin_offset < 0)
    {
        gmx_fatal(FARGS, "Negative thread pinning offset requested");
    }
    if (*pin_stride < 0)
    {
        gmx_fatal(FARGS, "Negative thread pinning stride requested");
    }

    if (haveTopology)
    {
        hwThreads        = hwTop.machine().logicalProcessorCount;
        // Just use the value for the first core
        hwThreadsPerCore = hwTop.machine().sockets[0].cores[0].hwThreads.size();
        snew(*localityOrder, hwThreads);
        int i = 0;
        for (auto &s : hwTop.machine().sockets)
        {
            for (auto &c : s.cores)
            {
                for (auto &t : c.hwThreads)
                {
                    (*localityOrder)[i++] = t.logicalProcessorId;
                }
            }
        }
    }
    else
    {
        /* topology information not available or invalid, ignore it */
        hwThreads      = hwTop.machine().logicalProcessorCount;
        *localityOrder = nullptr;
    }
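
    /* Example of the ordering built above (hypothetical topology): on a machine
     * with 2 sockets x 2 cores x 2 hardware threads, where the OS numbers the
     * hardware threads of the g-th physical core as g and g+4, localityOrder
     * becomes {0, 4, 1, 5, 2, 6, 3, 7}. Walking this table with a stride of
     * hwThreadsPerCore (= 2) then places consecutive GROMACS threads on distinct
     * physical cores. */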

    // Only warn about the first problem per node. Otherwise, the first test
    // failing would essentially always cause the other problems to get
    // reported as well, leading to bogus warnings. The order in the conditionals
    // with this variable is important, since the MPI_Reduce() in
    // invalidWithinSimulation() needs to always happen.
    bool alreadyWarned = false;
    invalidValue = (hwThreads <= 0);
    if (invalidWithinSimulation(cr, invalidValue))
    {
        /* We don't know anything about the hardware, don't pin */
        GMX_LOG(mdlog.warning).asParagraph().appendText(
                "NOTE: No information on available cores, thread pinning disabled.");
        alreadyWarned = true;
    }
    bool validLayout = !invalidValue;

    if (automatic)
    {
        invalidValue = (threads != hwThreads);
        bool warn = (invalidValue && threads > 1 && threads < hwThreads);
        if (invalidWithinSimulation(cr, warn) && !alreadyWarned)
        {
            GMX_LOG(mdlog.warning).asParagraph().appendText(
                    "NOTE: The number of threads is not equal to the number of (logical) cores\n"
                    "      and the -pin option is set to auto: will not pin threads to cores.\n"
                    "      This can lead to significant performance degradation.\n"
                    "      Consider using -pin on (and -pinoffset in case you run multiple jobs).");
            alreadyWarned = true;
        }
        validLayout = validLayout && !invalidValue;
    }

    invalidValue = (threads > hwThreads);
    if (invalidWithinSimulation(cr, invalidValue) && !alreadyWarned)
    {
        GMX_LOG(mdlog.warning).asParagraph().appendText(
                "NOTE: Oversubscribing the CPU, will not pin threads");
        alreadyWarned = true;
    }
    validLayout = validLayout && !invalidValue;

    invalidValue = (pin_offset + threads > hwThreads);
    if (invalidWithinSimulation(cr, invalidValue) && !alreadyWarned)
    {
        GMX_LOG(mdlog.warning).asParagraph().appendText(
                "WARNING: Requested offset too large for available cores, thread pinning disabled.");
        alreadyWarned = true;
    }
    validLayout = validLayout && !invalidValue;

    invalidValue = false;
    /* do we need to choose the pinning stride? */
    bPickPinStride = (*pin_stride == 0);
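
    /* Worked example of the automatic choice below (hypothetical numbers): on a
     * node with 32 logical CPUs and 2 hardware threads per core, pin_offset 0
     * and 16 threads give a stride of hwThreadsPerCore = 2 (one thread per
     * physical core), whereas 24 threads no longer fit one per core and the
     * fallback picks (32 - 0)/24 = 1, i.e. consecutive logical CPUs. */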

    if (bPickPinStride)
    {
        if (haveTopology && pin_offset + threads*hwThreadsPerCore <= hwThreads)
        {
            /* Put one thread on each physical core */
            *pin_stride = hwThreadsPerCore;
        }
        else
        {
            /* We don't know if we have SMT, and if we do, we don't know
             * if hw threads in the same physical core are consecutive.
             * Without SMT the pinning layout should not matter too much,
             * so we assume a consecutive layout and maximally spread out
             * the threads at equal threads per core.
             * Note that IBM is the major non-x86 case with cpuid support
             * and probably threads are already pinned by the queuing system,
             * so we wouldn't end up here in the first place.
             */
            *pin_stride = (hwThreads - pin_offset)/threads;
        }
    }
    else
    {
        /* Check the placement of the thread with the largest index to make sure
         * that the offset & stride don't cause pinning beyond the last hardware thread. */
        invalidValue = (pin_offset + (threads-1)*(*pin_stride) >= hwThreads);
    }

    if (invalidWithinSimulation(cr, invalidValue) && !alreadyWarned)
    {
        /* We are oversubscribing, don't pin */
        GMX_LOG(mdlog.warning).asParagraph().appendText(
                "WARNING: Requested stride too large for available cores, thread pinning disabled.");
    }
    validLayout = validLayout && !invalidValue;

    if (validLayout)
    {
        GMX_LOG(mdlog.info).appendTextFormatted(
                "Pinning threads with a%s logical core stride of %d",
                bPickPinStride ? "n auto-selected" : " user-specified",
                *pin_stride);
    }

    return validLayout;
}

static bool set_affinity(const t_commrec *cr, int nthread_local, int thread0_id_node,
                         int offset, int core_pinning_stride, int *localityOrder,
                         gmx::IThreadAffinityAccess *affinityAccess)
{
    // Set the per-thread affinity. In order to be able to check the success
    // of affinity settings, we will set nth_affinity_set to 1 on threads
    // where the affinity setting succeeded and to 0 where it failed.
    // Reducing these 0/1 values over the threads will give the total number
    // of threads on which we succeeded.

    // To avoid warnings from the static analyzer we initialize nth_affinity_set
    // to zero outside the OpenMP block, and then add to it inside the block.
    // The value will still always be 0 or 1 from each thread.
    int nth_affinity_set = 0;
#pragma omp parallel num_threads(nthread_local) reduction(+:nth_affinity_set)
    {
        try
        {
            int thread_id, thread_id_node;
            int index, core;

            thread_id      = gmx_omp_get_thread_num();
            thread_id_node = thread0_id_node + thread_id;
            index          = offset + thread_id_node*core_pinning_stride;
            if (localityOrder != nullptr)
            {
                core = localityOrder[index];
            }
            else
            {
                core = index;
            }
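
            /* Example of the mapping just computed (hypothetical values): with
             * offset 0, core_pinning_stride 2 and thread0_id_node 4, OpenMP
             * thread 1 of this rank gets index 0 + (4 + 1)*2 = 10 and is pinned
             * to localityOrder[10], or to logical CPU 10 directly when no
             * topology-based ordering is available. */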

            const bool ret = affinityAccess->setCurrentThreadAffinityToCore(core);

            /* store the per-thread success-values of the setaffinity */
            nth_affinity_set += (ret ? 1 : 0);

            if (debug)
            {
                fprintf(debug, "On rank %2d, thread %2d, index %2d, core %2d the affinity setting returned %d\n",
                        cr->nodeid, gmx_omp_get_thread_num(), index, core, ret ? 1 : 0);
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    if (nth_affinity_set > nthread_local)
    {
        char msg[STRLEN];

        sprintf(msg, "Looks like we have set affinity for more threads than "
                "we have (%d > %d)!\n", nth_affinity_set, nthread_local);
        gmx_incons(msg);
    }

    /* check & warn if some threads failed to set their affinities */
    const bool allAffinitiesSet = (nth_affinity_set == nthread_local);
    if (!allAffinitiesSet)
    {
        char sbuf1[STRLEN], sbuf2[STRLEN];

        /* sbuf1 contains rank info, while sbuf2 OpenMP thread info */
        sbuf1[0] = sbuf2[0] = '\0';
        /* Only add rank info if we have more than one rank. */
        if (cr->nnodes > 1)
        {
#if GMX_MPI
#if GMX_THREAD_MPI
            sprintf(sbuf1, "In tMPI thread #%d: ", cr->nodeid);
#else           /* GMX_LIB_MPI */
            sprintf(sbuf1, "In MPI process #%d: ", cr->nodeid);
#endif
#endif          /* GMX_MPI */
        }

        if (nthread_local > 1)
        {
            sprintf(sbuf2, "for %d/%d thread%s ",
                    nthread_local - nth_affinity_set, nthread_local,
                    nthread_local > 1 ? "s" : "");
        }

        // TODO: This output should also go through mdlog.
        fprintf(stderr, "NOTE: %sAffinity setting %sfailed.\n", sbuf1, sbuf2);
    }
    return allAffinitiesSet;
}

/* Set CPU affinity. Can be important for performance.
   On some systems (e.g. Cray) CPU affinity is set by default.
   But default assigning doesn't work (well) with only some ranks
   having threads. This causes very low performance.
   External tools have cumbersome syntax for setting affinity
   in the case that only some ranks have threads.
   Thus it is important that GROMACS sets the affinity internally
   if only PME is using threads.
 */
void
gmx_set_thread_affinity(const gmx::MDLogger         &mdlog,
                        const t_commrec             *cr,
                        const gmx_hw_opt_t          *hw_opt,
                        const gmx::HardwareTopology &hwTop,
                        int                          nthread_local,
                        gmx::IThreadAffinityAccess  *affinityAccess)
{
    int   thread0_id_node, nthread_node;
    int  *localityOrder = nullptr;

    if (hw_opt->thread_affinity == threadaffOFF)
    {
        /* Nothing to do */
        return;
    }

    if (affinityAccess == nullptr)
    {
        affinityAccess = &g_defaultAffinityAccess;
    }

    /* If the tMPI thread affinity setting is not supported encourage the user
     * to report it as it's either a bug or an exotic platform which we might
     * want to support. */
    if (!affinityAccess->isThreadAffinitySupported())
    {
        /* we know Mac OS & BlueGene do not support setting thread affinity, so there's
           no point in warning the user in that case. In any other case
           the user might be able to do something about it. */
#if !defined(__APPLE__) && !defined(__bg__)
        GMX_LOG(mdlog.warning).asParagraph().appendText(
                "NOTE: Cannot set thread affinities on the current platform.");
#endif      /* __APPLE__ */
        return;
    }

    /* map the current process to cores */
    thread0_id_node = 0;
    nthread_node    = nthread_local;
#if GMX_MPI
    if (PAR(cr) || MULTISIM(cr))
    {
        /* We need to determine a scan of the thread counts in this
         * compute node.
         */
        MPI_Comm comm_intra;

        MPI_Comm_split(MPI_COMM_WORLD,
                       affinityAccess->physicalNodeId(), cr->rank_intranode,
                       &comm_intra);
        MPI_Scan(&nthread_local, &thread0_id_node, 1, MPI_INT, MPI_SUM, comm_intra);
        /* MPI_Scan is inclusive, but here we need exclusive */
        thread0_id_node -= nthread_local;
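        /* Example (hypothetical thread counts): with three ranks on this node
         * using 4, 2 and 6 threads, the inclusive scan gives 4, 6 and 12;
         * subtracting the local count yields first-thread indices 0, 4 and 6,
         * so the ranks pin into disjoint slots of the node. */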
        /* Get the total number of threads on this physical node */
        MPI_Allreduce(&nthread_local, &nthread_node, 1, MPI_INT, MPI_SUM, comm_intra);
        MPI_Comm_free(&comm_intra);
    }
#endif

    int  offset              = hw_opt->core_pinning_offset;
    int  core_pinning_stride = hw_opt->core_pinning_stride;
    if (offset != 0)
    {
        GMX_LOG(mdlog.warning).appendTextFormatted("Applying core pinning offset %d", offset);
    }

    bool automatic = (hw_opt->thread_affinity == threadaffAUTO);
    bool validLayout
        = get_thread_affinity_layout(mdlog, cr, hwTop, nthread_node, automatic,
                                     offset, &core_pinning_stride, &localityOrder);
    const gmx::sfree_guard localityOrderGuard(localityOrder);

    bool allAffinitiesSet;
    if (validLayout)
    {
        allAffinitiesSet = set_affinity(cr, nthread_local, thread0_id_node,
                                        offset, core_pinning_stride, localityOrder,
                                        affinityAccess);
    }
    else
    {
        // Produce the warning if any rank fails.
        allAffinitiesSet = false;
    }
    if (invalidWithinSimulation(cr, !allAffinitiesSet))
    {
        GMX_LOG(mdlog.warning).asParagraph().appendText(
                "NOTE: Thread affinity setting failed. This can cause performance degradation.\n"
                "      If you think your settings are correct, ask on the gmx-users list.");
    }
}

/* Check the process affinity mask and, if it is found to be non-default,
 * honor it and disable mdrun's internal thread affinity setting.
 * Note that this will only work on Linux as we use a GNU feature.
 */
void
gmx_check_thread_affinity_set(const gmx::MDLogger &mdlog,
                              const t_commrec     *cr,
                              gmx_hw_opt_t        *hw_opt,
                              int  gmx_unused      nthreads_hw_avail,
                              gmx_bool             bAfterOpenmpInit)
{
    GMX_RELEASE_ASSERT(hw_opt, "hw_opt must be a non-NULL pointer");

    if (!bAfterOpenmpInit)
    {
        /* Check for externally set OpenMP affinity and turn off internal
         * pinning if any is found. We need to do this check early to tell
         * thread-MPI whether it should do pinning when spawning threads.
         * TODO: the above no longer holds, we should move these checks later
         */
        if (hw_opt->thread_affinity != threadaffOFF)
        {
            char *message;
            if (!gmx_omp_check_thread_affinity(&message))
            {
                /* TODO: with -pin auto we should only warn when using all cores */
                GMX_LOG(mdlog.warning).asParagraph().appendText(message);
                sfree(message);
                hw_opt->thread_affinity = threadaffOFF;
            }
        }

        /* With thread-MPI this is needed as pinning might get turned off,
         * which needs to be known before starting thread-MPI.
         * With thread-MPI hw_opt is processed here on the master rank
         * and passed to the other ranks later, so we only do this on master.
         */
        if (!SIMMASTER(cr))
        {
            return;
        }
#if !GMX_THREAD_MPI
        return;
#endif
    }
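
/* Note on control flow here: in the early (pre-OpenMP-init) call only the
 * thread-MPI master rank reaches this point; with a library-MPI build the early
 * call has already returned above, so the sched_getaffinity() check below runs
 * only in the later call with bAfterOpenmpInit == TRUE. */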

#if HAVE_SCHED_AFFINITY
    int       ret;
    cpu_set_t mask_current;

    if (hw_opt->thread_affinity == threadaffOFF)
    {
        /* internal affinity setting is off, don't bother checking process affinity */
        return;
    }

    CPU_ZERO(&mask_current);
    if ((ret = sched_getaffinity(0, sizeof(cpu_set_t), &mask_current)) != 0)
    {
        /* failed to query affinity mask, will just return */
        if (debug)
        {
            fprintf(debug, "Failed to query affinity mask (error %d)", ret);
        }
        return;
    }

    /* Before proceeding with the actual check, make sure that the number of
     * detected CPUs is >= the CPUs in the current set.
     * We need to check for CPU_COUNT as it was added only in glibc 2.6. */
#ifdef CPU_COUNT
    if (nthreads_hw_avail < CPU_COUNT(&mask_current))
    {
        if (debug)
        {
            fprintf(debug, "%d hardware threads detected, but %d was returned by CPU_COUNT",
                    nthreads_hw_avail, CPU_COUNT(&mask_current));
        }
        return;
    }
#endif /* CPU_COUNT */

    gmx_bool bAllSet = TRUE;
    for (int i = 0; (i < nthreads_hw_avail && i < CPU_SETSIZE); i++)
    {
        bAllSet = bAllSet && (CPU_ISSET(i, &mask_current) != 0);
    }
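
    /* Example of what bAllSet captures (hypothetical mask): on a 16-CPU node
     * where a batch system has restricted this process to CPUs 0-7, CPU_ISSET()
     * is false for CPU 8, so bAllSet becomes FALSE; the code below then disables
     * internal pinning for -pin auto, or warns that it will override the
     * external mask when pinning was explicitly requested. */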

#if GMX_MPI
    int isInitialized;
    MPI_Initialized(&isInitialized);
    /* Before OpenMP initialization, thread-MPI is not yet initialized.
     * With thread-MPI bAllSet will be the same on all MPI-ranks, but the
     * MPI_Allreduce then functions as a barrier before setting affinities.
     */
    if (isInitialized)
    {
        gmx_bool bAllSet_All;

        MPI_Allreduce(&bAllSet, &bAllSet_All, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
        bAllSet = bAllSet_All;
    }
#endif

    if (!bAllSet)
    {
        if (hw_opt->thread_affinity == threadaffAUTO)
        {
            if (!bAfterOpenmpInit)
            {
                GMX_LOG(mdlog.warning).asParagraph().appendText(
                        "Non-default thread affinity set, disabling internal thread affinity");
            }
            else
            {
                GMX_LOG(mdlog.warning).asParagraph().appendText(
                        "Non-default thread affinity set probably by the OpenMP library,\n"
                        "disabling internal thread affinity");
            }
            hw_opt->thread_affinity = threadaffOFF;
        }
        else
        {
            /* Only warn once, at the last check (bAfterOpenmpInit==TRUE) */
            if (bAfterOpenmpInit)
            {
                GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
                        "Overriding thread affinity set outside %s",
                        gmx::getProgramContext().displayName());
            }
        }
        if (debug)
        {
            fprintf(debug, "Non-default affinity mask found\n");
        }
    }
    else
    {
        if (debug)
        {
            fprintf(debug, "Default affinity mask found\n");
        }
    }
#endif /* HAVE_SCHED_AFFINITY */
}