/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2009,2010,2011,2012,2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include "gromacs/commandline/pargs.h"
#include "gromacs/fileio/tpxio.h"
#include "gromacs/gmxana/gmx_ana.h"
#include "gromacs/legacyheaders/calcgrid.h"
#include "gromacs/legacyheaders/checkpoint.h"
#include "gromacs/legacyheaders/inputrec.h"
#include "gromacs/legacyheaders/macros.h"
#include "gromacs/legacyheaders/names.h"
#include "gromacs/legacyheaders/perf_est.h"
#include "gromacs/legacyheaders/readinp.h"
#include "gromacs/legacyheaders/typedefs.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/baseversion.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/smalloc.h"
/* Enum for situations that can occur during log file parsing, the
 * corresponding string entries can be found in do_the_tests() in
 * const char* ParseLog[] */
/* TODO clean up CamelCasing of these enum names */
    eParselogResetProblem,
    eParselogLargePrimeFactor,
    eParselogMismatchOfNumberOfPPRanksAndAvailableGPUs,
    int     nPMEnodes;      /* number of PME-only nodes used in this test */
    int     nx, ny, nz;     /* DD grid */
    int     guessPME;       /* if nPMEnodes == -1, this is the guessed number of PME nodes */
    double *Gcycles;        /* This can contain more than one value if doing multiple tests */
    float  *PME_f_load;     /* PME mesh/force load average */
    float   PME_f_load_Av;  /* Average average ;) ... */
    char   *mdrun_cmd_line; /* Mdrun command line used for this test */

    int          nr_inputfiles;  /* The number of tpr and mdp input files */
    gmx_int64_t  orig_sim_steps; /* Number of steps to be done in the real simulation */
    gmx_int64_t  orig_init_step; /* Init step for the real simulation */
    real        *rcoulomb;       /* The coulomb radii [0...nr_inputfiles] */
    real        *rvdw;           /* The vdW radii */
    real        *rlist;          /* Neighbourlist cutoff radius */
    int         *nkx, *nky, *nkz;
    real        *fsx, *fsy, *fsz; /* Fourierspacing in x,y,z dimension */
static void sep_line(FILE *fp)
{
    fprintf(fp, "\n------------------------------------------------------------\n");
}

/* Wrapper for system calls */
static int gmx_system_call(char *command)
{
    return ( system(command) );
}
/* Check if string starts with substring */
static gmx_bool str_starts(const char *string, const char *substring)
{
    return ( strncmp(string, substring, strlen(substring)) == 0);
}
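/* Illustration (not part of the original source): only the first
 * strlen(substring) characters are compared, so for a hypothetical log line
 * str_starts("Average PME mesh/force load: 0.95", "Average PME mesh/force load:")
 * returns TRUE. */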
static void cleandata(t_perf *perfdata, int test_nr)
{
    perfdata->Gcycles[test_nr]    = 0.0;
    perfdata->ns_per_day[test_nr] = 0.0;
    perfdata->PME_f_load[test_nr] = 0.0;
}
static void remove_if_exists(const char *fn)
        fprintf(stdout, "Deleting %s\n", fn);
static void finalize(const char *fn_out)
    fp = fopen(fn_out, "r");
    fprintf(stdout, "\n\n");

    while (fgets(buf, STRLEN-1, fp) != NULL)
        fprintf(stdout, "%s", buf);
    fprintf(stdout, "\n\n");
enum {
    eFoundNothing, eFoundDDStr, eFoundAccountingStr, eFoundCycleStr
};
static int parse_logfile(const char *logfile, const char *errfile,
                         t_perf *perfdata, int test_nr, int presteps, gmx_int64_t cpt_steps,
                         int nnodes)
    char        line[STRLEN], dumstring[STRLEN], dumstring2[STRLEN];
    const char  matchstrdd[]  = "Domain decomposition grid";
    const char  matchstrcr[]  = "resetting all time and cycle counters";
    const char  matchstrbal[] = "Average PME mesh/force load:";
    const char  matchstring[] = "R E A L C Y C L E A N D T I M E A C C O U N T I N G";
    const char  errSIG[]      = "signal, stopping at the next";
    float       dum1, dum2, dum3, dum4;
    gmx_int64_t resetsteps     = -1;
    gmx_bool    bFoundResetStr = FALSE;
    gmx_bool    bResetChecked  = FALSE;

    if (!gmx_fexist(logfile))
        fprintf(stderr, "WARNING: Could not find logfile %s.\n", logfile);
        cleandata(perfdata, test_nr);
        return eParselogNotFound;

    fp = fopen(logfile, "r");
    perfdata->PME_f_load[test_nr] = -1.0;
    perfdata->guessPME            = -1;

    iFound = eFoundNothing;
        iFound = eFoundDDStr; /* Skip some case statements */

    while (fgets(line, STRLEN, fp) != NULL)
        /* Remove leading spaces */

        /* Check for TERM and INT signals from user: */
        if (strstr(line, errSIG) != NULL)
            cleandata(perfdata, test_nr);
            return eParselogTerm;

        /* Check whether cycle resetting worked */
        if (presteps > 0 && !bFoundResetStr)
            if (strstr(line, matchstrcr) != NULL)
                sprintf(dumstring, "step %s", "%"GMX_SCNd64);
                sscanf(line, dumstring, &resetsteps);
                bFoundResetStr = TRUE;
                if (resetsteps == presteps+cpt_steps)
                    bResetChecked = TRUE;
                    sprintf(dumstring,  "%"GMX_PRId64, resetsteps);
                    sprintf(dumstring2, "%"GMX_PRId64, presteps+cpt_steps);
                    fprintf(stderr, "WARNING: Time step counters were reset at step %s,\n"
                            " though they were supposed to be reset at step %s!\n",
                            dumstring, dumstring2);

        /* Look for strings that appear in a certain order in the log file: */
                /* Look for domain decomp grid and separate PME nodes: */
                if (str_starts(line, matchstrdd))
                    sscanf(line, "Domain decomposition grid %d x %d x %d, separate PME ranks %d",
                           &(perfdata->nx), &(perfdata->ny), &(perfdata->nz), &npme);
                    if (perfdata->nPMEnodes == -1)
                        perfdata->guessPME = npme;
                    else if (perfdata->nPMEnodes != npme)
                        gmx_fatal(FARGS, "PME ranks from command line and output file are not identical");
                    iFound = eFoundDDStr;
                /* Catch a few errors that might have occurred: */
                else if (str_starts(line, "There is no domain decomposition for"))
                    return eParselogNoDDGrid;
                else if (str_starts(line, "The number of ranks you selected"))
                    return eParselogLargePrimeFactor;
                else if (str_starts(line, "reading tpx file"))
                    return eParselogTPXVersion;
                else if (str_starts(line, "The -dd or -npme option request a parallel simulation"))
                    return eParselogNotParallel;
                /* Even after the "Domain decomposition grid" string was found,
                 * it could be that mdrun had to quit due to some error. */
                if (str_starts(line, "Incorrect launch configuration: mismatching number of"))
                    return eParselogMismatchOfNumberOfPPRanksAndAvailableGPUs;
                else if (str_starts(line, "Some of the requested GPUs do not exist"))
                    return eParselogGpuProblem;
                /* Look for PME mesh/force balance (not necessarily present, though) */
                else if (str_starts(line, matchstrbal))
                    sscanf(&line[strlen(matchstrbal)], "%f", &(perfdata->PME_f_load[test_nr]));
                /* Look for matchstring */
                else if (str_starts(line, matchstring))
                    iFound = eFoundAccountingStr;
            case eFoundAccountingStr:
                /* Already found matchstring - look for cycle data */
                if (str_starts(line, "Total "))
                    sscanf(line, "Total %*f %lf", &(perfdata->Gcycles[test_nr]));
                    iFound = eFoundCycleStr;
                /* Already found cycle data - look for remaining performance info and return */
                if (str_starts(line, "Performance:"))
                    ndum = sscanf(line, "%s %f %f %f %f", dumstring, &dum1, &dum2, &dum3, &dum4);
                    /* (ns/day) is the second last entry, depending on whether GMX_DETAILED_PERF_STATS was set in print_perf(), nrnb.c */
                    perfdata->ns_per_day[test_nr] = (ndum == 5) ? dum3 : dum1;
                    if (bResetChecked || presteps == 0)
                        return eParselogResetProblem;

    /* Close the log file */

    /* Check why there is no performance data in the log file.
     * Did a fatal error occur? */
    if (gmx_fexist(errfile))
        fp = fopen(errfile, "r");
        while (fgets(line, STRLEN, fp) != NULL)
            if (str_starts(line, "Fatal error:") )
                if (fgets(line, STRLEN, fp) != NULL)
                    fprintf(stderr, "\nWARNING: An error occurred during this benchmark:\n"
                cleandata(perfdata, test_nr);
                return eParselogFatal;
        fprintf(stderr, "WARNING: Could not find stderr file %s.\n", errfile);

    /* Giving up ... we could not find out why there is no performance data in
     * the log file. */
    fprintf(stdout, "No performance data in log file.\n");
    cleandata(perfdata, test_nr);
    return eParselogNoPerfData;
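/* Illustration (not part of the original source): parse_logfile() walks the
 * md.log file in order, first matching a line such as
 *     "Domain decomposition grid 4 x 2 x 2, separate PME ranks 4"
 * (hypothetical numbers), then the cycle/time accounting header, then the
 * "Performance:" line from which ns/day is taken as either the first or the
 * third number, depending on whether detailed performance statistics were
 * printed. */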
static gmx_bool analyze_data(
        int           *index_tpr,    /* OUT: Nr of mdp file with best settings */
        int           *npme_optimal) /* OUT: Optimal number of PME nodes */
    int      line  = 0, line_win = -1;
    int      k_win = -1, i_win = -1, winPME;
    double   s     = 0.0; /* standard deviation */
    char     str_PME_f_load[13];
    gmx_bool bCanUseOrigTPR;
    gmx_bool bRefinedCoul, bRefinedVdW, bRefinedGrid;

    fprintf(fp, "Summary of successful runs:\n");
    fprintf(fp, "Line tpr PME ranks Gcycles Av. Std.dev. ns/day PME/f");
        fprintf(fp, " DD grid");

    for (k = 0; k < ntprs; k++)
        for (i = 0; i < ntests; i++)
            /* Select the right dataset: */
            pd = &(perfdata[k][i]);

            pd->Gcycles_Av    = 0.0;
            pd->PME_f_load_Av = 0.0;
            pd->ns_per_day_Av = 0.0;

            if (pd->nPMEnodes == -1)
                sprintf(strbuf, "(%3d)", pd->guessPME);
                sprintf(strbuf, " ");

            /* Get the average run time of a setting */
            for (j = 0; j < nrepeats; j++)
                pd->Gcycles_Av    += pd->Gcycles[j];
                pd->PME_f_load_Av += pd->PME_f_load[j];
            pd->Gcycles_Av    /= nrepeats;
            pd->PME_f_load_Av /= nrepeats;

            for (j = 0; j < nrepeats; j++)
                if (pd->ns_per_day[j] > 0.0)
                    pd->ns_per_day_Av += pd->ns_per_day[j];
                    /* Somehow the performance number was not acquired for this run,
                     * therefore set the average to some negative value: */
                    pd->ns_per_day_Av = -1.0f*nrepeats;
                pd->ns_per_day_Av /= nrepeats;

            if (pd->PME_f_load_Av > 0.0)
                sprintf(str_PME_f_load, "%12.3f", pd->PME_f_load_Av);
                sprintf(str_PME_f_load, "%s", " - ");

            /* We assume we had a successful run if both averages are positive */
            if (pd->Gcycles_Av > 0.0 && pd->ns_per_day_Av > 0.0)
                /* Output statistics if repeats were done */
                    /* Calculate the standard deviation */
                    for (j = 0; j < nrepeats; j++)
                        s += pow( pd->Gcycles[j] - pd->Gcycles_Av, 2 );

                fprintf(fp, "%4d %3d %4d%s %12.3f %12.3f %12.3f %s",
                        line, k, pd->nPMEnodes, strbuf, pd->Gcycles_Av, s,
                        pd->ns_per_day_Av, str_PME_f_load);
                    fprintf(fp, " %3d %3d %3d", pd->nx, pd->ny, pd->nz);

                /* Store the index of the best run found so far in 'winner': */
                if ( (k_win == -1) || (pd->Gcycles_Av < perfdata[k_win][i_win].Gcycles_Av) )

        gmx_fatal(FARGS, "None of the runs was successful! Check %s for problems.", fn);

    winPME = perfdata[k_win][i_win].nPMEnodes;

        /* We stuck to a fixed number of PME-only nodes */
        sprintf(strbuf, "settings No. %d", k_win);
        /* We have optimized the number of PME-only nodes */
            sprintf(strbuf, "%s", "the automatic number of PME ranks");
            sprintf(strbuf, "%d PME ranks", winPME);
    fprintf(fp, "Best performance was achieved with %s", strbuf);
    if ((nrepeats > 1) && (ntests > 1))
        fprintf(fp, " (see line %d)", line_win);

    /* Only mention settings if they were modified: */
    bRefinedCoul = !gmx_within_tol(info->rcoulomb[k_win], info->rcoulomb[0], GMX_REAL_EPS);
    bRefinedVdW  = !gmx_within_tol(info->rvdw[k_win],     info->rvdw[0],     GMX_REAL_EPS);
    bRefinedGrid = !(info->nkx[k_win] == info->nkx[0] &&
                     info->nky[k_win] == info->nky[0] &&
                     info->nkz[k_win] == info->nkz[0]);

    if (bRefinedCoul || bRefinedVdW || bRefinedGrid)
        fprintf(fp, "Optimized PME settings:\n");
        bCanUseOrigTPR = FALSE;
        bCanUseOrigTPR = TRUE;
            fprintf(fp, " New Coulomb radius: %f nm (was %f nm)\n", info->rcoulomb[k_win], info->rcoulomb[0]);
            fprintf(fp, " New Van der Waals radius: %f nm (was %f nm)\n", info->rvdw[k_win], info->rvdw[0]);
            fprintf(fp, " New Fourier grid xyz: %d %d %d (was %d %d %d)\n", info->nkx[k_win], info->nky[k_win], info->nkz[k_win],
                    info->nkx[0], info->nky[0], info->nkz[0]);
    if (bCanUseOrigTPR && ntprs > 1)
        fprintf(fp, "and original PME settings.\n");

    /* Return the index of the mdp file that showed the highest performance
     * and the optimal number of PME nodes */
    *npme_optimal = winPME;
    return bCanUseOrigTPR;
/* Get the commands we need to set up the runs from environment variables */
static void get_program_paths(gmx_bool bThreads, char *cmd_mpirun[], char *cmd_mdrun[])
    const char def_mpirun[]   = "mpirun";
    const char def_mdrun[]    = "mdrun";
    const char empty_mpirun[] = "";

    /* Get the commands we need to set up the runs from environment variables */
        if ( (cp = getenv("MPIRUN")) != NULL)
            *cmd_mpirun = gmx_strdup(cp);
            *cmd_mpirun = gmx_strdup(def_mpirun);
        *cmd_mpirun = gmx_strdup(empty_mpirun);
    if ( (cp = getenv("MDRUN" )) != NULL)
        *cmd_mdrun = gmx_strdup(cp);
        *cmd_mdrun = gmx_strdup(def_mdrun);
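/* Illustration (not part of the original source): the executables are taken
 * from the environment, e.g.
 *     export MPIRUN="/usr/local/mpirun -machinefile hosts"
 *     export MDRUN="mdrun_mpi"
 * (hypothetical values); otherwise the defaults "mpirun" and "mdrun" are used,
 * and with thread-MPI the mpirun command stays empty. */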
/* Check that the commands will run mdrun (perhaps via mpirun) by
 * running a very quick test simulation. Requires MPI environment or
 * GPU support to be available if applicable. */
/* TODO implement feature to parse the log file to get the list of
   compatible GPUs from mdrun, if the user of gmx tune-pme has not
   given one. */
static void check_mdrun_works(gmx_bool    bThreads,
                              const char *cmd_mpirun,
                              const char *cmd_np,
                              const char *cmd_mdrun,
                              gmx_bool    bNeedGpuSupport)
    char      *command = NULL;
    const char filename[] = "benchtest.log";

    /* This string should always be identical to the one in copyrite.c,
     * gmx_print_version_info() in the defined(GMX_MPI) section */
    const char match_mpi[]     = "MPI library: MPI";
    const char match_mdrun[]   = "Executable: ";
    const char match_gpu[]     = "GPU support: enabled";
    gmx_bool   bMdrun          = FALSE;
    gmx_bool   bMPI            = FALSE;
    gmx_bool   bHaveGpuSupport = FALSE;

    /* Run a small test to see whether mpirun + mdrun work */
    fprintf(stdout, "Making sure that mdrun can be executed. ");
        snew(command, strlen(cmd_mdrun) + strlen(cmd_np) + strlen(filename) + 50);
        sprintf(command, "%s%s-version -maxh 0.001 1> %s 2>&1", cmd_mdrun, cmd_np, filename);
        snew(command, strlen(cmd_mpirun) + strlen(cmd_np) + strlen(cmd_mdrun) + strlen(filename) + 50);
        sprintf(command, "%s%s%s -version -maxh 0.001 1> %s 2>&1", cmd_mpirun, cmd_np, cmd_mdrun, filename);
    fprintf(stdout, "Trying '%s' ... ", command);
    make_backup(filename);
    gmx_system_call(command);

    /* Check if we find the characteristic string in the output: */
    if (!gmx_fexist(filename))
        gmx_fatal(FARGS, "Output from test run could not be found.");

    fp = fopen(filename, "r");
    /* We need to scan the whole output file, since sometimes the queuing system
     * also writes stuff to stdout/err */
        cp = fgets(line, STRLEN, fp);
            if (str_starts(line, match_mdrun) )
            if (str_starts(line, match_mpi) )
            if (str_starts(line, match_gpu) )
                bHaveGpuSupport = TRUE;

        gmx_fatal(FARGS, "Need a threaded version of mdrun. This one\n"
                  "seems to have been compiled with MPI instead.",
        gmx_fatal(FARGS, "Need an MPI-enabled version of mdrun. This one\n"
                  "seems to have been compiled without MPI support.",
        gmx_fatal(FARGS, "Cannot execute mdrun. Please check %s for problems!",
    if (bNeedGpuSupport && !bHaveGpuSupport)
        gmx_fatal(FARGS, "The mdrun executable did not have the expected GPU support.");
    fprintf(stdout, "passed.\n");
/*! \brief Helper struct so we can parse the string with eligible GPU
    IDs outside do_the_tests. */
typedef struct eligible_gpu_ids
    int  n;   /**< Length of ids */
    int *ids; /**< Array of length n. NULL if no GPUs in use */
} t_eligible_gpu_ids;

/* Handles the no-GPU case by emitting an empty string. */
static char *make_gpu_id_command_line(int numRanks, int numPmeRanks, const t_eligible_gpu_ids *gpu_ids)
    char *command_line, *flag = "-gpu_id ", *ptr;

    /* Reserve enough room for the option name, enough single-digit
       GPU ids (since that is currently all that is possible to use
       with mdrun), and a terminating NULL. */
    flag_length = strlen(flag);
    snew(command_line, flag_length + numRanks + 1);

    /* If the user has given no eligible GPU IDs, or we're trying the
     * default behaviour, then there is nothing for g_tune_pme to give
     * to mdrun -gpu_id */
    if (gpu_ids->n > 0 && numPmeRanks > -1)
        int numPpRanks, max_num_ranks_for_each_GPU;

        /* Write the option flag */
        numPpRanks                 = numRanks - numPmeRanks;
        max_num_ranks_for_each_GPU = numPpRanks / gpu_ids->n;
        if (max_num_ranks_for_each_GPU * gpu_ids->n != numPpRanks)
            /* Some GPUs will receive more work than others, which
             * we choose to be those with the lowest indices */
            max_num_ranks_for_each_GPU++;

        /* Loop over all eligible GPU ids */
        for (gpu_id = 0, rank = 0; gpu_id < gpu_ids->n; gpu_id++)
            int rank_for_this_GPU;
            /* Loop over all PP ranks for GPU with ID gpu_id, building the
               assignment string. */
            for (rank_for_this_GPU = 0;
                 rank_for_this_GPU < max_num_ranks_for_each_GPU && rank < numPpRanks;
                 rank++, rank_for_this_GPU++)
                *ptr = '0' + gpu_ids->ids[gpu_id];
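/* Worked example for illustration (not part of the original source): with
 * numRanks = 8, numPmeRanks = 2 and eligible GPU IDs {0, 1}, there are 6 PP
 * ranks, each GPU gets at most 3 of them, and the resulting option string is
 * "-gpu_id 000111". */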
static void launch_simulation(
        gmx_bool    bLaunch,        /* Should the simulation be launched? */
        FILE       *fp,             /* General log file */
        gmx_bool    bThreads,       /* whether to use threads */
        char       *cmd_mpirun,     /* Command for mpirun */
        char       *cmd_np,         /* Switch for -np or -ntmpi or empty */
        char       *cmd_mdrun,      /* Command for mdrun */
        char       *args_for_mdrun, /* Arguments for mdrun */
        const char *simulation_tpr, /* This tpr will be simulated */
        int         nnodes,         /* Number of ranks to use */
        int         nPMEnodes,      /* Number of PME ranks to use */
        const t_eligible_gpu_ids *gpu_ids) /* Struct containing GPU IDs for
                                            * constructing mdrun command lines */
    char *command, *cmd_gpu_ids;

    /* Make enough space for the system call command,
     * (200 extra chars for -npme ... etc. options should suffice): */
    snew(command, strlen(cmd_mpirun)+strlen(cmd_mdrun)+strlen(cmd_np)+strlen(args_for_mdrun)+strlen(simulation_tpr)+200);

    cmd_gpu_ids = make_gpu_id_command_line(nnodes, nPMEnodes, gpu_ids);

    /* Note that the -passall option requires args_for_mdrun to be at the end
     * of the command line string */
        sprintf(command, "%s%s-npme %d -s %s %s %s",
                cmd_mdrun, cmd_np, nPMEnodes, simulation_tpr, args_for_mdrun, cmd_gpu_ids);
        sprintf(command, "%s%s%s -npme %d -s %s %s %s",
                cmd_mpirun, cmd_np, cmd_mdrun, nPMEnodes, simulation_tpr, args_for_mdrun, cmd_gpu_ids);

    fprintf(fp, "%s this command line to launch the simulation:\n\n%s", bLaunch ? "Using" : "Please use", command);

    /* Now the real thing! */
        fprintf(stdout, "\nLaunching simulation with best parameters now.\nExecuting '%s'", command);
        gmx_system_call(command);
static void modify_PMEsettings(
        gmx_int64_t simsteps,    /* Set this value as number of time steps */
        gmx_int64_t init_step,   /* Set this value as init_step */
        const char *fn_best_tpr, /* tpr file with the best performance */
        const char *fn_sim_tpr)  /* name of tpr file to be launched */
    read_tpx_state(fn_best_tpr, ir, &state, NULL, &mtop);

    /* Reset nsteps and init_step to the value of the input .tpr file */
    ir->nsteps    = simsteps;
    ir->init_step = init_step;

    /* Write the tpr file which will be launched */
    sprintf(buf, "Writing optimized simulation file %s with nsteps=%s.\n", fn_sim_tpr, "%"GMX_PRId64);
    fprintf(stdout, buf, ir->nsteps);
    write_tpx_state(fn_sim_tpr, ir, &state, &mtop);
static gmx_bool can_scale_rvdw(int vdwtype)
{
    return (evdwCUT == vdwtype ||
            evdwPME == vdwtype);
}

#define EPME_SWITCHED(e) ((e) == eelPMESWITCH || (e) == eelPMEUSERSWITCH)
/* Make additional TPR files with more computational load for the
 * direct space processors: */
static void make_benchmark_tprs(
        const char     *fn_sim_tpr,      /* READ : User-provided tpr file */
        char           *fn_bench_tprs[], /* WRITE: Names of benchmark tpr files */
        gmx_int64_t     benchsteps,      /* Number of time steps for benchmark runs */
        gmx_int64_t     statesteps,      /* Step counter in checkpoint file */
        real            rmin,            /* Minimal Coulomb radius */
        real            rmax,            /* Maximal Coulomb radius */
        real            bScaleRvdw,      /* Scale rvdw along with rcoulomb */
        int            *ntprs,           /* No. of TPRs to write, each with a different
                                            rcoulomb and fourierspacing */
        t_inputinfo    *info,            /* Contains information about mdp file options */
        FILE           *fp)              /* Write the output here */
    real         nlist_buffer;   /* Thickness of the buffer regions for PME-switch potentials */
    gmx_bool     bNote = FALSE;
    real         add;            /* Add this to rcoul for the next test */
    real         fac = 1.0;      /* Scaling factor for Coulomb radius */
    real         fourierspacing; /* Basic fourierspacing from tpr */

    sprintf(buf, "Making benchmark tpr file%s with %s time step%s",
            *ntprs > 1 ? "s" : "", "%"GMX_PRId64, benchsteps > 1 ? "s" : "");
    fprintf(stdout, buf, benchsteps);
        sprintf(buf, " (adding %s steps from checkpoint file)", "%"GMX_PRId64);
        fprintf(stdout, buf, statesteps);
        benchsteps += statesteps;
    fprintf(stdout, ".\n");

    read_tpx_state(fn_sim_tpr, ir, &state, NULL, &mtop);

    /* Check if some kind of PME was chosen */
    if (EEL_PME(ir->coulombtype) == FALSE)
        gmx_fatal(FARGS, "Can only do optimizations for simulations with %s electrostatics.",

    /* Check if rcoulomb == rlist, which is necessary for plain PME. */
    if ( (ir->cutoff_scheme != ecutsVERLET) &&
         (eelPME == ir->coulombtype) && !(ir->rcoulomb == ir->rlist))
        gmx_fatal(FARGS, "%s requires rcoulomb (%f) to be equal to rlist (%f).",
                  EELTYPE(eelPME), ir->rcoulomb, ir->rlist);
    /* For other PME types, rcoulomb is allowed to be smaller than rlist */
    else if (ir->rcoulomb > ir->rlist)
        gmx_fatal(FARGS, "%s requires rcoulomb (%f) to be equal to or smaller than rlist (%f)",
                  EELTYPE(ir->coulombtype), ir->rcoulomb, ir->rlist);

    if (bScaleRvdw && ir->rvdw != ir->rcoulomb)
        fprintf(stdout, "NOTE: input rvdw != rcoulomb, will not scale rvdw\n");

    /* Reduce the number of steps for the benchmarks */
    info->orig_sim_steps = ir->nsteps;
    ir->nsteps           = benchsteps;
    /* We must not use init_step from the input tpr file for the benchmarks */
    info->orig_init_step = ir->init_step;

    /* For PME-switch potentials, keep the radial distance of the buffer region */
    nlist_buffer = ir->rlist - ir->rcoulomb;

    /* Determine length of triclinic box vectors */
    for (d = 0; d < DIM; d++)
        for (i = 0; i < DIM; i++)
            box_size[d] += state.box[d][i]*state.box[d][i];
        box_size[d] = sqrt(box_size[d]);

    if (ir->fourier_spacing > 0)
        info->fsx[0] = ir->fourier_spacing;
        info->fsy[0] = ir->fourier_spacing;
        info->fsz[0] = ir->fourier_spacing;
        /* Reconstruct fourierspacing per dimension from the number of grid points and box size */
        info->fsx[0] = box_size[XX]/ir->nkx;
        info->fsy[0] = box_size[YY]/ir->nky;
        info->fsz[0] = box_size[ZZ]/ir->nkz;

    /* If no value for the fourierspacing was provided on the command line, we
     * use the reconstruction from the tpr file */
    if (ir->fourier_spacing > 0)
        /* Use the spacing from the tpr */
        fourierspacing = ir->fourier_spacing;
        /* Use the maximum observed spacing */
        fourierspacing = max(max(info->fsx[0], info->fsy[0]), info->fsz[0]);

    fprintf(stdout, "Calculating PME grid points on the basis of a fourierspacing of %f nm\n", fourierspacing);

    /* For performance comparisons the number of particles is useful to have */
    fprintf(fp, " Number of particles : %d\n", mtop.natoms);

    /* Print information about settings of which some are potentially modified: */
    fprintf(fp, " Coulomb type : %s\n", EELTYPE(ir->coulombtype));
    fprintf(fp, " Grid spacing x y z : %f %f %f\n",
            box_size[XX]/ir->nkx, box_size[YY]/ir->nky, box_size[ZZ]/ir->nkz);
    fprintf(fp, " Van der Waals type : %s\n", EVDWTYPE(ir->vdwtype));
    if (ir_vdw_switched(ir))
        fprintf(fp, " rvdw_switch : %f nm\n", ir->rvdw_switch);
    if (EPME_SWITCHED(ir->coulombtype))
        fprintf(fp, " rlist : %f nm\n", ir->rlist);
    if (ir->rlistlong != max_cutoff(ir->rvdw, ir->rcoulomb))
        fprintf(fp, " rlistlong : %f nm\n", ir->rlistlong);

    /* Print a descriptive line about the tpr settings tested */
    fprintf(fp, "\nWill try these real/reciprocal workload settings:\n");
    fprintf(fp, " No. scaling rcoulomb");
    fprintf(fp, " nkx nky nkz");
    fprintf(fp, " spacing");
    if (can_scale_rvdw(ir->vdwtype))
        fprintf(fp, " rvdw");
    if (EPME_SWITCHED(ir->coulombtype))
        fprintf(fp, " rlist");
    if (ir->rlistlong != max_cutoff(ir->rlist, max_cutoff(ir->rvdw, ir->rcoulomb)) )
        fprintf(fp, " rlistlong");
    fprintf(fp, " tpr file\n");

    /* Loop to create the requested number of tpr input files */
    for (j = 0; j < *ntprs; j++)
        /* The first .tpr is the provided one, just need to modify nsteps,
         * so skip the following block */
            /* Determine which Coulomb radii rc to use in the benchmarks */
            add = (rmax-rmin)/(*ntprs-1);
            if (gmx_within_tol(rmin, info->rcoulomb[0], GMX_REAL_EPS))
                ir->rcoulomb = rmin + j*add;
            else if (gmx_within_tol(rmax, info->rcoulomb[0], GMX_REAL_EPS))
                ir->rcoulomb = rmin + (j-1)*add;
                /* rmin != rcoul != rmax, ergo test between rmin and rmax */
                add          = (rmax-rmin)/(*ntprs-2);
                ir->rcoulomb = rmin + (j-1)*add;
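            /* Worked example for illustration (not part of the original
             * source): with rcoulomb = 0.9 nm in the input tpr, -rmin 0.9,
             * -rmax 1.1 and -ntpr 3, the tested Coulomb radii are 0.9 nm
             * (j=0, unmodified), 1.0 nm and 1.1 nm, and the Fourier grid of
             * each benchmark tpr is scaled accordingly below. */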
        /* Determine the scaling factor fac */
        fac = ir->rcoulomb/info->rcoulomb[0];

        /* Scale the Fourier grid spacing */
        ir->nkx = ir->nky = ir->nkz = 0;
        calc_grid(NULL, state.box, fourierspacing*fac, &ir->nkx, &ir->nky, &ir->nkz);

        /* Adjust other radii since various conditions need to be fulfilled */
        if (eelPME == ir->coulombtype)
            /* plain PME, rcoulomb must be equal to rlist */
            ir->rlist = ir->rcoulomb;
            /* rlist must be >= rcoulomb, we keep the size of the buffer region */
            ir->rlist = ir->rcoulomb + nlist_buffer;

        if (bScaleRvdw && can_scale_rvdw(ir->vdwtype))
            if (ecutsVERLET == ir->cutoff_scheme ||
                evdwPME == ir->vdwtype)
                /* With either the Verlet cutoff-scheme or LJ-PME,
                   the van der Waals radius must always equal the
                   Coulomb radius */
                ir->rvdw = ir->rcoulomb;
                /* For vdw cutoff, rvdw >= rlist */
                ir->rvdw = max(info->rvdw[0], ir->rlist);

        ir->rlistlong = max_cutoff(ir->rlist, max_cutoff(ir->rvdw, ir->rcoulomb));
        } /* end of "if (j != 0)" */

        /* for j==0: Save the original settings
         * for j >0: Save modified radii and Fourier grids */
        info->rcoulomb[j]  = ir->rcoulomb;
        info->rvdw[j]      = ir->rvdw;
        info->nkx[j]       = ir->nkx;
        info->nky[j]       = ir->nky;
        info->nkz[j]       = ir->nkz;
        info->rlist[j]     = ir->rlist;
        info->rlistlong[j] = ir->rlistlong;
        info->fsx[j]       = fac*fourierspacing;
        info->fsy[j]       = fac*fourierspacing;
        info->fsz[j]       = fac*fourierspacing;

        /* Write the benchmark tpr file */
        strncpy(fn_bench_tprs[j], fn_sim_tpr, strlen(fn_sim_tpr)-strlen(".tpr"));
        sprintf(buf, "_bench%.2d.tpr", j);
        strcat(fn_bench_tprs[j], buf);
        fprintf(stdout, "Writing benchmark tpr %s with nsteps=", fn_bench_tprs[j]);
        fprintf(stdout, "%"GMX_PRId64, ir->nsteps);
            fprintf(stdout, ", scaling factor %f\n", fac);
            fprintf(stdout, ", unmodified settings\n");

        write_tpx_state(fn_bench_tprs[j], ir, &state, &mtop);

        /* Write information about modified tpr settings to log file */
        fprintf(fp, "%4d%10f%10f", j, fac, ir->rcoulomb);
        fprintf(fp, "%5d%5d%5d", ir->nkx, ir->nky, ir->nkz);
        fprintf(fp, " %9f ", info->fsx[j]);
        if (can_scale_rvdw(ir->vdwtype))
            fprintf(fp, "%10f", ir->rvdw);
        if (EPME_SWITCHED(ir->coulombtype))
            fprintf(fp, "%10f", ir->rlist);
        if (info->rlistlong[0] != max_cutoff(info->rlist[0], max_cutoff(info->rvdw[0], info->rcoulomb[0])) )
            fprintf(fp, "%10f", ir->rlistlong);
        fprintf(fp, " %-14s\n", fn_bench_tprs[j]);

        /* Make it clear to the user that some additional settings were modified */
        if (!gmx_within_tol(ir->rvdw, info->rvdw[0], GMX_REAL_EPS)
            || !gmx_within_tol(ir->rlistlong, info->rlistlong[0], GMX_REAL_EPS) )

        fprintf(fp, "\nNote that in addition to the Coulomb radius and the Fourier grid\n"
                "other input settings were also changed (see table above).\n"
                "Please check if the modified settings are appropriate.\n");
/* Rename the files we want to keep to some meaningful filename and
 * delete the rest */
static void cleanup(const t_filenm *fnm, int nfile, int k, int nnodes,
                    int nPMEnodes, int nr, gmx_bool bKeepStderr)
    char        numstring[STRLEN];
    char        newfilename[STRLEN];
    const char *fn = NULL;

    fprintf(stdout, "Cleaning up, deleting benchmark temp files ...\n");

    for (i = 0; i < nfile; i++)
        opt = (char *)fnm[i].opt;
        if (strcmp(opt, "-p") == 0)
            /* do nothing; keep this file */
        else if (strcmp(opt, "-bg") == 0)
            /* Give the log file a nice name so one can later see which parameters were used */
            numstring[0] = '\0';
                sprintf(numstring, "_%d", nr);
            sprintf(newfilename, "%s_no%d_np%d_npme%d%s", opt2fn("-bg", nfile, fnm), k, nnodes, nPMEnodes, numstring);
            if (gmx_fexist(opt2fn("-bg", nfile, fnm)))
                fprintf(stdout, "renaming log file to %s\n", newfilename);
                make_backup(newfilename);
                rename(opt2fn("-bg", nfile, fnm), newfilename);
        else if (strcmp(opt, "-err") == 0)
            /* This file contains the output of stderr. We want to keep it in
             * cases where there have been problems. */
            fn           = opt2fn(opt, nfile, fnm);
            numstring[0] = '\0';
                sprintf(numstring, "_%d", nr);
            sprintf(newfilename, "%s_no%d_np%d_npme%d%s", fn, k, nnodes, nPMEnodes, numstring);
                fprintf(stdout, "Saving stderr output in %s\n", newfilename);
                make_backup(newfilename);
                rename(fn, newfilename);
                fprintf(stdout, "Deleting %s\n", fn);
        /* Delete the files which are created for each benchmark run: (options -b*) */
        else if ( (0 == strncmp(opt, "-b", 2)) && (opt2bSet(opt, nfile, fnm) || !is_optional(&fnm[i])) )
            remove_if_exists(opt2fn(opt, nfile, fnm));
enum {
    eNpmeAuto, eNpmeAll, eNpmeReduced, eNpmeSubset, eNpmeNr
};
/* Create a list of numbers of PME nodes to test */
static void make_npme_list(
        const char *npmevalues_opt, /* Make a complete list with all
                                     * possibilities or a short list that keeps only
                                     * reasonable numbers of PME nodes */
        int        *nentries,       /* Number of entries we put in the nPMEnodes list */
        int        *nPMEnodes[],    /* Each entry contains the value for -npme */
        int         nnodes,         /* Total number of nodes to do the tests on */
        int         minPMEnodes,    /* Minimum number of PME nodes */
        int         maxPMEnodes)    /* Maximum number of PME nodes */
    int min_factor = 1; /* We request that npp and npme have this minimal
                         * largest common factor (depends on npp) */
    int nlistmax;       /* Max. list size */
    int nlist;          /* Actual number of entries in list */

    /* Do we need to check all possible values for -npme or is a reduced list enough? */
    if (0 == strcmp(npmevalues_opt, "all") )
    else if (0 == strcmp(npmevalues_opt, "subset") )
        eNPME = eNpmeSubset;
    else /* "auto" or "range" */
        else if (nnodes < 128)
            eNPME = eNpmeReduced;
            eNPME = eNpmeSubset;

    /* Calculate how many entries we could possibly have (in case of -npme all) */
        nlistmax = maxPMEnodes - minPMEnodes + 3;
        if (0 == minPMEnodes)

    /* Now make the actual list which is at most of size nlist */
    snew(*nPMEnodes, nlistmax);
    nlist = 0; /* start counting again, now the real entries in the list */
    for (i = 0; i < nlistmax - 2; i++)
        npme = maxPMEnodes - i;
                /* For 2d PME we want a common largest factor of at least the cube
                 * root of the number of PP nodes */
                min_factor = (int) pow(npp, 1.0/3.0);
                gmx_fatal(FARGS, "Unknown option for eNPME in make_npme_list");
        if (gmx_greatest_common_divisor(npp, npme) >= min_factor)
            (*nPMEnodes)[nlist] = npme;

    /* We always test 0 PME nodes and the automatic number */
    *nentries             = nlist + 2;
    (*nPMEnodes)[nlist]   = 0;
    (*nPMEnodes)[nlist+1] = -1;

    fprintf(stderr, "Will try the following %d different values for -npme:\n", *nentries);
    for (i = 0; i < *nentries-1; i++)
        fprintf(stderr, "%d, ", (*nPMEnodes)[i]);
    fprintf(stderr, "and %d (auto).\n", (*nPMEnodes)[*nentries-1]);
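/* Illustration (not part of the original source): for a hypothetical run on
 * 8 ranks with minPMEnodes = 2 and maxPMEnodes = 4, and an option for which
 * every candidate passes the common-factor test, the resulting list would be
 * 4, 3, 2, plus the always-added entries 0 (no separate PME ranks) and
 * -1 (automatic number). */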
/* Allocate memory to store the performance data */
static void init_perfdata(t_perf *perfdata[], int ntprs, int datasets, int repeats)
    for (k = 0; k < ntprs; k++)
        snew(perfdata[k], datasets);
        for (i = 0; i < datasets; i++)
            for (j = 0; j < repeats; j++)
                snew(perfdata[k][i].Gcycles,    repeats);
                snew(perfdata[k][i].ns_per_day, repeats);
                snew(perfdata[k][i].PME_f_load, repeats);
/* Check for errors on mdrun -h */
static void make_sure_it_runs(char *mdrun_cmd_line, int length, FILE *fp,
                              const t_filenm *fnm, int nfile)
    const char *fn = NULL;
    char       *command, *msg;

    snew(command, length +  15);
    snew(msg,     length + 500);

    fprintf(stdout, "Making sure the benchmarks can be executed by running just 1 step...\n");
    sprintf(command, "%s -nsteps 1 -quiet", mdrun_cmd_line);
    fprintf(stdout, "Executing '%s' ...\n", command);
    ret = gmx_system_call(command);

        /* To prevent confusion, do not again issue a gmx_fatal here since we already
         * get the error message from mdrun itself */
        sprintf(msg,
                "Cannot run the first benchmark simulation! Please check the error message of\n"
                "mdrun for the source of the problem. Did you provide a command line\n"
                "argument that neither gmx tune_pme nor mdrun understands? If you're\n"
                "sure your command line should work, you can bypass this check with \n"
                "gmx tune_pme -nocheck. The failing command was:\n"
                "\n%s\n\n", command);

        fprintf(stderr, "%s", msg);
        fprintf(fp, "%s", msg);

    fprintf(stdout, "Benchmarks can be executed!\n");

    /* Clean up the benchmark output files we just created */
    fprintf(stdout, "Cleaning up ...\n");
    remove_if_exists(opt2fn("-bc",   nfile, fnm));
    remove_if_exists(opt2fn("-be",   nfile, fnm));
    remove_if_exists(opt2fn("-bcpo", nfile, fnm));
    remove_if_exists(opt2fn("-bg",   nfile, fnm));
static void do_the_tests(
        FILE           *fp,             /* General g_tune_pme output file */
        char          **tpr_names,      /* Filenames of the input files to test */
        int             maxPMEnodes,    /* Max fraction of nodes to use for PME */
        int             minPMEnodes,    /* Min fraction of nodes to use for PME */
        int             npme_fixed,     /* If >= -1, test fixed number of PME */
        const char     *npmevalues_opt, /* Which -npme values should be tested */
        t_perf        **perfdata,       /* Here the performance data is stored */
        int            *pmeentries,     /* Entries in the nPMEnodes list */
        int             repeats,        /* Repeat each test this often */
        int             nnodes,         /* Total number of nodes = nPP + nPME */
        int             nr_tprs,        /* Total number of tpr files to test */
        gmx_bool        bThreads,       /* Threads or MPI? */
        char           *cmd_mpirun,     /* mpirun command string */
        char           *cmd_np,         /* "-np", "-n", whatever mpirun needs */
        char           *cmd_mdrun,      /* mdrun command string */
        char           *cmd_args_bench, /* arguments for mdrun in a string */
        const t_filenm *fnm,            /* List of filenames from command line */
        int             nfile,          /* Number of files specified on the cmdl. */
        int             presteps,       /* DLB equilibration steps, is checked */
        gmx_int64_t     cpt_steps,      /* Time step counter in the checkpoint */
        gmx_bool        bCheck,         /* Check whether benchmark mdrun works */
        const t_eligible_gpu_ids *gpu_ids) /* Struct containing GPU IDs for
                                            * constructing mdrun command lines */
    int      i, nr, k, ret, count = 0, totaltests;
    int     *nPMEnodes = NULL;
    char    *command, *cmd_stub;
    gmx_bool bResetProblem = FALSE;
    gmx_bool bFirst        = TRUE;
    gmx_bool bUsingGpus    = 0 < gpu_ids->n;

    /* This string array corresponds to the eParselog enum type at the start
     * of the file */
    const char* ParseLog[] = {
        "Logfile not found!",
        "No timings, logfile truncated?",
        "Run was terminated.",
        "Counters were not reset properly.",
        "No DD grid found for these settings.",
        "TPX version conflict!",
        "mdrun was not started in parallel!",
        "Number of PP ranks has a prime factor that is too large.",
        "The number of PP ranks did not suit the number of GPUs.",
        "Some GPUs were not detected or are incompatible.",
    char str_PME_f_load[13];

    /* Allocate space for the mdrun command line. 100 extra characters should
       be more than enough for the -npme etcetera arguments */
    cmdline_length = strlen(cmd_mpirun)
        + strlen(cmd_args_bench)
        + strlen(tpr_names[0]) + 100;
    snew(command,  cmdline_length);
    snew(cmd_stub, cmdline_length);

    /* Construct the part of the command line that stays the same for all tests: */
        sprintf(cmd_stub, "%s%s", cmd_mdrun, cmd_np);
        sprintf(cmd_stub, "%s%s%s ", cmd_mpirun, cmd_np, cmd_mdrun);

    /* Create a list of numbers of PME nodes to test */
    if (npme_fixed < -1)
        make_npme_list(npmevalues_opt, pmeentries, &nPMEnodes,
                       nnodes, minPMEnodes, maxPMEnodes);
        nPMEnodes[0] = npme_fixed;
        fprintf(stderr, "Will use a fixed number of %d PME-only ranks.\n", nPMEnodes[0]);

        fprintf(fp, "\nNo benchmarks done since number of repeats (-r) is 0.\n");
        finalize(opt2fn("-p", nfile, fnm));

    /* Allocate one dataset for each tpr input file: */
    init_perfdata(perfdata, nr_tprs, *pmeentries, repeats);

    /*****************************************/
    /* Main loop over all tpr files to test: */
    /*****************************************/
    totaltests = nr_tprs*(*pmeentries)*repeats;
    for (k = 0; k < nr_tprs; k++)
        fprintf(fp, "\nIndividual timings for input file %d (%s):\n", k, tpr_names[k]);
        fprintf(fp, "PME ranks Gcycles ns/day PME/f Remark\n");
        /* Loop over various numbers of PME nodes: */
        for (i = 0; i < *pmeentries; i++)
            char *cmd_gpu_ids = NULL;

            pd = &perfdata[k][i];

            cmd_gpu_ids = make_gpu_id_command_line(nnodes, nPMEnodes[i], gpu_ids);

            /* Loop over the repeats for each scenario: */
            for (nr = 0; nr < repeats; nr++)
                pd->nPMEnodes = nPMEnodes[i];

                /* Add -npme and -s to the command line and save it. Note that
                 * the -passall option (if set) requires cmd_args_bench to be
                 * at the end of the command line string */
                snew(pd->mdrun_cmd_line, cmdline_length);
                sprintf(pd->mdrun_cmd_line, "%s-npme %d -s %s %s %s",
                        cmd_stub, pd->nPMEnodes, tpr_names[k], cmd_args_bench, cmd_gpu_ids);
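                /* Illustration (not part of the original source): with a
                 * hypothetical input named topol.tpr, 8 MPI ranks and 2
                 * PME-only ranks, the stored benchmark command would look
                 * roughly like
                 *   mpirun -np 8 mdrun -npme 2 -s topol_bench00.tpr <benchmark args> <gpu ids>
                 * depending on the MPIRUN/MDRUN settings and the options
                 * passed through. */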
                /* To prevent all benchmarks from failing due to a show-stopper argument
                 * on the mdrun command line, we make a quick check first.
                 * This check can be turned off in cases where the automatically chosen
                 * number of PME-only ranks leads to a number of PP ranks for which no
                 * decomposition can be found (e.g. for large prime numbers) */
                if (bFirst && bCheck)
                    /* TODO This check is really for a functional
                     * .tpr, and if we need it, it should take place
                     * for every .tpr, and the logic for it should be
                     * immediately inside the loop over k, not in
                     * this inner loop. */
                    char *temporary_cmd_line;

                    snew(temporary_cmd_line, cmdline_length);
                    /* TODO -npme 0 is more likely to succeed at low
                       parallelism than the default of -npme -1, but
                       is more likely to fail at the scaling limit
                       when the PP domains may be too small. "mpirun
                       -np 1 mdrun" is probably a reasonable thing to
                       do for this check, but it'll be easier to
                       implement that after some refactoring of how
                       the number of MPI ranks is managed. */
                    sprintf(temporary_cmd_line, "%s-npme 0 -nb cpu -s %s %s",
                            cmd_stub, tpr_names[k], cmd_args_bench);
                    make_sure_it_runs(temporary_cmd_line, cmdline_length, fp, fnm, nfile);

                /* Do a benchmark simulation: */
                    sprintf(buf, ", pass %d/%d", nr+1, repeats);
                fprintf(stdout, "\n=== Progress %2.0f%%, tpr %d/%d, run %d/%d%s:\n",
                        (100.0*count)/totaltests,
                        k+1, nr_tprs, i+1, *pmeentries, buf);
                make_backup(opt2fn("-err", nfile, fnm));
                sprintf(command, "%s 1> /dev/null 2>%s", pd->mdrun_cmd_line, opt2fn("-err", nfile, fnm));
                fprintf(stdout, "%s\n", pd->mdrun_cmd_line);
                gmx_system_call(command);

                /* Collect the performance data from the log file; also check stderr
                 * for fatal errors */
                ret = parse_logfile(opt2fn("-bg", nfile, fnm), opt2fn("-err", nfile, fnm),
                                    pd, nr, presteps, cpt_steps, nnodes);
                if ((presteps > 0) && (ret == eParselogResetProblem))
                    bResetProblem = TRUE;

                if (-1 == pd->nPMEnodes)
                    sprintf(buf, "(%3d)", pd->guessPME);

                if (pd->PME_f_load[nr] > 0.0)
                    sprintf(str_PME_f_load, "%12.3f", pd->PME_f_load[nr]);
                    sprintf(str_PME_f_load, "%s", " - ");

                /* Write the data we got to disk */
                fprintf(fp, "%4d%s %12.3f %12.3f %s %s", pd->nPMEnodes,
                        buf, pd->Gcycles[nr], pd->ns_per_day[nr], str_PME_f_load, ParseLog[ret]);
                if (!(ret == eParselogOK || ret == eParselogNoDDGrid || ret == eParselogNotFound) )
                    fprintf(fp, " Check %s file for problems.", ret == eParselogFatal ? "err" : "log");

                /* Do some cleaning up and delete the files we do not need any more */
                cleanup(fnm, nfile, k, nnodes, pd->nPMEnodes, nr, ret == eParselogFatal);

                /* If the first run with this number of processors already failed, do not try again: */
                if (pd->Gcycles[0] <= 0.0 && repeats > 1)
                    fprintf(stdout, "Skipping remaining passes of unsuccessful setting, see log file for details.\n");
                    count += repeats-(nr+1);
            } /* end of repeats loop */
        }     /* end of -npme loop */
    }         /* end of tpr file loop */

        fprintf(fp, "WARNING: The cycle and time step counters could not be reset properly. ");
static void check_input(
        real            maxPMEfraction,
        real            minPMEfraction,
        gmx_int64_t     bench_nsteps,
        const t_filenm *fnm,
    /* Make sure the input file exists */
    if (!gmx_fexist(opt2fn("-s", nfile, fnm)))
        gmx_fatal(FARGS, "File %s not found.", opt2fn("-s", nfile, fnm));

    /* Make sure that the checkpoint file is not overwritten during benchmarking */
    if ( (0 == strcmp(opt2fn("-cpi", nfile, fnm), opt2fn("-bcpo", nfile, fnm)) ) && (sim_part > 1) )
        gmx_fatal(FARGS, "Checkpoint input (-cpi) and benchmark checkpoint output (-bcpo) files must not be identical.\n"
                  "The checkpoint input file must not be overwritten during the benchmarks.\n");

    /* Make sure that repeats is >= 0 (if == 0, only write tpr files) */
        gmx_fatal(FARGS, "Number of repeats < 0!");

    /* Check number of nodes */
        gmx_fatal(FARGS, "Number of ranks/threads must be a positive integer.");

    /* Automatically choose -ntpr if not set */
        /* Set a reasonable scaling factor for rcoulomb */
        *rmax = rcoulomb * 1.2;

    fprintf(stderr, "Will test %d tpr file%s.\n", *ntprs, *ntprs == 1 ? "" : "s");
        fprintf(stderr, "Note: Choose ntpr>1 to shift PME load between real and reciprocal space.\n");

    /* Make sure that rmin <= rcoulomb <= rmax */
    if (!(*rmin <= *rmax) )
        gmx_fatal(FARGS, "Please choose the Coulomb radii such that rmin <= rmax.\n"
                  "rmin = %g, rmax = %g, actual rcoul from .tpr file = %g\n", *rmin, *rmax, rcoulomb);

    /* Add test scenarios if rmin or rmax were set */
        if (!gmx_within_tol(*rmin, rcoulomb, GMX_REAL_EPS) && (*ntprs == 1) )
            fprintf(stderr, "NOTE: Setting -rmin to %g changed -ntpr to %d\n",
        if (!gmx_within_tol(*rmax, rcoulomb, GMX_REAL_EPS) && (*ntprs == 1) )
            fprintf(stderr, "NOTE: Setting -rmax to %g changed -ntpr to %d\n",

        /* If one of rmin, rmax is set, we need 2 tpr files at minimum */
        if (!gmx_within_tol(*rmax, rcoulomb, GMX_REAL_EPS) || !gmx_within_tol(*rmin, rcoulomb, GMX_REAL_EPS) )
            *ntprs = max(*ntprs, 2);

        /* If both rmin, rmax are set, we need 3 tpr files at minimum */
        if (!gmx_within_tol(*rmax, rcoulomb, GMX_REAL_EPS) && !gmx_within_tol(*rmin, rcoulomb, GMX_REAL_EPS) )
            *ntprs = max(*ntprs, 3);

        fprintf(stderr, "NOTE: Your rmin, rmax setting changed -ntpr to %d\n", *ntprs);

        if (gmx_within_tol(*rmin, rcoulomb, GMX_REAL_EPS) && gmx_within_tol(rcoulomb, *rmax, GMX_REAL_EPS)) /* We have just a single rc */
            fprintf(stderr, "WARNING: Resetting -ntpr to 1 since no Coulomb radius scaling is requested.\n"
                    "Please set rmin < rmax to test Coulomb radii in the [rmin, rmax] interval\n"
                    "with correspondingly adjusted PME grid settings\n");

    /* Check whether max and min fraction are within required values */
    if (maxPMEfraction > 0.5 || maxPMEfraction < 0)
        gmx_fatal(FARGS, "-max must be between 0 and 0.5");
    if (minPMEfraction > 0.5 || minPMEfraction < 0)
        gmx_fatal(FARGS, "-min must be between 0 and 0.5");
    if (maxPMEfraction < minPMEfraction)
        gmx_fatal(FARGS, "-max must be larger or equal to -min");

    /* Check whether the number of steps - if it was set - has a reasonable value */
    if (bench_nsteps < 0)
        gmx_fatal(FARGS, "Number of steps must be positive.");

    if (bench_nsteps > 10000 || bench_nsteps < 100)
        fprintf(stderr, "WARNING: steps=");
        fprintf(stderr, "%"GMX_PRId64, bench_nsteps);
        fprintf(stderr, ". Are you sure you want to perform so %s steps for each benchmark?\n", (bench_nsteps < 100) ? "few" : "many");

        gmx_fatal(FARGS, "Cannot have a negative number of presteps.\n");

    /* Check for rcoulomb scaling if more than one .tpr file is tested */
        if (*rmin/rcoulomb < 0.75 || *rmax/rcoulomb > 1.25)
            fprintf(stderr, "WARNING: Applying extreme scaling factor. I hope you know what you are doing.\n");

    /* If a fixed number of PME nodes is set we do rcoulomb and PME grid tuning
     * only. We need to check whether the requested number of PME-only nodes
     * is reasonable. */
    if (npme_fixed > -1)
        /* No more than 50% of all nodes can be assigned as PME-only nodes. */
        if (2*npme_fixed > nnodes)
            gmx_fatal(FARGS, "Cannot have more than %d PME-only ranks for a total of %d ranks (you chose %d).\n",
                      nnodes/2, nnodes, npme_fixed);
        if ((npme_fixed > 0) && (5*npme_fixed < nnodes))
            fprintf(stderr, "WARNING: Only %g percent of the ranks are assigned as PME-only ranks.\n",
                    100.0*((real)npme_fixed / (real)nnodes));
        if (opt2parg_bSet("-min", npargs, pa) || opt2parg_bSet("-max", npargs, pa))
            fprintf(stderr, "NOTE: The -min, -max, and -npme options have no effect when a\n"
                    " fixed number of PME-only ranks is requested with -fix.\n");
/* Returns TRUE when "opt" is needed at launch time */
static gmx_bool is_launch_file(char *opt, gmx_bool bSet)
    if (0 == strncmp(opt, "-swap", 5))

    /* Apart from the input .tpr and the output log files we need all options that
     * were set on the command line and that do not start with -b */
    if (0 == strncmp(opt, "-b", 2) || 0 == strncmp(opt, "-s", 2)
        || 0 == strncmp(opt, "-err", 4) || 0 == strncmp(opt, "-p", 2) )

/* Returns TRUE when "opt" defines a file which is needed for the benchmark runs */
static gmx_bool is_bench_file(char *opt, gmx_bool bSet, gmx_bool bOptional, gmx_bool bIsOutput)
    /* Apart from the input .tpr, all files starting with "-b" are for
     * _b_enchmark files exclusively */
    if (0 == strncmp(opt, "-s", 2))

    if (0 == strncmp(opt, "-b", 2) || 0 == strncmp(opt, "-s", 2))
        if (!bOptional || bSet)

        if (bSet) /* These are additional input files like -cpi -ei */
/* Adds 'buf' to 'str' */
static void add_to_string(char **str, char *buf)
    len = strlen(*str) + strlen(buf) + 1;
/* Create the command line for the benchmark as well as for the real run */
static void create_command_line_snippets(
        gmx_bool  bAppendFiles,
        gmx_bool  bKeepAndNumCPT,
        gmx_bool  bResetHWay,
        char     *cmd_args_bench[],  /* command line arguments for benchmark runs */
        char     *cmd_args_launch[], /* command line arguments for simulation run */
        char      extra_args[])      /* Add this to the end of the command line */
    char strbuf[STRLEN];

    /* strlen needs at least '\0' as a string: */
    snew(*cmd_args_bench,  1);
    snew(*cmd_args_launch, 1);
    *cmd_args_launch[0] = '\0';
    *cmd_args_bench[0]  = '\0';

    /*******************************************/
    /* 1. Process other command line arguments */
    /*******************************************/
        /* Add equilibration steps to benchmark options */
        sprintf(strbuf, "-resetstep %d ", presteps);
        add_to_string(cmd_args_bench, strbuf);

    /* These switches take effect only at launch time */
    if (FALSE == bAppendFiles)
        add_to_string(cmd_args_launch, "-noappend ");
        add_to_string(cmd_args_launch, "-cpnum ");
        add_to_string(cmd_args_launch, "-resethway ");

    /********************/
    /* 2. Process files */
    /********************/
    for (i = 0; i < nfile; i++)
        opt  = (char *)fnm[i].opt;
        name = opt2fn(opt, nfile, fnm);

        /* Strbuf contains the options, now let's sort out where we need that */
        sprintf(strbuf, "%s %s ", opt, name);

        if (is_bench_file(opt, opt2bSet(opt, nfile, fnm), is_optional(&fnm[i]), is_output(&fnm[i])) )
            /* All options starting with -b* need the 'b' removed,
             * therefore overwrite strbuf */
            if (0 == strncmp(opt, "-b", 2))
                sprintf(strbuf, "-%s %s ", &opt[2], name);

            add_to_string(cmd_args_bench, strbuf);

        if (is_launch_file(opt, opt2bSet(opt, nfile, fnm)) )
            add_to_string(cmd_args_launch, strbuf);

    add_to_string(cmd_args_bench,  extra_args);
    add_to_string(cmd_args_launch, extra_args);
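/* Illustration (not part of the original source): a benchmark-only option
 * such as "-bg bench.log" (hypothetical filename) is passed to the benchmark
 * mdrun as "-g bench.log", i.e. with the leading 'b' stripped, while options
 * needed at launch time are collected unchanged in cmd_args_launch. */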
/* Set option opt */
static void setopt(const char *opt, int nfile, t_filenm fnm[])
    for (i = 0; (i < nfile); i++)
        if (strcmp(opt, fnm[i].opt) == 0)
            fnm[i].flag |= ffSET;
/* This routine inspects the tpr file and ...
 * 1. checks for output files that get triggered by a tpr option. These output
 *    files are marked as 'set' to allow for a proper cleanup after each
 *    benchmark run
 * 2. returns the PME:PP load ratio
 * 3. returns rcoulomb from the tpr */
static float inspect_tpr(int nfile, t_filenm fnm[], real *rcoulomb)
    gmx_bool bTpi;  /* Is test particle insertion requested? */
    gmx_bool bFree; /* Is a free energy simulation requested? */
    gmx_bool bNM;   /* Is a normal mode analysis requested? */
    gmx_bool bSwap; /* Is water/ion position swapping requested? */

    /* Check tpr file for options that trigger extra output files */
    read_tpx_state(opt2fn("-s", nfile, fnm), &ir, &state, NULL, &mtop);
    bFree = (efepNO  != ir.efep );
    bNM   = (eiNM    == ir.eI   );
    bSwap = (eswapNO != ir.eSwapCoords);
    bTpi  = EI_TPI(ir.eI);

    /* Set these output files on the tuning command-line */
        setopt("-pf", nfile, fnm);
        setopt("-px", nfile, fnm);
        setopt("-dhdl", nfile, fnm);
        setopt("-tpi", nfile, fnm);
        setopt("-tpid", nfile, fnm);
        setopt("-mtx", nfile, fnm);
        setopt("-swap", nfile, fnm);

    *rcoulomb = ir.rcoulomb;

    /* Return the estimate for the number of PME nodes */
    return pme_load_estimate(&mtop, &ir, state.box);
static void couple_files_options(int nfile, t_filenm fnm[])
    gmx_bool bSet, bBench;

    for (i = 0; i < nfile; i++)
        opt    = (char *)fnm[i].opt;
        bSet   = ((fnm[i].flag & ffSET) != 0);
        bBench = (0 == strncmp(opt, "-b", 2));

        /* Check optional files */
        /* If e.g. -eo is set, then -beo also needs to be set */
        if (is_optional(&fnm[i]) && bSet && !bBench)
            sprintf(buf, "-b%s", &opt[1]);
            setopt(buf, nfile, fnm);
        /* If -beo is set, then -eo also needs to be! */
        if (is_optional(&fnm[i]) && bSet && bBench)
            sprintf(buf, "-%s", &opt[2]);
            setopt(buf, nfile, fnm);
#define BENCHSTEPS (1000)

int gmx_tune_pme(int argc, char *argv[])
2129 const char *desc
[] = {
2130 "For a given number [TT]-np[tt] or [TT]-ntmpi[tt] of ranks, [THISMODULE] systematically",
2131 "times [gmx-mdrun] with various numbers of PME-only ranks and determines",
2132 "which setting is fastest. It will also test whether performance can",
2133 "be enhanced by shifting load from the reciprocal to the real space",
2134 "part of the Ewald sum. ",
2135 "Simply pass your [REF].tpr[ref] file to [THISMODULE] together with other options",
2136 "for [gmx-mdrun] as needed.[PAR]",
2137 "Which executables are used can be set in the environment variables",
2138 "MPIRUN and MDRUN. If these are not present, 'mpirun' and 'mdrun'",
2139 "will be used as defaults. Note that for certain MPI frameworks you",
2140 "need to provide a machine- or hostfile. This can also be passed",
2141 "via the MPIRUN variable, e.g.[PAR]",
2142 "[TT]export MPIRUN=\"/usr/local/mpirun -machinefile hosts\"[tt][PAR]",
2143 "Before doing the actual benchmark runs, [THISMODULE] will do a quick",
2144 "check whether mdrun works as expected with the provided parallel settings",
2145 "if the [TT]-check[tt] option is activated (the default).",
2146 "Please call [THISMODULE] with the normal options you would pass to",
2147 "[gmx-mdrun] and add [TT]-np[tt] for the number of ranks to perform the",
2148 "tests on, or [TT]-ntmpi[tt] for the number of threads. You can also add [TT]-r[tt]",
2149 "to repeat each test several times to get better statistics. [PAR]",
2150 "[THISMODULE] can test various real space / reciprocal space workloads",
2151 "for you. With [TT]-ntpr[tt] you control how many extra [REF].tpr[ref] files will be",
2152 "written with enlarged cutoffs and smaller Fourier grids respectively.",
2153 "Typically, the first test (number 0) will be with the settings from the input",
2154 "[REF].tpr[ref] file; the last test (number [TT]ntpr[tt]) will have the Coulomb cutoff",
2155 "specified by [TT]-rmax[tt] with a somewhat smaller PME grid at the same time. ",
2156 "In this last test, the Fourier spacing is multiplied with [TT]rmax[tt]/rcoulomb. ",
2157 "The remaining [REF].tpr[ref] files will have equally-spaced Coulomb radii (and Fourier "
2158 "spacings) between these extremes. [BB]Note[bb] that you can set [TT]-ntpr[tt] to 1",
2159 "if you just seek the optimal number of PME-only ranks; in that case",
2160 "your input [REF].tpr[ref] file will remain unchanged.[PAR]",
2161 "For the benchmark runs, the default of 1000 time steps should suffice for most",
2162 "MD systems. The dynamic load balancing needs about 100 time steps",
2163 "to adapt to local load imbalances, therefore the time step counters",
2164 "are by default reset after 100 steps. For large systems (>1M atoms), as well as ",
2165 "for a higher accuracy of the measurements, you should set [TT]-resetstep[tt] to a higher value.",
2166 "From the 'DD' load imbalance entries in the md.log output file you",
2167 "can tell after how many steps the load is sufficiently balanced. Example call:[PAR]"
2168 "[TT]gmx tune_pme -np 64 -s protein.tpr -launch[tt][PAR]",
2169 "After calling [gmx-mdrun] several times, detailed performance information",
2170 "is available in the output file [TT]perf.out[tt].",
2171 "[BB]Note[bb] that during the benchmarks, a couple of temporary files are written",
2172 "(options [TT]-b*[tt]), these will be automatically deleted after each test.[PAR]",
2173 "If you want the simulation to be started automatically with the",
2174 "optimized parameters, use the command line option [TT]-launch[tt].[PAR]",
2175 "Basic support for GPU-enabled [TT]mdrun[tt] exists. Give a string containing the IDs",
2176 "of the GPUs that you wish to use in the optimization in the [TT]-gpu_id[tt]",
2177 "command-line argument. Unlike [TT]mdrun -gpu_id[tt], this does not imply a mapping",
2178 "but merely the eligible set. [TT]g_tune_pme[tt] will construct calls to",
2179 "mdrun that use this set appropriately, assuming that PP ranks with low indices",
2180 "should map to GPUs with low indices, and increasing both monotonically",
2181 "over the respective sets.[PAR]",
    int             nnodes         = 1;
    int             repeats        = 2;
    int             pmeentries     = 0;  /* How many values for -npme do we actually test for each tpr file */
    real            maxPMEfraction = 0.50;
    real            minPMEfraction = 0.25;
    int             maxPMEnodes, minPMEnodes;
    float           guessPMEratio;       /* guessed PME:PP ratio based on the tpr file */
    float           guessPMEnodes;
    int             npme_fixed     = -2; /* If >= -1, use only this number
                                          * of PME-only nodes */
    int             ntprs          = 0;
    real            rmin           = 0.0, rmax = 0.0; /* min and max value for rcoulomb if scaling is requested */
    real            rcoulomb       = -1.0;            /* Coulomb radius as set in .tpr file */
    gmx_bool        bScaleRvdw     = TRUE;
    gmx_int64_t     bench_nsteps   = BENCHSTEPS;
    gmx_int64_t     new_sim_nsteps = -1;  /* -1 indicates: not set by the user */
    gmx_int64_t     cpt_steps      = 0;   /* Step counter in .cpt input file   */
    int             presteps       = 100; /* Do a full cycle reset after presteps steps */
    gmx_bool        bOverwrite     = FALSE, bKeepTPR;
    gmx_bool        bLaunch        = FALSE;
    char           *ExtraArgs      = NULL;
    char          **tpr_names      = NULL;
    const char     *simulation_tpr = NULL;
    int             best_npme, best_tpr;
    int             sim_part = 1; /* For benchmarks with checkpoint files */

    /* Default program names if nothing else is found */
    char           *cmd_mpirun = NULL, *cmd_mdrun = NULL;
    char           *cmd_args_bench, *cmd_args_launch;
    char           *cmd_np = NULL;
    char            bbuf[STRLEN];

    /* IDs of GPUs that are eligible for computation */
    char               *eligible_gpu_ids = NULL;
    t_eligible_gpu_ids *gpu_ids          = NULL;

    t_perf        **perfdata = NULL;
    t_inputinfo    *info;
    int             i;
    FILE           *fp = NULL;

    /* Print out how long the tuning took */
    double          seconds;
    static t_filenm fnm[] = {
        { efOUT,        "-p",       "perf",      ffWRITE },
        { efLOG,        "-err",     "bencherr",  ffWRITE },
        { efTPR,        "-so",      "tuned",     ffWRITE },
        { efTPR,        NULL,       NULL,        ffREAD  },
        { efTRN,        "-o",       NULL,        ffWRITE },
        { efCOMPRESSED, "-x",       NULL,        ffOPTWR },
        { efCPT,        "-cpi",     NULL,        ffOPTRD },
        { efCPT,        "-cpo",     NULL,        ffOPTWR },
        { efSTO,        "-c",       "confout",   ffWRITE },
        { efEDR,        "-e",       "ener",      ffWRITE },
        { efLOG,        "-g",       "md",        ffWRITE },
        { efXVG,        "-dhdl",    "dhdl",      ffOPTWR },
        { efXVG,        "-field",   "field",     ffOPTWR },
        { efXVG,        "-table",   "table",     ffOPTRD },
        { efXVG,        "-tabletf", "tabletf",   ffOPTRD },
        { efXVG,        "-tablep",  "tablep",    ffOPTRD },
        { efXVG,        "-tableb",  "table",     ffOPTRD },
        { efTRX,        "-rerun",   "rerun",     ffOPTRD },
        { efXVG,        "-tpi",     "tpi",       ffOPTWR },
        { efXVG,        "-tpid",    "tpidist",   ffOPTWR },
        { efEDI,        "-ei",      "sam",       ffOPTRD },
        { efXVG,        "-eo",      "edsam",     ffOPTWR },
        { efXVG,        "-devout",  "deviatie",  ffOPTWR },
        { efXVG,        "-runav",   "runaver",   ffOPTWR },
        { efXVG,        "-px",      "pullx",     ffOPTWR },
        { efXVG,        "-pf",      "pullf",     ffOPTWR },
        { efXVG,        "-ro",      "rotation",  ffOPTWR },
        { efLOG,        "-ra",      "rotangles", ffOPTWR },
        { efLOG,        "-rs",      "rotslabs",  ffOPTWR },
        { efLOG,        "-rt",      "rottorque", ffOPTWR },
        { efMTX,        "-mtx",     "nm",        ffOPTWR },
        { efNDX,        "-dn",      "dipole",    ffOPTWR },
        { efXVG,        "-swap",    "swapions",  ffOPTWR },
        /* Output files that are deleted after each benchmark run */
        { efTRN,        "-bo",      "bench",     ffWRITE },
        { efXTC,        "-bx",      "bench",     ffWRITE },
        { efCPT,        "-bcpo",    "bench",     ffWRITE },
        { efSTO,        "-bc",      "bench",     ffWRITE },
        { efEDR,        "-be",      "bench",     ffWRITE },
        { efLOG,        "-bg",      "bench",     ffWRITE },
        { efXVG,        "-beo",     "benchedo",  ffOPTWR },
        { efXVG,        "-bdhdl",   "benchdhdl", ffOPTWR },
        { efXVG,        "-bfield",  "benchfld",  ffOPTWR },
        { efXVG,        "-btpi",    "benchtpi",  ffOPTWR },
        { efXVG,        "-btpid",   "benchtpid", ffOPTWR },
        { efXVG,        "-bdevout", "benchdev",  ffOPTWR },
        { efXVG,        "-brunav",  "benchrnav", ffOPTWR },
        { efXVG,        "-bpx",     "benchpx",   ffOPTWR },
        { efXVG,        "-bpf",     "benchpf",   ffOPTWR },
        { efXVG,        "-bro",     "benchrot",  ffOPTWR },
        { efLOG,        "-bra",     "benchrota", ffOPTWR },
        { efLOG,        "-brs",     "benchrots", ffOPTWR },
        { efLOG,        "-brt",     "benchrott", ffOPTWR },
        { efMTX,        "-bmtx",    "benchn",    ffOPTWR },
        { efNDX,        "-bdn",     "bench",     ffOPTWR },
        { efXVG,        "-bswap",   "benchswp",  ffOPTWR }
    };
    gmx_bool        bThreads = FALSE;

    int             nthreads = 1;

    const char     *procstring[] =
    { NULL, "-np", "-n", "none", NULL };
    const char     *npmevalues_opt[] =
    { NULL, "auto", "all", "subset", NULL };

    gmx_bool        bAppendFiles          = TRUE;
    gmx_bool        bKeepAndNumCPT        = FALSE;
    gmx_bool        bResetCountersHalfWay = FALSE;
    gmx_bool        bBenchmark            = TRUE;
    gmx_bool        bCheck                = TRUE;

    output_env_t    oenv = NULL;
    t_pargs         pa[] = {
        /***********************/
        /* g_tune_pme options: */
        /***********************/
        { "-np",        FALSE, etINT,   {&nnodes},
          "Number of ranks to run the tests on (must be > 2 for separate PME ranks)" },
        { "-npstring",  FALSE, etENUM,  {procstring},
          "Specify the number of ranks to [TT]$MPIRUN[tt] using this string"},
        { "-ntmpi",     FALSE, etINT,   {&nthreads},
          "Number of MPI-threads to run the tests on (turns MPI & mpirun off)"},
        { "-r",         FALSE, etINT,   {&repeats},
          "Repeat each test this often" },
        { "-max",       FALSE, etREAL,  {&maxPMEfraction},
          "Max fraction of PME ranks to test with" },
        { "-min",       FALSE, etREAL,  {&minPMEfraction},
          "Min fraction of PME ranks to test with" },
        { "-npme",      FALSE, etENUM,  {npmevalues_opt},
          "Within -min and -max, benchmark all possible values for [TT]-npme[tt], or just a reasonable subset. "
          "Auto neglects -min and -max and chooses reasonable values around a guess for npme derived from the .tpr"},
        { "-fix",       FALSE, etINT,   {&npme_fixed},
          "If >= -1, do not vary the number of PME-only ranks; instead use this fixed value and only vary rcoulomb and the PME grid spacing."},
        { "-rmax",      FALSE, etREAL,  {&rmax},
          "If >0, maximal rcoulomb for -ntpr>1 (rcoulomb upscaling results in Fourier grid downscaling)" },
        { "-rmin",      FALSE, etREAL,  {&rmin},
          "If >0, minimal rcoulomb for -ntpr>1" },
        { "-scalevdw",  FALSE, etBOOL,  {&bScaleRvdw},
          "Scale rvdw along with rcoulomb"},
        { "-ntpr",      FALSE, etINT,   {&ntprs},
          "Number of [REF].tpr[ref] files to benchmark. Create this many files with different rcoulomb scaling factors depending on -rmin and -rmax. "
          "If < 1, automatically choose the number of [REF].tpr[ref] files to test" },
        { "-steps",     FALSE, etINT64, {&bench_nsteps},
          "Take timings for this many steps in the benchmark runs" },
        { "-resetstep", FALSE, etINT,   {&presteps},
          "Let dlb equilibrate this many steps before timings are taken (reset cycle counters after this many steps)" },
        { "-nsteps",    FALSE, etINT64, {&new_sim_nsteps},
          "If non-negative, perform this many steps in the real run (overwrites nsteps from the [REF].tpr[ref] file; [REF].cpt[ref] steps are added)" },
        { "-launch",    FALSE, etBOOL,  {&bLaunch},
          "Launch the real simulation after optimization" },
        { "-bench",     FALSE, etBOOL,  {&bBenchmark},
          "Run the benchmarks or just create the input [REF].tpr[ref] files?" },
        { "-check",     FALSE, etBOOL,  {&bCheck},
          "Before the benchmark runs, check whether mdrun works in parallel" },
        { "-gpu_id",    FALSE, etSTR,   {&eligible_gpu_ids},
          "List of GPU device IDs that are eligible for use (unlike mdrun, does not imply any mapping)" },
        /******************/
        /* mdrun options: */
        /******************/
        /* We let g_tune_pme parse and understand these options, because we need to
         * prevent them from appearing on the mdrun command line for the benchmarks */
        { "-append",    FALSE, etBOOL,  {&bAppendFiles},
          "Append to previous output files when continuing from checkpoint instead of adding the simulation part number to all file names (for launch only)" },
        { "-cpnum",     FALSE, etBOOL,  {&bKeepAndNumCPT},
          "Keep and number checkpoint files (launch only)" },
        { "-resethway", FALSE, etBOOL,  {&bResetCountersHalfWay},
          "HIDDENReset the cycle counters after half the number of steps or halfway [TT]-maxh[tt] (launch only)" }
    };
#define NFILE asize(fnm)

    seconds = gmx_gettime();

    if (!parse_common_args(&argc, argv, PCA_NOEXIT_ON_ARGS,
                           NFILE, fnm, asize(pa), pa, asize(desc), desc,
                           0, NULL, &oenv))
    {
        return 0;
    }
    /* Store the remaining unparsed command line entries in a string which
     * is then attached to the mdrun command line */
    snew(ExtraArgs, 1);
    ExtraArgs[0] = '\0';
    for (i = 1; i < argc; i++) /* argc will now be 1 if everything was understood */
    {
        add_to_string(&ExtraArgs, argv[i]);
        add_to_string(&ExtraArgs, " ");
    }
2386 if (opt2parg_bSet("-ntmpi", asize(pa
), pa
))
2389 if (opt2parg_bSet("-npstring", asize(pa
), pa
))
2391 fprintf(stderr
, "WARNING: -npstring has no effect when using threads.\n");
2396 gmx_fatal(FARGS
, "Can't run multi-threaded MPI simulation yet!");
2398 /* and now we just set this; a bit of an ugly hack*/
    /* Check for PME:PP ratio and whether tpr triggers additional output files */
    guessPMEratio = inspect_tpr(NFILE, fnm, &rcoulomb);

    /* Automatically set -beo options if -eo is set etc. */
    couple_files_options(NFILE, fnm);
    /* Construct the command line arguments for benchmark runs
     * as well as for the simulation run */
    if (bThreads)
    {
        sprintf(bbuf, " -ntmpi %d ", nthreads);
    }
    else
    {
        /* This string will be used for MPI runs and will appear after the
         * mpirun command. */
        if (strcmp(procstring[0], "none") != 0)
        {
            sprintf(bbuf, " %s %d ", procstring[0], nnodes);
        }
        else
        {
            sprintf(bbuf, " ");
        }
    }

    cmd_np = bbuf;

    create_command_line_snippets(bAppendFiles, bKeepAndNumCPT, bResetCountersHalfWay, presteps,
                                 NFILE, fnm, &cmd_args_bench, &cmd_args_launch, ExtraArgs);
    /* Prepare to use checkpoint file if requested */
    if (opt2bSet("-cpi", NFILE, fnm))
    {
        const char *filename = opt2fn("-cpi", NFILE, fnm);
        int         cpt_sim_part;
        read_checkpoint_part_and_step(filename,
                                      &cpt_sim_part, &cpt_steps);
        if (cpt_sim_part == 0)
        {
            gmx_fatal(FARGS, "Checkpoint file %s could not be read!", filename);
        }
        /* tune_pme will run the next part of the simulation */
        sim_part = cpt_sim_part + 1;
    }
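    /* For instance (illustrative), a -cpi checkpoint written by simulation
     * part 1 yields sim_part = 2 here, and cpt_steps holds the step count at
     * which that checkpoint was written; it is added to the step numbers
     * reported and written further below. */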
    /* Open performance output file and write header info */
    fp = gmx_ffopen(opt2fn("-p", NFILE, fnm), "w");
    /* Make a quick consistency check of command line parameters */
    check_input(nnodes, repeats, &ntprs, &rmin, rcoulomb, &rmax,
                maxPMEfraction, minPMEfraction, npme_fixed,
                bench_nsteps, fnm, NFILE, sim_part, presteps,
                asize(pa), pa);

    /* Check any GPU IDs passed make sense, and fill the data structure for them */
    snew(gpu_ids, 1);
    parse_digits_from_plain_string(eligible_gpu_ids, &gpu_ids->n, &gpu_ids->ids);
    /* Determine the maximum and minimum number of PME nodes to test,
     * the actual list of settings is built in do_the_tests(). */
    if ((nnodes > 2) && (npme_fixed < -1))
    {
        if (0 == strcmp(npmevalues_opt[0], "auto"))
        {
            /* Determine the npme range automatically based on the PME:PP load guess */
            if (guessPMEratio > 1.0)
            {
                /* More PME than PP work, probably we do not need separate PME nodes at all! */
                maxPMEnodes = nnodes/2;
                minPMEnodes = nnodes/2;
            }
            else
            {
                /* PME : PP load is in the range 0..1, let's test around the guess */
                guessPMEnodes = nnodes/(1.0 + 1.0/guessPMEratio);
                minPMEnodes   = floor(0.7*guessPMEnodes);
                maxPMEnodes   =  ceil(1.6*guessPMEnodes);
                maxPMEnodes   = min(maxPMEnodes, nnodes/2);
            }
        }
        else
        {
            /* Determine the npme range based on user input */
            maxPMEnodes = floor(maxPMEfraction*nnodes);
            minPMEnodes = max(floor(minPMEfraction*nnodes), 0);
            fprintf(stdout, "Will try runs with %d ", minPMEnodes);
            if (maxPMEnodes != minPMEnodes)
            {
                fprintf(stdout, "- %d ", maxPMEnodes);
            }
            fprintf(stdout, "PME-only ranks.\n  Note that the automatic number of PME-only ranks and no separate PME ranks are always tested.\n");
        }
    }
    else
    {
        maxPMEnodes = 0;
        minPMEnodes = 0;
    }
    /* Get the commands we need to set up the runs from environment variables */
    get_program_paths(bThreads, &cmd_mpirun, &cmd_mdrun);
    if (bBenchmark && repeats > 0)
    {
        check_mdrun_works(bThreads, cmd_mpirun, cmd_np, cmd_mdrun, NULL != eligible_gpu_ids);
    }
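    /* The benchmark calls assembled later are then roughly of the form
     * "$MPIRUN -np <nranks> $MDRUN -npme <n> -s <bench.tpr> ..." (or
     * "$MDRUN -ntmpi <nthreads> ..." for thread-MPI); this is only an
     * illustration, the exact strings are built in do_the_tests(). */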
    /* Print some header info to file */
    fprintf(fp, "\n      P E R F O R M A N C E   R E S U L T S\n");
    fprintf(fp, "%s for GROMACS %s\n", output_env_get_program_display_name(oenv),
            gmx_version());
    if (!bThreads)
    {
        fprintf(fp, "Number of ranks         : %d\n", nnodes);
        fprintf(fp, "The mpirun command is   : %s\n", cmd_mpirun);
        if (strcmp(procstring[0], "none") != 0)
        {
            fprintf(fp, "Passing # of ranks via  : %s\n", procstring[0]);
        }
        else
        {
            fprintf(fp, "Not setting number of ranks in system call\n");
        }
    }
    else
    {
        fprintf(fp, "Number of threads       : %d\n", nnodes);
    }

    fprintf(fp, "The mdrun command is    : %s\n", cmd_mdrun);
    fprintf(fp, "mdrun args benchmarks   : %s\n", cmd_args_bench);
    fprintf(fp, "Benchmark steps         : ");
    fprintf(fp, "%"GMX_PRId64, bench_nsteps);
    fprintf(fp, "\n");
    fprintf(fp, "dlb equilibration steps : %d\n", presteps);
    if (sim_part > 1)
    {
        fprintf(fp, "Checkpoint time step    : ");
        fprintf(fp, "%"GMX_PRId64, cpt_steps);
        fprintf(fp, "\n");
    }
    fprintf(fp, "mdrun args at launchtime: %s\n", cmd_args_launch);

    if (new_sim_nsteps >= 0)
    {
        bOverwrite = TRUE;
        fprintf(stderr, "Note: Simulation input file %s will have ", opt2fn("-so", NFILE, fnm));
        fprintf(stderr, "%"GMX_PRId64, new_sim_nsteps+cpt_steps);
        fprintf(stderr, " steps.\n");
        fprintf(fp, "Simulation steps        : ");
        fprintf(fp, "%"GMX_PRId64, new_sim_nsteps);
        fprintf(fp, "\n");
    }
    if (repeats > 1)
    {
        fprintf(fp, "Repeats for each test   : %d\n", repeats);
    }

    if (npme_fixed >= -1)
    {
        fprintf(fp, "Fixing -npme at         : %d\n", npme_fixed);
    }

    fprintf(fp, "Input file              : %s\n", opt2fn("-s", NFILE, fnm));
    fprintf(fp, "   PME/PP load estimate : %g\n", guessPMEratio);
    /* Allocate memory for the inputinfo struct: */
    snew(info, 1);
    info->nr_inputfiles = ntprs;
    /* Each of these arrays holds one entry per benchmark .tpr file */
    snew(info->rcoulomb, ntprs);
    snew(info->rvdw, ntprs);
    snew(info->rlist, ntprs);
    snew(info->rlistlong, ntprs);
    snew(info->nkx, ntprs);
    snew(info->nky, ntprs);
    snew(info->nkz, ntprs);
    snew(info->fsx, ntprs);
    snew(info->fsy, ntprs);
    snew(info->fsz, ntprs);
    /* Make alternative tpr files to test: */
    snew(tpr_names, ntprs);
    for (i = 0; i < ntprs; i++)
    {
        snew(tpr_names[i], STRLEN);
    }

    /* It can be that ntprs is reduced by make_benchmark_tprs if not enough
     * different grids could be found. */
    make_benchmark_tprs(opt2fn("-s", NFILE, fnm), tpr_names, bench_nsteps+presteps,
                        cpt_steps, rmin, rmax, bScaleRvdw, &ntprs, info, fp);
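    /* Each benchmark .tpr produced above runs bench_nsteps+presteps steps
     * (1000+100 with the defaults), so that timings are taken only after the
     * dlb equilibration phase. As an assumed example, test 0 keeps the input
     * rcoulomb while the last test uses -rmax with a correspondingly coarser
     * PME grid, as described in the help text. */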
    /********************************************************************************/
    /* Main loop over all scenarios we need to test: tpr files, PME nodes, repeats  */
    /********************************************************************************/
    snew(perfdata, ntprs);
    if (bBenchmark)
    {
        do_the_tests(fp, tpr_names, maxPMEnodes, minPMEnodes, npme_fixed, npmevalues_opt[0], perfdata, &pmeentries,
                     repeats, nnodes, ntprs, bThreads, cmd_mpirun, cmd_np, cmd_mdrun,
                     cmd_args_bench, fnm, NFILE, presteps, cpt_steps, bCheck, gpu_ids);

        fprintf(fp, "\nTuning took%8.1f minutes.\n", (gmx_gettime()-seconds)/60.0);

        /* Analyse the results and give a suggestion for optimal settings: */
        bKeepTPR = analyze_data(fp, opt2fn("-p", NFILE, fnm), perfdata, nnodes, ntprs, pmeentries,
                                repeats, info, &best_tpr, &best_npme);

        /* Take the best-performing tpr file and enlarge nsteps to original value */
        if (bKeepTPR && !bOverwrite)
        {
            simulation_tpr = opt2fn("-s", NFILE, fnm);
        }
        else
        {
            simulation_tpr = opt2fn("-so", NFILE, fnm);
            modify_PMEsettings(bOverwrite ? (new_sim_nsteps+cpt_steps) : info->orig_sim_steps,
                               info->orig_init_step, tpr_names[best_tpr], simulation_tpr);
        }

        /* Let's get rid of the temporary benchmark input files */
        for (i = 0; i < ntprs; i++)
        {
            fprintf(stdout, "Deleting temporary benchmark input file %s\n", tpr_names[i]);
            remove(tpr_names[i]);
        }

        /* Now start the real simulation if the user requested it ... */
        launch_simulation(bLaunch, fp, bThreads, cmd_mpirun, cmd_np, cmd_mdrun,
                          cmd_args_launch, simulation_tpr, nnodes, best_npme, gpu_ids);
    }
    gmx_ffclose(fp);

    /* ... or simply print the performance results to screen: */
    if (!bLaunch)
    {
        finalize(opt2fn("-p", NFILE, fnm));
    }