Fix domain decomposition setup for >46k cores
[gromacs.git] / src / mdlib / domdec_setup.c
blob b6d001cde82992ad17d57c547483fc5e37ade8be
/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 * This file is part of Gromacs        Copyright (c) 1991-2008
 * David van der Spoel, Erik Lindahl, Berk Hess, University of Groningen.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org
 *
 * And Hey:
 * Gnomes, ROck Monsters And Chili Sauce
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include "domdec.h"
#include "network.h"
#include "perf_est.h"
#include "physics.h"
#include "smalloc.h"
#include "typedefs.h"
#include "vec.h"
#include "names.h"

/* Margin for setting up the DD grid */
#define DD_GRID_MARGIN_PRES_SCALE 1.05
static int factorize(int n,int **fac,int **mfac)
{
    int d,ndiv;

    if (n <= 0)
    {
        gmx_fatal(FARGS, "Can only factorize positive integers.");
    }

    /* Decompose n in factors */
    snew(*fac,n/2);
    snew(*mfac,n/2);
    d    = 2;
    ndiv = 0;
    while (n > 1)
    {
        while (n % d == 0)
        {
            if (ndiv == 0 || (*fac)[ndiv-1] != d)
            {
                ndiv++;
                (*fac)[ndiv-1] = d;
            }
            (*mfac)[ndiv-1]++;
            n /= d;
        }
        d++;
    }

    return ndiv;
}
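/* For illustration: factorize(60,&fac,&mfac) returns ndiv=3 with
 * fac = {2,3,5} and mfac = {2,1,1}, since 60 = 2^2 * 3 * 5.
 * Both arrays are allocated with snew, so the caller must sfree them.
 */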
static int largest_divisor(int n)
{
    int ndiv,*div,*mdiv,ldiv;

    ndiv = factorize(n,&div,&mdiv);
    ldiv = div[ndiv-1];
    sfree(div);
    sfree(mdiv);

    return ldiv;
}
static int lcd(int n1,int n2)
{
    int d,i;

    d = 1;
    for(i=2; (i<=n1 && i<=n2); i++)
    {
        if (n1 % i == 0 && n2 % i == 0)
        {
            d = i;
        }
    }

    return d;
}
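/* lcd returns the largest common divisor, found by trial division up to
 * min(n1,n2): e.g. lcd(12,18) = 6 and lcd(7,13) = 1.
 */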
static gmx_bool fits_pme_ratio(int nnodes,int npme,float ratio)
{
    return ((double)npme/(double)nnodes > 0.95*ratio);
}
static gmx_bool fits_pp_pme_perf(FILE *fplog,
                                 t_inputrec *ir,matrix box,gmx_mtop_t *mtop,
                                 int nnodes,int npme,float ratio)
{
    int ndiv,*div,*mdiv,ldiv;
    int npp_root3,npme_root2;

    ndiv = factorize(nnodes-npme,&div,&mdiv);
    ldiv = div[ndiv-1];
    sfree(div);
    sfree(mdiv);

    npp_root3  = (int)(pow(nnodes-npme,1.0/3.0) + 0.5);
    npme_root2 = (int)(sqrt((double)npme) + 0.5);

    /* The check below gives a reasonable division:
     * factor 5 allowed at 5 or more PP nodes,
     * factor 7 allowed at 49 or more PP nodes.
     */
    if (ldiv > 3 + npp_root3)
    {
        return FALSE;
    }

    /* Check whether the PP and PME node counts have a reasonably sized
     * common divisor, such that we can use 2D PME decomposition
     * when required (which requires nx_pp == nx_pme).
     * The factor of 2 allows for a maximum ratio of 2^2=4
     * between nx_pme and ny_pme.
     */
    if (lcd(nnodes-npme,npme)*2 < npme_root2)
    {
        return FALSE;
    }

    /* Does this division give a reasonable PME load? */
    return fits_pme_ratio(nnodes,npme,ratio);
}
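/* Numerical check of the factor limit above: for 5 PP nodes,
 * (int)(pow(5,1.0/3.0)+0.5) = 2, so divisors up to 3+2 = 5 pass;
 * for 49 PP nodes, (int)(pow(49,1.0/3.0)+0.5) = 4, so a factor of 7
 * passes (7 is not > 3+4), matching the comment in the code.
 */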
static int guess_npme(FILE *fplog,gmx_mtop_t *mtop,t_inputrec *ir,matrix box,
                      int nnodes)
{
    float ratio;
    int   npme;

    ratio = pme_load_estimate(mtop,ir,box);

    if (fplog)
    {
        fprintf(fplog,"Guess for relative PME load: %.2f\n",ratio);
    }

    /* We assume the optimal node ratio is close to the load ratio.
     * The communication load is neglected,
     * but (hopefully) this will balance out between PP and PME.
     */

    if (!fits_pme_ratio(nnodes,nnodes/2,ratio))
    {
        /* We would need more than nnodes/2 PME only nodes,
         * which is not possible. Since the PME load is very high,
         * we will not lose much performance when all nodes do PME.
         */
        return 0;
    }

    /* First try to find npme as a factor of nnodes up to nnodes/3.
     * We start with a minimum PME node fraction of 1/16
     * and avoid ratios which lead to large prime factors in nnodes-npme.
     */
    npme = (nnodes + 15)/16;
    while (npme <= nnodes/3)
    {
        if (nnodes % npme == 0)
        {
            /* Note that fits_pp_pme_perf might change the PME grid,
             * in the current implementation it does not.
             */
            if (fits_pp_pme_perf(fplog,ir,box,mtop,nnodes,npme,ratio))
            {
                break;
            }
        }
        npme++;
    }
    if (npme > nnodes/3)
    {
        /* Try any possible number for npme */
        npme = 1;
        while (npme <= nnodes/2)
        {
            /* Note that fits_pp_pme_perf may change the PME grid */
            if (fits_pp_pme_perf(fplog,ir,box,mtop,nnodes,npme,ratio))
            {
                break;
            }
            npme++;
        }
    }
    if (npme > nnodes/2)
    {
        gmx_fatal(FARGS,"Could not find an appropriate number of separate PME nodes, i.e. >= %.2f*#nodes (%d) and <= #nodes/2 (%d), that gives reasonable performance (grid_x=%d, grid_y=%d).\n"
                  "Use the -npme option of mdrun or change the number of processors or the PME grid dimensions, see the manual for details.",
                  ratio,(int)(0.95*ratio*nnodes+0.5),nnodes/2,ir->nkx,ir->nky);
        /* Keep the compiler happy */
        npme = 0;
    }
    else
    {
        if (fplog)
        {
            fprintf(fplog,
                    "Will use %d particle-particle and %d PME only nodes\n"
                    "This is a guess, check the performance at the end of the log file\n",
                    nnodes-npme,npme);
        }
        fprintf(stderr,"\n"
                "Will use %d particle-particle and %d PME only nodes\n"
                "This is a guess, check the performance at the end of the log file\n",
                nnodes-npme,npme);
    }

    return npme;
}
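/* A worked example, assuming pme_load_estimate returned 0.25 (an
 * illustrative value) for a run on 64 nodes: fits_pme_ratio requires
 * npme/64 > 0.95*0.25, i.e. npme > 15.2. The search starts at
 * npme = (64+15)/16 = 4 and only tests factors of 64, so 4 and 8 are
 * rejected on load and npme = 16 is the first accepted value,
 * giving 48 PP + 16 PME nodes.
 */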
static int div_up(int n,int f)
{
    return (n + f - 1)/f;
}
real comm_box_frac(ivec dd_nc,real cutoff,gmx_ddbox_t *ddbox)
{
    int  i,j,k,npp;
    rvec bt,nw;
    real comm_vol;

    for(i=0; i<DIM; i++)
    {
        bt[i] = ddbox->box_size[i]*ddbox->skew_fac[i];
        nw[i] = dd_nc[i]*cutoff/bt[i];
    }

    npp      = 1;
    comm_vol = 0;
    for(i=0; i<DIM; i++)
    {
        if (dd_nc[i] > 1)
        {
            npp *= dd_nc[i];
            comm_vol += nw[i];
            for(j=i+1; j<DIM; j++)
            {
                if (dd_nc[j] > 1)
                {
                    comm_vol += nw[i]*nw[j]*M_PI/4;
                    for(k=j+1; k<DIM; k++)
                    {
                        if (dd_nc[k] > 1)
                        {
                            comm_vol += nw[i]*nw[j]*nw[k]*M_PI/6;
                        }
                    }
                }
            }
        }
    }

    return comm_vol;
}
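/* The terms above can be read geometrically: nw[i] is the cut-off
 * expressed in units of the cell size along dimension i. The linear
 * terms are slabs of one cut-off thickness past each communicated cell
 * face, the nw[i]*nw[j]*M_PI/4 terms are quarter cylinders along the
 * cell edges (a quarter circle has area pi*r^2/4) and the
 * nw[i]*nw[j]*nw[k]*M_PI/6 term is the sphere octant at the cell corner
 * (one eighth of 4/3*pi*r^3 is pi*r^3/6), all as fractions of the
 * home-cell volume.
 */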
static gmx_bool inhomogeneous_z(const t_inputrec *ir)
{
    return ((EEL_PME(ir->coulombtype) || ir->coulombtype==eelEWALD) &&
            ir->ePBC==epbcXYZ && ir->ewald_geometry==eewg3DC);
}
/* Avoid integer overflows */
static float comm_pme_cost_vol(int npme, int a, int b, int c)
{
    float comm_vol;

    comm_vol  = npme - 1;
    comm_vol *= npme;
    comm_vol *= div_up(a, npme);
    comm_vol *= div_up(b, npme);
    comm_vol *= c;

    return comm_vol;
}
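/* The float accumulation matters: with, for example, npme = 2048 and a
 * 2048^3 PME grid, (npme-1)*npme*div_up(a,npme)*div_up(b,npme)*c is
 * about 8.6e9, well beyond the 32-bit int range of ~2.1e9.
 */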
static float comm_cost_est(gmx_domdec_t *dd,real limit,real cutoff,
                           matrix box,gmx_ddbox_t *ddbox,
                           int natoms,t_inputrec *ir,
                           float pbcdxr,
                           int npme_tot,ivec nc)
{
    ivec  npme = {1,1,1};
    int   i,j,k,nk,overlap;
    rvec  bt;
    float comm_vol,comm_vol_xf,comm_pme,cost_pbcdx;
    /* This is the cost of a pbc_dx call relative to the cost
     * of communicating the coordinate and force of an atom.
     * This will be machine dependent.
     * These factors are for x86 with SMP or Infiniband.
     */
    float pbcdx_rect_fac = 0.1;
    float pbcdx_tric_fac = 0.2;
    float temp;

    /* Check the DD algorithm restrictions */
    if ((ir->ePBC == epbcXY && ir->nwall < 2 && nc[ZZ] > 1) ||
        (ir->ePBC == epbcSCREW && (nc[XX] == 1 || nc[YY] > 1 || nc[ZZ] > 1)))
    {
        return -1;
    }

    if (inhomogeneous_z(ir) && nc[ZZ] > 1)
    {
        return -1;
    }

    /* Check if the triclinic requirements are met */
    for(i=0; i<DIM; i++)
    {
        for(j=i+1; j<ddbox->npbcdim; j++)
        {
            if (box[j][i] != 0 || ir->deform[j][i] != 0 ||
                (ir->epc != epcNO && ir->compress[j][i] != 0))
            {
                if (nc[j] > 1 && nc[i] == 1)
                {
                    return -1;
                }
            }
        }
    }

    for(i=0; i<DIM; i++)
    {
        bt[i] = ddbox->box_size[i]*ddbox->skew_fac[i];

        /* Without PBC there are no cell size limits with 2 cells */
        if (!(i >= ddbox->npbcdim && nc[i] <= 2) && bt[i] < nc[i]*limit)
        {
            return -1;
        }
    }

    if (npme_tot > 1)
    {
        /* The following choices should match those
         * in init_domain_decomposition in domdec.c.
         */
        if (nc[XX] == 1 && nc[YY] > 1)
        {
            npme[XX] = 1;
            npme[YY] = npme_tot;
        }
        else if (nc[YY] == 1)
        {
            npme[XX] = npme_tot;
            npme[YY] = 1;
        }
        else
        {
            /* Will we use 1D or 2D PME decomposition? */
            npme[XX] = (npme_tot % nc[XX] == 0) ? nc[XX] : npme_tot;
            npme[YY] = npme_tot/npme[XX];
        }
    }
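    /* For example, with npme_tot = 12 and a PP grid nc = {4,3,2} the
     * else branch above gives the 2D PME decomposition npme = {4,3},
     * since 12 % 4 == 0; with npme_tot = 7 it falls back to the 1D
     * decomposition npme = {7,1}.
     */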
    /* When two dimensions are (nearly) equal, use more cells
     * for the smallest index, so the decomposition does not
     * depend sensitively on the rounding of the box elements.
     */
    for(i=0; i<DIM; i++)
    {
        for(j=i+1; j<DIM; j++)
        {
            /* Check if the box size is nearly identical,
             * in that case we prefer nx > ny and ny > nz.
             */
            if (fabs(bt[j] - bt[i]) < 0.01*bt[i] && nc[j] > nc[i])
            {
                /* The XX/YY check is a bit compact. If nc[YY]==npme[YY]
                 * this means the swapped nc has nc[XX]==npme[XX],
                 * and we can also swap X and Y for PME.
                 */
                /* Check if dimensions i and j are equivalent for PME.
                 * For x/y: if nc[YY]!=npme[YY], we can not swap x/y.
                 * For y/z: we can not have PME decomposition in z.
                 */
                if (npme_tot <= 1 ||
                    !((i == XX && j == YY && nc[YY] != npme[YY]) ||
                      (i == YY && j == ZZ && npme[YY] > 1)))
                {
                    return -1;
                }
            }
        }
    }

    /* This function determines only half of the communication cost.
     * All PP, PME and PP-PME communication is symmetric
     * and the "back"-communication cost is identical to the forward cost.
     */

    comm_vol = comm_box_frac(nc,cutoff,ddbox);

    comm_pme = 0;
    for(i=0; i<2; i++)
    {
        /* Determine the largest volume for PME x/f redistribution */
        if (nc[i] % npme[i] != 0)
        {
            if (nc[i] > npme[i])
            {
                comm_vol_xf = (npme[i]==2 ? 1.0/3.0 : 0.5);
            }
            else
            {
                comm_vol_xf = 1.0 - lcd(nc[i],npme[i])/(double)npme[i];
            }
            comm_pme += 3*natoms*comm_vol_xf;
        }

        /* Grid overlap communication */
        if (npme[i] > 1)
        {
            nk      = (i==0 ? ir->nkx : ir->nky);
            overlap = (nk % npme[i] == 0 ? ir->pme_order-1 : ir->pme_order);
            /* Accumulate in float to avoid integer overflow; this
             * replaces the old line
             * comm_pme += npme[i]*overlap*ir->nkx*ir->nky*ir->nkz/nk;
             */
            temp  = npme[i];
            temp *= overlap;
            temp *= ir->nkx;
            temp *= ir->nky;
            temp *= ir->nkz;
            temp /= nk;
            comm_pme += temp;
        }
    }

    /* PME FFT communication volume.
     * This only takes the communication into account and not imbalance
     * in the calculation. But the imbalance in communication and calculation
     * are similar and therefore these formulas also prefer load balance
     * in the FFT and pme_solve calculation.
     */
    comm_pme += comm_pme_cost_vol(npme[YY], ir->nky, ir->nkz, ir->nkx);
    comm_pme += comm_pme_cost_vol(npme[XX], ir->nkx, ir->nky, ir->nkz);

    /* Add cost of pbc_dx for bondeds */
    cost_pbcdx = 0;
    if ((nc[XX] == 1 || nc[YY] == 1) || (nc[ZZ] == 1 && ir->ePBC != epbcXY))
    {
        if ((ddbox->tric_dir[XX] && nc[XX] == 1) ||
            (ddbox->tric_dir[YY] && nc[YY] == 1))
        {
            cost_pbcdx = pbcdxr*pbcdx_tric_fac;
        }
        else
        {
            cost_pbcdx = pbcdxr*pbcdx_rect_fac;
        }
    }

    if (debug)
    {
        fprintf(debug,
                "nc %2d %2d %2d %2d %2d vol pp %6.4f pbcdx %6.4f pme %9.3e tot %9.3e\n",
                nc[XX],nc[YY],nc[ZZ],npme[XX],npme[YY],
                comm_vol,cost_pbcdx,comm_pme,
                3*natoms*(comm_vol + cost_pbcdx) + comm_pme);
    }

    return 3*natoms*(comm_vol + cost_pbcdx) + comm_pme;
}
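/* The returned value is only a relative cost estimate, meaningful for
 * comparing candidate grids for the same system; a negative value
 * flags a grid that violates one of the restrictions above.
 */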
static void assign_factors(gmx_domdec_t *dd,
                           real limit,real cutoff,
                           matrix box,gmx_ddbox_t *ddbox,
                           int natoms,t_inputrec *ir,
                           float pbcdxr,int npme,
                           int ndiv,int *div,int *mdiv,ivec ir_try,ivec opt)
{
    int   x,y,i;
    float ce;

    if (ndiv == 0)
    {
        ce = comm_cost_est(dd,limit,cutoff,box,ddbox,
                           natoms,ir,pbcdxr,npme,ir_try);
        if (ce >= 0 && (opt[XX] == 0 ||
                        ce < comm_cost_est(dd,limit,cutoff,box,ddbox,
                                           natoms,ir,pbcdxr,
                                           npme,opt)))
        {
            copy_ivec(ir_try,opt);
        }

        return;
    }

    for(x=mdiv[0]; x>=0; x--)
    {
        for(i=0; i<x; i++)
        {
            ir_try[XX] *= div[0];
        }
        for(y=mdiv[0]-x; y>=0; y--)
        {
            for(i=0; i<y; i++)
            {
                ir_try[YY] *= div[0];
            }
            for(i=0; i<mdiv[0]-x-y; i++)
            {
                ir_try[ZZ] *= div[0];
            }

            /* recurse */
            assign_factors(dd,limit,cutoff,box,ddbox,natoms,ir,pbcdxr,npme,
                           ndiv-1,div+1,mdiv+1,ir_try,opt);

            for(i=0; i<mdiv[0]-x-y; i++)
            {
                ir_try[ZZ] /= div[0];
            }
            for(i=0; i<y; i++)
            {
                ir_try[YY] /= div[0];
            }
        }
        for(i=0; i<x; i++)
        {
            ir_try[XX] /= div[0];
        }
    }
}
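/* This recursion enumerates every way of distributing the prime factors
 * over x, y and z. For npp = 12 (div = {2,3}, mdiv = {2,1}) it visits
 * 6*3 = 18 candidate grids, from {12,1,1} down to {1,1,12}, and leaves
 * the cheapest one according to comm_cost_est in opt.
 */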
static real optimize_ncells(FILE *fplog,
                            int nnodes_tot,int npme_only,
                            gmx_bool bDynLoadBal,real dlb_scale,
                            gmx_mtop_t *mtop,matrix box,gmx_ddbox_t *ddbox,
                            t_inputrec *ir,
                            gmx_domdec_t *dd,
                            real cellsize_limit,real cutoff,
                            gmx_bool bInterCGBondeds,gmx_bool bInterCGMultiBody,
                            ivec nc)
{
    int npp,npme,ndiv,*div,*mdiv,d,nmax;
    gmx_bool bExcl_pbcdx;
    float pbcdxr;
    real limit;
    ivec itry;

    limit = cellsize_limit;

    dd->nc[XX] = 1;
    dd->nc[YY] = 1;
    dd->nc[ZZ] = 1;

    npp = nnodes_tot - npme_only;
    if (EEL_PME(ir->coulombtype))
    {
        npme = (npme_only > 0 ? npme_only : npp);
    }
    else
    {
        npme = 0;
    }

    if (bInterCGBondeds)
    {
        /* For Ewald exclusions pbc_dx is not called */
        bExcl_pbcdx =
            (IR_EXCL_FORCES(*ir) && !EEL_FULL(ir->coulombtype));
        pbcdxr = (double)n_bonded_dx(mtop,bExcl_pbcdx)/(double)mtop->natoms;
    }
    else
    {
        /* Every molecule is a single charge group: no pbc required */
        pbcdxr = 0;
    }
    /* Add a margin for DLB and/or pressure scaling */
    if (bDynLoadBal)
    {
        if (dlb_scale >= 1.0)
        {
            gmx_fatal(FARGS,"The value for option -dds should be smaller than 1");
        }
        if (fplog)
        {
            fprintf(fplog,"Scaling the initial minimum size with 1/%g (option -dds) = %g\n",dlb_scale,1/dlb_scale);
        }
        limit /= dlb_scale;
    }
    else if (ir->epc != epcNO)
    {
        if (fplog)
        {
            fprintf(fplog,"To account for pressure scaling, scaling the initial minimum size with %g\n",DD_GRID_MARGIN_PRES_SCALE);
        }
        limit *= DD_GRID_MARGIN_PRES_SCALE;
    }

    if (fplog)
    {
        fprintf(fplog,"Optimizing the DD grid for %d cells with a minimum initial size of %.3f nm\n",npp,limit);

        if (inhomogeneous_z(ir))
        {
            fprintf(fplog,"Ewald_geometry=%s: assuming inhomogeneous particle distribution in z, will not decompose in z.\n",eewg_names[ir->ewald_geometry]);
        }

        if (limit > 0)
        {
            fprintf(fplog,"The maximum allowed number of cells is:");
            for(d=0; d<DIM; d++)
            {
                nmax = (int)(ddbox->box_size[d]*ddbox->skew_fac[d]/limit);
                if (d >= ddbox->npbcdim && nmax < 2)
                {
                    nmax = 2;
                }
                if (d == ZZ && inhomogeneous_z(ir))
                {
                    nmax = 1;
                }
                fprintf(fplog," %c %d",'X' + d,nmax);
            }
            fprintf(fplog,"\n");
        }
    }

    if (debug)
    {
        fprintf(debug,"Average nr of pbc_dx calls per atom %.2f\n",pbcdxr);
    }

    /* Decompose npp in factors */
    ndiv = factorize(npp,&div,&mdiv);

    itry[XX] = 1;
    itry[YY] = 1;
    itry[ZZ] = 1;
    clear_ivec(nc);
    assign_factors(dd,limit,cutoff,box,ddbox,mtop->natoms,ir,pbcdxr,
                   npme,ndiv,div,mdiv,itry,nc);

    sfree(div);
    sfree(mdiv);

    return limit;
}
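/* Note the margins applied above: with dynamic load balancing and
 * mdrun -dds 0.8, a cell-size limit of 1.0 nm becomes 1.0/0.8 = 1.25 nm,
 * while with pressure coupling (and no DLB) it becomes
 * 1.0*DD_GRID_MARGIN_PRES_SCALE = 1.05 nm, so the cells can shrink
 * somewhat during the run without violating the limit.
 */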
real dd_choose_grid(FILE *fplog,
                    t_commrec *cr,gmx_domdec_t *dd,t_inputrec *ir,
                    gmx_mtop_t *mtop,matrix box,gmx_ddbox_t *ddbox,
                    gmx_bool bDynLoadBal,real dlb_scale,
                    real cellsize_limit,real cutoff_dd,
                    gmx_bool bInterCGBondeds,gmx_bool bInterCGMultiBody)
{
    gmx_large_int_t nnodes_div,ldiv;
    real limit;

    if (MASTER(cr))
    {
        nnodes_div = cr->nnodes;
        if (EEL_PME(ir->coulombtype))
        {
            if (cr->npmenodes > 0)
            {
                if (cr->nnodes <= 2)
                {
                    gmx_fatal(FARGS,
                              "Can not have separate PME nodes with 2 or fewer total nodes");
                }
                if (cr->npmenodes >= cr->nnodes)
                {
                    gmx_fatal(FARGS,
                              "Can not have %d separate PME nodes with just %d total nodes",
                              cr->npmenodes, cr->nnodes);
                }

                /* If the user purposely selected the number of PME nodes,
                 * only check for large primes in the PP node count.
                 */
                nnodes_div -= cr->npmenodes;
            }
        }
        else
        {
            cr->npmenodes = 0;
        }
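        /* nnodes_div and ldiv are gmx_large_int_t because the check
         * below computes ldiv^3 and nnodes_div^2: in 32-bit int
         * arithmetic nnodes_div^2 already overflows for more than
         * 46340 nodes (46341^2 > 2^31-1), which is what broke the
         * setup for >46k cores.
         */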
        if (cr->nnodes > 12)
        {
            ldiv = largest_divisor((int)nnodes_div);
            /* Check if the largest divisor is more than nnodes^(2/3) */
            if (ldiv*ldiv*ldiv > nnodes_div*nnodes_div)
            {
                gmx_fatal(FARGS,"The number of nodes you selected (%d) contains a large prime factor %d. In most cases this will lead to bad performance. Choose a number with smaller prime factors or set the decomposition (option -dd) manually.",
                          (int)nnodes_div,(int)ldiv);
            }
        }
        if (EEL_PME(ir->coulombtype))
        {
            if (cr->npmenodes < 0)
            {
                if (cr->nnodes <= 10)
                {
                    cr->npmenodes = 0;
                }
                else
                {
                    cr->npmenodes = guess_npme(fplog,mtop,ir,box,cr->nnodes);
                }
            }
            if (fplog)
            {
                fprintf(fplog,"Using %d separate PME nodes\n",cr->npmenodes);
            }
        }

        limit = optimize_ncells(fplog,cr->nnodes,cr->npmenodes,
                                bDynLoadBal,dlb_scale,
                                mtop,box,ddbox,ir,dd,
                                cellsize_limit,cutoff_dd,
                                bInterCGBondeds,bInterCGMultiBody,
                                dd->nc);
    }
    else
    {
        limit = 0;
    }

    /* Communicate the information set by the master to all nodes */
    gmx_bcast(sizeof(dd->nc),dd->nc,cr);
    if (EEL_PME(ir->coulombtype))
    {
        gmx_bcast(sizeof(ir->nkx),&ir->nkx,cr);
        gmx_bcast(sizeof(ir->nky),&ir->nky,cr);
        gmx_bcast(sizeof(cr->npmenodes),&cr->npmenodes,cr);
    }
    else
    {
        cr->npmenodes = 0;
    }

    return limit;
}