Clean up PME domain count passing
[gromacs.git] / src/gromacs/ewald/pme.cpp
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
37 /*! \internal \file
39 * \brief This file contains function definitions necessary for
40 * computing energies and forces for the PME long-ranged part (Coulomb
41 * and LJ).
43 * \author Erik Lindahl <erik@kth.se>
44 * \author Berk Hess <hess@kth.se>
45 * \ingroup module_ewald
47 /* IMPORTANT FOR DEVELOPERS:
49 * Triclinic pme stuff isn't entirely trivial, and we've experienced
50 * some bugs during development (many of them due to me). To avoid
51 * this in the future, please check the following things if you make
52 * changes in this file:
54 * 1. You should obtain identical (at least to the PME precision)
55 * energies, forces, and virial for
56 * a rectangular box and a triclinic one where the z (or y) axis is
57 * tilted a whole box side. For instance you could use these boxes:
59 * rectangular triclinic
60 * 2 0 0 2 0 0
61 * 0 2 0 0 2 0
62 * 0 0 6 2 2 6
64 * 2. You should check the energy conservation in a triclinic box.
66 * It might seem like overkill, but better safe than sorry.
67 * /Erik 001109
70 #include "gmxpre.h"
72 #include "pme.h"
74 #include "config.h"
76 #include <assert.h>
77 #include <stdio.h>
78 #include <stdlib.h>
79 #include <string.h>
81 #include <cmath>
83 #include <algorithm>
84 #include <list>
86 #include "gromacs/domdec/domdec.h"
87 #include "gromacs/ewald/ewald-utils.h"
88 #include "gromacs/fft/parallel_3dfft.h"
89 #include "gromacs/fileio/pdbio.h"
90 #include "gromacs/gmxlib/network.h"
91 #include "gromacs/gmxlib/nrnb.h"
92 #include "gromacs/math/gmxcomplex.h"
93 #include "gromacs/math/invertmatrix.h"
94 #include "gromacs/math/units.h"
95 #include "gromacs/math/vec.h"
96 #include "gromacs/math/vectypes.h"
97 #include "gromacs/mdtypes/commrec.h"
98 #include "gromacs/mdtypes/forcerec.h"
99 #include "gromacs/mdtypes/inputrec.h"
100 #include "gromacs/mdtypes/md_enums.h"
101 #include "gromacs/pbcutil/pbc.h"
102 #include "gromacs/timing/cyclecounter.h"
103 #include "gromacs/timing/wallcycle.h"
104 #include "gromacs/timing/walltime_accounting.h"
105 #include "gromacs/utility/basedefinitions.h"
106 #include "gromacs/utility/exceptions.h"
107 #include "gromacs/utility/fatalerror.h"
108 #include "gromacs/utility/gmxmpi.h"
109 #include "gromacs/utility/gmxomp.h"
110 #include "gromacs/utility/logger.h"
111 #include "gromacs/utility/real.h"
112 #include "gromacs/utility/smalloc.h"
113 #include "gromacs/utility/stringutil.h"
114 #include "gromacs/utility/unique_cptr.h"
116 #include "calculate-spline-moduli.h"
117 #include "pme-gather.h"
118 #include "pme-gpu-internal.h"
119 #include "pme-grid.h"
120 #include "pme-internal.h"
121 #include "pme-redistribute.h"
122 #include "pme-solve.h"
123 #include "pme-spline-work.h"
124 #include "pme-spread.h"
126 bool pme_gpu_supports_input(const t_inputrec *ir, std::string *error)
128 std::list<std::string> errorReasons;
129 if (!EEL_PME(ir->coulombtype))
131 errorReasons.push_back("systems that do not use PME for electrostatics");
133 if (ir->pme_order != 4)
135 errorReasons.push_back("interpolation orders other than 4");
137 if (ir->efep != efepNO)
139 errorReasons.push_back("free energy calculations (multiple grids)");
141 if (EVDW_PME(ir->vdwtype))
143 errorReasons.push_back("Lennard-Jones PME");
145 #if GMX_DOUBLE
147 errorReasons.push_back("double precision");
149 #endif
150 #if GMX_GPU != GMX_GPU_CUDA
152 errorReasons.push_back("non-CUDA build of GROMACS");
154 #endif
155 if (ir->cutoff_scheme == ecutsGROUP)
157 errorReasons.push_back("group cutoff scheme");
159 if (EI_TPI(ir->eI))
161 errorReasons.push_back("test particle insertion");
164 bool inputSupported = errorReasons.empty();
165 if (!inputSupported && error)
167 std::string regressionTestMarker = "PME GPU does not support";
168 // this prefix is tested for in the regression tests script gmxtest.pl
169 *error = regressionTestMarker + ": " + gmx::joinStrings(errorReasons, "; ") + ".";
171 return inputSupported;
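/* Illustrative usage sketch (an editorial addition, not part of the original
 * file): callers are expected to probe support before enabling the GPU path:
 *
 *   std::string err;
 *   if (!pme_gpu_supports_input(ir, &err))
 *   {
 *       fprintf(stderr, "%s\n", err.c_str()); // fall back to the CPU path
 *   }
 */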
174 /*! \brief \libinternal
175 * Checks whether PME with the given inputs can run on a GPU.
176 * This function is an internal final check, validating the whole PME structure on creation,
177 * but it still duplicates the preliminary checks from the above (externally exposed) pme_gpu_supports_input(), just in case.
179 * \param[in] pme The PME structure.
180 * \param[out] error The error message if the input is not supported on GPU.
181 * \returns True if this PME input can run on a GPU, false otherwise.
183 static bool pme_gpu_check_restrictions(const gmx_pme_t *pme, std::string *error)
185 std::list<std::string> errorReasons;
186 if (pme->nnodes != 1)
188 errorReasons.push_back("PME decomposition");
190 if (pme->pme_order != 4)
192 errorReasons.push_back("interpolation orders other than 4");
194 if (pme->bFEP)
196 errorReasons.push_back("free energy calculations (multiple grids)");
198 if (pme->doLJ)
200 errorReasons.push_back("Lennard-Jones PME");
202 #if GMX_DOUBLE
204 errorReasons.push_back("double precision");
206 #endif
207 #if GMX_GPU != GMX_GPU_CUDA
209 errorReasons.push_back("non-CUDA build of GROMACS");
211 #endif
213 bool inputSupported = errorReasons.empty();
214 if (!inputSupported && error)
216 std::string regressionTestMarker = "PME GPU does not support";
217 // this prefix is tested for in the regression tests script gmxtest.pl
218 *error = regressionTestMarker + ": " + gmx::joinStrings(errorReasons, "; ") + ".";
220 return inputSupported;
223 PmeRunMode pme_run_mode(const gmx_pme_t *pme)
225 GMX_ASSERT(pme != nullptr, "Expecting valid PME data pointer");
226 return pme->runMode;
229 /*! \brief Number of bytes in a cache line.
231 * Must also be a multiple of the SIMD and SIMD4 register size, to
232 * preserve alignment.
234 const int gmxCacheLineSize = 64;
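/* Note (editorial): this constant is used below to pad per-thread arrays so
 * that each thread's data starts on its own cache line, avoiding false
 * sharing; see the +2*gmxCacheLineSize allocation in init_atomcomm(). */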
236 //! Set up coordinate communication
237 static void setup_coordinate_communication(pme_atomcomm_t *atc)
239 int nslab, n, i;
240 int fw, bw;
242 nslab = atc->nslab;
244 n = 0;
245 for (i = 1; i <= nslab/2; i++)
247 fw = (atc->nodeid + i) % nslab;
248 bw = (atc->nodeid - i + nslab) % nslab;
249 if (n < nslab - 1)
251 atc->node_dest[n] = fw;
252 atc->node_src[n] = bw;
253 n++;
255 if (n < nslab - 1)
257 atc->node_dest[n] = bw;
258 atc->node_src[n] = fw;
259 n++;
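/* Worked example (editorial, illustrative): with nslab = 4 and nodeid = 0 the
 * loop above yields node_dest = { 1, 3, 2 } and node_src = { 3, 1, 2 }, i.e.
 * pulses are ordered by increasing slab distance, alternating forward and
 * backward, and pulse b pairs a send to node_dest[b] with a receive from
 * node_src[b]. */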
264 /*! \brief Round \p n up to the next multiple of \p f */
265 static int mult_up(int n, int f)
267 return ((n + f - 1)/f)*f;
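/* Example (editorial): mult_up(25, 4) == 28, the smallest multiple of 4 that
 * is >= 25; used below to estimate padded per-rank grid sizes. */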
270 /*! \brief Return estimate of the load imbalance from the PME grid not being a good match for the number of PME ranks */
271 static double estimate_pme_load_imbalance(struct gmx_pme_t *pme)
273 int nma, nmi;
274 double n1, n2, n3;
276 nma = pme->nnodes_major;
277 nmi = pme->nnodes_minor;
279 n1 = mult_up(pme->nkx, nma)*mult_up(pme->nky, nmi)*pme->nkz;
280 n2 = mult_up(pme->nkx, nma)*mult_up(pme->nkz, nmi)*pme->nky;
281 n3 = mult_up(pme->nky, nma)*mult_up(pme->nkz, nmi)*pme->nkx;
283 /* pme_solve is roughly double the cost of an fft */
285 return (n1 + n2 + 3*n3)/(double)(6*pme->nkx*pme->nky*pme->nkz);
288 /*! \brief Initialize atom communication data structure */
289 static void init_atomcomm(struct gmx_pme_t *pme, pme_atomcomm_t *atc,
290 int dimind, gmx_bool bSpread)
292 int thread;
294 atc->dimind = dimind;
295 atc->nslab = 1;
296 atc->nodeid = 0;
297 atc->pd_nalloc = 0;
298 #if GMX_MPI
299 if (pme->nnodes > 1)
301 atc->mpi_comm = pme->mpi_comm_d[dimind];
302 MPI_Comm_size(atc->mpi_comm, &atc->nslab);
303 MPI_Comm_rank(atc->mpi_comm, &atc->nodeid);
305 if (debug)
307 fprintf(debug, "For PME atom communication in dimind %d: nslab %d rank %d\n", atc->dimind, atc->nslab, atc->nodeid);
309 #endif
311 atc->bSpread = bSpread;
312 atc->pme_order = pme->pme_order;
314 if (atc->nslab > 1)
316 snew(atc->node_dest, atc->nslab);
317 snew(atc->node_src, atc->nslab);
318 setup_coordinate_communication(atc);
320 snew(atc->count_thread, pme->nthread);
321 for (thread = 0; thread < pme->nthread; thread++)
323 snew(atc->count_thread[thread], atc->nslab);
325 atc->count = atc->count_thread[0];
326 snew(atc->rcount, atc->nslab);
327 snew(atc->buf_index, atc->nslab);
330 atc->nthread = pme->nthread;
331 if (atc->nthread > 1)
333 snew(atc->thread_plist, atc->nthread);
335 snew(atc->spline, atc->nthread);
336 for (thread = 0; thread < atc->nthread; thread++)
338 if (atc->nthread > 1)
340 snew(atc->thread_plist[thread].n, atc->nthread+2*gmxCacheLineSize);
341 atc->thread_plist[thread].n += gmxCacheLineSize;
346 /*! \brief Destroy an atom communication data structure and its child structs */
347 static void destroy_atomcomm(pme_atomcomm_t *atc)
349 sfree(atc->pd);
350 if (atc->nslab > 1)
352 sfree(atc->node_dest);
353 sfree(atc->node_src);
354 for (int i = 0; i < atc->nthread; i++)
356 sfree(atc->count_thread[i]);
358 sfree(atc->count_thread);
359 sfree(atc->rcount);
360 sfree(atc->buf_index);
362 sfree(atc->x);
363 sfree(atc->coefficient);
364 sfree(atc->f);
366 sfree(atc->idx);
367 sfree(atc->fractx);
369 sfree(atc->thread_idx);
370 for (int i = 0; i < atc->nthread; i++)
372 if (atc->nthread > 1)
374 int *n_ptr = atc->thread_plist[i].n - gmxCacheLineSize;
375 sfree(n_ptr);
376 sfree(atc->thread_plist[i].i);
378 sfree(atc->spline[i].ind);
379 for (int d = 0; d < ZZ; d++)
381 sfree(atc->spline[i].theta[d]);
382 sfree(atc->spline[i].dtheta[d]);
384 sfree_aligned(atc->spline[i].ptr_dtheta_z);
385 sfree_aligned(atc->spline[i].ptr_theta_z);
387 if (atc->nthread > 1)
389 sfree(atc->thread_plist);
391 sfree(atc->spline);
394 /*! \brief Initialize data structure for communication */
395 static void
396 init_overlap_comm(pme_overlap_t * ol,
397 int norder,
398 #if GMX_MPI
399 MPI_Comm comm,
400 #endif
401 int nnodes,
402 int nodeid,
403 int ndata,
404 int commplainsize)
406 gmx_bool bCont;
407 #if GMX_MPI
408 MPI_Status stat;
410 ol->mpi_comm = comm;
411 #endif
413 ol->nnodes = nnodes;
414 ol->nodeid = nodeid;
416 /* Linear translation of the PME grid won't affect reciprocal space
417 * calculations, so to optimize we only interpolate "upwards",
418 * which also means we only have to consider overlap in one direction.
419 * I.e., particles on this node might also be spread to grid indices
420 * that belong to higher nodes (modulo nnodes)
423 ol->s2g0.resize(ol->nnodes + 1);
424 ol->s2g1.resize(ol->nnodes);
425 if (debug)
427 fprintf(debug, "PME slab boundaries:");
429 for (int i = 0; i < nnodes; i++)
431 /* s2g0 the local interpolation grid start.
432 * s2g1 the local interpolation grid end.
433 * Since in calc_pidx we divide particles, and not grid lines,
434 * spatially uniform along dimension x or y, we need to round
435 * s2g0 down and s2g1 up.
437 ol->s2g0[i] = (i * ndata + 0) / nnodes;
438 ol->s2g1[i] = ((i + 1) * ndata + nnodes - 1) / nnodes + norder - 1;
440 if (debug)
442 fprintf(debug, " %3d %3d", ol->s2g0[i], ol->s2g1[i]);
445 ol->s2g0[nnodes] = ndata;
446 if (debug)
448 fprintf(debug, "\n");
451 /* Determine how many ranks we need to communicate the grid overlap with */
452 int testRankCount = 0;
455 testRankCount++;
456 bCont = FALSE;
457 for (int i = 0; i < nnodes; i++)
459 if ((i + testRankCount < nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount]) ||
460 (i + testRankCount >= nnodes && ol->s2g1[i] > ol->s2g0[i + testRankCount - nnodes] + ndata))
462 bCont = TRUE;
466 while (bCont && testRankCount < nnodes);
468 ol->comm_data.resize(testRankCount - 1);
469 ol->send_size = 0;
471 for (size_t b = 0; b < ol->comm_data.size(); b++)
473 pme_grid_comm_t *pgc = &ol->comm_data[b];
475 /* Send */
476 pgc->send_id = (ol->nodeid + (b + 1)) % ol->nnodes;
477 int fft_start = ol->s2g0[pgc->send_id];
478 int fft_end = ol->s2g0[pgc->send_id + 1];
479 if (pgc->send_id < nodeid)
481 fft_start += ndata;
482 fft_end += ndata;
484 int send_index1 = ol->s2g1[nodeid];
485 send_index1 = std::min(send_index1, fft_end);
486 pgc->send_index0 = fft_start;
487 pgc->send_nindex = std::max(0, send_index1 - pgc->send_index0);
488 ol->send_size += pgc->send_nindex;
490 /* We always start receiving to the first index of our slab */
491 pgc->recv_id = (ol->nodeid - (b + 1) + ol->nnodes) % ol->nnodes;
492 fft_start = ol->s2g0[ol->nodeid];
493 fft_end = ol->s2g0[ol->nodeid + 1];
494 int recv_index1 = ol->s2g1[pgc->recv_id];
495 if (pgc->recv_id > nodeid)
497 recv_index1 -= ndata;
499 recv_index1 = std::min(recv_index1, fft_end);
500 pgc->recv_index0 = fft_start;
501 pgc->recv_nindex = std::max(0, recv_index1 - pgc->recv_index0);
504 #if GMX_MPI
505 /* Communicate the buffer sizes to receive */
506 for (size_t b = 0; b < ol->comm_data.size(); b++)
508 MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->comm_data[b].send_id, b,
509 &ol->comm_data[b].recv_size, 1, MPI_INT, ol->comm_data[b].recv_id, b,
510 ol->mpi_comm, &stat);
512 #endif
514 /* For a non-divisible grid we need pme_order instead of pme_order-1 */
515 ol->sendbuf.resize(norder * commplainsize);
516 ol->recvbuf.resize(norder * commplainsize);
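/* Worked example (editorial, illustrative): with ndata = 100 grid lines,
 * nnodes = 3 and norder = 4 the formulas above give
 *   s2g0 = { 0, 33, 66, 100 }  (slab starts, rounded down)
 *   s2g1 = { 37, 70, 103 }     (interpolation ends, rounded up, + norder - 1)
 * Rank 0 owns grid lines [0, 33) but spreads up to line 37, i.e. only into
 * rank 1's slab, so the search above ends with testRankCount == 2 and a
 * single communication pulse (comm_data.size() == 1). */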
519 int minimalPmeGridSize(int pmeOrder)
521 /* The actual grid size limitations are:
522 * serial: >= pme_order
523 * DD, no OpenMP: >= 2*(pme_order - 1)
524 * DD, OpenMP: >= pme_order + 1
525 * But we use the maximum for simplicity since in practice there is not
526 * much performance difference between pme_order and 2*(pme_order -1).
528 int minimalSize = 2*(pmeOrder - 1);
530 GMX_RELEASE_ASSERT(pmeOrder >= 3, "pmeOrder has to be >= 3");
531 GMX_RELEASE_ASSERT(minimalSize >= pmeOrder + 1, "The grid size should be >= pmeOrder + 1");
533 return minimalSize;
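/* Example (editorial): for the default pme_order = 4 this returns
 * 2*(4 - 1) == 6, which also satisfies the pme_order + 1 requirement
 * asserted above. */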
536 bool gmx_pme_check_restrictions(int pme_order,
537 int nkx, int nky, int nkz,
538 int numPmeDomainsAlongX,
539 bool useThreads,
540 bool errorsAreFatal)
542 if (pme_order > PME_ORDER_MAX)
544 if (!errorsAreFatal)
546 return false;
549 std::string message = gmx::formatString(
550 "pme_order (%d) is larger than the maximum allowed value (%d). Modify and recompile the code if you really need such a high order.",
551 pme_order, PME_ORDER_MAX);
552 GMX_THROW(InconsistentInputError(message));
555 const int minGridSize = minimalPmeGridSize(pme_order);
556 if (nkx < minGridSize ||
557 nky < minGridSize ||
558 nkz < minGridSize)
560 if (!errorsAreFatal)
562 return false;
564 std::string message = gmx::formatString(
565 "The PME grid sizes need to be >= 2*(pme_order-1) (%d)",
566 minGridSize);
567 GMX_THROW(InconsistentInputError(message));
570 /* Check for a limitation of the (current) sum_fftgrid_dd code.
571 * We only allow multiple communication pulses in dim 1, not in dim 0.
573 if (useThreads && (nkx < numPmeDomainsAlongX*pme_order &&
574 nkx != numPmeDomainsAlongX*(pme_order - 1)))
576 if (!errorsAreFatal)
578 return false;
580 gmx_fatal(FARGS, "The number of PME grid lines per rank along x is %g. But when using OpenMP threads, the number of grid lines per rank along x should be >= pme_order (%d) or = pme_order-1. To resolve this issue, use fewer ranks along x (and possibly more along y and/or z) by specifying -dd manually.",
581 nkx/static_cast<double>(numPmeDomainsAlongX), pme_order);
584 return true;
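/* Usage note (editorial): callers can pass errorsAreFatal == false to merely
 * probe whether a candidate grid is acceptable, while gmx_pme_init() below
 * passes true so that invalid input terminates with a descriptive error. */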
587 /*! \brief Return \p enumerator divided by \p denominator, rounded up */
588 static int div_round_up(int enumerator, int denominator)
590 return (enumerator + denominator - 1)/denominator;
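/* Example (editorial): div_round_up(10, 4) == 3; used below to size the
 * per-rank share of grid lines when the grid does not divide evenly. */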
593 gmx_pme_t *gmx_pme_init(const t_commrec *cr,
594 const NumPmeDomains &numPmeDomains,
595 const t_inputrec *ir,
596 int homenr,
597 gmx_bool bFreeEnergy_q,
598 gmx_bool bFreeEnergy_lj,
599 gmx_bool bReproducible,
600 real ewaldcoeff_q,
601 real ewaldcoeff_lj,
602 int nthread,
603 PmeRunMode runMode,
604 PmeGpu *pmeGpu,
605 gmx_device_info_t *gpuInfo,
606 const gmx::MDLogger & /*mdlog*/)
608 int use_threads, sum_use_threads, i;
609 ivec ndata;
611 if (debug)
613 fprintf(debug, "Creating PME data structures.\n");
616 unique_cptr<gmx_pme_t, gmx_pme_destroy> pme(new gmx_pme_t());
618 pme->sum_qgrid_tmp = nullptr;
619 pme->sum_qgrid_dd_tmp = nullptr;
621 pme->buf_nalloc = 0;
623 pme->nnodes = 1;
624 pme->bPPnode = TRUE;
626 pme->nnodes_major = numPmeDomains.x;
627 pme->nnodes_minor = numPmeDomains.y;
629 #if GMX_MPI
630 if (numPmeDomains.x*numPmeDomains.y > 1)
632 pme->mpi_comm = cr->mpi_comm_mygroup;
634 MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
635 MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
636 if (pme->nnodes != numPmeDomains.x*numPmeDomains.y)
638 gmx_incons("PME rank count mismatch");
641 else
643 pme->mpi_comm = MPI_COMM_NULL;
645 #endif
647 if (pme->nnodes == 1)
649 #if GMX_MPI
650 pme->mpi_comm_d[0] = MPI_COMM_NULL;
651 pme->mpi_comm_d[1] = MPI_COMM_NULL;
652 #endif
653 pme->ndecompdim = 0;
654 pme->nodeid_major = 0;
655 pme->nodeid_minor = 0;
656 #if GMX_MPI
657 pme->mpi_comm_d[0] = pme->mpi_comm_d[1] = MPI_COMM_NULL;
658 #endif
660 else
662 if (numPmeDomains.y == 1)
664 #if GMX_MPI
665 pme->mpi_comm_d[0] = pme->mpi_comm;
666 pme->mpi_comm_d[1] = MPI_COMM_NULL;
667 #endif
668 pme->ndecompdim = 1;
669 pme->nodeid_major = pme->nodeid;
670 pme->nodeid_minor = 0;
673 else if (numPmeDomains.x == 1)
675 #if GMX_MPI
676 pme->mpi_comm_d[0] = MPI_COMM_NULL;
677 pme->mpi_comm_d[1] = pme->mpi_comm;
678 #endif
679 pme->ndecompdim = 1;
680 pme->nodeid_major = 0;
681 pme->nodeid_minor = pme->nodeid;
683 else
685 if (pme->nnodes % numPmeDomains.x != 0)
687 gmx_incons("For 2D PME decomposition, #PME ranks must be divisible by the number of domains along x");
689 pme->ndecompdim = 2;
691 #if GMX_MPI
692 MPI_Comm_split(pme->mpi_comm, pme->nodeid % numPmeDomains.y,
693 pme->nodeid, &pme->mpi_comm_d[0]); /* My communicator along major dimension */
694 MPI_Comm_split(pme->mpi_comm, pme->nodeid/numPmeDomains.y,
695 pme->nodeid, &pme->mpi_comm_d[1]); /* My communicator along minor dimension */
697 MPI_Comm_rank(pme->mpi_comm_d[0], &pme->nodeid_major);
698 MPI_Comm_size(pme->mpi_comm_d[0], &pme->nnodes_major);
699 MPI_Comm_rank(pme->mpi_comm_d[1], &pme->nodeid_minor);
700 MPI_Comm_size(pme->mpi_comm_d[1], &pme->nnodes_minor);
701 #endif
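/* Illustrative example (editorial): with numPmeDomains = {3, 2}, i.e. 6 PME
 * ranks, the two MPI_Comm_split calls above use color = nodeid % 2 for the
 * major dimension, grouping ranks {0, 2, 4} and {1, 3, 5}, and
 * color = nodeid / 2 for the minor dimension, grouping ranks {0, 1}, {2, 3}
 * and {4, 5}. */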
703 pme->bPPnode = thisRankHasDuty(cr, DUTY_PP);
706 pme->nthread = nthread;
708 /* Check if any of the PME MPI ranks uses threads */
709 use_threads = (pme->nthread > 1 ? 1 : 0);
710 #if GMX_MPI
711 if (pme->nnodes > 1)
713 MPI_Allreduce(&use_threads, &sum_use_threads, 1, MPI_INT,
714 MPI_SUM, pme->mpi_comm);
716 else
717 #endif
719 sum_use_threads = use_threads;
721 pme->bUseThreads = (sum_use_threads > 0);
723 if (ir->ePBC == epbcSCREW)
725 gmx_fatal(FARGS, "pme does not (yet) work with pbc = screw");
728 /* NOTE:
729 * It is likely that the current gmx_pme_do() routine supports calculating
730 * only Coulomb or LJ while gmx_pme_init() configures for both,
731 * but that has never been tested.
732 * It is likely that the current gmx_pme_do() routine supports skipping
733 * the free-energy calculation for Coulomb and/or LJ even when gmx_pme_init()
734 * configures with free-energy, but that has never been tested.
736 pme->doCoulomb = EEL_PME(ir->coulombtype);
737 pme->doLJ = EVDW_PME(ir->vdwtype);
738 pme->bFEP_q = ((ir->efep != efepNO) && bFreeEnergy_q);
739 pme->bFEP_lj = ((ir->efep != efepNO) && bFreeEnergy_lj);
740 pme->bFEP = (pme->bFEP_q || pme->bFEP_lj);
741 pme->nkx = ir->nkx;
742 pme->nky = ir->nky;
743 pme->nkz = ir->nkz;
744 pme->bP3M = (ir->coulombtype == eelP3M_AD || getenv("GMX_PME_P3M") != nullptr);
745 pme->pme_order = ir->pme_order;
746 pme->ewaldcoeff_q = ewaldcoeff_q;
747 pme->ewaldcoeff_lj = ewaldcoeff_lj;
749 /* Always constant electrostatics coefficients */
750 pme->epsilon_r = ir->epsilon_r;
752 /* Always constant LJ coefficients */
753 pme->ljpme_combination_rule = ir->ljpme_combination_rule;
755 // The box requires scaling with nwalls = 2; we store that condition as well
756 // as the scaling factor
757 delete pme->boxScaler;
758 pme->boxScaler = new EwaldBoxZScaler(*ir);
760 /* If we violate restrictions, generate a fatal error here */
761 gmx_pme_check_restrictions(pme->pme_order,
762 pme->nkx, pme->nky, pme->nkz,
763 pme->nnodes_major,
764 pme->bUseThreads,
765 true);
767 if (pme->nnodes > 1)
769 double imbal;
771 #if GMX_MPI
772 MPI_Type_contiguous(DIM, GMX_MPI_REAL, &(pme->rvec_mpi));
773 MPI_Type_commit(&(pme->rvec_mpi));
774 #endif
776 /* Note that the coefficient spreading and force gathering, which usually
777 * takes about the same amount of time as FFT+solve_pme,
778 * is always fully load balanced
779 * (unless the coefficient distribution is inhomogeneous).
782 imbal = estimate_pme_load_imbalance(pme.get());
783 if (imbal >= 1.2 && pme->nodeid_major == 0 && pme->nodeid_minor == 0)
785 fprintf(stderr,
786 "\n"
787 "NOTE: The load imbalance in PME FFT and solve is %d%%.\n"
788 " For optimal PME load balancing\n"
789 " PME grid_x (%d) and grid_y (%d) should be divisible by #PME_ranks_x (%d)\n"
790 " and PME grid_y (%d) and grid_z (%d) should be divisible by #PME_ranks_y (%d)\n"
791 "\n",
792 (int)((imbal-1)*100 + 0.5),
793 pme->nkx, pme->nky, pme->nnodes_major,
794 pme->nky, pme->nkz, pme->nnodes_minor);
798 /* For a non-divisible grid we need pme_order instead of pme_order-1 */
799 /* In sum_qgrid_dd x overlap is copied in place: take padding into account.
800 * y is always copied through a buffer: we don't need padding in z,
801 * but we do need the overlap in x because of the communication order.
803 init_overlap_comm(&pme->overlap[0], pme->pme_order,
804 #if GMX_MPI
805 pme->mpi_comm_d[0],
806 #endif
807 pme->nnodes_major, pme->nodeid_major,
808 pme->nkx,
809 (div_round_up(pme->nky, pme->nnodes_minor)+pme->pme_order)*(pme->nkz+pme->pme_order-1));
811 /* Along overlap dim 1 we can send in multiple pulses in sum_fftgrid_dd.
812 * We do this with an offset buffer of equal size, so we need to allocate
813 * extra for the offset. That's what the (+1)*pme->nkz is for.
815 init_overlap_comm(&pme->overlap[1], pme->pme_order,
816 #if GMX_MPI
817 pme->mpi_comm_d[1],
818 #endif
819 pme->nnodes_minor, pme->nodeid_minor,
820 pme->nky,
821 (div_round_up(pme->nkx, pme->nnodes_major)+pme->pme_order+1)*pme->nkz);
823 /* Double-check for a limitation of the (current) sum_fftgrid_dd code.
824 * Note that gmx_pme_check_restrictions checked for this already.
826 if (pme->bUseThreads && (pme->overlap[0].comm_data.size() > 1))
828 gmx_incons("More than one communication pulse required for grid overlap communication along the major dimension while using threads");
831 snew(pme->bsp_mod[XX], pme->nkx);
832 snew(pme->bsp_mod[YY], pme->nky);
833 snew(pme->bsp_mod[ZZ], pme->nkz);
835 pme->gpu = pmeGpu; /* Carrying over the single GPU structure */
836 pme->runMode = runMode;
838 /* The required size of the interpolation grid, including overlap.
839 * The allocated size (pmegrid_n?) might be slightly larger.
841 pme->pmegrid_nx = pme->overlap[0].s2g1[pme->nodeid_major] -
842 pme->overlap[0].s2g0[pme->nodeid_major];
843 pme->pmegrid_ny = pme->overlap[1].s2g1[pme->nodeid_minor] -
844 pme->overlap[1].s2g0[pme->nodeid_minor];
845 pme->pmegrid_nz_base = pme->nkz;
846 pme->pmegrid_nz = pme->pmegrid_nz_base + pme->pme_order - 1;
847 set_grid_alignment(&pme->pmegrid_nz, pme->pme_order);
848 pme->pmegrid_start_ix = pme->overlap[0].s2g0[pme->nodeid_major];
849 pme->pmegrid_start_iy = pme->overlap[1].s2g0[pme->nodeid_minor];
850 pme->pmegrid_start_iz = 0;
852 make_gridindex_to_localindex(pme->nkx,
853 pme->pmegrid_start_ix,
854 pme->pmegrid_nx - (pme->pme_order-1),
855 &pme->nnx, &pme->fshx);
856 make_gridindex_to_localindex(pme->nky,
857 pme->pmegrid_start_iy,
858 pme->pmegrid_ny - (pme->pme_order-1),
859 &pme->nny, &pme->fshy);
860 make_gridindex_to_localindex(pme->nkz,
861 pme->pmegrid_start_iz,
862 pme->pmegrid_nz_base,
863 &pme->nnz, &pme->fshz);
865 pme->spline_work = make_pme_spline_work(pme->pme_order);
867 ndata[0] = pme->nkx;
868 ndata[1] = pme->nky;
869 ndata[2] = pme->nkz;
870 /* It doesn't matter if we allocate too many grids here,
871 * we only initialize and use the ones we need.
873 if (pme->doLJ)
875 pme->ngrids = ((ir->ljpme_combination_rule == eljpmeLB) ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ);
877 else
879 pme->ngrids = DO_Q;
881 snew(pme->fftgrid, pme->ngrids);
882 snew(pme->cfftgrid, pme->ngrids);
883 snew(pme->pfft_setup, pme->ngrids);
885 for (i = 0; i < pme->ngrids; ++i)
887 if ((i < DO_Q && pme->doCoulomb && (i == 0 ||
888 bFreeEnergy_q)) ||
889 (i >= DO_Q && pme->doLJ && (i == 2 ||
890 bFreeEnergy_lj ||
891 ir->ljpme_combination_rule == eljpmeLB)))
893 pmegrids_init(&pme->pmegrid[i],
894 pme->pmegrid_nx, pme->pmegrid_ny, pme->pmegrid_nz,
895 pme->pmegrid_nz_base,
896 pme->pme_order,
897 pme->bUseThreads,
898 pme->nthread,
899 pme->overlap[0].s2g1[pme->nodeid_major]-pme->overlap[0].s2g0[pme->nodeid_major+1],
900 pme->overlap[1].s2g1[pme->nodeid_minor]-pme->overlap[1].s2g0[pme->nodeid_minor+1]);
901 /* This routine will allocate the grid data to fit the FFTs */
902 const auto allocateRealGridForGpu = (pme->runMode == PmeRunMode::Mixed) ? gmx::PinningPolicy::CanBePinned : gmx::PinningPolicy::CannotBePinned;
903 gmx_parallel_3dfft_init(&pme->pfft_setup[i], ndata,
904 &pme->fftgrid[i], &pme->cfftgrid[i],
905 pme->mpi_comm_d,
906 bReproducible, pme->nthread, allocateRealGridForGpu);
911 if (!pme->bP3M)
913 /* Use plain SPME B-spline interpolation */
914 make_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
916 else
918 /* Use the P3M grid-optimized influence function */
919 make_p3m_bspline_moduli(pme->bsp_mod, pme->nkx, pme->nky, pme->nkz, pme->pme_order);
922 /* Use atc[0] for spreading */
923 init_atomcomm(pme.get(), &pme->atc[0], numPmeDomains.x > 1 ? 0 : 1, TRUE);
924 if (pme->ndecompdim >= 2)
926 init_atomcomm(pme.get(), &pme->atc[1], 1, FALSE);
929 if (pme->nnodes == 1)
931 pme->atc[0].n = homenr;
932 pme_realloc_atomcomm_things(&pme->atc[0]);
935 pme->lb_buf1 = nullptr;
936 pme->lb_buf2 = nullptr;
937 pme->lb_buf_nalloc = 0;
939 if (pme_gpu_active(pme.get()))
941 if (!pme->gpu)
943 // Initial check of validity of the data
944 std::string errorString;
945 bool canRunOnGpu = pme_gpu_check_restrictions(pme.get(), &errorString);
946 if (!canRunOnGpu)
948 GMX_THROW(gmx::NotImplementedError(errorString));
952 pme_gpu_reinit(pme.get(), gpuInfo);
955 pme_init_all_work(&pme->solve_work, pme->nthread, pme->nkx);
957 // no exception was thrown during the init, so we hand over the PME structure handle
958 return pme.release();
961 void gmx_pme_reinit(struct gmx_pme_t **pmedata,
962 const t_commrec *cr,
963 struct gmx_pme_t * pme_src,
964 const t_inputrec * ir,
965 const ivec grid_size,
966 real ewaldcoeff_q,
967 real ewaldcoeff_lj)
969 int homenr;
971 // Create a copy of t_inputrec fields that are used in gmx_pme_init().
972 // TODO: This would be better as just copying a sub-structure that contains
973 // all the PME parameters and nothing else.
974 t_inputrec irc;
975 irc.ePBC = ir->ePBC;
976 irc.coulombtype = ir->coulombtype;
977 irc.vdwtype = ir->vdwtype;
978 irc.efep = ir->efep;
979 irc.pme_order = ir->pme_order;
980 irc.epsilon_r = ir->epsilon_r;
981 irc.ljpme_combination_rule = ir->ljpme_combination_rule;
982 irc.nkx = grid_size[XX];
983 irc.nky = grid_size[YY];
984 irc.nkz = grid_size[ZZ];
986 if (pme_src->nnodes == 1)
988 homenr = pme_src->atc[0].n;
990 else
992 homenr = -1;
997 const gmx::MDLogger dummyLogger;
998 // This is a reinit, which currently only changes the grid size/coefficients,
999 // so we don't expect any actual logging.
1000 // TODO: when PME is an object, it should take a reference to mdlog on construction and save it.
1001 GMX_ASSERT(pmedata, "Invalid PME pointer");
1002 NumPmeDomains numPmeDomains = { pme_src->nnodes_major, pme_src->nnodes_minor };
1003 *pmedata = gmx_pme_init(cr, numPmeDomains,
1004 &irc, homenr, pme_src->bFEP_q, pme_src->bFEP_lj, FALSE, ewaldcoeff_q, ewaldcoeff_lj,
1005 pme_src->nthread, pme_src->runMode, pme_src->gpu, nullptr, dummyLogger);
1006 //TODO this is mostly passing around current values
1008 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1010 /* We can easily reuse the allocated pme grids in pme_src */
1011 reuse_pmegrids(&pme_src->pmegrid[PME_GRID_QA], &(*pmedata)->pmegrid[PME_GRID_QA]);
1012 /* We would like to reuse the fft grids, but that's harder */
1015 void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V)
1017 pme_atomcomm_t *atc;
1018 pmegrids_t *grid;
1020 if (pme->nnodes > 1)
1022 gmx_incons("gmx_pme_calc_energy called in parallel");
1024 if (pme->bFEP_q)
1026 gmx_incons("gmx_pme_calc_energy with free energy");
1029 atc = &pme->atc_energy;
1030 atc->nthread = 1;
1031 if (atc->spline == nullptr)
1033 snew(atc->spline, atc->nthread);
1035 atc->nslab = 1;
1036 atc->bSpread = TRUE;
1037 atc->pme_order = pme->pme_order;
1038 atc->n = n;
1039 pme_realloc_atomcomm_things(atc);
1040 atc->x = x;
1041 atc->coefficient = q;
1043 /* We only use the A-charges grid */
1044 grid = &pme->pmegrid[PME_GRID_QA];
1046 /* Only calculate the spline coefficients, don't actually spread */
1047 spread_on_grid(pme, atc, nullptr, TRUE, FALSE, pme->fftgrid[PME_GRID_QA], FALSE, PME_GRID_QA);
1049 *V = gather_energy_bsplines(pme, grid->grid.grid, atc);
1052 /*! \brief Calculate initial Lorentz-Berthelot coefficients for LJ-PME */
1053 static void
1054 calc_initial_lb_coeffs(struct gmx_pme_t *pme, real *local_c6, real *local_sigma)
1056 int i;
1057 for (i = 0; i < pme->atc[0].n; ++i)
1059 real sigma4;
1060 sigma4 = local_sigma[i];
1061 sigma4 = sigma4*sigma4;
1062 sigma4 = sigma4*sigma4;
1063 pme->atc[0].coefficient[i] = local_c6[i] / sigma4;
1067 /*! \brief Calculate next Lorentz-Berthelot coefficients for LJ-PME */
1068 static void
1069 calc_next_lb_coeffs(struct gmx_pme_t *pme, real *local_sigma)
1071 int i;
1073 for (i = 0; i < pme->atc[0].n; ++i)
1075 pme->atc[0].coefficient[i] *= local_sigma[i];
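/* Editorial reading of the two helpers above: calc_initial_lb_coeffs() sets
 * coefficient[i] = local_c6[i]/sigma[i]^4 and each of the seven subsequent
 * calc_next_lb_coeffs() calls multiplies by sigma[i], so the seven LJ-PME
 * grids spread local_c6[i]*sigma[i]^k for k = -3 ... 3. These are the
 * per-atom factors of the binomial expansion of (sigma_i + sigma_j)^6 that
 * the Lorentz-Berthelot combination rule requires. */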
1079 int gmx_pme_do(struct gmx_pme_t *pme,
1080 int start, int homenr,
1081 rvec x[], rvec f[],
1082 real chargeA[], real chargeB[],
1083 real c6A[], real c6B[],
1084 real sigmaA[], real sigmaB[],
1085 matrix box, const t_commrec *cr,
1086 int maxshift_x, int maxshift_y,
1087 t_nrnb *nrnb, gmx_wallcycle *wcycle,
1088 matrix vir_q, matrix vir_lj,
1089 real *energy_q, real *energy_lj,
1090 real lambda_q, real lambda_lj,
1091 real *dvdlambda_q, real *dvdlambda_lj,
1092 int flags)
1094 GMX_ASSERT(pme->runMode == PmeRunMode::CPU, "gmx_pme_do should not be called on the GPU PME run.");
1096 int d, i, j, npme, grid_index, max_grid_index;
1097 int n_d;
1098 pme_atomcomm_t *atc = nullptr;
1099 pmegrids_t *pmegrid = nullptr;
1100 real *grid = nullptr;
1101 rvec *f_d;
1102 real *coefficient = nullptr;
1103 real energy_AB[4];
1104 matrix vir_AB[4];
1105 real scale, lambda;
1106 gmx_bool bClearF;
1107 gmx_parallel_3dfft_t pfft_setup;
1108 real * fftgrid;
1109 t_complex * cfftgrid;
1110 int thread;
1111 gmx_bool bFirst, bDoSplines;
1112 int fep_state;
1113 int fep_states_lj = pme->bFEP_lj ? 2 : 1;
1114 const gmx_bool bCalcEnerVir = flags & GMX_PME_CALC_ENER_VIR;
1115 const gmx_bool bBackFFT = flags & (GMX_PME_CALC_F | GMX_PME_CALC_POT);
1116 const gmx_bool bCalcF = flags & GMX_PME_CALC_F;
1118 assert(pme->nnodes > 0);
1119 assert(pme->nnodes == 1 || pme->ndecompdim > 0);
1121 if (pme->nnodes > 1)
1123 atc = &pme->atc[0];
1124 atc->npd = homenr;
1125 if (atc->npd > atc->pd_nalloc)
1127 atc->pd_nalloc = over_alloc_dd(atc->npd);
1128 srenew(atc->pd, atc->pd_nalloc);
1130 for (d = pme->ndecompdim-1; d >= 0; d--)
1132 atc = &pme->atc[d];
1133 atc->maxshift = (atc->dimind == 0 ? maxshift_x : maxshift_y);
1136 else
1138 atc = &pme->atc[0];
1139 /* This could be necessary for TPI */
1140 pme->atc[0].n = homenr;
1141 if (DOMAINDECOMP(cr))
1143 pme_realloc_atomcomm_things(atc);
1145 atc->x = x;
1146 atc->f = f;
1149 matrix scaledBox;
1150 pme->boxScaler->scaleBox(box, scaledBox);
1152 gmx::invertBoxMatrix(scaledBox, pme->recipbox);
1153 bFirst = TRUE;
1155 /* For simplicity, we construct the splines for all particles if
1156 * more than one PME calculation is needed. Some optimization
1157 * could be done by keeping track of which atoms have splines
1158 * constructed, and construct new splines on each pass for atoms
1159 * that don't yet have them.
1162 bDoSplines = pme->bFEP || (pme->doCoulomb && pme->doLJ);
1164 /* We need a maximum of four separate PME calculations:
1165 * grid_index=0: Coulomb PME with charges from state A
1166 * grid_index=1: Coulomb PME with charges from state B
1167 * grid_index=2: LJ PME with C6 from state A
1168 * grid_index=3: LJ PME with C6 from state B
1169 * For Lorentz-Berthelot combination rules, a separate loop is used to
1170 * calculate all the terms
1173 /* If we are doing LJ-PME with LB, we only do Q here */
1174 max_grid_index = (pme->ljpme_combination_rule == eljpmeLB) ? DO_Q : DO_Q_AND_LJ;
1176 for (grid_index = 0; grid_index < max_grid_index; ++grid_index)
1178 /* Check if we should do calculations at this grid_index
1179 * If grid_index is odd we should be doing FEP
1180 * If grid_index < 2 we should be doing electrostatic PME
1181 * If grid_index >= 2 we should be doing LJ-PME
1183 if ((grid_index < DO_Q && (!pme->doCoulomb ||
1184 (grid_index == 1 && !pme->bFEP_q))) ||
1185 (grid_index >= DO_Q && (!pme->doLJ ||
1186 (grid_index == 3 && !pme->bFEP_lj))))
1188 continue;
1190 /* Unpack structure */
1191 pmegrid = &pme->pmegrid[grid_index];
1192 fftgrid = pme->fftgrid[grid_index];
1193 cfftgrid = pme->cfftgrid[grid_index];
1194 pfft_setup = pme->pfft_setup[grid_index];
1195 switch (grid_index)
1197 case 0: coefficient = chargeA + start; break;
1198 case 1: coefficient = chargeB + start; break;
1199 case 2: coefficient = c6A + start; break;
1200 case 3: coefficient = c6B + start; break;
1203 grid = pmegrid->grid.grid;
1205 if (debug)
1207 fprintf(debug, "PME: number of ranks = %d, rank = %d\n",
1208 cr->nnodes, cr->nodeid);
1209 fprintf(debug, "Grid = %p\n", (void*)grid);
1210 if (grid == nullptr)
1212 gmx_fatal(FARGS, "No grid!");
1216 if (pme->nnodes == 1)
1218 atc->coefficient = coefficient;
1220 else
1222 wallcycle_start(wcycle, ewcPME_REDISTXF);
1223 do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, coefficient);
1225 wallcycle_stop(wcycle, ewcPME_REDISTXF);
1228 if (debug)
1230 fprintf(debug, "Rank= %6d, pme local particles=%6d\n",
1231 cr->nodeid, atc->n);
1234 if (flags & GMX_PME_SPREAD)
1236 wallcycle_start(wcycle, ewcPME_SPREAD);
1238 /* Spread the coefficients on a grid */
1239 spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1241 if (bFirst)
1243 inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
1245 inc_nrnb(nrnb, eNR_SPREADBSP,
1246 pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
1248 if (!pme->bUseThreads)
1250 wrap_periodic_pmegrid(pme, grid);
1252 /* sum contributions to local grid from other nodes */
1253 #if GMX_MPI
1254 if (pme->nnodes > 1)
1256 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1258 #endif
1260 copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1263 wallcycle_stop(wcycle, ewcPME_SPREAD);
1265 /* TODO If the OpenMP and single-threaded implementations
1266 converge, then spread_on_grid() and
1267 copy_pmegrid_to_fftgrid() will perhaps live in the same
1268 source file.
1272 /* Here we start a large thread parallel region */
1273 #pragma omp parallel num_threads(pme->nthread) private(thread)
1277 thread = gmx_omp_get_thread_num();
1278 if (flags & GMX_PME_SOLVE)
1280 int loop_count;
1282 /* do 3d-fft */
1283 if (thread == 0)
1285 wallcycle_start(wcycle, ewcPME_FFT);
1287 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
1288 thread, wcycle);
1289 if (thread == 0)
1291 wallcycle_stop(wcycle, ewcPME_FFT);
1294 /* solve in k-space for our local cells */
1295 if (thread == 0)
1297 wallcycle_start(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
1299 if (grid_index < DO_Q)
1301 loop_count =
1302 solve_pme_yzx(pme, cfftgrid,
1303 scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1304 bCalcEnerVir,
1305 pme->nthread, thread);
1307 else
1309 loop_count =
1310 solve_pme_lj_yzx(pme, &cfftgrid, FALSE,
1311 scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1312 bCalcEnerVir,
1313 pme->nthread, thread);
1316 if (thread == 0)
1318 wallcycle_stop(wcycle, (grid_index < DO_Q ? ewcPME_SOLVE : ewcLJPME));
1319 inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1323 if (bBackFFT)
1325 /* do 3d-invfft */
1326 if (thread == 0)
1328 wallcycle_start(wcycle, ewcPME_FFT);
1330 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
1331 thread, wcycle);
1332 if (thread == 0)
1334 wallcycle_stop(wcycle, ewcPME_FFT);
1337 if (pme->nodeid == 0)
1339 real ntot = pme->nkx*pme->nky*pme->nkz;
1340 npme = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
1341 inc_nrnb(nrnb, eNR_FFT, 2*npme);
1344 /* Note: this wallcycle region is closed below
1345 outside an OpenMP region, so take care if
1346 refactoring code here. */
1347 wallcycle_start(wcycle, ewcPME_GATHER);
1350 copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1352 } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1354 /* End of thread parallel section.
1355 * With MPI we have to synchronize here before gmx_sum_qgrid_dd.
1358 if (bBackFFT)
1360 /* distribute local grid to all nodes */
1361 #if GMX_MPI
1362 if (pme->nnodes > 1)
1364 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1366 #endif
1368 unwrap_periodic_pmegrid(pme, grid);
1371 if (bCalcF)
1373 /* interpolate forces for our local atoms */
1376 /* If we are running without parallelization,
1377 * atc->f is the actual force array, not a buffer,
1378 * therefore we should not clear it.
1380 lambda = grid_index < DO_Q ? lambda_q : lambda_lj;
1381 bClearF = (bFirst && PAR(cr));
1382 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1383 for (thread = 0; thread < pme->nthread; thread++)
1387 gather_f_bsplines(pme, grid, bClearF, atc,
1388 &atc->spline[thread],
1389 pme->bFEP ? (grid_index % 2 == 0 ? 1.0-lambda : lambda) : 1.0);
1391 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1395 inc_nrnb(nrnb, eNR_GATHERFBSP,
1396 pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
1397 /* Note: this wallcycle region is opened above inside an OpenMP
1398 region, so take care if refactoring code here. */
1399 wallcycle_stop(wcycle, ewcPME_GATHER);
1402 if (bCalcEnerVir)
1404 /* This should only be called on the master thread
1405 * and after the threads have synchronized.
1407 if (grid_index < 2)
1409 get_pme_ener_vir_q(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
1411 else
1413 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[grid_index], vir_AB[grid_index]);
1416 bFirst = FALSE;
1417 } /* of grid_index-loop */
1419 /* For Lorentz-Berthelot combination rules in LJ-PME, we need to calculate
1420 * seven terms. */
1422 if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB)
1424 /* Loop over A- and B-state if we are doing FEP */
1425 for (fep_state = 0; fep_state < fep_states_lj; ++fep_state)
1427 real *local_c6 = nullptr, *local_sigma = nullptr, *RedistC6 = nullptr, *RedistSigma = nullptr;
1428 if (pme->nnodes == 1)
1430 if (pme->lb_buf1 == nullptr)
1432 pme->lb_buf_nalloc = pme->atc[0].n;
1433 snew(pme->lb_buf1, pme->lb_buf_nalloc);
1435 pme->atc[0].coefficient = pme->lb_buf1;
1436 switch (fep_state)
1438 case 0:
1439 local_c6 = c6A;
1440 local_sigma = sigmaA;
1441 break;
1442 case 1:
1443 local_c6 = c6B;
1444 local_sigma = sigmaB;
1445 break;
1446 default:
1447 gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1450 else
1452 atc = &pme->atc[0];
1453 switch (fep_state)
1455 case 0:
1456 RedistC6 = c6A;
1457 RedistSigma = sigmaA;
1458 break;
1459 case 1:
1460 RedistC6 = c6B;
1461 RedistSigma = sigmaB;
1462 break;
1463 default:
1464 gmx_incons("Trying to access wrong FEP-state in LJ-PME routine");
1466 wallcycle_start(wcycle, ewcPME_REDISTXF);
1468 do_redist_pos_coeffs(pme, cr, start, homenr, bFirst, x, RedistC6);
1469 if (pme->lb_buf_nalloc < atc->n)
1471 pme->lb_buf_nalloc = atc->nalloc;
1472 srenew(pme->lb_buf1, pme->lb_buf_nalloc);
1473 srenew(pme->lb_buf2, pme->lb_buf_nalloc);
1475 local_c6 = pme->lb_buf1;
1476 for (i = 0; i < atc->n; ++i)
1478 local_c6[i] = atc->coefficient[i];
1481 do_redist_pos_coeffs(pme, cr, start, homenr, FALSE, x, RedistSigma);
1482 local_sigma = pme->lb_buf2;
1483 for (i = 0; i < atc->n; ++i)
1485 local_sigma[i] = atc->coefficient[i];
1488 wallcycle_stop(wcycle, ewcPME_REDISTXF);
1490 calc_initial_lb_coeffs(pme, local_c6, local_sigma);
1492 /* Seven terms in LJ-PME with LB, grid_index < 2 reserved for electrostatics */
1493 for (grid_index = 2; grid_index < 9; ++grid_index)
1495 /* Unpack structure */
1496 pmegrid = &pme->pmegrid[grid_index];
1497 fftgrid = pme->fftgrid[grid_index];
1498 pfft_setup = pme->pfft_setup[grid_index];
1499 calc_next_lb_coeffs(pme, local_sigma);
1500 grid = pmegrid->grid.grid;
1502 if (flags & GMX_PME_SPREAD)
1504 wallcycle_start(wcycle, ewcPME_SPREAD);
1505 /* Spread the c6 on a grid */
1506 spread_on_grid(pme, &pme->atc[0], pmegrid, bFirst, TRUE, fftgrid, bDoSplines, grid_index);
1508 if (bFirst)
1510 inc_nrnb(nrnb, eNR_WEIGHTS, DIM*atc->n);
1513 inc_nrnb(nrnb, eNR_SPREADBSP,
1514 pme->pme_order*pme->pme_order*pme->pme_order*atc->n);
1515 if (pme->nthread == 1)
1517 wrap_periodic_pmegrid(pme, grid);
1518 /* sum contributions to local grid from other nodes */
1519 #if GMX_MPI
1520 if (pme->nnodes > 1)
1522 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
1524 #endif
1525 copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
1527 wallcycle_stop(wcycle, ewcPME_SPREAD);
1529 /* Here we start a large thread parallel region */
1530 #pragma omp parallel num_threads(pme->nthread) private(thread)
1534 thread = gmx_omp_get_thread_num();
1535 if (flags & GMX_PME_SOLVE)
1537 /* do 3d-fft */
1538 if (thread == 0)
1540 wallcycle_start(wcycle, ewcPME_FFT);
1543 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_REAL_TO_COMPLEX,
1544 thread, wcycle);
1545 if (thread == 0)
1547 wallcycle_stop(wcycle, ewcPME_FFT);
1551 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1553 bFirst = FALSE;
1555 if (flags & GMX_PME_SOLVE)
1557 /* solve in k-space for our local cells */
1558 #pragma omp parallel num_threads(pme->nthread) private(thread)
1562 int loop_count;
1563 thread = gmx_omp_get_thread_num();
1564 if (thread == 0)
1566 wallcycle_start(wcycle, ewcLJPME);
1569 loop_count =
1570 solve_pme_lj_yzx(pme, &pme->cfftgrid[2], TRUE,
1571 scaledBox[XX][XX]*scaledBox[YY][YY]*scaledBox[ZZ][ZZ],
1572 bCalcEnerVir,
1573 pme->nthread, thread);
1574 if (thread == 0)
1576 wallcycle_stop(wcycle, ewcLJPME);
1577 inc_nrnb(nrnb, eNR_SOLVEPME, loop_count);
1580 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1584 if (bCalcEnerVir)
1586 /* This should only be called on the master thread and
1587 * after the threads have synchronized.
1589 get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy_AB[2+fep_state], vir_AB[2+fep_state]);
1592 if (bBackFFT)
1594 bFirst = !pme->doCoulomb;
1595 calc_initial_lb_coeffs(pme, local_c6, local_sigma);
1596 for (grid_index = 8; grid_index >= 2; --grid_index)
1598 /* Unpack structure */
1599 pmegrid = &pme->pmegrid[grid_index];
1600 fftgrid = pme->fftgrid[grid_index];
1601 pfft_setup = pme->pfft_setup[grid_index];
1602 grid = pmegrid->grid.grid;
1603 calc_next_lb_coeffs(pme, local_sigma);
1604 #pragma omp parallel num_threads(pme->nthread) private(thread)
1608 thread = gmx_omp_get_thread_num();
1609 /* do 3d-invfft */
1610 if (thread == 0)
1612 wallcycle_start(wcycle, ewcPME_FFT);
1615 gmx_parallel_3dfft_execute(pfft_setup, GMX_FFT_COMPLEX_TO_REAL,
1616 thread, wcycle);
1617 if (thread == 0)
1619 wallcycle_stop(wcycle, ewcPME_FFT);
1622 if (pme->nodeid == 0)
1624 real ntot = pme->nkx*pme->nky*pme->nkz;
1625 npme = static_cast<int>(ntot*std::log(ntot)/std::log(2.0));
1626 inc_nrnb(nrnb, eNR_FFT, 2*npme);
1628 wallcycle_start(wcycle, ewcPME_GATHER);
1631 copy_fftgrid_to_pmegrid(pme, fftgrid, grid, grid_index, pme->nthread, thread);
1633 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1634 } /*#pragma omp parallel*/
1636 /* distribute local grid to all nodes */
1637 #if GMX_MPI
1638 if (pme->nnodes > 1)
1640 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
1642 #endif
1644 unwrap_periodic_pmegrid(pme, grid);
1646 if (bCalcF)
1648 /* interpolate forces for our local atoms */
1649 bClearF = (bFirst && PAR(cr));
1650 scale = pme->bFEP ? (fep_state < 1 ? 1.0-lambda_lj : lambda_lj) : 1.0;
1651 scale *= lb_scale_factor[grid_index-2];
1653 #pragma omp parallel for num_threads(pme->nthread) schedule(static)
1654 for (thread = 0; thread < pme->nthread; thread++)
1658 gather_f_bsplines(pme, grid, bClearF, &pme->atc[0],
1659 &pme->atc[0].spline[thread],
1660 scale);
1662 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1666 inc_nrnb(nrnb, eNR_GATHERFBSP,
1667 pme->pme_order*pme->pme_order*pme->pme_order*pme->atc[0].n);
1669 wallcycle_stop(wcycle, ewcPME_GATHER);
1671 bFirst = FALSE;
1672 } /* for (grid_index = 8; grid_index >= 2; --grid_index) */
1673 } /* if (bCalcF) */
1674 } /* for (fep_state = 0; fep_state < fep_states_lj; ++fep_state) */
1675 } /* if (pme->doLJ && pme->ljpme_combination_rule == eljpmeLB) */
1677 if (bCalcF && pme->nnodes > 1)
1679 wallcycle_start(wcycle, ewcPME_REDISTXF);
1680 for (d = 0; d < pme->ndecompdim; d++)
1682 atc = &pme->atc[d];
1683 if (d == pme->ndecompdim - 1)
1685 n_d = homenr;
1686 f_d = f + start;
1688 else
1690 n_d = pme->atc[d+1].n;
1691 f_d = pme->atc[d+1].f;
1693 if (DOMAINDECOMP(cr))
1695 dd_pmeredist_f(pme, atc, n_d, f_d,
1696 d == pme->ndecompdim-1 && pme->bPPnode);
1700 wallcycle_stop(wcycle, ewcPME_REDISTXF);
1703 if (bCalcEnerVir)
1705 if (pme->doCoulomb)
1707 if (!pme->bFEP_q)
1709 *energy_q = energy_AB[0];
1710 m_add(vir_q, vir_AB[0], vir_q);
1712 else
1714 *energy_q = (1.0-lambda_q)*energy_AB[0] + lambda_q*energy_AB[1];
1715 *dvdlambda_q += energy_AB[1] - energy_AB[0];
1716 for (i = 0; i < DIM; i++)
1718 for (j = 0; j < DIM; j++)
1720 vir_q[i][j] += (1.0-lambda_q)*vir_AB[0][i][j] +
1721 lambda_q*vir_AB[1][i][j];
1725 if (debug)
1727 fprintf(debug, "Electrostatic PME mesh energy: %g\n", *energy_q);
1730 else
1732 *energy_q = 0;
1735 if (pme->doLJ)
1737 if (!pme->bFEP_lj)
1739 *energy_lj = energy_AB[2];
1740 m_add(vir_lj, vir_AB[2], vir_lj);
1742 else
1744 *energy_lj = (1.0-lambda_lj)*energy_AB[2] + lambda_lj*energy_AB[3];
1745 *dvdlambda_lj += energy_AB[3] - energy_AB[2];
1746 for (i = 0; i < DIM; i++)
1748 for (j = 0; j < DIM; j++)
1750 vir_lj[i][j] += (1.0-lambda_lj)*vir_AB[2][i][j] + lambda_lj*vir_AB[3][i][j];
1754 if (debug)
1756 fprintf(debug, "Lennard-Jones PME mesh energy: %g\n", *energy_lj);
1759 else
1761 *energy_lj = 0;
1764 return 0;
1767 void gmx_pme_destroy(gmx_pme_t *pme)
1769 if (!pme)
1771 return;
1774 delete pme->boxScaler;
1776 sfree(pme->nnx);
1777 sfree(pme->nny);
1778 sfree(pme->nnz);
1779 sfree(pme->fshx);
1780 sfree(pme->fshy);
1781 sfree(pme->fshz);
1783 for (int i = 0; i < pme->ngrids; ++i)
1785 pmegrids_destroy(&pme->pmegrid[i]);
1787 if (pme->pfft_setup)
1789 for (int i = 0; i < pme->ngrids; ++i)
1791 gmx_parallel_3dfft_destroy(pme->pfft_setup[i]);
1794 sfree(pme->fftgrid);
1795 sfree(pme->cfftgrid);
1796 sfree(pme->pfft_setup);
1798 for (int i = 0; i < std::max(1, pme->ndecompdim); i++) //pme->atc[0] is always allocated
1800 destroy_atomcomm(&pme->atc[i]);
1803 for (int i = 0; i < DIM; i++)
1805 sfree(pme->bsp_mod[i]);
1808 sfree(pme->lb_buf1);
1809 sfree(pme->lb_buf2);
1811 sfree(pme->bufv);
1812 sfree(pme->bufr);
1814 if (pme->solve_work)
1816 pme_free_all_work(&pme->solve_work, pme->nthread);
1819 sfree(pme->sum_qgrid_tmp);
1820 sfree(pme->sum_qgrid_dd_tmp);
1822 destroy_pme_spline_work(pme->spline_work);
1824 if (pme_gpu_active(pme) && pme->gpu)
1826 pme_gpu_destroy(pme->gpu);
1829 delete pme;
1832 void gmx_pme_reinit_atoms(const gmx_pme_t *pme, const int nAtoms, const real *charges)
1834 if (pme_gpu_active(pme))
1836 pme_gpu_reinit_atoms(pme->gpu, nAtoms, charges);
1838 // TODO: handle the CPU case here; handle the whole t_mdatoms