Do not force separate PME rank to recompute reciprocal box every step
src/gromacs/ewald/pme-only.cpp
blob 3f219be17315b1d026e6154506c5d6670b015fb5
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem overkill, but better safe than sorry.
 * /Erik 001109
 */
#include "gmxpre.h"

#include "config.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <cmath>

#include <memory>
#include <numeric>
#include <vector>

#include "gromacs/compat/make_unique.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"

#include "pme-internal.h"
#include "pme-pp-communication.h"
//! Contains information about the PP ranks that partner this PME rank.
struct PpRanks
{
    //! The MPI rank ID of this partner PP rank.
    int rankId;
    //! The number of atoms to communicate with this partner PP rank.
    int numAtoms;
};
/*! \brief Master PP-PME communication data structure */
struct gmx_pme_pp {
    MPI_Comm             mpi_comm_mysim; /**< MPI communicator for this simulation */
    std::vector<PpRanks> ppRanks;        /**< The PP partner ranks */
    int                  peerRankId;     /**< The peer PP rank id */
    //@{
    /**< Vectors of A- and B-state parameters used to transfer vectors to PME ranks */
    gmx::HostVector<real> chargeA;
    std::vector<real>     chargeB;
    std::vector<real>     sqrt_c6A;
    std::vector<real>     sqrt_c6B;
    std::vector<real>     sigmaA;
    std::vector<real>     sigmaB;
    //@}
    gmx::HostVector<gmx::RVec> x; /**< Vector of atom coordinates to transfer to PME ranks */
    std::vector<gmx::RVec>     f; /**< Vector of atom forces received from PME ranks */
    //@{
    /**< Vectors of MPI objects used in non-blocking communication between multiple PP ranks per PME rank */
    std::vector<MPI_Request> req;
    std::vector<MPI_Status>  stat;
    //@}
};
/*! \brief Initialize the PME-only side of the PME <-> PP communication */
static std::unique_ptr<gmx_pme_pp> gmx_pme_pp_init(t_commrec *cr)
{
    auto pme_pp = gmx::compat::make_unique<gmx_pme_pp>();

#if GMX_MPI
    int rank;

    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
    MPI_Comm_rank(cr->mpi_comm_mygroup, &rank);
    auto ppRanks = get_pme_ddranks(cr, rank);
    pme_pp->ppRanks.reserve(ppRanks.size());
    for (const auto &ppRankId : ppRanks)
    {
        pme_pp->ppRanks.push_back({ppRankId, 0});
    }
    // The peer PP rank is the last one.
    pme_pp->peerRankId = pme_pp->ppRanks.back().rankId;
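    // One request/status slot per communication type and per partner PP rank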
    pme_pp->req.resize(eCommType_NR*pme_pp->ppRanks.size());
    pme_pp->stat.resize(eCommType_NR*pme_pp->ppRanks.size());
#else
    GMX_UNUSED_VALUE(cr);
#endif

    return pme_pp;
}
static void reset_pmeonly_counters(gmx_wallcycle_t           wcycle,
                                   gmx_walltime_accounting_t walltime_accounting,
                                   t_nrnb                   *nrnb, t_inputrec *ir,
                                   gmx_int64_t               step)
{
    /* Reset all the counters related to performance over the run */
    wallcycle_stop(wcycle, ewcRUN);
    wallcycle_reset_all(wcycle);
    init_nrnb(nrnb);
    if (ir->nsteps >= 0)
    {
        /* ir->nsteps is not used here, but we update it for consistency */
        ir->nsteps -= step - ir->init_step;
    }
    ir->init_step = step;
    wallcycle_start(wcycle, ewcRUN);
    walltime_accounting_start(walltime_accounting);
}
static gmx_pme_t *gmx_pmeonly_switch(std::vector<gmx_pme_t *> *pmedata,
                                     const ivec grid_size,
                                     real ewaldcoeff_q, real ewaldcoeff_lj,
                                     t_commrec *cr, const t_inputrec *ir)
{
    GMX_ASSERT(pmedata, "Bad PME tuning list pointer");
    for (auto &pme : *pmedata)
    {
        GMX_ASSERT(pme, "Bad PME tuning list element pointer");
        if (pme->nkx == grid_size[XX] &&
            pme->nky == grid_size[YY] &&
            pme->nkz == grid_size[ZZ])
        {
            /* Here we have found an existing PME data structure that suits us.
             * However, in the GPU case, we have to reinitialize it - there's only one GPU structure.
             * This should not cause actual GPU reallocations, at least (the allocated buffers are never shrunk).
             * So, just some grid size updates in the GPU kernel parameters.
             * TODO: this should be something like gmx_pme_update_split_params()
             */
            gmx_pme_reinit(&pme, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
            return pme;
        }
    }

    const auto &pme          = pmedata->back();
    gmx_pme_t  *newStructure = nullptr;
    // Copy last structure with new grid params
    gmx_pme_reinit(&newStructure, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
    pmedata->push_back(newStructure);
    return newStructure;
}
/*! \brief Called by PME-only ranks to receive coefficients and coordinates
 *
 * \param[in,out] pme_pp        PME-PP communication structure.
 * \param[out] natoms           Number of received atoms.
 * \param[out] box              System box, if received.
 * \param[out] maxshift_x       Maximum shift in X direction, if received.
 * \param[out] maxshift_y       Maximum shift in Y direction, if received.
 * \param[out] lambda_q         Free-energy lambda for electrostatics, if received.
 * \param[out] lambda_lj        Free-energy lambda for Lennard-Jones, if received.
 * \param[out] bEnerVir         Set to true if this is an energy/virial calculation step, otherwise set to false.
 * \param[out] step             MD integration step number.
 * \param[out] grid_size        PME grid size, if received.
 * \param[out] ewaldcoeff_q     Ewald cut-off parameter for electrostatics, if received.
 * \param[out] ewaldcoeff_lj    Ewald cut-off parameter for Lennard-Jones, if received.
 * \param[out] atomSetChanged   Set to true only if the local domain atom data (charges/coefficients)
 *                              has been received (after DD) and should be reinitialized. Otherwise not changed.
 *
 * \retval pmerecvqxX             All parameters were set, chargeA and chargeB can be NULL.
 * \retval pmerecvqxFINISH        No parameters were set.
 * \retval pmerecvqxSWITCHGRID    Only grid_size and *ewaldcoeff were set.
 * \retval pmerecvqxRESETCOUNTERS *step was set.
 */
static int gmx_pme_recv_coeffs_coords(gmx_pme_pp  *pme_pp,
                                      int         *natoms,
                                      matrix       box,
                                      int         *maxshift_x,
                                      int         *maxshift_y,
                                      real        *lambda_q,
                                      real        *lambda_lj,
                                      gmx_bool    *bEnerVir,
                                      gmx_int64_t *step,
                                      ivec        *grid_size,
                                      real        *ewaldcoeff_q,
                                      real        *ewaldcoeff_lj,
                                      bool        *atomSetChanged)
{
    int status = -1;
    int nat    = 0;

#if GMX_MPI
    unsigned int flags    = 0;
    int          messages = 0;
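
    /* Keep receiving until a complete set of coordinates for this step has arrived,
     * or a finish/grid-switch/reset-counters request sets the status */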
    do
    {
        gmx_pme_comm_n_box_t cnb;
        cnb.flags = 0;

        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb, sizeof(cnb), MPI_BYTE,
                 pme_pp->peerRankId, eCommType_CNB,
                 pme_pp->mpi_comm_mysim, MPI_STATUS_IGNORE);

        /* We accumulate all received flags */
        flags |= cnb.flags;

        *step = cnb.step;

        if (debug)
        {
            fprintf(debug, "PME only rank receiving:%s%s%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE)        ? " charges" : "",
                    (cnb.flags & PP_PME_COORD )        ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH)        ? " finish" : "",
                    (cnb.flags & PP_PME_SWITCHGRID)    ? " switch grid" : "",
                    (cnb.flags & PP_PME_RESETCOUNTERS) ? " reset counters" : "");
        }

        if (cnb.flags & PP_PME_FINISH)
        {
            status = pmerecvqxFINISH;
        }

        if (cnb.flags & PP_PME_SWITCHGRID)
        {
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size, *grid_size);
            *ewaldcoeff_q  = cnb.ewaldcoeff_q;
            *ewaldcoeff_lj = cnb.ewaldcoeff_lj;

            status = pmerecvqxSWITCHGRID;
        }

        if (cnb.flags & PP_PME_RESETCOUNTERS)
        {
            /* Special case, receive the step (set above) and return */
            status = pmerecvqxRESETCOUNTERS;
        }

        if (cnb.flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
        {
            *atomSetChanged = true;

            /* Receive the send counts from the other PP nodes */
            for (auto &sender : pme_pp->ppRanks)
            {
                if (sender.rankId == pme_pp->peerRankId)
                {
                    sender.numAtoms = cnb.natoms;
                }
                else
                {
                    MPI_Irecv(&sender.numAtoms, sizeof(sender.numAtoms),
                              MPI_BYTE,
                              sender.rankId, eCommType_CNB,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
            messages = 0;
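
            /* Count the total number of atoms to be received from all PP ranks */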
            nat = 0;
            for (const auto &sender : pme_pp->ppRanks)
            {
                nat += sender.numAtoms;
            }

            if (cnb.flags & PP_PME_CHARGE)
            {
                pme_pp->chargeA.resize(nat);
            }
            if (cnb.flags & PP_PME_CHARGEB)
            {
                pme_pp->chargeB.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6)
            {
                pme_pp->sqrt_c6A.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6B)
            {
                pme_pp->sqrt_c6B.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMA)
            {
                pme_pp->sigmaA.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMAB)
            {
                pme_pp->sigmaB.resize(nat);
            }
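            /* The coordinate and force buffers always hold the full atom set */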
            pme_pp->x.resize(nat);
            pme_pp->f.resize(nat);

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
            for (int q = 0; q < eCommType_NR; q++)
            {
                real *bufferPtr;

                if (!(cnb.flags & (PP_PME_CHARGE<<q)))
                {
                    continue;
                }
                switch (q)
                {
                    case eCommType_ChargeA: bufferPtr = pme_pp->chargeA.data();  break;
                    case eCommType_ChargeB: bufferPtr = pme_pp->chargeB.data();  break;
                    case eCommType_SQRTC6A: bufferPtr = pme_pp->sqrt_c6A.data(); break;
                    case eCommType_SQRTC6B: bufferPtr = pme_pp->sqrt_c6B.data(); break;
                    case eCommType_SigmaA:  bufferPtr = pme_pp->sigmaA.data();   break;
                    case eCommType_SigmaB:  bufferPtr = pme_pp->sigmaB.data();   break;
                    default: gmx_incons("Wrong eCommType");
                }
                nat = 0;
                for (const auto &sender : pme_pp->ppRanks)
                {
                    if (sender.numAtoms > 0)
                    {
                        MPI_Irecv(bufferPtr+nat,
                                  sender.numAtoms*sizeof(real),
                                  MPI_BYTE,
                                  sender.rankId, q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += sender.numAtoms;
                        if (debug)
                        {
                            fprintf(debug, "Received from PP rank %d: %d %s\n",
                                    sender.rankId, sender.numAtoms,
                                    (q == eCommType_ChargeA ||
                                     q == eCommType_ChargeB) ? "charges" : "params");
                        }
                    }
                }
            }
        }

        if (cnb.flags & PP_PME_COORD)
        {
            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box, box);
            *lambda_q  = cnb.lambda_q;
            *lambda_lj = cnb.lambda_lj;
            *bEnerVir  = (cnb.flags & PP_PME_ENER_VIR);
            *step      = cnb.step;

            /* Receive the coordinates in place */
            nat = 0;
            for (const auto &sender : pme_pp->ppRanks)
            {
                if (sender.numAtoms > 0)
                {
                    MPI_Irecv(pme_pp->x[nat], sender.numAtoms*sizeof(rvec),
                              MPI_BYTE,
                              sender.rankId, eCommType_COORD,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                    nat += sender.numAtoms;
                    if (debug)
                    {
                        fprintf(debug, "Received from PP rank %d: %d coordinates\n",
                                sender.rankId, sender.numAtoms);
                    }
                }
            }

            status = pmerecvqxX;
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
        messages = 0;
    }
    while (status == -1);
#else
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(box);
    GMX_UNUSED_VALUE(maxshift_x);
    GMX_UNUSED_VALUE(maxshift_y);
    GMX_UNUSED_VALUE(lambda_q);
    GMX_UNUSED_VALUE(lambda_lj);
    GMX_UNUSED_VALUE(bEnerVir);
    GMX_UNUSED_VALUE(step);
    GMX_UNUSED_VALUE(grid_size);
    GMX_UNUSED_VALUE(ewaldcoeff_q);
    GMX_UNUSED_VALUE(ewaldcoeff_lj);
    GMX_UNUSED_VALUE(atomSetChanged);

    status = pmerecvqxX;
#endif

    if (status == pmerecvqxX)
    {
        *natoms = nat;
    }

    return status;
}
/*! \brief Send the PME mesh force, virial and energy to the PP-only ranks. */
static void gmx_pme_send_force_vir_ener(gmx_pme_pp *pme_pp,
                                        const rvec *f,
                                        matrix vir_q, real energy_q,
                                        matrix vir_lj, real energy_lj,
                                        real dvdlambda_q, real dvdlambda_lj,
                                        float cycles)
{
#if GMX_MPI
    gmx_pme_comm_vir_ene_t cve;
    int                    messages, ind_start, ind_end;
    cve.cycles = cycles;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end  = 0;
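    /* Each PP rank receives the contiguous chunk of forces for its own atoms,
     * in the same order in which the coordinates were received */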
    for (const auto &receiver : pme_pp->ppRanks)
    {
        ind_start = ind_end;
        ind_end   = ind_start + receiver.numAtoms;
        if (MPI_Isend(const_cast<void *>(static_cast<const void *>(f[ind_start])),
                      (ind_end-ind_start)*sizeof(rvec), MPI_BYTE,
                      receiver.rankId, 0,
                      pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]) != 0)
        {
            gmx_comm("MPI_Isend failed in do_pmeonly");
        }
    }

    /* send virial and energy to our last PP node */
    copy_mat(vir_q, cve.vir_q);
    copy_mat(vir_lj, cve.vir_lj);
    cve.energy_q     = energy_q;
    cve.energy_lj    = energy_lj;
    cve.dvdlambda_q  = dvdlambda_q;
    cve.dvdlambda_lj = dvdlambda_lj;
    /* check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    cve.cycles = cycles;

    if (debug)
    {
        fprintf(debug, "PME rank sending to PP rank %d: virial and energy\n",
                pme_pp->peerRankId);
    }
    MPI_Isend(&cve, sizeof(cve), MPI_BYTE,
              pme_pp->peerRankId, 1,
              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);

    /* Wait for the force and energy sends to complete */
    MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
#else
    gmx_call("MPI not enabled");
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(f);
    GMX_UNUSED_VALUE(vir_q);
    GMX_UNUSED_VALUE(energy_q);
    GMX_UNUSED_VALUE(vir_lj);
    GMX_UNUSED_VALUE(energy_lj);
    GMX_UNUSED_VALUE(dvdlambda_q);
    GMX_UNUSED_VALUE(dvdlambda_lj);
    GMX_UNUSED_VALUE(cycles);
#endif
}
int gmx_pmeonly(struct gmx_pme_t *pme,
                t_commrec *cr, t_nrnb *mynrnb,
                gmx_wallcycle_t wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                t_inputrec *ir, PmeRunMode runMode)
{
    int         ret;
    int         natoms = 0;
    matrix      box;
    real        lambda_q   = 0;
    real        lambda_lj  = 0;
    int         maxshift_x = 0, maxshift_y = 0;
    real        energy_q, energy_lj, dvdlambda_q, dvdlambda_lj;
    matrix      vir_q, vir_lj;
    float       cycles;
    int         count;
    gmx_bool    bEnerVir = FALSE;
    gmx_int64_t step;

    /* This data will only be used with PME tuning, i.e. switching PME grids */
    std::vector<gmx_pme_t *> pmedata;
    pmedata.push_back(pme);

    auto       pme_pp       = gmx_pme_pp_init(cr);
    //TODO the variable below should be queried from the task assignment info
    const bool useGpuForPme = (runMode == PmeRunMode::GPU) || (runMode == PmeRunMode::Mixed);
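    /* Allow pinning of the host-side charge and coordinate buffers, so that
     * transfers to the GPU can be done more efficiently */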
    if (useGpuForPme)
    {
        changePinningPolicy(&pme_pp->chargeA, gmx::PinningPolicy::CanBePinned);
        changePinningPolicy(&pme_pp->x, gmx::PinningPolicy::CanBePinned);
    }

    init_nrnb(mynrnb);
    count = 0;
    do /****** this is a quasi-loop over time steps! */
    {
        /* The reason for having a loop here is PME grid tuning/switching */
        do
        {
            /* Domain decomposition */
            ivec newGridSize;
            bool atomSetChanged = false;
            real ewaldcoeff_q   = 0, ewaldcoeff_lj = 0;
            ret = gmx_pme_recv_coeffs_coords(pme_pp.get(),
                                             &natoms,
                                             box,
                                             &maxshift_x, &maxshift_y,
                                             &lambda_q, &lambda_lj,
                                             &bEnerVir,
                                             &step,
                                             &newGridSize,
                                             &ewaldcoeff_q,
                                             &ewaldcoeff_lj,
                                             &atomSetChanged);

            if (ret == pmerecvqxSWITCHGRID)
            {
                /* Switch the PME grid to newGridSize */
                pme = gmx_pmeonly_switch(&pmedata, newGridSize, ewaldcoeff_q, ewaldcoeff_lj, cr, ir);
            }

            if (atomSetChanged)
            {
                gmx_pme_reinit_atoms(pme, natoms, pme_pp->chargeA.data());
            }

            if (ret == pmerecvqxRESETCOUNTERS)
            {
                /* Reset the cycle and flop counters */
                reset_pmeonly_counters(wcycle, walltime_accounting, mynrnb, ir, step);
            }
        }
        while (ret == pmerecvqxSWITCHGRID || ret == pmerecvqxRESETCOUNTERS);

        if (ret == pmerecvqxFINISH)
        {
            /* We should stop: break out of the loop */
            break;
        }

        if (count == 0)
        {
            wallcycle_start(wcycle, ewcRUN);
            walltime_accounting_start(walltime_accounting);
        }

        wallcycle_start(wcycle, ewcPMEMESH);

        dvdlambda_q  = 0;
        dvdlambda_lj = 0;
        clear_mat(vir_q);
        clear_mat(vir_lj);
        energy_q  = 0;
        energy_lj = 0;

        // TODO Make a struct of array refs onto these per-atom fields
        // of pme_pp (maybe box, energy and virial, too; and likewise
        // from mdatoms for the other call to gmx_pme_do), so we have
        // fewer lines of code and less parameter passing.
        const int pmeFlags = GMX_PME_DO_ALL_F | (bEnerVir ? GMX_PME_CALC_ENER_VIR : 0);
        gmx::ArrayRef<const gmx::RVec> forces;
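        /* On the GPU path, spread, FFT+solve and gather are launched asynchronously
         * and we block only when waiting for the output forces and energy */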
        if (useGpuForPme)
        {
            const bool boxChanged = false;
            //TODO this should be set properly by gmx_pme_recv_coeffs_coords,
            // or maybe use inputrecDynamicBox(ir), at the very least - change this when this codepath is tested!
            pme_gpu_prepare_computation(pme, boxChanged, box, wcycle, pmeFlags);
            pme_gpu_launch_spread(pme, as_rvec_array(pme_pp->x.data()), wcycle);
            pme_gpu_launch_complex_transforms(pme, wcycle);
            pme_gpu_launch_gather(pme, wcycle, PmeForceOutputHandling::Set);
            pme_gpu_wait_for_gpu(pme, wcycle, &forces, vir_q, &energy_q);
        }
        else
        {
            gmx_pme_do(pme, 0, natoms, as_rvec_array(pme_pp->x.data()), as_rvec_array(pme_pp->f.data()),
                       pme_pp->chargeA.data(), pme_pp->chargeB.data(),
                       pme_pp->sqrt_c6A.data(), pme_pp->sqrt_c6B.data(),
                       pme_pp->sigmaA.data(), pme_pp->sigmaB.data(), box,
                       cr, maxshift_x, maxshift_y, mynrnb, wcycle,
                       vir_q, vir_lj,
                       &energy_q, &energy_lj, lambda_q, lambda_lj, &dvdlambda_q, &dvdlambda_lj,
                       pmeFlags);
            forces = pme_pp->f;
        }

        cycles = wallcycle_stop(wcycle, ewcPMEMESH);

        gmx_pme_send_force_vir_ener(pme_pp.get(), as_rvec_array(forces.data()),
                                    vir_q, energy_q, vir_lj, energy_lj,
                                    dvdlambda_q, dvdlambda_lj, cycles);

        count++;
    } /***** end of quasi-loop, we stop with the break above */
    while (TRUE);

    walltime_accounting_end(walltime_accounting);

    return 0;
}