src/gromacs/ewald/pme_only.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use these boxes:
 *
 *    rectangular       triclinic
 *     2  0  0           2  0  0
 *     0  2  0           0  2  0
 *     0  0  6           2  2  6
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 * /Erik 001109
 */
#include "gmxpre.h"

#include "pme_only.h"

#include "config.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <memory>
#include <numeric>
#include <vector>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/ewald/pme_coordinate_receiver_gpu.h"
#include "gromacs/ewald/pme_force_sender_gpu.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gpu_utils/device_stream_manager.h"
#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"

#include "pme_gpu_internal.h"
#include "pme_output.h"
#include "pme_pp_communication.h"
/*! \brief Environment variable to enable GPU P2P communication */
static const bool c_enableGpuPmePpComms =
        (getenv("GMX_GPU_PME_PP_COMMS") != nullptr) && GMX_THREAD_MPI && (GMX_GPU == GMX_GPU_CUDA);
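// Note: as the condition above shows, direct GPU PME-PP communication only
// takes effect in thread-MPI builds with CUDA; otherwise PME-PP transfers fall
// back to staged MPI messages through host memory. An illustrative invocation
// (a sketch, not the only valid setup) would be:
//   GMX_GPU_PME_PP_COMMS=1 gmx mdrun -ntmpi 2 -npme 1 -pme gpu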
/*! \brief Master PP-PME communication data structure */
struct gmx_pme_pp
{
    MPI_Comm             mpi_comm_mysim; /**< MPI communicator for this simulation */
    std::vector<PpRanks> ppRanks;        /**< The PP partner ranks */
    int                  peerRankId;     /**< The peer PP rank id */
    //@{
    /**< Vectors of A- and B-state parameters used to transfer vectors to PME ranks */
    gmx::PaddedHostVector<real> chargeA;
    std::vector<real>           chargeB;
    std::vector<real>           sqrt_c6A;
    std::vector<real>           sqrt_c6B;
    std::vector<real>           sigmaA;
    std::vector<real>           sigmaB;
    //@}
    gmx::HostVector<gmx::RVec> x; /**< Vector of atom coordinates to transfer to PME ranks */
    std::vector<gmx::RVec>     f; /**< Vector of atom forces received from PME ranks */
    //@{
    /**< Vectors of MPI objects used in non-blocking communication between multiple PP ranks per PME rank */
    std::vector<MPI_Request> req;
    std::vector<MPI_Status>  stat;
    //@}

    /*! \brief Object for receiving coordinates using communication operating on GPU memory space */
    std::unique_ptr<gmx::PmeCoordinateReceiverGpu> pmeCoordinateReceiverGpu;
    /*! \brief Object for sending PME forces using communication operating on GPU memory space */
    std::unique_ptr<gmx::PmeForceSenderGpu> pmeForceSenderGpu;

    /*! \brief Whether GPU direct communication is active for PME-PP transfers */
    bool useGpuDirectComm = false;
};
/*! \brief Initialize the PME-only side of the PME <-> PP communication */
static std::unique_ptr<gmx_pme_pp> gmx_pme_pp_init(const t_commrec* cr)
{
    auto pme_pp = std::make_unique<gmx_pme_pp>();

#if GMX_MPI
    int rank;

    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
    MPI_Comm_rank(cr->mpi_comm_mygroup, &rank);
    auto ppRanks = get_pme_ddranks(cr, rank);
    pme_pp->ppRanks.reserve(ppRanks.size());
    for (const auto& ppRankId : ppRanks)
    {
        pme_pp->ppRanks.push_back({ ppRankId, 0 });
    }
    // The peer PP rank is the last one.
    pme_pp->peerRankId = pme_pp->ppRanks.back().rankId;
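    // Reserve one request and one status slot per communication type per PP
    // rank: the maximum number of non-blocking messages that can be in flight.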
    pme_pp->req.resize(eCommType_NR * pme_pp->ppRanks.size());
    pme_pp->stat.resize(eCommType_NR * pme_pp->ppRanks.size());
#else
    GMX_UNUSED_VALUE(cr);
#endif

    return pme_pp;
}
static void reset_pmeonly_counters(gmx_wallcycle_t           wcycle,
                                   gmx_walltime_accounting_t walltime_accounting,
                                   t_nrnb*                   nrnb,
                                   int64_t                   step,
                                   bool                      useGpuForPme)
{
    /* Reset all the counters related to performance over the run */
    wallcycle_stop(wcycle, ewcRUN);
    wallcycle_reset_all(wcycle);
    *nrnb = { 0 };
    wallcycle_start(wcycle, ewcRUN);
    walltime_accounting_reset_time(walltime_accounting, step);

    if (useGpuForPme)
    {
        resetGpuProfiler();
    }
}
static gmx_pme_t* gmx_pmeonly_switch(std::vector<gmx_pme_t*>* pmedata,
                                     const ivec               grid_size,
                                     real                     ewaldcoeff_q,
                                     real                     ewaldcoeff_lj,
                                     const t_commrec*         cr,
                                     const t_inputrec*        ir)
{
    GMX_ASSERT(pmedata, "Bad PME tuning list pointer");
    for (auto& pme : *pmedata)
    {
        GMX_ASSERT(pme, "Bad PME tuning list element pointer");
        if (gmx_pme_grid_matches(*pme, grid_size))
        {
            /* Here we have found an existing PME data structure that suits us.
             * However, in the GPU case, we have to reinitialize it - there's only one GPU structure.
             * This should not cause actual GPU reallocations, at least (the allocated buffers are never shrunk).
             * So, just some grid size updates in the GPU kernel parameters.
             * TODO: this should be something like gmx_pme_update_split_params()
             */
            gmx_pme_reinit(&pme, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
            return pme;
        }
    }

    const auto& pme          = pmedata->back();
    gmx_pme_t*  newStructure = nullptr;
    // Copy last structure with new grid params
    gmx_pme_reinit(&newStructure, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
    pmedata->push_back(newStructure);
    return newStructure;
}
/*! \brief Called by PME-only ranks to receive coefficients and coordinates
 *
 * \param[in]  pme                    PME data structure.
 * \param[in,out] pme_pp              PME-PP communication structure.
 * \param[out] natoms                 Number of received atoms.
 * \param[out] box                    System box, if received.
 * \param[out] maxshift_x             Maximum shift in X direction, if received.
 * \param[out] maxshift_y             Maximum shift in Y direction, if received.
 * \param[out] lambda_q               Free-energy lambda for electrostatics, if received.
 * \param[out] lambda_lj              Free-energy lambda for Lennard-Jones, if received.
 * \param[out] computeEnergyAndVirial Set to true if this is an energy/virial calculation
 *                                    step, otherwise set to false.
 * \param[out] step                   MD integration step number.
 * \param[out] grid_size              PME grid size, if received.
 * \param[out] ewaldcoeff_q           Ewald cut-off parameter for electrostatics, if received.
 * \param[out] ewaldcoeff_lj          Ewald cut-off parameter for Lennard-Jones, if received.
 * \param[in]  useGpuForPme           Flag on whether PME is on GPU.
 * \param[in]  stateGpu               GPU state propagator object.
 * \param[in]  runMode                PME run mode.
 *
 * \retval pmerecvqxX             All parameters were set, chargeA and chargeB can be NULL.
 * \retval pmerecvqxFINISH        No parameters were set.
 * \retval pmerecvqxSWITCHGRID    Only grid_size and *ewaldcoeff were set.
 * \retval pmerecvqxRESETCOUNTERS *step was set.
 */
static int gmx_pme_recv_coeffs_coords(struct gmx_pme_t*            pme,
                                      gmx_pme_pp*                  pme_pp,
                                      int*                         natoms,
                                      matrix                       box,
                                      int*                         maxshift_x,
                                      int*                         maxshift_y,
                                      real*                        lambda_q,
                                      real*                        lambda_lj,
                                      gmx_bool*                    computeEnergyAndVirial,
                                      int64_t*                     step,
                                      ivec*                        grid_size,
                                      real*                        ewaldcoeff_q,
                                      real*                        ewaldcoeff_lj,
                                      bool                         useGpuForPme,
                                      gmx::StatePropagatorDataGpu* stateGpu,
                                      PmeRunMode gmx_unused        runMode)
{
    int status = -1;
    int nat    = 0;

#if GMX_MPI
    unsigned int flags          = 0;
    int          messages       = 0;
    bool         atomSetChanged = false;

    do
    {
        gmx_pme_comm_n_box_t cnb;
        cnb.flags = 0;

        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb, sizeof(cnb), MPI_BYTE, pme_pp->peerRankId, eCommType_CNB,
                 pme_pp->mpi_comm_mysim, MPI_STATUS_IGNORE);

        /* We accumulate all received flags */
        flags |= cnb.flags;

        *step = cnb.step;

        if (debug)
        {
            fprintf(debug, "PME only rank receiving:%s%s%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE) ? " charges" : "",
                    (cnb.flags & PP_PME_COORD) ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH) ? " finish" : "",
                    (cnb.flags & PP_PME_SWITCHGRID) ? " switch grid" : "",
                    (cnb.flags & PP_PME_RESETCOUNTERS) ? " reset counters" : "");
        }

        pme_pp->useGpuDirectComm = ((cnb.flags & PP_PME_GPUCOMMS) != 0);
        GMX_ASSERT(!pme_pp->useGpuDirectComm || (pme_pp->pmeForceSenderGpu != nullptr),
                   "The use of GPU direct communication for PME-PP is enabled, "
                   "but the PME GPU force receiver object does not exist");
        if (cnb.flags & PP_PME_FINISH)
        {
            status = pmerecvqxFINISH;
        }

        if (cnb.flags & PP_PME_SWITCHGRID)
        {
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size, *grid_size);
            *ewaldcoeff_q  = cnb.ewaldcoeff_q;
            *ewaldcoeff_lj = cnb.ewaldcoeff_lj;

            status = pmerecvqxSWITCHGRID;
        }

        if (cnb.flags & PP_PME_RESETCOUNTERS)
        {
            /* Special case, receive the step (set above) and return */
            status = pmerecvqxRESETCOUNTERS;
        }

        if (cnb.flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
        {
            atomSetChanged = true;

            /* Receive the send counts from the other PP nodes */
            for (auto& sender : pme_pp->ppRanks)
            {
                if (sender.rankId == pme_pp->peerRankId)
                {
                    sender.numAtoms = cnb.natoms;
                }
                else
                {
                    MPI_Irecv(&sender.numAtoms, sizeof(sender.numAtoms), MPI_BYTE, sender.rankId,
                              eCommType_CNB, pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
            messages = 0;

            nat = 0;
            for (const auto& sender : pme_pp->ppRanks)
            {
                nat += sender.numAtoms;
            }

            if (cnb.flags & PP_PME_CHARGE)
            {
                pme_pp->chargeA.resizeWithPadding(nat);
            }
            if (cnb.flags & PP_PME_CHARGEB)
            {
                pme_pp->chargeB.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6)
            {
                pme_pp->sqrt_c6A.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6B)
            {
                pme_pp->sqrt_c6B.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMA)
            {
                pme_pp->sigmaA.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMAB)
            {
                pme_pp->sigmaB.resize(nat);
            }
            pme_pp->x.resize(nat);
            pme_pp->f.resize(nat);

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
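            /* Note: the flag test below relies on the PP_PME_* parameter flags
             * occupying consecutive bits in the same order as the eCommType
             * values, so that (PP_PME_CHARGE << q) selects the flag that
             * corresponds to buffer type q. */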
            for (int q = 0; q < eCommType_NR; q++)
            {
                real* bufferPtr;

                if (!(cnb.flags & (PP_PME_CHARGE << q)))
                {
                    continue;
                }
                switch (q)
                {
                    case eCommType_ChargeA: bufferPtr = pme_pp->chargeA.data(); break;
                    case eCommType_ChargeB: bufferPtr = pme_pp->chargeB.data(); break;
                    case eCommType_SQRTC6A: bufferPtr = pme_pp->sqrt_c6A.data(); break;
                    case eCommType_SQRTC6B: bufferPtr = pme_pp->sqrt_c6B.data(); break;
                    case eCommType_SigmaA: bufferPtr = pme_pp->sigmaA.data(); break;
                    case eCommType_SigmaB: bufferPtr = pme_pp->sigmaB.data(); break;
                    default: gmx_incons("Wrong eCommType");
                }
                nat = 0;
                for (const auto& sender : pme_pp->ppRanks)
                {
                    if (sender.numAtoms > 0)
                    {
                        MPI_Irecv(bufferPtr + nat, sender.numAtoms * sizeof(real), MPI_BYTE,
                                  sender.rankId, q, pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                        nat += sender.numAtoms;
                        if (debug)
                        {
                            fprintf(debug, "Received from PP rank %d: %d %s\n", sender.rankId,
                                    sender.numAtoms,
                                    (q == eCommType_ChargeA || q == eCommType_ChargeB) ? "charges"
                                                                                       : "params");
                        }
                    }
                }
            }
        }
        if (cnb.flags & PP_PME_COORD)
        {
            if (atomSetChanged)
            {
                gmx_pme_reinit_atoms(pme, nat, pme_pp->chargeA.data());
                if (useGpuForPme)
                {
                    stateGpu->reinit(nat, nat);
                    pme_gpu_set_device_x(pme, stateGpu->getCoordinates());
                }
                if (pme_pp->useGpuDirectComm)
                {
                    GMX_ASSERT(runMode == PmeRunMode::GPU,
                               "GPU Direct PME-PP communication has been enabled, "
                               "but PME run mode is not PmeRunMode::GPU\n");

                    // This rank will have its data accessed directly by the PP rank, so needs to send the remote addresses.
                    pme_pp->pmeCoordinateReceiverGpu->sendCoordinateBufferAddressToPpRanks(
                            stateGpu->getCoordinates());
                    pme_pp->pmeForceSenderGpu->sendForceBufferAddressToPpRanks(
                            reinterpret_cast<rvec*>(pme_gpu_get_device_f(pme)));
                }
            }

            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box, box);
            *lambda_q               = cnb.lambda_q;
            *lambda_lj              = cnb.lambda_lj;
            *computeEnergyAndVirial = ((cnb.flags & PP_PME_ENER_VIR) != 0U);
            *step                   = cnb.step;

            /* Receive the coordinates in place */
            nat = 0;
            for (const auto& sender : pme_pp->ppRanks)
            {
                if (sender.numAtoms > 0)
                {
                    if (pme_pp->useGpuDirectComm)
                    {
                        pme_pp->pmeCoordinateReceiverGpu->launchReceiveCoordinatesFromPpCudaDirect(
                                sender.rankId);
                    }
                    else
                    {
                        MPI_Irecv(pme_pp->x[nat], sender.numAtoms * sizeof(rvec), MPI_BYTE,
                                  sender.rankId, eCommType_COORD, pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                    }
                    nat += sender.numAtoms;
                    if (debug)
                    {
                        fprintf(debug,
                                "Received from PP rank %d: %d "
                                "coordinates\n",
                                sender.rankId, sender.numAtoms);
                    }
                }
            }

            if (pme_pp->useGpuDirectComm)
            {
                pme_pp->pmeCoordinateReceiverGpu->enqueueWaitReceiveCoordinatesFromPpCudaDirect();
            }

            status = pmerecvqxX;
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
        messages = 0;
    } while (status == -1);
#else
    GMX_UNUSED_VALUE(pme);
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(box);
    GMX_UNUSED_VALUE(maxshift_x);
    GMX_UNUSED_VALUE(maxshift_y);
    GMX_UNUSED_VALUE(lambda_q);
    GMX_UNUSED_VALUE(lambda_lj);
    GMX_UNUSED_VALUE(computeEnergyAndVirial);
    GMX_UNUSED_VALUE(step);
    GMX_UNUSED_VALUE(grid_size);
    GMX_UNUSED_VALUE(ewaldcoeff_q);
    GMX_UNUSED_VALUE(ewaldcoeff_lj);
    GMX_UNUSED_VALUE(useGpuForPme);
    GMX_UNUSED_VALUE(stateGpu);

    status = pmerecvqxX;
#endif

    if (status == pmerecvqxX)
    {
        *natoms = nat;
    }

    return status;
}
#if GMX_MPI
/*! \brief Send force data to PP ranks */
static void sendFToPP(void* sendbuf, PpRanks receiver, gmx_pme_pp* pme_pp, int* messages)
{
    if (pme_pp->useGpuDirectComm)
    {
        GMX_ASSERT((pme_pp->pmeForceSenderGpu != nullptr),
                   "The use of GPU direct communication for PME-PP is enabled, "
                   "but the PME GPU force receiver object does not exist");

        pme_pp->pmeForceSenderGpu->sendFToPpCudaDirect(receiver.rankId);
    }
    else
    {
        // Send using MPI
        MPI_Isend(sendbuf, receiver.numAtoms * sizeof(rvec), MPI_BYTE, receiver.rankId, 0,
                  pme_pp->mpi_comm_mysim, &pme_pp->req[*messages]);
        *messages = *messages + 1;
    }
}
#endif
/*! \brief Send the PME mesh force, virial and energy to the PP-only ranks. */
static void gmx_pme_send_force_vir_ener(const gmx_pme_t& pme,
                                        gmx_pme_pp*      pme_pp,
                                        const PmeOutput& output,
                                        real             dvdlambda_q,
                                        real             dvdlambda_lj,
                                        float            cycles)
{
#if GMX_MPI
    gmx_pme_comm_vir_ene_t cve;
    int                    messages, ind_start, ind_end;
    cve.cycles = cycles;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end  = 0;
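    /* The force buffer is partitioned into contiguous per-rank chunks, in the
     * same ppRanks order in which the atom counts were received. */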
    for (const auto& receiver : pme_pp->ppRanks)
    {
        ind_start = ind_end;
        ind_end   = ind_start + receiver.numAtoms;
        void* sendbuf = const_cast<void*>(static_cast<const void*>(output.forces_[ind_start]));
        if (pme_pp->useGpuDirectComm)
        {
            // Data will be transferred directly from GPU.
            rvec* d_f = reinterpret_cast<rvec*>(pme_gpu_get_device_f(&pme));
            sendbuf   = reinterpret_cast<void*>(&d_f[ind_start]);
        }
        sendFToPP(sendbuf, receiver, pme_pp, &messages);
    }

    /* Send virial and energy to our last PP node */
    copy_mat(output.coulombVirial_, cve.vir_q);
    copy_mat(output.lennardJonesVirial_, cve.vir_lj);
    cve.energy_q     = output.coulombEnergy_;
    cve.energy_lj    = output.lennardJonesEnergy_;
    cve.dvdlambda_q  = dvdlambda_q;
    cve.dvdlambda_lj = dvdlambda_lj;
    /* Check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    cve.cycles = cycles;

    if (debug)
    {
        fprintf(debug, "PME rank sending to PP rank %d: virial and energy\n", pme_pp->peerRankId);
    }
    MPI_Isend(&cve, sizeof(cve), MPI_BYTE, pme_pp->peerRankId, 1, pme_pp->mpi_comm_mysim,
              &pme_pp->req[messages++]);

    /* Wait for the outgoing force, virial and energy messages to complete */
    MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
#else
    GMX_RELEASE_ASSERT(false, "Invalid call to gmx_pme_send_force_vir_ener");
    GMX_UNUSED_VALUE(pme);
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(output);
    GMX_UNUSED_VALUE(dvdlambda_q);
    GMX_UNUSED_VALUE(dvdlambda_lj);
    GMX_UNUSED_VALUE(cycles);
#endif
}
int gmx_pmeonly(struct gmx_pme_t*               pme,
                const t_commrec*                cr,
                t_nrnb*                         mynrnb,
                gmx_wallcycle*                  wcycle,
                gmx_walltime_accounting_t       walltime_accounting,
                t_inputrec*                     ir,
                PmeRunMode                      runMode,
                const gmx::DeviceStreamManager* deviceStreamManager)
{
    int     ret;
    int     natoms = 0;
    matrix  box;
    real    lambda_q   = 0;
    real    lambda_lj  = 0;
    int     maxshift_x = 0, maxshift_y = 0;
    real    dvdlambda_q, dvdlambda_lj;
    float   cycles;
    int     count;
    bool    computeEnergyAndVirial = false;
    int64_t step;

    /* This data is only used with PME tuning, i.e. switching PME grids */
    std::vector<gmx_pme_t*> pmedata;
    pmedata.push_back(pme);
    auto pme_pp = gmx_pme_pp_init(cr);

    std::unique_ptr<gmx::StatePropagatorDataGpu> stateGpu;
    // TODO the variable below should be queried from the task assignment info
    const bool useGpuForPme = (runMode == PmeRunMode::GPU) || (runMode == PmeRunMode::Mixed);
    if (useGpuForPme)
    {
        GMX_RELEASE_ASSERT(
                deviceStreamManager != nullptr,
                "Device stream manager can not be nullptr when using GPU in PME-only rank.");
        GMX_RELEASE_ASSERT(deviceStreamManager->streamIsValid(gmx::DeviceStreamType::Pme),
                           "Device stream can not be nullptr when using GPU in PME-only rank");
        changePinningPolicy(&pme_pp->chargeA, pme_get_pinning_policy());
        changePinningPolicy(&pme_pp->x, pme_get_pinning_policy());
        if (c_enableGpuPmePpComms)
        {
            pme_pp->pmeCoordinateReceiverGpu = std::make_unique<gmx::PmeCoordinateReceiverGpu>(
                    deviceStreamManager->stream(gmx::DeviceStreamType::Pme), pme_pp->mpi_comm_mysim,
                    pme_pp->ppRanks);
            pme_pp->pmeForceSenderGpu = std::make_unique<gmx::PmeForceSenderGpu>(
                    deviceStreamManager->stream(gmx::DeviceStreamType::Pme), pme_pp->mpi_comm_mysim,
                    pme_pp->ppRanks);
        }
        // TODO: Special PME-only constructor is used here. There is no mechanism to prevent from using the other constructor here.
        // This should be made safer.
        stateGpu = std::make_unique<gmx::StatePropagatorDataGpu>(
                &deviceStreamManager->stream(gmx::DeviceStreamType::Pme),
                deviceStreamManager->context(), GpuApiCallBehavior::Async,
                pme_gpu_get_block_size(pme), wcycle);
    }
    clear_nrnb(mynrnb);

    count = 0;
    do /****** this is a quasi-loop over time steps! */
    {
        /* The reason for having a loop here is PME grid tuning/switching */
        do
        {
            /* Domain decomposition */
            ivec newGridSize;
            real ewaldcoeff_q = 0, ewaldcoeff_lj = 0;
            ret = gmx_pme_recv_coeffs_coords(pme, pme_pp.get(), &natoms, box, &maxshift_x,
                                             &maxshift_y, &lambda_q, &lambda_lj,
                                             &computeEnergyAndVirial, &step, &newGridSize,
                                             &ewaldcoeff_q, &ewaldcoeff_lj, useGpuForPme,
                                             stateGpu.get(), runMode);

            if (ret == pmerecvqxSWITCHGRID)
            {
                /* Switch the PME grid to newGridSize */
                pme = gmx_pmeonly_switch(&pmedata, newGridSize, ewaldcoeff_q, ewaldcoeff_lj, cr, ir);
            }

            if (ret == pmerecvqxRESETCOUNTERS)
            {
                /* Reset the cycle and flop counters */
                reset_pmeonly_counters(wcycle, walltime_accounting, mynrnb, step, useGpuForPme);
            }
        } while (ret == pmerecvqxSWITCHGRID || ret == pmerecvqxRESETCOUNTERS);

        if (ret == pmerecvqxFINISH)
        {
            /* We should stop: break out of the loop */
            break;
        }
        if (count == 0)
        {
            wallcycle_start(wcycle, ewcRUN);
            walltime_accounting_start_time(walltime_accounting);
        }

        wallcycle_start(wcycle, ewcPMEMESH);

        dvdlambda_q  = 0;
        dvdlambda_lj = 0;

        // TODO Make a struct of array refs onto these per-atom fields
        // of pme_pp (maybe box, energy and virial, too; and likewise
        // from mdatoms for the other call to gmx_pme_do), so we have
        // fewer lines of code and less parameter passing.
        gmx::StepWorkload stepWork;
        stepWork.computeVirial = computeEnergyAndVirial;
        stepWork.computeEnergy = computeEnergyAndVirial;
        stepWork.computeForces = true;
        PmeOutput output;
        if (useGpuForPme)
        {
            stepWork.haveDynamicBox      = false;
            stepWork.useGpuPmeFReduction = pme_pp->useGpuDirectComm;
            // TODO this should be set properly by gmx_pme_recv_coeffs_coords,
            // or maybe use inputrecDynamicBox(ir), at the very least - change this when this codepath is tested!
            pme_gpu_prepare_computation(pme, box, wcycle, stepWork);
            if (!pme_pp->useGpuDirectComm)
            {
                stateGpu->copyCoordinatesToGpu(gmx::ArrayRef<gmx::RVec>(pme_pp->x),
                                               gmx::AtomLocality::All);
            }
            // On the separate PME rank we do not need a synchronizer as we schedule everything in a single stream
            // TODO: with pme on GPU the receive should make a list of synchronizers and pass it here #3157
            auto xReadyOnDevice = nullptr;
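            // The PME GPU stages are launched in order: spread the charges onto
            // the grid, run the 3D-FFT/solve/inverse-FFT transforms, gather the
            // forces, then block until the output (and any requested energy and
            // virial) is ready before reinitializing for the next step.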
            pme_gpu_launch_spread(pme, xReadyOnDevice, wcycle);
            pme_gpu_launch_complex_transforms(pme, wcycle, stepWork);
            pme_gpu_launch_gather(pme, wcycle);
            output = pme_gpu_wait_finish_task(pme, computeEnergyAndVirial, wcycle);
            pme_gpu_reinit_computation(pme, wcycle);
        }
        else
        {
            GMX_ASSERT(pme_pp->x.size() == static_cast<size_t>(natoms),
                       "The coordinate buffer should have size natoms");

            gmx_pme_do(pme, pme_pp->x, pme_pp->f, pme_pp->chargeA.data(), pme_pp->chargeB.data(),
                       pme_pp->sqrt_c6A.data(), pme_pp->sqrt_c6B.data(), pme_pp->sigmaA.data(),
                       pme_pp->sigmaB.data(), box, cr, maxshift_x, maxshift_y, mynrnb, wcycle,
                       output.coulombVirial_, output.lennardJonesVirial_, &output.coulombEnergy_,
                       &output.lennardJonesEnergy_, lambda_q, lambda_lj, &dvdlambda_q,
                       &dvdlambda_lj, stepWork);
            output.forces_ = pme_pp->f;
        }

        cycles = wallcycle_stop(wcycle, ewcPMEMESH);
        gmx_pme_send_force_vir_ener(*pme, pme_pp.get(), output, dvdlambda_q, dvdlambda_lj, cycles);

        count++;
    } /***** end of quasi-loop, we stop with the break above */
    while (TRUE);

    walltime_accounting_end_time(walltime_accounting);

    return 0;
}