Reverting my fake merge to make history consistent
[gromacs/adressmacs.git] / src / mdlib / pme_pp.c
blob 6787819a729af840e92ba29a2739549309940bfb
/*
 *
 *                This source code is part of
 *
 *                        G R O M A C S
 *
 *          GROningen MAchine for Chemical Simulations
 *
 *                       VERSION 3.2.0
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * GROwing Monsters And Cloning Shrimps
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <string.h>
#include <math.h>
#include "typedefs.h"
#include "smalloc.h"
#include "gmx_fatal.h"
#include "vec.h"
#include "pme.h"
#include "network.h"
#include "domdec.h"
#include "sighandler.h"

#ifdef GMX_LIB_MPI
#include <mpi.h>
#endif
#ifdef GMX_THREADS
#include "tmpi.h"
#endif

#include "mpelogging.h"

#define PP_PME_CHARGE   (1<<0)
#define PP_PME_CHARGEB  (1<<1)
#define PP_PME_COORD    (1<<2)
#define PP_PME_FEP      (1<<3)
#define PP_PME_ENER_VIR (1<<4)
#define PP_PME_FINISH   (1<<5)

#define PME_PP_SIGSTOP     (1<<0)
#define PME_PP_SIGSTOPNSS  (1<<1)
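
/* PP -> PME message protocol (implemented in gmx_pme_send_q_x below):
 * the peer PP node sends a gmx_pme_comm_n_box_t header with tag 0 that
 * carries the flags above plus the atom count, box, lambda and step,
 * while the other PP nodes send only their atom count when charges are
 * (re)sent.  Charges follow with tag 1 (state A) and tag 2 (state B),
 * and coordinates with tag 3.
 */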

typedef struct gmx_pme_pp {
#ifdef GMX_MPI
    MPI_Comm mpi_comm_mysim;
#endif
    int  nnode;        /* The number of PP nodes to communicate with */
    int  *node;        /* The PP node ranks                          */
    int  node_peer;    /* The peer PP node rank                      */
    int  *nat;         /* The number of atoms for each PP node       */
    int  flags_charge; /* The flags sent along with the last charges */
    real *chargeA;
    real *chargeB;
    rvec *x;
    rvec *f;
    int  nalloc;
#ifdef GMX_MPI
    MPI_Request *req;
    MPI_Status  *stat;
#endif
} t_gmx_pme_pp;

typedef struct gmx_pme_comm_n_box {
    int    natoms;
    matrix box;
    int    maxshift_x;
    int    maxshift_y;
    real   lambda;
    int    flags;
    gmx_large_int_t step;
} gmx_pme_comm_n_box_t;

typedef struct {
    matrix vir;
    real   energy;
    real   dvdlambda;
    float  cycles;
    gmx_stop_cond_t stop_cond;
} gmx_pme_comm_vir_ene_t;
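
/* PME -> PP reply: the forces are returned to each PP node in a message
 * with tag 0, and the peer PP node additionally receives a
 * gmx_pme_comm_vir_ene_t with tag 1 carrying the virial, energy,
 * dV/dlambda, the PME cycle count and any stop condition
 * (see gmx_pme_send_force_vir_ener and gmx_pme_receive_f below).
 */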

gmx_pme_pp_t gmx_pme_pp_init(t_commrec *cr)
{
    struct gmx_pme_pp *pme_pp;
    int rank;

    snew(pme_pp,1);

#ifdef GMX_MPI
    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
    MPI_Comm_rank(cr->mpi_comm_mygroup,&rank);
    get_pme_ddnodes(cr,rank,&pme_pp->nnode,&pme_pp->node,&pme_pp->node_peer);
    snew(pme_pp->nat,pme_pp->nnode);
    snew(pme_pp->req,2*pme_pp->nnode);
    snew(pme_pp->stat,2*pme_pp->nnode);
    pme_pp->nalloc = 0;
    pme_pp->flags_charge = 0;
#endif

    return pme_pp;
}

/* This should be faster with a real non-blocking MPI implementation */
/* #define GMX_PME_DELAYED_WAIT */

static void gmx_pme_send_q_x_wait(gmx_domdec_t *dd)
{
#ifdef GMX_MPI
    if (dd->nreq_pme) {
        MPI_Waitall(dd->nreq_pme,dd->req_pme,MPI_STATUSES_IGNORE);
        dd->nreq_pme = 0;
    }
#endif
}

static void gmx_pme_send_q_x(t_commrec *cr, int flags,
                             real *chargeA, real *chargeB,
                             matrix box, rvec *x,
                             real lambda,
                             int maxshift_x, int maxshift_y,
                             gmx_large_int_t step)
{
    gmx_domdec_t *dd;
    gmx_pme_comm_n_box_t *cnb;
    int n;

    dd = cr->dd;
    n = dd->nat_home;

    if (debug)
        fprintf(debug,"PP node %d sending to PME node %d: %d%s%s\n",
                cr->sim_nodeid,dd->pme_nodeid,n,
                flags & PP_PME_CHARGE ? " charges" : "",
                flags & PP_PME_COORD  ? " coordinates" : "");

#ifdef GMX_PME_DELAYED_WAIT
    /* We can not use cnb until pending communication has finished */
    gmx_pme_send_q_x_wait(dd);
#endif

    if (dd->pme_receive_vir_ener) {
        /* Peer PP node: communicate all data */
        if (dd->cnb == NULL)
            snew(dd->cnb,1);
        cnb = dd->cnb;

        cnb->flags      = flags;
        cnb->natoms     = n;
        cnb->maxshift_x = maxshift_x;
        cnb->maxshift_y = maxshift_y;
        cnb->lambda     = lambda;
        cnb->step       = step;
        if (flags & PP_PME_COORD)
            copy_mat(box,cnb->box);
#ifdef GMX_MPI
        MPI_Isend(cnb,sizeof(*cnb),MPI_BYTE,
                  dd->pme_nodeid,0,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
#endif
    } else if (flags & PP_PME_CHARGE) {
#ifdef GMX_MPI
        /* Communicate only the number of atoms */
        MPI_Isend(&n,sizeof(n),MPI_BYTE,
                  dd->pme_nodeid,0,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
#endif
    }

#ifdef GMX_MPI
    if (flags & PP_PME_CHARGE) {
        MPI_Isend(chargeA,n*sizeof(real),MPI_BYTE,
                  dd->pme_nodeid,1,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
    if (flags & PP_PME_CHARGEB) {
        MPI_Isend(chargeB,n*sizeof(real),MPI_BYTE,
                  dd->pme_nodeid,2,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
    if (flags & PP_PME_COORD) {
        MPI_Isend(x[0],n*sizeof(rvec),MPI_BYTE,
                  dd->pme_nodeid,3,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }

#ifndef GMX_PME_DELAYED_WAIT
    /* Wait for the data to arrive */
    /* We can skip this wait as we are sure x and q will not be modified
     * before the next call to gmx_pme_send_q_x or gmx_pme_receive_f.
     */
    gmx_pme_send_q_x_wait(dd);
#endif
#endif
}
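
/* The wrappers below choose the flag combinations used by the PP nodes:
 * gmx_pme_send_q sends (updated) charges only, gmx_pme_send_x sends the
 * box, coordinates and lambda for a step, and gmx_pme_finish signals the
 * PME-only nodes that the run is over (gmx_pme_recv_q_x then returns -1).
 */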

void gmx_pme_send_q(t_commrec *cr,
                    bool bFreeEnergy, real *chargeA, real *chargeB,
                    int maxshift_x, int maxshift_y)
{
    int flags;

    flags = PP_PME_CHARGE;
    if (bFreeEnergy)
        flags |= PP_PME_CHARGEB;

    gmx_pme_send_q_x(cr,flags,
                     chargeA,chargeB,NULL,NULL,0,maxshift_x,maxshift_y,-1);
}

void gmx_pme_send_x(t_commrec *cr, matrix box, rvec *x,
                    bool bFreeEnergy, real lambda,
                    bool bEnerVir,
                    gmx_large_int_t step)
{
    int flags;

    flags = PP_PME_COORD;
    if (bFreeEnergy)
        flags |= PP_PME_FEP;
    if (bEnerVir)
        flags |= PP_PME_ENER_VIR;

    gmx_pme_send_q_x(cr,flags,NULL,NULL,box,x,lambda,0,0,step);
}

void gmx_pme_finish(t_commrec *cr)
{
    int flags;

    flags = PP_PME_FINISH;

    gmx_pme_send_q_x(cr,flags,NULL,NULL,NULL,NULL,0,0,0,-1);
}

int gmx_pme_recv_q_x(struct gmx_pme_pp *pme_pp,
                     real **chargeA, real **chargeB,
                     matrix box, rvec **x, rvec **f,
                     int *maxshift_x, int *maxshift_y,
                     bool *bFreeEnergy, real *lambda,
                     bool *bEnerVir,
                     gmx_large_int_t *step)
{
    gmx_pme_comm_n_box_t cnb;
    int  nat=0,q,messages,sender;
    real *charge_pp;

    messages = 0;

    /* avoid compiler warning about unused variable without MPI support */
    cnb.flags = 0;
#ifdef GMX_MPI
    do {
        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb,sizeof(cnb),MPI_BYTE,
                 pme_pp->node_peer,0,
                 pme_pp->mpi_comm_mysim,MPI_STATUS_IGNORE);

        if (debug)
            fprintf(debug,"PME only node receiving:%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE) ? " charges" : "",
                    (cnb.flags & PP_PME_COORD ) ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH) ? " finish" : "");

        if (cnb.flags & PP_PME_CHARGE) {
            /* Receive the send counts from the other PP nodes */
            for(sender=0; sender<pme_pp->nnode; sender++) {
                if (pme_pp->node[sender] == pme_pp->node_peer) {
                    pme_pp->nat[sender] = cnb.natoms;
                } else {
                    MPI_Irecv(&(pme_pp->nat[sender]),sizeof(pme_pp->nat[0]),
                              MPI_BYTE,
                              pme_pp->node[sender],0,
                              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
            messages = 0;

            nat = 0;
            for(sender=0; sender<pme_pp->nnode; sender++)
                nat += pme_pp->nat[sender];

            if (nat > pme_pp->nalloc) {
                pme_pp->nalloc = over_alloc_dd(nat);
                srenew(pme_pp->chargeA,pme_pp->nalloc);
                if (cnb.flags & PP_PME_CHARGEB)
                    srenew(pme_pp->chargeB,pme_pp->nalloc);
                srenew(pme_pp->x,pme_pp->nalloc);
                srenew(pme_pp->f,pme_pp->nalloc);
            }

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
            for(q=0; q<((cnb.flags & PP_PME_CHARGEB) ? 2 : 1); q++) {
                if (q == 0)
                    charge_pp = pme_pp->chargeA;
                else
                    charge_pp = pme_pp->chargeB;
                nat = 0;
                for(sender=0; sender<pme_pp->nnode; sender++) {
                    if (pme_pp->nat[sender] > 0) {
                        MPI_Irecv(charge_pp+nat,
                                  pme_pp->nat[sender]*sizeof(real),
                                  MPI_BYTE,
                                  pme_pp->node[sender],1+q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += pme_pp->nat[sender];
                        if (debug)
                            fprintf(debug,"Received from PP node %d: %d "
                                    "charges\n",
                                    pme_pp->node[sender],pme_pp->nat[sender]);
                    }
                }
            }

            pme_pp->flags_charge = cnb.flags;
        }

        if (cnb.flags & PP_PME_COORD) {
            if (!(pme_pp->flags_charge & PP_PME_CHARGE))
                gmx_incons("PME-only node received coordinates before charges");

            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box,box);
            *bFreeEnergy = (cnb.flags & PP_PME_FEP);
            *lambda      = cnb.lambda;
            *bEnerVir    = (cnb.flags & PP_PME_ENER_VIR);

            if (*bFreeEnergy && !(pme_pp->flags_charge & PP_PME_CHARGEB))
                gmx_incons("PME-only node received free energy request, but "
                           "did not receive B-state charges");

            /* Receive the coordinates in place */
            nat = 0;
            for(sender=0; sender<pme_pp->nnode; sender++) {
                if (pme_pp->nat[sender] > 0) {
                    MPI_Irecv(pme_pp->x[nat],pme_pp->nat[sender]*sizeof(rvec),
                              MPI_BYTE,
                              pme_pp->node[sender],3,
                              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
                    nat += pme_pp->nat[sender];
                    if (debug)
                        fprintf(debug,"Received from PP node %d: %d "
                                "coordinates\n",
                                pme_pp->node[sender],pme_pp->nat[sender]);
                }
            }
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
        messages = 0;
    } while (!(cnb.flags & (PP_PME_COORD | PP_PME_FINISH)));

    *step = cnb.step;
#endif

    *chargeA = pme_pp->chargeA;
    *chargeB = pme_pp->chargeB;
    *x       = pme_pp->x;
    *f       = pme_pp->f;

    return ((cnb.flags & PP_PME_FINISH) ? -1 : nat);
}
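
/* On a PME-only node, gmx_pme_recv_q_x is called repeatedly (from
 * do_pmeonly): it blocks until a complete set of coordinates or a finish
 * signal has arrived, and returns the total number of atoms, or -1 when
 * PP_PME_FINISH was received.  The reply for the step is then sent back
 * with gmx_pme_send_force_vir_ener below.
 */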

static void receive_virial_energy(t_commrec *cr,
                                  matrix vir, real *energy, real *dvdlambda,
                                  float *pme_cycles)
{
    gmx_pme_comm_vir_ene_t cve;

    if (cr->dd->pme_receive_vir_ener) {
        if (debug)
            fprintf(debug,
                    "PP node %d receiving from PME node %d: virial and energy\n",
                    cr->sim_nodeid,cr->dd->pme_nodeid);
#ifdef GMX_MPI
        MPI_Recv(&cve,sizeof(cve),MPI_BYTE,cr->dd->pme_nodeid,1,cr->mpi_comm_mysim,
                 MPI_STATUS_IGNORE);
#else
        memset(&cve,0,sizeof(cve));
#endif

        m_add(vir,cve.vir,vir);
        *energy     = cve.energy;
        *dvdlambda += cve.dvdlambda;
        *pme_cycles = cve.cycles;

        if ( cve.stop_cond != gmx_stop_cond_none )
        {
            gmx_set_stop_condition(cve.stop_cond);
        }
    } else {
        *energy = 0;
        *pme_cycles = 0;
    }
}

void gmx_pme_receive_f(t_commrec *cr,
                       rvec f[], matrix vir,
                       real *energy, real *dvdlambda,
                       float *pme_cycles)
{
    int natoms,i;

#ifdef GMX_PME_DELAYED_WAIT
    /* Wait for the x request to finish */
    gmx_pme_send_q_x_wait(cr->dd);
#endif

    natoms = cr->dd->nat_home;

    if (natoms > cr->dd->pme_recv_f_alloc)
    {
        cr->dd->pme_recv_f_alloc = over_alloc_dd(natoms);
        srenew(cr->dd->pme_recv_f_buf, cr->dd->pme_recv_f_alloc);
    }

#ifdef GMX_MPI
    MPI_Recv(cr->dd->pme_recv_f_buf[0],
             natoms*sizeof(rvec),MPI_BYTE,
             cr->dd->pme_nodeid,0,cr->mpi_comm_mysim,
             MPI_STATUS_IGNORE);
#endif

    for(i=0; i<natoms; i++)
        rvec_inc(f[i],cr->dd->pme_recv_f_buf[i]);

    receive_virial_energy(cr,vir,energy,dvdlambda,pme_cycles);
}
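
/* Typical PP-node call sequence per MD step (a sketch only; the actual
 * call sites live in the mdrun force/integration code, not in this file):
 *
 *     gmx_pme_send_x(cr,box,x,bFreeEnergy,lambda,bEnerVir,step);
 *     ...do the short-range (PP) work for this step...
 *     gmx_pme_receive_f(cr,f,vir,&energy,&dvdlambda,&pme_cycles);
 *
 * gmx_pme_send_q must have been called beforehand whenever the local
 * charges change, e.g. after domain-decomposition repartitioning.
 */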

void gmx_pme_send_force_vir_ener(struct gmx_pme_pp *pme_pp,
                                 rvec *f, matrix vir,
                                 real energy, real dvdlambda,
                                 float cycles)
{
    gmx_pme_comm_vir_ene_t cve;
    int messages,ind_start,ind_end,receiver;

    cve.cycles = cycles;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end = 0;
    for (receiver=0; receiver<pme_pp->nnode; receiver++) {
        ind_start = ind_end;
        ind_end   = ind_start + pme_pp->nat[receiver];
#ifdef GMX_MPI
        if (MPI_Isend(f[ind_start],(ind_end-ind_start)*sizeof(rvec),MPI_BYTE,
                      pme_pp->node[receiver],0,
                      pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]) != 0)
            gmx_comm("MPI_Isend failed in do_pmeonly");
#endif
    }

    /* send virial and energy to our last PP node */
    copy_mat(vir,cve.vir);
    cve.energy    = energy;
    cve.dvdlambda = dvdlambda;
    /* check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    cve.cycles = cycles;

    if (debug)
        fprintf(debug,"PME node sending to PP node %d: virial and energy\n",
                pme_pp->node_peer);
#ifdef GMX_MPI
    MPI_Isend(&cve,sizeof(cve),MPI_BYTE,
              pme_pp->node_peer,1,
              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);

    /* Wait for the forces to arrive */
    MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
#endif
}