/*
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 *          GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * GROwing Monsters And Cloning Shrimps
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <string.h>
#include "typedefs.h"
#include "smalloc.h"
#include "gmx_fatal.h"
#include "vec.h"
#include "pme.h"
#include "network.h"
#include "domdec.h"
#include "sighandler.h"
#ifdef GMX_LIB_MPI
#include <mpi.h>
#endif
#include "mpelogging.h"
/* Flags sent from the PP nodes to the PME node */
#define PP_PME_CHARGE   (1<<0)
#define PP_PME_CHARGEB  (1<<1)
#define PP_PME_COORD    (1<<2)
#define PP_PME_FEP      (1<<3)
#define PP_PME_ENER_VIR (1<<4)
#define PP_PME_FINISH   (1<<5)

/* Signals sent back from the PME node to the PP nodes */
#define PME_PP_SIGSTOP    (1<<0)
#define PME_PP_SIGSTOPNSS (1<<1)
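/* The PP_PME_* bits are OR-ed into a single flags word (carried in
 * gmx_pme_comm_n_box_t below) and tested bit by bit on the PME side.
 * As an illustrative sketch only, mirroring what gmx_pme_send_x builds
 * further down: a free-energy step that also wants energies and the
 * virial would announce itself with
 *
 *     int flags = PP_PME_COORD | PP_PME_FEP | PP_PME_ENER_VIR;
 *
 * and the receiver checks individual bits with, e.g.,
 * (cnb.flags & PP_PME_COORD).
 */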
typedef struct gmx_pme_pp {
#ifdef GMX_MPI
    MPI_Comm mpi_comm_mysim;
#endif
    int  nnode;        /* The number of PP nodes to communicate with  */
    int  *node;        /* The PP node ranks                           */
    int  node_peer;    /* The peer PP node rank                       */
    int  *nat;         /* The number of atoms for each PP node        */
    int  flags_charge; /* The flags sent along with the last charges  */
    real *chargeA;     /* Receive buffers, reallocated on demand      */
    real *chargeB;
    rvec *x;
    rvec *f;
    int  nalloc;       /* Allocated size of the receive buffers       */
#ifdef GMX_MPI
    MPI_Request *req;  /* Outstanding communication requests          */
    MPI_Status  *stat;
#endif
} t_gmx_pme_pp;
typedef struct gmx_pme_comm_n_box {
    int    natoms;
    matrix box;
    int    maxshift_x;
    int    maxshift_y;
    real   lambda;
    int    flags;
    gmx_large_int_t step;
} gmx_pme_comm_n_box_t;
typedef struct gmx_pme_comm_vir_ene {
    matrix vir;
    real   energy;
    real   dvdlambda;
    float  cycles;
    gmx_stop_cond_t stop_cond;
} gmx_pme_comm_vir_ene_t;
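/* Note that both structs above travel over the wire as raw bytes: the
 * senders use MPI_Isend(..., sizeof(struct), MPI_BYTE, ...) and the
 * receivers match with the same sizeof. This assumes the PP and PME ranks
 * run binaries with identical struct layout (same real precision, padding
 * and endianness); no MPI datatype conversion is performed.
 */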
gmx_pme_pp_t gmx_pme_pp_init(t_commrec *cr)
{
    struct gmx_pme_pp *pme_pp;
    int rank;

    snew(pme_pp,1);

#ifdef GMX_MPI
    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
    MPI_Comm_rank(cr->mpi_comm_mygroup,&rank);
    get_pme_ddnodes(cr,rank,&pme_pp->nnode,&pme_pp->node,&pme_pp->node_peer);
    snew(pme_pp->nat,pme_pp->nnode);
    /* Up to two requests per PP node can be outstanding at a time */
    snew(pme_pp->req,2*pme_pp->nnode);
    snew(pme_pp->stat,2*pme_pp->nnode);
#endif

    pme_pp->flags_charge = 0;

    return pme_pp;
}
/* This should be faster with a real non-blocking MPI implementation */
/* #define GMX_PME_DELAYED_WAIT */
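/* With GMX_PME_DELAYED_WAIT defined, the MPI_Waitall on the outstanding
 * sends moves from the end of gmx_pme_send_q_x() to the next entry into
 * this module (see the #ifdef/#ifndef pairs below), so a PP node could in
 * principle overlap its short-ranged force work with sends still in flight.
 */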
static void gmx_pme_send_q_x_wait(gmx_domdec_t *dd)
{
#ifdef GMX_MPI
    if (dd->nreq_pme) {
        MPI_Waitall(dd->nreq_pme,dd->req_pme,MPI_STATUSES_IGNORE);
        dd->nreq_pme = 0;
    }
#endif
}
static void gmx_pme_send_q_x(t_commrec *cr, int flags,
                             real *chargeA, real *chargeB,
                             matrix box, rvec *x,
                             real lambda,
                             int maxshift_x, int maxshift_y,
                             gmx_large_int_t step)
{
    gmx_domdec_t *dd;
    gmx_pme_comm_n_box_t *cnb;
    int n;

    dd = cr->dd;
    n  = dd->nat_home;

    if (debug)
        fprintf(debug,"PP node %d sending to PME node %d: %d%s%s\n",
                cr->sim_nodeid,dd->pme_nodeid,n,
                flags & PP_PME_CHARGE ? " charges" : "",
                flags & PP_PME_COORD  ? " coordinates" : "");

#ifdef GMX_PME_DELAYED_WAIT
    /* We can not use cnb until the pending communication has finished */
    gmx_pme_send_q_x_wait(dd);
#endif

    if (dd->pme_receive_vir_ener) {
        /* Peer PP node: communicate all data */
        if (dd->cnb == NULL)
            snew(dd->cnb,1);
        /* The header buffer lives in dd so it stays valid until the
         * (possibly delayed) MPI_Waitall on the send requests.
         */
        cnb = dd->cnb;

        cnb->flags      = flags;
        cnb->natoms     = n;
        cnb->maxshift_x = maxshift_x;
        cnb->maxshift_y = maxshift_y;
        cnb->lambda     = lambda;
        cnb->step       = step;
        if (flags & PP_PME_COORD)
            copy_mat(box,cnb->box);
#ifdef GMX_MPI
        MPI_Isend(cnb,sizeof(*cnb),MPI_BYTE,
                  dd->pme_nodeid,0,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
#endif
    } else if (flags & PP_PME_CHARGE) {
#ifdef GMX_MPI
        /* Communicate only the number of atoms */
        MPI_Isend(&n,sizeof(n),MPI_BYTE,
                  dd->pme_nodeid,0,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
#endif
    }

#ifdef GMX_MPI
    if (flags & PP_PME_CHARGE) {
        MPI_Isend(chargeA,n*sizeof(real),MPI_BYTE,
                  dd->pme_nodeid,1,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
    if (flags & PP_PME_CHARGEB) {
        MPI_Isend(chargeB,n*sizeof(real),MPI_BYTE,
                  dd->pme_nodeid,2,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }
    if (flags & PP_PME_COORD) {
        MPI_Isend(x[0],n*sizeof(rvec),MPI_BYTE,
                  dd->pme_nodeid,3,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
    }

#ifndef GMX_PME_DELAYED_WAIT
    /* Wait for the data to arrive */
    /* We could skip this wait, as we are sure x and q will not be modified
     * before the next call to gmx_pme_send_q_x or gmx_pme_receive_f.
     */
    gmx_pme_send_q_x_wait(dd);
#endif
#endif
}
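/* Message-tag convention used above and mirrored by the PME-side receives:
 *   tag 0: the gmx_pme_comm_n_box_t header (or the bare atom count)
 *   tag 1: chargeA      tag 2: chargeB      tag 3: coordinates
 * On the way back, forces are sent with tag 0 and the
 * gmx_pme_comm_vir_ene_t reply with tag 1 (see below).
 */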
void gmx_pme_send_q(t_commrec *cr,
                    gmx_bool bFreeEnergy, real *chargeA, real *chargeB,
                    int maxshift_x, int maxshift_y)
{
    int flags;

    flags = PP_PME_CHARGE;
    if (bFreeEnergy)
        flags |= PP_PME_CHARGEB;

    gmx_pme_send_q_x(cr,flags,
                     chargeA,chargeB,NULL,NULL,0,maxshift_x,maxshift_y,-1);
}
void gmx_pme_send_x(t_commrec *cr, matrix box, rvec *x,
                    gmx_bool bFreeEnergy, real lambda,
                    gmx_bool bEnerVir,
                    gmx_large_int_t step)
{
    int flags;

    flags = PP_PME_COORD;
    if (bFreeEnergy)
        flags |= PP_PME_FEP;
    if (bEnerVir)
        flags |= PP_PME_ENER_VIR;

    gmx_pme_send_q_x(cr,flags,NULL,NULL,box,x,lambda,0,0,step);
}
void gmx_pme_finish(t_commrec *cr)
{
    int flags;

    flags = PP_PME_FINISH;

    gmx_pme_send_q_x(cr,flags,NULL,NULL,NULL,NULL,0,0,0,-1);
}
int gmx_pme_recv_q_x(struct gmx_pme_pp *pme_pp,
                     real **chargeA, real **chargeB,
                     matrix box, rvec **x, rvec **f,
                     int *maxshift_x, int *maxshift_y,
                     gmx_bool *bFreeEnergy, real *lambda,
                     gmx_bool *bEnerVir,
                     gmx_large_int_t *step)
{
    gmx_pme_comm_n_box_t cnb;
    int  nat=0,q,messages,sender;
    real *charge_pp;

    messages = 0;

    /* avoid compiler warning about unused variable without MPI support */
    cnb.flags = 0;
#ifdef GMX_MPI
    do {
        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb,sizeof(cnb),MPI_BYTE,
                 pme_pp->node_peer,0,
                 pme_pp->mpi_comm_mysim,MPI_STATUS_IGNORE);

        if (debug)
            fprintf(debug,"PME only node receiving:%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE) ? " charges" : "",
                    (cnb.flags & PP_PME_COORD ) ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH) ? " finish" : "");

        if (cnb.flags & PP_PME_CHARGE) {
            /* Receive the send counts from the other PP nodes;
             * the peer's count already arrived in the header above.
             */
            for(sender=0; sender<pme_pp->nnode; sender++) {
                if (pme_pp->node[sender] == pme_pp->node_peer) {
                    pme_pp->nat[sender] = cnb.natoms;
                } else {
                    MPI_Irecv(&(pme_pp->nat[sender]),sizeof(pme_pp->nat[0]),
                              MPI_BYTE,
                              pme_pp->node[sender],0,
                              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
            messages = 0;

            nat = 0;
            for(sender=0; sender<pme_pp->nnode; sender++)
                nat += pme_pp->nat[sender];

            if (nat > pme_pp->nalloc) {
                pme_pp->nalloc = over_alloc_dd(nat);
                srenew(pme_pp->chargeA,pme_pp->nalloc);
                if (cnb.flags & PP_PME_CHARGEB)
                    srenew(pme_pp->chargeB,pme_pp->nalloc);
                srenew(pme_pp->x,pme_pp->nalloc);
                srenew(pme_pp->f,pme_pp->nalloc);
            }

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
            for(q=0; q<((cnb.flags & PP_PME_CHARGEB) ? 2 : 1); q++) {
                if (q == 0)
                    charge_pp = pme_pp->chargeA;
                else
                    charge_pp = pme_pp->chargeB;
                nat = 0;
                for(sender=0; sender<pme_pp->nnode; sender++) {
                    if (pme_pp->nat[sender] > 0) {
                        MPI_Irecv(charge_pp+nat,
                                  pme_pp->nat[sender]*sizeof(real),
                                  MPI_BYTE,
                                  pme_pp->node[sender],1+q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += pme_pp->nat[sender];
                        if (debug)
                            fprintf(debug,"Received from PP node %d: %d "
                                    "charges\n",
                                    pme_pp->node[sender],pme_pp->nat[sender]);
                    }
                }
            }

            pme_pp->flags_charge = cnb.flags;
        }

        if (cnb.flags & PP_PME_COORD) {
            if (!(pme_pp->flags_charge & PP_PME_CHARGE))
                gmx_incons("PME-only node received coordinates before charges");

            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box,box);
            *bFreeEnergy = (cnb.flags & PP_PME_FEP);
            *lambda      = cnb.lambda;
            *bEnerVir    = (cnb.flags & PP_PME_ENER_VIR);

            if (*bFreeEnergy && !(pme_pp->flags_charge & PP_PME_CHARGEB))
                gmx_incons("PME-only node received free energy request, but "
                           "did not receive B-state charges");

            /* Receive the coordinates in place */
            nat = 0;
            for(sender=0; sender<pme_pp->nnode; sender++) {
                if (pme_pp->nat[sender] > 0) {
                    MPI_Irecv(pme_pp->x[nat],
                              pme_pp->nat[sender]*sizeof(rvec),MPI_BYTE,
                              pme_pp->node[sender],3,
                              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
                    nat += pme_pp->nat[sender];
                    if (debug)
                        fprintf(debug,"Received from PP node %d: %d "
                                "coordinates\n",
                                pme_pp->node[sender],pme_pp->nat[sender]);
                }
            }
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
        messages = 0;
    } while (!(cnb.flags & (PP_PME_COORD | PP_PME_FINISH)));

    *step = cnb.step;
#endif

    *chargeA = pme_pp->chargeA;
    *chargeB = pme_pp->chargeB;
    *x       = pme_pp->x;
    *f       = pme_pp->f;

    return ((cnb.flags & PP_PME_FINISH) ? -1 : nat);
}
static void receive_virial_energy(t_commrec *cr,
                                  matrix vir, real *energy, real *dvdlambda,
                                  float *pme_cycles)
{
    gmx_pme_comm_vir_ene_t cve;

    if (cr->dd->pme_receive_vir_ener) {
        if (debug)
            fprintf(debug,
                    "PP node %d receiving from PME node %d: virial and energy\n",
                    cr->sim_nodeid,cr->dd->pme_nodeid);
#ifdef GMX_MPI
        MPI_Recv(&cve,sizeof(cve),MPI_BYTE,cr->dd->pme_nodeid,1,
                 cr->mpi_comm_mysim,MPI_STATUS_IGNORE);
#else
        memset(&cve,0,sizeof(cve));
#endif

        m_add(vir,cve.vir,vir);
        *energy     = cve.energy;
        *dvdlambda += cve.dvdlambda;
        *pme_cycles = cve.cycles;

        if ( cve.stop_cond != gmx_stop_cond_none )
        {
            gmx_set_stop_condition(cve.stop_cond);
        }
    } else {
        /* Only the peer PP node receives the virial and energy */
        *energy     = 0;
        *pme_cycles = 0;
    }
}
void gmx_pme_receive_f(t_commrec *cr,
                       rvec f[], matrix vir,
                       real *energy, real *dvdlambda,
                       float *pme_cycles)
{
    int natoms,i;

#ifdef GMX_PME_DELAYED_WAIT
    /* Wait for the x request to finish */
    gmx_pme_send_q_x_wait(cr->dd);
#endif

    natoms = cr->dd->nat_home;

    if (natoms > cr->dd->pme_recv_f_alloc)
    {
        cr->dd->pme_recv_f_alloc = over_alloc_dd(natoms);
        srenew(cr->dd->pme_recv_f_buf, cr->dd->pme_recv_f_alloc);
    }

#ifdef GMX_MPI
    MPI_Recv(cr->dd->pme_recv_f_buf[0],
             natoms*sizeof(rvec),MPI_BYTE,
             cr->dd->pme_nodeid,0,cr->mpi_comm_mysim,
             MPI_STATUS_IGNORE);
#endif

    /* Add the PME forces to the local PP forces */
    for(i=0; i<natoms; i++)
        rvec_inc(f[i],cr->dd->pme_recv_f_buf[i]);

    receive_virial_energy(cr,vir,energy,dvdlambda,pme_cycles);
}
void gmx_pme_send_force_vir_ener(struct gmx_pme_pp *pme_pp,
                                 rvec *f, matrix vir,
                                 real energy, real dvdlambda,
                                 float cycles)
{
    gmx_pme_comm_vir_ene_t cve;
    int messages,ind_start,ind_end,receiver;

    cve.cycles = cycles;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end  = 0;
    for (receiver=0; receiver<pme_pp->nnode; receiver++) {
        ind_start = ind_end;
        ind_end   = ind_start + pme_pp->nat[receiver];
#ifdef GMX_MPI
        if (MPI_Isend(f[ind_start],(ind_end-ind_start)*sizeof(rvec),MPI_BYTE,
                      pme_pp->node[receiver],0,
                      pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]) != 0)
            gmx_comm("MPI_Isend failed in do_pmeonly");
#endif
    }

    /* Send the virial and energy to our peer (the last) PP node */
    copy_mat(vir,cve.vir);
    cve.energy    = energy;
    cve.dvdlambda = dvdlambda;
    /* check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    if (debug)
        fprintf(debug,"PME node sending to PP node %d: virial and energy\n",
                pme_pp->node_peer);
#ifdef GMX_MPI
    MPI_Isend(&cve,sizeof(cve),MPI_BYTE,
              pme_pp->node_peer,1,
              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);

    /* Wait for the forces to arrive at the PP nodes */
    MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
#endif
}
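/* The matching PME-only side, sketched under the assumption that the
 * actual loop is gmx_pmeonly() in pme.c; names other than this file's API
 * are illustrative:
 *
 *   pme_pp = gmx_pme_pp_init(cr);
 *   for (;;) {
 *       natoms = gmx_pme_recv_q_x(pme_pp,&chargeA,&chargeB,box,&x,&f,
 *                                 &maxshift_x,&maxshift_y,
 *                                 &bFE,&lambda,&bEnerVir,&step);
 *       if (natoms == -1)
 *           break;              // PP_PME_FINISH received
 *       ... evaluate PME forces, energy and virial ...
 *       gmx_pme_send_force_vir_ener(pme_pp,f,vir,energy,dvdlambda,cycles);
 *   }
 */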