/* gromacs/rigid-bodies.git :: src/mdlib/pme_pp.c */
/*
 *
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 *          GROningen MAchine for Chemical Simulations
 *
 *                        VERSION 3.2.0
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * GROwing Monsters And Cloning Shrimps
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <string.h>
#include <math.h>
#include "typedefs.h"
#include "smalloc.h"
#include "gmx_fatal.h"
#include "vec.h"
#include "pme.h"
#include "network.h"
#include "domdec.h"
#include "sighandler.h"

#ifdef GMX_LIB_MPI
#include <mpi.h>
#endif
#ifdef GMX_THREADS
#include "tmpi.h"
#endif

#include "mpelogging.h"
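/* The PP_PME_* flags below are sent from the PP nodes to the PME-only nodes
 * in the header message (gmx_pme_comm_n_box_t.flags) to announce which data
 * follow: charges (A and optionally B state), coordinates, free-energy
 * information, a request for energy/virial, or a request to finish.
 *
 * Message tags used on the simulation communicator:
 *   PP -> PME: 0 = header / atom count, 1 = charges A, 2 = charges B,
 *              3 = coordinates
 *   PME -> PP: 0 = forces, 1 = virial/energy/cycle count
 * (This is a summary for readability; the send/receive calls below are
 * the authoritative definition of the protocol.)
 */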
#define PP_PME_CHARGE   (1<<0)
#define PP_PME_CHARGEB  (1<<1)
#define PP_PME_COORD    (1<<2)
#define PP_PME_FEP      (1<<3)
#define PP_PME_ENER_VIR (1<<4)
#define PP_PME_FINISH   (1<<5)

#define PME_PP_SIGSTOP    (1<<0)
#define PME_PP_SIGSTOPNSS (1<<1)
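/* State kept on a PME-only node for communication with its PP nodes:
 * the PP peer ranks, per-sender atom counts, and receive buffers for
 * charges, coordinates and forces. */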
typedef struct gmx_pme_pp {
#ifdef GMX_MPI
    MPI_Comm mpi_comm_mysim;
#endif
    int  nnode;        /* The number of PP nodes to communicate with  */
    int  *node;        /* The PP node ranks                           */
    int  node_peer;    /* The peer PP node rank                       */
    int  *nat;         /* The number of atoms for each PP node        */
    int  flags_charge; /* The flags sent along with the last charges  */
    real *chargeA;
    real *chargeB;
    rvec *x;
    rvec *f;
    int  nalloc;
#ifdef GMX_MPI
    MPI_Request *req;
    MPI_Status  *stat;
#endif
} t_gmx_pme_pp;
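/* Header message sent from a PP node to its PME node before the data:
 * atom count, box, grid shift limits, lambda, the PP_PME_* flags and
 * the current MD step. */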
typedef struct gmx_pme_comm_n_box {
    int    natoms;
    matrix box;
    int    maxshift_x;
    int    maxshift_y;
    real   lambda;
    int    flags;
    gmx_large_int_t step;
} gmx_pme_comm_n_box_t;
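/* Reply message sent from a PME node back to its peer PP node:
 * reciprocal-space virial, energy, dV/dlambda, cycle count and any
 * pending stop condition. */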
typedef struct {
    matrix vir;
    real   energy;
    real   dvdlambda;
    float  cycles;
    gmx_stop_cond_t stop_cond;
} gmx_pme_comm_vir_ene_t;
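/* Allocate and initialize the PME-side communication state.
 * Called on PME-only nodes; with MPI it looks up the PP ranks this node
 * serves and allocates two requests and statuses per PP node. */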
gmx_pme_pp_t gmx_pme_pp_init(t_commrec *cr)
{
    struct gmx_pme_pp *pme_pp;
    int rank;

    snew(pme_pp,1);

#ifdef GMX_MPI
    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
    MPI_Comm_rank(cr->mpi_comm_mygroup,&rank);
    get_pme_ddnodes(cr,rank,&pme_pp->nnode,&pme_pp->node,&pme_pp->node_peer);
    snew(pme_pp->nat,pme_pp->nnode);
    snew(pme_pp->req,2*pme_pp->nnode);
    snew(pme_pp->stat,2*pme_pp->nnode);
    pme_pp->nalloc = 0;
    pme_pp->flags_charge = 0;
#endif

    return pme_pp;
}
/* This should be faster with a real non-blocking MPI implementation */
/* #define GMX_PME_DELAYED_WAIT */
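/* Wait for all outstanding PP->PME sends posted by this PP node and reset
 * the request counter. */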
static void gmx_pme_send_q_x_wait(gmx_domdec_t *dd)
{
#ifdef GMX_MPI
    if (dd->nreq_pme) {
        MPI_Waitall(dd->nreq_pme,dd->req_pme,MPI_STATUSES_IGNORE);
        dd->nreq_pme = 0;
    }
#endif
}
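/* Low-level send from a PP node to its PME node. Depending on the flags,
 * the header (count, box, lambda, step), the charges and/or the coordinates
 * are posted as non-blocking sends; by default we then wait for them to
 * complete before returning (see GMX_PME_DELAYED_WAIT above). */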
static void gmx_pme_send_q_x(t_commrec *cr, int flags,
                             real *chargeA, real *chargeB,
                             matrix box, rvec *x,
                             real lambda,
                             int maxshift_x, int maxshift_y,
                             gmx_large_int_t step)
{
    gmx_domdec_t *dd;
    gmx_pme_comm_n_box_t *cnb;
    int  n;

    dd = cr->dd;
    n = dd->nat_home;

    if (debug)
        fprintf(debug,"PP node %d sending to PME node %d: %d%s%s\n",
                cr->sim_nodeid,dd->pme_nodeid,n,
                flags & PP_PME_CHARGE ? " charges" : "",
                flags & PP_PME_COORD  ? " coordinates" : "");

#ifdef GMX_PME_DELAYED_WAIT
    /* We can not use cnb until the pending communication has finished */
    gmx_pme_send_q_x_wait(dd);
#endif

    if (dd->pme_receive_vir_ener) {
        /* Peer PP node: communicate all data */
        if (dd->cnb == NULL)
            snew(dd->cnb,1);
        cnb = dd->cnb;

        cnb->flags      = flags;
        cnb->natoms     = n;
        cnb->maxshift_x = maxshift_x;
        cnb->maxshift_y = maxshift_y;
        cnb->lambda     = lambda;
        cnb->step       = step;
        if (flags & PP_PME_COORD)
            copy_mat(box,cnb->box);
#ifdef GMX_MPI
        MPI_Isend(cnb,sizeof(*cnb),MPI_BYTE,
                  dd->pme_nodeid,0,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
#endif
    } else if (flags & PP_PME_CHARGE) {
#ifdef GMX_MPI
        /* Communicate only the number of atoms */
        MPI_Isend(&n,sizeof(n),MPI_BYTE,
                  dd->pme_nodeid,0,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
#endif
    }

#ifdef GMX_MPI
    if (n > 0) {
        if (flags & PP_PME_CHARGE) {
            MPI_Isend(chargeA,n*sizeof(real),MPI_BYTE,
                      dd->pme_nodeid,1,cr->mpi_comm_mysim,
                      &dd->req_pme[dd->nreq_pme++]);
        }
        if (flags & PP_PME_CHARGEB) {
            MPI_Isend(chargeB,n*sizeof(real),MPI_BYTE,
                      dd->pme_nodeid,2,cr->mpi_comm_mysim,
                      &dd->req_pme[dd->nreq_pme++]);
        }
        if (flags & PP_PME_COORD) {
            MPI_Isend(x[0],n*sizeof(rvec),MPI_BYTE,
                      dd->pme_nodeid,3,cr->mpi_comm_mysim,
                      &dd->req_pme[dd->nreq_pme++]);
        }
    }

#ifndef GMX_PME_DELAYED_WAIT
    /* Wait for the sends to complete.
     * With GMX_PME_DELAYED_WAIT this wait is skipped, which is safe as long
     * as x and q are not modified before the next call to gmx_pme_send_q_x
     * or gmx_pme_receive_f.
     */
    gmx_pme_send_q_x_wait(dd);
#endif
#endif
}
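/* Send the charges of the home atoms on this PP node to its PME node;
 * with free energy the B-state charges are sent as well. */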
void gmx_pme_send_q(t_commrec *cr,
                    gmx_bool bFreeEnergy, real *chargeA, real *chargeB,
                    int maxshift_x, int maxshift_y)
{
    int flags;

    flags = PP_PME_CHARGE;
    if (bFreeEnergy)
        flags |= PP_PME_CHARGEB;

    gmx_pme_send_q_x(cr,flags,
                     chargeA,chargeB,NULL,NULL,0,maxshift_x,maxshift_y,-1);
}
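/* Send the box and the coordinates of the home atoms on this PP node to
 * its PME node, optionally with lambda and a request for the energy and
 * virial; typically called once per MD step. */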
void gmx_pme_send_x(t_commrec *cr, matrix box, rvec *x,
                    gmx_bool bFreeEnergy, real lambda,
                    gmx_bool bEnerVir,
                    gmx_large_int_t step)
{
    int flags;

    flags = PP_PME_COORD;
    if (bFreeEnergy)
        flags |= PP_PME_FEP;
    if (bEnerVir)
        flags |= PP_PME_ENER_VIR;

    gmx_pme_send_q_x(cr,flags,NULL,NULL,box,x,lambda,0,0,step);
}
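/* Tell the PME node that this run is done, so the receive loop in
 * gmx_pme_recv_q_x can return. */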
void gmx_pme_finish(t_commrec *cr)
{
    int flags;

    flags = PP_PME_FINISH;

    gmx_pme_send_q_x(cr,flags,NULL,NULL,NULL,NULL,0,0,0,-1);
}
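/* PME-side receive loop: wait for messages from the attached PP nodes,
 * collecting charges and/or coordinates (plus box, lambda and flags from
 * the peer PP node) into the pme_pp buffers. Returns the total number of
 * atoms received, or -1 when a finish request was received. */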
int gmx_pme_recv_q_x(struct gmx_pme_pp *pme_pp,
                     real **chargeA, real **chargeB,
                     matrix box, rvec **x, rvec **f,
                     int *maxshift_x, int *maxshift_y,
                     gmx_bool *bFreeEnergy, real *lambda,
                     gmx_bool *bEnerVir,
                     gmx_large_int_t *step)
{
    gmx_pme_comm_n_box_t cnb;
    int  nat=0,q,messages,sender;
    real *charge_pp;

    messages = 0;

    /* avoid compiler warning about unused variable without MPI support */
    cnb.flags = 0;
#ifdef GMX_MPI
    do {
        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb,sizeof(cnb),MPI_BYTE,
                 pme_pp->node_peer,0,
                 pme_pp->mpi_comm_mysim,MPI_STATUS_IGNORE);

        if (debug)
            fprintf(debug,"PME only node receiving:%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE) ? " charges" : "",
                    (cnb.flags & PP_PME_COORD ) ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH) ? " finish" : "");

        if (cnb.flags & PP_PME_CHARGE) {
            /* Receive the send counts from the other PP nodes */
            for(sender=0; sender<pme_pp->nnode; sender++) {
                if (pme_pp->node[sender] == pme_pp->node_peer) {
                    pme_pp->nat[sender] = cnb.natoms;
                } else {
                    MPI_Irecv(&(pme_pp->nat[sender]),sizeof(pme_pp->nat[0]),
                              MPI_BYTE,
                              pme_pp->node[sender],0,
                              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
            messages = 0;

            nat = 0;
            for(sender=0; sender<pme_pp->nnode; sender++)
                nat += pme_pp->nat[sender];

            if (nat > pme_pp->nalloc) {
                pme_pp->nalloc = over_alloc_dd(nat);
                srenew(pme_pp->chargeA,pme_pp->nalloc);
                if (cnb.flags & PP_PME_CHARGEB)
                    srenew(pme_pp->chargeB,pme_pp->nalloc);
                srenew(pme_pp->x,pme_pp->nalloc);
                srenew(pme_pp->f,pme_pp->nalloc);
            }

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
            for(q=0; q<((cnb.flags & PP_PME_CHARGEB) ? 2 : 1); q++) {
                if (q == 0)
                    charge_pp = pme_pp->chargeA;
                else
                    charge_pp = pme_pp->chargeB;
                nat = 0;
                for(sender=0; sender<pme_pp->nnode; sender++) {
                    if (pme_pp->nat[sender] > 0) {
                        MPI_Irecv(charge_pp+nat,
                                  pme_pp->nat[sender]*sizeof(real),
                                  MPI_BYTE,
                                  pme_pp->node[sender],1+q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += pme_pp->nat[sender];
                        if (debug)
                            fprintf(debug,"Received from PP node %d: %d "
                                    "charges\n",
                                    pme_pp->node[sender],pme_pp->nat[sender]);
                    }
                }
            }

            pme_pp->flags_charge = cnb.flags;
        }

        if (cnb.flags & PP_PME_COORD) {
            if (!(pme_pp->flags_charge & PP_PME_CHARGE))
                gmx_incons("PME-only node received coordinates before charges");

            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box,box);
            *bFreeEnergy = (cnb.flags & PP_PME_FEP);
            *lambda      = cnb.lambda;
            *bEnerVir    = (cnb.flags & PP_PME_ENER_VIR);

            if (*bFreeEnergy && !(pme_pp->flags_charge & PP_PME_CHARGEB))
                gmx_incons("PME-only node received free energy request, but "
                           "did not receive B-state charges");

            /* Receive the coordinates in place */
            nat = 0;
            for(sender=0; sender<pme_pp->nnode; sender++) {
                if (pme_pp->nat[sender] > 0) {
                    MPI_Irecv(pme_pp->x[nat],pme_pp->nat[sender]*sizeof(rvec),
                              MPI_BYTE,
                              pme_pp->node[sender],3,
                              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
                    nat += pme_pp->nat[sender];
                    if (debug)
                        fprintf(debug,"Received from PP node %d: %d "
                                "coordinates\n",
                                pme_pp->node[sender],pme_pp->nat[sender]);
                }
            }
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
        messages = 0;
    } while (!(cnb.flags & (PP_PME_COORD | PP_PME_FINISH)));

    *step = cnb.step;
#endif

    *chargeA = pme_pp->chargeA;
    *chargeB = pme_pp->chargeB;
    *x       = pme_pp->x;
    *f       = pme_pp->f;

    return ((cnb.flags & PP_PME_FINISH) ? -1 : nat);
}
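/* PP-side helper: on the peer PP node, receive the reciprocal-space virial,
 * energy, dV/dlambda and cycle count from the PME node and accumulate them,
 * picking up any stop condition signalled by the PME node; other PP nodes
 * report zero energy and cycle count. */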
static void receive_virial_energy(t_commrec *cr,
                                  matrix vir, real *energy, real *dvdlambda,
                                  float *pme_cycles)
{
    gmx_pme_comm_vir_ene_t cve;

    if (cr->dd->pme_receive_vir_ener) {
        if (debug)
            fprintf(debug,
                    "PP node %d receiving from PME node %d: virial and energy\n",
                    cr->sim_nodeid,cr->dd->pme_nodeid);
#ifdef GMX_MPI
        MPI_Recv(&cve,sizeof(cve),MPI_BYTE,cr->dd->pme_nodeid,1,cr->mpi_comm_mysim,
                 MPI_STATUS_IGNORE);
#else
        memset(&cve,0,sizeof(cve));
#endif

        m_add(vir,cve.vir,vir);
        *energy = cve.energy;
        *dvdlambda += cve.dvdlambda;
        *pme_cycles = cve.cycles;

        if (cve.stop_cond != gmx_stop_cond_none)
        {
            gmx_set_stop_condition(cve.stop_cond);
        }
    } else {
        *energy = 0;
        *pme_cycles = 0;
    }
}
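/* Receive the PME mesh forces for the home atoms of this PP node and add
 * them to f; then collect the virial, energy, dV/dlambda and PME cycle
 * count via receive_virial_energy. */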
void gmx_pme_receive_f(t_commrec *cr,
                       rvec f[], matrix vir,
                       real *energy, real *dvdlambda,
                       float *pme_cycles)
{
    int natoms,i;

#ifdef GMX_PME_DELAYED_WAIT
    /* Wait for the x request to finish */
    gmx_pme_send_q_x_wait(cr->dd);
#endif

    natoms = cr->dd->nat_home;

    if (natoms > cr->dd->pme_recv_f_alloc)
    {
        cr->dd->pme_recv_f_alloc = over_alloc_dd(natoms);
        srenew(cr->dd->pme_recv_f_buf, cr->dd->pme_recv_f_alloc);
    }

#ifdef GMX_MPI
    MPI_Recv(cr->dd->pme_recv_f_buf[0],
             natoms*sizeof(rvec),MPI_BYTE,
             cr->dd->pme_nodeid,0,cr->mpi_comm_mysim,
             MPI_STATUS_IGNORE);
#endif

    for(i=0; i<natoms; i++)
        rvec_inc(f[i],cr->dd->pme_recv_f_buf[i]);

    receive_virial_energy(cr,vir,energy,dvdlambda,pme_cycles);
}
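/* PME-side send of the results: the forces go back to each PP node that
 * contributed atoms, and the virial, energy, dV/dlambda, cycle count and
 * any stop condition go to the peer PP node. */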
void gmx_pme_send_force_vir_ener(struct gmx_pme_pp *pme_pp,
                                 rvec *f, matrix vir,
                                 real energy, real dvdlambda,
                                 float cycles)
{
    gmx_pme_comm_vir_ene_t cve;
    int messages,ind_start,ind_end,receiver;

    cve.cycles = cycles;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end = 0;
    for (receiver=0; receiver<pme_pp->nnode; receiver++) {
        ind_start = ind_end;
        ind_end   = ind_start + pme_pp->nat[receiver];
#ifdef GMX_MPI
        if (MPI_Isend(f[ind_start],(ind_end-ind_start)*sizeof(rvec),MPI_BYTE,
                      pme_pp->node[receiver],0,
                      pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]) != 0)
            gmx_comm("MPI_Isend failed in do_pmeonly");
#endif
    }

    /* Send the virial and energy to our peer PP node */
    copy_mat(vir,cve.vir);
    cve.energy    = energy;
    cve.dvdlambda = dvdlambda;
    /* Check for signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    cve.cycles = cycles;

    if (debug)
        fprintf(debug,"PME node sending to PP node %d: virial and energy\n",
                pme_pp->node_peer);
#ifdef GMX_MPI
    MPI_Isend(&cve,sizeof(cve),MPI_BYTE,
              pme_pp->node_peer,1,
              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);

    /* Wait for the force and energy sends to complete */
    MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
#endif
}