/* src/mdlib/pme_pp.c */
/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 *
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 *          GROningen MAchine for Chemical Simulations
 *
 *                        VERSION 3.2.0
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * GROwing Monsters And Cloning Shrimps
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <string.h>
#include <math.h>
#include "typedefs.h"
#include "smalloc.h"
#include "gmx_fatal.h"
#include "vec.h"
#include "pme.h"
#include "network.h"
#include "domdec.h"
#include "sighandler.h"

#ifdef GMX_LIB_MPI
#include <mpi.h>
#endif
#ifdef GMX_THREAD_MPI
#include "tmpi.h"
#endif

#include "mpelogging.h"
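
/* Bit flags for the messages exchanged between the PP and the PME nodes;
 * several of the PP_PME_* bits can be set in a single message
 * (see gmx_pme_send_q_x below).
 */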
#define PP_PME_CHARGE    (1<<0)
#define PP_PME_CHARGEB   (1<<1)
#define PP_PME_COORD     (1<<2)
#define PP_PME_FEP       (1<<3)
#define PP_PME_ENER_VIR  (1<<4)
#define PP_PME_FINISH    (1<<5)
#define PP_PME_SWITCH    (1<<6)

#define PME_PP_SIGSTOP      (1<<0)
#define PME_PP_SIGSTOPNSS   (1<<1)
typedef struct gmx_pme_pp {
#ifdef GMX_MPI
    MPI_Comm mpi_comm_mysim;
#endif
    int  nnode;        /* The number of PP nodes to communicate with  */
    int  *node;        /* The PP node ranks                           */
    int  node_peer;    /* The peer PP node rank                       */
    int  *nat;         /* The number of atoms for each PP node        */
    int  flags_charge; /* The flags sent along with the last charges  */
    real *chargeA;
    real *chargeB;
    rvec *x;
    rvec *f;
    int  nalloc;
#ifdef GMX_MPI
    MPI_Request *req;
    MPI_Status  *stat;
#endif
} t_gmx_pme_pp;
typedef struct gmx_pme_comm_n_box {
    int    natoms;
    matrix box;
    int    maxshift_x;
    int    maxshift_y;
    real   lambda;
    int    flags;
    gmx_large_int_t step;
    ivec   grid_size;  /* For PME grid tuning */
    real   ewaldcoeff; /* For PME grid tuning */
} gmx_pme_comm_n_box_t;
typedef struct {
    matrix          vir;
    real            energy;
    real            dvdlambda;
    float           cycles;
    gmx_stop_cond_t stop_cond;
} gmx_pme_comm_vir_ene_t;
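
/* Allocate and initialize the PME-side bookkeeping for communication with
 * the PP nodes: the list of PP ranks, the per-node atom counts and the MPI
 * request/status arrays used for the non-blocking communication below.
 */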
gmx_pme_pp_t gmx_pme_pp_init(t_commrec *cr)
{
    struct gmx_pme_pp *pme_pp;
    int rank;

    snew(pme_pp,1);

#ifdef GMX_MPI
    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
    MPI_Comm_rank(cr->mpi_comm_mygroup,&rank);
    get_pme_ddnodes(cr,rank,&pme_pp->nnode,&pme_pp->node,&pme_pp->node_peer);
    snew(pme_pp->nat,pme_pp->nnode);
    snew(pme_pp->req,2*pme_pp->nnode);
    snew(pme_pp->stat,2*pme_pp->nnode);
    pme_pp->nalloc = 0;
    pme_pp->flags_charge = 0;
#endif

    return pme_pp;
}
/* This should be faster with a real non-blocking MPI implementation */
/* #define GMX_PME_DELAYED_WAIT */
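
/* Wait for all outstanding non-blocking sends from this PP node to its
 * PME node to complete.
 */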
static void gmx_pme_send_q_x_wait(gmx_domdec_t *dd)
{
#ifdef GMX_MPI
    if (dd->nreq_pme) {
        MPI_Waitall(dd->nreq_pme,dd->req_pme,MPI_STATUSES_IGNORE);
        dd->nreq_pme = 0;
    }
#endif
}
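
/* Low-level send routine used by the PP nodes: posts non-blocking sends of
 * the atom count, box, lambda, step and flags, and of the charges and/or
 * coordinates, to this node's PME node, depending on the bits set in flags.
 */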
static void gmx_pme_send_q_x(t_commrec *cr, int flags,
                             real *chargeA, real *chargeB,
                             matrix box, rvec *x,
                             real lambda,
                             int maxshift_x, int maxshift_y,
                             gmx_large_int_t step)
{
    gmx_domdec_t *dd;
    gmx_pme_comm_n_box_t *cnb;
    int  n;

    dd = cr->dd;
    n  = dd->nat_home;

    if (debug)
        fprintf(debug,"PP node %d sending to PME node %d: %d%s%s\n",
                cr->sim_nodeid,dd->pme_nodeid,n,
                flags & PP_PME_CHARGE ? " charges" : "",
                flags & PP_PME_COORD  ? " coordinates" : "");

#ifdef GMX_PME_DELAYED_WAIT
    /* We can not use cnb until the pending communication has finished */
    gmx_pme_send_q_x_wait(dd);
#endif

    if (dd->pme_receive_vir_ener) {
        /* Peer PP node: communicate all data */
        if (dd->cnb == NULL)
            snew(dd->cnb,1);
        cnb = dd->cnb;

        cnb->flags      = flags;
        cnb->natoms     = n;
        cnb->maxshift_x = maxshift_x;
        cnb->maxshift_y = maxshift_y;
        cnb->lambda     = lambda;
        cnb->step       = step;
        if (flags & PP_PME_COORD)
            copy_mat(box,cnb->box);
#ifdef GMX_MPI
        MPI_Isend(cnb,sizeof(*cnb),MPI_BYTE,
                  dd->pme_nodeid,0,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
#endif
    } else if (flags & PP_PME_CHARGE) {
#ifdef GMX_MPI
        /* Communicate only the number of atoms */
        MPI_Isend(&n,sizeof(n),MPI_BYTE,
                  dd->pme_nodeid,0,cr->mpi_comm_mysim,
                  &dd->req_pme[dd->nreq_pme++]);
#endif
    }

#ifdef GMX_MPI
    if (n > 0) {
        if (flags & PP_PME_CHARGE) {
            MPI_Isend(chargeA,n*sizeof(real),MPI_BYTE,
                      dd->pme_nodeid,1,cr->mpi_comm_mysim,
                      &dd->req_pme[dd->nreq_pme++]);
        }
        if (flags & PP_PME_CHARGEB) {
            MPI_Isend(chargeB,n*sizeof(real),MPI_BYTE,
                      dd->pme_nodeid,2,cr->mpi_comm_mysim,
                      &dd->req_pme[dd->nreq_pme++]);
        }
        if (flags & PP_PME_COORD) {
            MPI_Isend(x[0],n*sizeof(rvec),MPI_BYTE,
                      dd->pme_nodeid,3,cr->mpi_comm_mysim,
                      &dd->req_pme[dd->nreq_pme++]);
        }
    }

#ifndef GMX_PME_DELAYED_WAIT
    /* Wait for the data to arrive */
    /* With GMX_PME_DELAYED_WAIT this wait could be skipped, as x and q will
     * not be modified before the next call to gmx_pme_send_q_x or
     * gmx_pme_receive_f.
     */
    gmx_pme_send_q_x_wait(dd);
#endif
#endif
}
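
/* Called on PP nodes: send the A-state (and, with free energy, B-state)
 * charges together with maxshift_x and maxshift_y to this node's PME node.
 */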
void gmx_pme_send_q(t_commrec *cr,
                    gmx_bool bFreeEnergy, real *chargeA, real *chargeB,
                    int maxshift_x, int maxshift_y)
{
    int flags;

    flags = PP_PME_CHARGE;
    if (bFreeEnergy)
        flags |= PP_PME_CHARGEB;

    gmx_pme_send_q_x(cr,flags,
                     chargeA,chargeB,NULL,NULL,0,maxshift_x,maxshift_y,-1);
}
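
/* Called on PP nodes: send the box and coordinates for this step to the
 * PME node; bFreeEnergy adds the PP_PME_FEP flag and bEnerVir requests
 * energy and virial evaluation.
 */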
void gmx_pme_send_x(t_commrec *cr, matrix box, rvec *x,
                    gmx_bool bFreeEnergy, real lambda,
                    gmx_bool bEnerVir,
                    gmx_large_int_t step)
{
    int flags;

    flags = PP_PME_COORD;
    if (bFreeEnergy)
        flags |= PP_PME_FEP;
    if (bEnerVir)
        flags |= PP_PME_ENER_VIR;

    gmx_pme_send_q_x(cr,flags,NULL,NULL,box,x,lambda,0,0,step);
}
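
/* Called on PP nodes: tell the PME node that the run is finished */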
void gmx_pme_send_finish(t_commrec *cr)
{
    int flags;

    flags = PP_PME_FINISH;

    gmx_pme_send_q_x(cr,flags,NULL,NULL,NULL,NULL,0,0,0,-1);
}
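
/* Called on the peer PP node: send new PME grid dimensions and Ewald
 * coefficient to the PME node when the grid is switched (PME grid tuning).
 */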
void gmx_pme_send_switch(t_commrec *cr, ivec grid_size, real ewaldcoeff)
{
#ifdef GMX_MPI
    gmx_pme_comm_n_box_t cnb;

    if (cr->dd->pme_receive_vir_ener)
    {
        cnb.flags = PP_PME_SWITCH;
        copy_ivec(grid_size,cnb.grid_size);
        cnb.ewaldcoeff = ewaldcoeff;

        /* We send this uncommon message with a blocking send to simplify the code */
        MPI_Send(&cnb,sizeof(cnb),MPI_BYTE,
                 cr->dd->pme_nodeid,0,cr->mpi_comm_mysim);
    }
#endif
}
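
/* Main receive routine of the PME-only node: loops until it has received
 * the charges and coordinates (or a finish or switch message) from its PP
 * nodes. Returns the number of atoms received, -1 on PP_PME_FINISH, or -2
 * when new grid-switch parameters were received.
 */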
int gmx_pme_recv_q_x(struct gmx_pme_pp *pme_pp,
                     real **chargeA, real **chargeB,
                     matrix box, rvec **x, rvec **f,
                     int *maxshift_x, int *maxshift_y,
                     gmx_bool *bFreeEnergy, real *lambda,
                     gmx_bool *bEnerVir,
                     gmx_large_int_t *step,
                     ivec grid_size, real *ewaldcoeff)
{
    gmx_pme_comm_n_box_t cnb;
    int  nat=0,q,messages,sender;
    real *charge_pp;

    messages = 0;

    /* avoid compiler warning about unused variable without MPI support */
    cnb.flags = 0;
#ifdef GMX_MPI
    do {
        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb,sizeof(cnb),MPI_BYTE,
                 pme_pp->node_peer,0,
                 pme_pp->mpi_comm_mysim,MPI_STATUS_IGNORE);

        if (debug)
        {
            fprintf(debug,"PME only node receiving:%s%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE) ? " charges" : "",
                    (cnb.flags & PP_PME_COORD ) ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH) ? " finish" : "",
                    (cnb.flags & PP_PME_SWITCH) ? " switch" : "");
        }

        if (cnb.flags & PP_PME_SWITCH)
        {
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size,grid_size);
            *ewaldcoeff = cnb.ewaldcoeff;

            return -2;
        }

        if (cnb.flags & PP_PME_CHARGE) {
            /* Receive the send counts from the other PP nodes */
            for(sender=0; sender<pme_pp->nnode; sender++) {
                if (pme_pp->node[sender] == pme_pp->node_peer) {
                    pme_pp->nat[sender] = cnb.natoms;
                } else {
                    MPI_Irecv(&(pme_pp->nat[sender]),sizeof(pme_pp->nat[0]),
                              MPI_BYTE,
                              pme_pp->node[sender],0,
                              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
            messages = 0;

            nat = 0;
            for(sender=0; sender<pme_pp->nnode; sender++)
                nat += pme_pp->nat[sender];

            if (nat > pme_pp->nalloc) {
                pme_pp->nalloc = over_alloc_dd(nat);
                srenew(pme_pp->chargeA,pme_pp->nalloc);
                if (cnb.flags & PP_PME_CHARGEB)
                    srenew(pme_pp->chargeB,pme_pp->nalloc);
                srenew(pme_pp->x,pme_pp->nalloc);
                srenew(pme_pp->f,pme_pp->nalloc);
            }

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
            for(q=0; q<((cnb.flags & PP_PME_CHARGEB) ? 2 : 1); q++) {
                if (q == 0)
                    charge_pp = pme_pp->chargeA;
                else
                    charge_pp = pme_pp->chargeB;
                nat = 0;
                for(sender=0; sender<pme_pp->nnode; sender++) {
                    if (pme_pp->nat[sender] > 0) {
                        MPI_Irecv(charge_pp+nat,
                                  pme_pp->nat[sender]*sizeof(real),
                                  MPI_BYTE,
                                  pme_pp->node[sender],1+q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += pme_pp->nat[sender];
                        if (debug)
                            fprintf(debug,"Received from PP node %d: %d "
                                    "charges\n",
                                    pme_pp->node[sender],pme_pp->nat[sender]);
                    }
                }
            }

            pme_pp->flags_charge = cnb.flags;
        }

        if (cnb.flags & PP_PME_COORD) {
            if (!(pme_pp->flags_charge & PP_PME_CHARGE))
                gmx_incons("PME-only node received coordinates before charges");

            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box,box);
            *bFreeEnergy = (cnb.flags & PP_PME_FEP);
            *lambda      = cnb.lambda;
            *bEnerVir    = (cnb.flags & PP_PME_ENER_VIR);

            if (*bFreeEnergy && !(pme_pp->flags_charge & PP_PME_CHARGEB))
                gmx_incons("PME-only node received free energy request, but "
                           "did not receive B-state charges");

            /* Receive the coordinates in place */
            nat = 0;
            for(sender=0; sender<pme_pp->nnode; sender++) {
                if (pme_pp->nat[sender] > 0) {
                    MPI_Irecv(pme_pp->x[nat],pme_pp->nat[sender]*sizeof(rvec),
                              MPI_BYTE,
                              pme_pp->node[sender],3,
                              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);
                    nat += pme_pp->nat[sender];
                    if (debug)
                        fprintf(debug,"Received from PP node %d: %d "
                                "coordinates\n",
                                pme_pp->node[sender],pme_pp->nat[sender]);
                }
            }
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
        messages = 0;
    } while (!(cnb.flags & (PP_PME_COORD | PP_PME_FINISH)));

    *step = cnb.step;
#endif

    *chargeA = pme_pp->chargeA;
    *chargeB = pme_pp->chargeB;
    *x       = pme_pp->x;
    *f       = pme_pp->f;

    return ((cnb.flags & PP_PME_FINISH) ? -1 : nat);
}
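
/* Receive the PME mesh virial, energy, dV/dlambda, cycle count and stop
 * condition from the PME node; only the peer PP node actually receives,
 * the other PP nodes just set the energy and cycle count to zero.
 */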
static void receive_virial_energy(t_commrec *cr,
                                  matrix vir, real *energy, real *dvdlambda,
                                  float *pme_cycles)
{
    gmx_pme_comm_vir_ene_t cve;

    if (cr->dd->pme_receive_vir_ener) {
        if (debug)
            fprintf(debug,
                    "PP node %d receiving from PME node %d: virial and energy\n",
                    cr->sim_nodeid,cr->dd->pme_nodeid);
#ifdef GMX_MPI
        MPI_Recv(&cve,sizeof(cve),MPI_BYTE,cr->dd->pme_nodeid,1,cr->mpi_comm_mysim,
                 MPI_STATUS_IGNORE);
#else
        memset(&cve,0,sizeof(cve));
#endif

        m_add(vir,cve.vir,vir);
        *energy     = cve.energy;
        *dvdlambda += cve.dvdlambda;
        *pme_cycles = cve.cycles;

        if (cve.stop_cond != gmx_stop_cond_none)
        {
            gmx_set_stop_condition(cve.stop_cond);
        }
    } else {
        *energy     = 0;
        *pme_cycles = 0;
    }
}
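
/* Called on PP nodes: receive the PME forces for the home atoms and add
 * them to f, then collect the virial, energy and dV/dlambda via
 * receive_virial_energy.
 */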
void gmx_pme_receive_f(t_commrec *cr,
                       rvec f[], matrix vir,
                       real *energy, real *dvdlambda,
                       float *pme_cycles)
{
    int natoms,i;

#ifdef GMX_PME_DELAYED_WAIT
    /* Wait for the x request to finish */
    gmx_pme_send_q_x_wait(cr->dd);
#endif

    natoms = cr->dd->nat_home;

    if (natoms > cr->dd->pme_recv_f_alloc)
    {
        cr->dd->pme_recv_f_alloc = over_alloc_dd(natoms);
        srenew(cr->dd->pme_recv_f_buf, cr->dd->pme_recv_f_alloc);
    }

#ifdef GMX_MPI
    MPI_Recv(cr->dd->pme_recv_f_buf[0],
             natoms*sizeof(rvec),MPI_BYTE,
             cr->dd->pme_nodeid,0,cr->mpi_comm_mysim,
             MPI_STATUS_IGNORE);
#endif

    for(i=0; i<natoms; i++)
        rvec_inc(f[i],cr->dd->pme_recv_f_buf[i]);

    receive_virial_energy(cr,vir,energy,dvdlambda,pme_cycles);
}
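
/* Called on the PME-only node: send the computed forces back to all PP
 * nodes, and the virial, energy, dV/dlambda, cycle count and stop condition
 * to the peer PP node.
 */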
void gmx_pme_send_force_vir_ener(struct gmx_pme_pp *pme_pp,
                                 rvec *f, matrix vir,
                                 real energy, real dvdlambda,
                                 float cycles)
{
    gmx_pme_comm_vir_ene_t cve;
    int messages,ind_start,ind_end,receiver;

    cve.cycles = cycles;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end  = 0;
    for (receiver=0; receiver<pme_pp->nnode; receiver++) {
        ind_start = ind_end;
        ind_end   = ind_start + pme_pp->nat[receiver];
#ifdef GMX_MPI
        if (MPI_Isend(f[ind_start],(ind_end-ind_start)*sizeof(rvec),MPI_BYTE,
                      pme_pp->node[receiver],0,
                      pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]) != 0)
            gmx_comm("MPI_Isend failed in do_pmeonly");
#endif
    }

    /* Send the virial and energy to our last PP node */
    copy_mat(vir,cve.vir);
    cve.energy    = energy;
    cve.dvdlambda = dvdlambda;
    /* Check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    cve.cycles = cycles;

    if (debug)
        fprintf(debug,"PME node sending to PP node %d: virial and energy\n",
                pme_pp->node_peer);
#ifdef GMX_MPI
    MPI_Isend(&cve,sizeof(cve),MPI_BYTE,
              pme_pp->node_peer,1,
              pme_pp->mpi_comm_mysim,&pme_pp->req[messages++]);

    /* Wait for the forces to arrive */
    MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
#endif
}