/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "config.h"

#include <cassert>
#include <cmath>
#include <cstdlib>

#include <vector>

#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/ga2la.h"
#include "gromacs/fileio/confio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_internal.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"
#if GMX_MPI
// Helper function to deduce MPI datatype from the type of data
gmx_unused static MPI_Datatype mpiDatatype(const float gmx_unused *data)
{
    return MPI_FLOAT;
}

// Helper function to deduce MPI datatype from the type of data
gmx_unused static MPI_Datatype mpiDatatype(const double gmx_unused *data)
{
    return MPI_DOUBLE;
}
#endif // GMX_MPI

/* With GMX_DOUBLE, real equals double, so this overload would collide
 * with the double overload below and is only compiled in mixed precision.
 */
#if !GMX_DOUBLE
// Helper function; note that gmx_sum(d) should actually be templated
gmx_unused static void gmxAllReduce(int n, real *data, const t_commrec *cr)
{
    gmx_sum(n, data, cr);
}
#endif

// Helper function; note that gmx_sum(d) should actually be templated
gmx_unused static void gmxAllReduce(int n, double *data, const t_commrec *cr)
{
    gmx_sumd(n, data, cr);
}
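
/* The overloads above let pullAllReduce() below select, at compile time,
 * the reduction routine for the element type of its data (and, with MPI,
 * the matching MPI datatype).
 */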
// Reduce data of n elements over all ranks currently participating in pull
template <typename T>
static void pullAllReduce(const t_commrec *cr,
                          pull_comm_t     *comm,
                          int              n,
                          T               *data)
{
    if (cr != nullptr && PAR(cr))
    {
        if (comm->bParticipateAll)
        {
            /* Sum the contributions over all DD ranks */
            gmxAllReduce(n, data, cr);
        }
        else
        {
            /* Separate branch because gmx_sum uses cr->mpi_comm_mygroup */
#if GMX_MPI
#if MPI_IN_PLACE_EXISTS
            MPI_Allreduce(MPI_IN_PLACE, data, n, mpiDatatype(data), MPI_SUM,
                          comm->mpi_comm_com);
#else
            std::vector<T> buf(n);

            MPI_Allreduce(data, buf.data(), n, mpiDatatype(data), MPI_SUM,
                          comm->mpi_comm_com);

            /* Copy the result from the buffer to the input/output data */
            for (int i = 0; i < n; i++)
            {
                data[i] = buf[i];
            }
#endif
#else
            gmx_incons("comm->bParticipateAll=FALSE without GMX_MPI");
#endif
        }
    }
}
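
/* Sets x_pbc to the coordinate of the PBC reference atom of pgrp.
 * With domain decomposition, only the rank that is home to the PBC atom
 * copies its coordinate; all other ranks set zero, so summing x_pbc over
 * the ranks (done in pull_set_pbcatoms) yields the coordinate everywhere.
 */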
static void pull_set_pbcatom(const t_commrec *cr, pull_group_work_t *pgrp,
                             const rvec *x,
                             rvec x_pbc)
{
    int a;

    if (cr != nullptr && DOMAINDECOMP(cr))
    {
        if (ga2la_get_home(cr->dd->ga2la, pgrp->params.pbcatom, &a))
        {
            copy_rvec(x[a], x_pbc);
        }
        else
        {
            clear_rvec(x_pbc);
        }
    }
    else
    {
        copy_rvec(x[pgrp->params.pbcatom], x_pbc);
    }
}
static void pull_set_pbcatoms(const t_commrec *cr, struct pull_t *pull,
                              const rvec *x,
                              rvec *x_pbc)
{
    int g, n;

    n = 0;
    for (g = 0; g < pull->ngroup; g++)
    {
        if (!pull->group[g].bCalcCOM || pull->group[g].params.pbcatom == -1)
        {
            clear_rvec(x_pbc[g]);
        }
        else
        {
            pull_set_pbcatom(cr, &pull->group[g], x, x_pbc[g]);
            n++;
        }
    }

    if (cr && PAR(cr) && n > 0)
    {
        /* Sum over participating ranks to get x_pbc from the home ranks.
         * This can be very expensive at high parallelization, so we only
         * do this after each DD repartitioning.
         */
        pullAllReduce(cr, &pull->comm, pull->ngroup*DIM, x_pbc[0]);
    }
}
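
/* Builds the dynamic reference group for each pull coordinate with
 * cylinder geometry and computes its weighted COM data.
 *
 * Per coordinate, 9 doubles are packed into comm->dbuf_cyl for the
 * reduction over ranks: [0] sum of m*w, [1] sum of m*w*w, [2] sum of
 * m*w*a with a the axial distance, and [3-5] and [6-8] the radial force
 * factors radf_fac0 and radf_fac1 per dimension.
 */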
static void make_cyl_refgrps(const t_commrec *cr,
                             struct pull_t   *pull,
                             const t_mdatoms *md,
                             t_pbc           *pbc,
                             double           t,
                             const rvec      *x)
{
    /* The size and stride per coord for the reduction buffer */
    const int    stride = 9;
    int          i, ii, m, start, end;
    rvec         g_x, dx, dir;
    double       inv_cyl_r2;
    pull_comm_t *comm;
    gmx_ga2la_t *ga2la = nullptr;

    comm = &pull->comm;

    if (comm->dbuf_cyl == nullptr)
    {
        snew(comm->dbuf_cyl, pull->coord.size()*stride);
    }

    if (cr && DOMAINDECOMP(cr))
    {
        ga2la = cr->dd->ga2la;
    }

    start = 0;
    end   = md->homenr;

    inv_cyl_r2 = 1.0/gmx::square(pull->params.cylinder_r);
    /* loop over all groups to make a reference group for each */
    for (size_t c = 0; c < pull->coord.size(); c++)
    {
        pull_coord_work_t *pcrd;
        double             sum_a, wmass, wwmass;
        dvec               radf_fac0, radf_fac1;

        pcrd   = &pull->coord[c];

        sum_a  = 0;
        wmass  = 0;
        wwmass = 0;
        clear_dvec(radf_fac0);
        clear_dvec(radf_fac1);

        if (pcrd->params.eGeom == epullgCYL)
        {
            pull_group_work_t *pref, *pgrp, *pdyna;

            /* pref will be the same group for all pull coordinates */
            pref  = &pull->group[pcrd->params.group[0]];
            pgrp  = &pull->group[pcrd->params.group[1]];
            pdyna = &pull->dyna[c];
            copy_dvec_to_rvec(pcrd->spatialData.vec, dir);
            pdyna->nat_loc = 0;

            /* We calculate distances with respect to the reference location
             * of this cylinder group (g_x), which we already have now since
             * we reduced the other group COM over the ranks. This resolves
             * any PBC issues and we don't need to use a PBC-atom here.
             */
            if (pcrd->params.rate != 0)
            {
                /* With rate=0, value_ref is set initially */
                pcrd->value_ref = pcrd->params.init + pcrd->params.rate*t;
            }
            for (m = 0; m < DIM; m++)
            {
                g_x[m] = pgrp->x[m] - pcrd->spatialData.vec[m]*pcrd->value_ref;
            }
            /* loop over all atoms in the main ref group */
            for (i = 0; i < pref->params.nat; i++)
            {
                ii = pref->params.ind[i];
                if (ga2la)
                {
                    if (!ga2la_get_home(ga2la, pref->params.ind[i], &ii))
                    {
                        ii = -1;
                    }
                }
                if (ii >= start && ii < end)
                {
                    double dr2, dr2_rel, inp;
                    dvec   dr;

                    pbc_dx_aiuc(pbc, x[ii], g_x, dx);
                    inp = iprod(dir, dx);
                    dr2 = 0;
                    for (m = 0; m < DIM; m++)
                    {
                        /* Determine the radial components */
                        dr[m] = dx[m] - inp*dir[m];
                        dr2  += dr[m]*dr[m];
                    }
                    dr2_rel = dr2*inv_cyl_r2;

                    if (dr2_rel < 1)
                    {
                        double mass, weight, dweight_r;
                        dvec   mdw;

                        /* add to index, to sum of COM, to weight array */
                        if (pdyna->nat_loc >= pdyna->nalloc_loc)
                        {
                            pdyna->nalloc_loc = over_alloc_large(pdyna->nat_loc+1);
                            srenew(pdyna->ind_loc,    pdyna->nalloc_loc);
                            srenew(pdyna->weight_loc, pdyna->nalloc_loc);
                            srenew(pdyna->mdw,        pdyna->nalloc_loc);
                            srenew(pdyna->dv,         pdyna->nalloc_loc);
                        }
                        pdyna->ind_loc[pdyna->nat_loc] = ii;

                        mass      = md->massT[ii];
                        /* The radial weight function is 1-2x^2+x^4,
                         * where x=r/cylinder_r. Since this function depends
                         * on the radial component, we also get radial forces
                         * on both groups.
                         */
                        weight    = 1 + (-2 + dr2_rel)*dr2_rel;
                        dweight_r = (-4 + 4*dr2_rel)*inv_cyl_r2;
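                        /* Note: weight = (1 - x^2)^2 with x^2 = dr2_rel,
                         * e.g. weight = 9/16 at r = cylinder_r/2, and
                         * dweight_r = (1/r)*dweight/dr, so mass*dweight_r*dr
                         * below is the mass-weighted gradient of the weight
                         * along the radial vector dr.
                         */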
                        pdyna->weight_loc[pdyna->nat_loc] = weight;
                        sum_a    += mass*weight*inp;
                        wmass    += mass*weight;
                        wwmass   += mass*weight*weight;
                        dsvmul(mass*dweight_r, dr, mdw);
                        copy_dvec(mdw, pdyna->mdw[pdyna->nat_loc]);
                        /* Currently we only have the axial component of the
                         * distance (inp) up to an unknown offset. We add this
                         * offset after the reduction that determines the
                         * COM of the cylinder group.
                         */
                        pdyna->dv[pdyna->nat_loc] = inp;
                        for (m = 0; m < DIM; m++)
                        {
                            radf_fac0[m] += mdw[m];
                            radf_fac1[m] += mdw[m]*inp;
                        }
                        pdyna->nat_loc++;
                    }
                }
            }
        }
        comm->dbuf_cyl[c*stride+0] = wmass;
        comm->dbuf_cyl[c*stride+1] = wwmass;
        comm->dbuf_cyl[c*stride+2] = sum_a;
        comm->dbuf_cyl[c*stride+3] = radf_fac0[XX];
        comm->dbuf_cyl[c*stride+4] = radf_fac0[YY];
        comm->dbuf_cyl[c*stride+5] = radf_fac0[ZZ];
        comm->dbuf_cyl[c*stride+6] = radf_fac1[XX];
        comm->dbuf_cyl[c*stride+7] = radf_fac1[YY];
        comm->dbuf_cyl[c*stride+8] = radf_fac1[ZZ];
    }
    if (cr != nullptr && PAR(cr))
    {
        /* Sum the contributions over the ranks */
        pullAllReduce(cr, comm, pull->coord.size()*stride, comm->dbuf_cyl);
    }
    for (size_t c = 0; c < pull->coord.size(); c++)
    {
        pull_coord_work_t *pcrd;

        pcrd = &pull->coord[c];

        if (pcrd->params.eGeom == epullgCYL)
        {
            pull_group_work_t    *pdyna       = &pull->dyna[c];
            pull_group_work_t    *pgrp        = &pull->group[pcrd->params.group[1]];
            PullCoordSpatialData &spatialData = pcrd->spatialData;

            double                wmass       = comm->dbuf_cyl[c*stride+0];
            double                wwmass      = comm->dbuf_cyl[c*stride+1];
            pdyna->mwscale = 1.0/wmass;
            /* Cylinder pulling can't be used with constraints, but we set
             * wscale and invtm anyhow, in case someone would like to use them.
             */
            pdyna->wscale  = wmass/wwmass;
            pdyna->invtm   = wwmass/(wmass*wmass);

            /* We store the deviation of the COM from the reference location
             * used above, since we need it when we apply the radial forces
             * to the atoms in the cylinder group.
             */
            spatialData.cyl_dev = 0;
            for (m = 0; m < DIM; m++)
            {
                g_x[m]       = pgrp->x[m] - spatialData.vec[m]*pcrd->value_ref;
                double dist  = -spatialData.vec[m]*comm->dbuf_cyl[c*stride+2]*pdyna->mwscale;
                pdyna->x[m]  = g_x[m] - dist;
                spatialData.cyl_dev += dist;
            }
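            /* Below, the offset promised above is applied: since
             * radf_fac0[m] = sum_i mdw_i[m] and radf_fac1[m] = sum_i mdw_i[m]*a_i,
             * radf_fac1[m] + radf_fac0[m]*cyl_dev equals
             * sum_i mdw_i[m]*(a_i + cyl_dev), i.e. the axial distances
             * taken relative to the COM of the cylinder group.
             */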
            /* Now that we know the exact COM of the cylinder reference group,
             * we can determine the radial force factor (ffrad) that, when
             * multiplied with the axial pull force, will give the radial
             * force on the pulled (non-cylinder) group.
             */
            for (m = 0; m < DIM; m++)
            {
                spatialData.ffrad[m] = (comm->dbuf_cyl[c*stride+6+m] +
                                        comm->dbuf_cyl[c*stride+3+m]*spatialData.cyl_dev)/wmass;
            }

            if (debug)
            {
                fprintf(debug, "Pull cylinder group %zu:%8.3f%8.3f%8.3f m:%8.3f\n",
                        c, pdyna->x[0], pdyna->x[1],
                        pdyna->x[2], 1.0/pdyna->invtm);
                fprintf(debug, "ffrad %8.3f %8.3f %8.3f\n",
                        spatialData.ffrad[XX], spatialData.ffrad[YY], spatialData.ffrad[ZZ]);
            }
        }
    }
}
/* Returns the atan2 angle mapped onto the interval [0, 2*pi),
 * e.g. atan2_0_2pi(-1, 0) returns 3*pi/2 where atan2 would return -pi/2.
 */
static double atan2_0_2pi(double y, double x)
{
    double a;

    a = atan2(y, x);
    if (a < 0)
    {
        a += 2.0*M_PI;
    }

    return a;
}
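
/* Sums the (weighted) masses and coordinates of the local pull-group atoms
 * with local indices [ind_start, ind_end) into sum_com, using the group's
 * PBC treatment; called per thread, with the thread results reduced into
 * sum_com[0] afterwards.
 */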
static void sum_com_part(const pull_group_work_t *pgrp,
                         int ind_start, int ind_end,
                         const rvec *x, const rvec *xp,
                         const real *mass,
                         const t_pbc *pbc,
                         const rvec x_pbc,
                         pull_sum_com_t *sum_com)
{
    double sum_wm   = 0;
    double sum_wwm  = 0;
    dvec   sum_wmx  = { 0, 0, 0 };
    dvec   sum_wmxp = { 0, 0, 0 };

    for (int i = ind_start; i < ind_end; i++)
    {
        int  ii = pgrp->ind_loc[i];
        real wm;
        if (pgrp->weight_loc == nullptr)
        {
            wm      = mass[ii];
            sum_wm += wm;
        }
        else
        {
            real w;

            w        = pgrp->weight_loc[i];
            wm       = w*mass[ii];
            sum_wm  += wm;
            sum_wwm += wm*w;
        }
        if (pgrp->epgrppbc == epgrppbcNONE)
        {
            /* Plain COM: sum the coordinates */
            for (int d = 0; d < DIM; d++)
            {
                sum_wmx[d] += wm*x[ii][d];
            }
            if (xp)
            {
                for (int d = 0; d < DIM; d++)
                {
                    sum_wmxp[d] += wm*xp[ii][d];
                }
            }
        }
        else
        {
            rvec dx;

            /* Sum the difference with the reference atom */
            pbc_dx(pbc, x[ii], x_pbc, dx);
            for (int d = 0; d < DIM; d++)
            {
                sum_wmx[d] += wm*dx[d];
            }
            if (xp)
            {
                /* For xp add the difference between xp and x to dx,
                 * such that we use the same periodic image,
                 * also when xp has a large displacement.
                 */
                for (int d = 0; d < DIM; d++)
                {
                    sum_wmxp[d] += wm*(dx[d] + xp[ii][d] - x[ii][d]);
                }
            }
        }
    }

    sum_com->sum_wm  = sum_wm;
    sum_com->sum_wwm = sum_wwm;
    copy_dvec(sum_wmx, sum_com->sum_wmx);
    if (xp)
    {
        copy_dvec(sum_wmxp, sum_com->sum_wmxp);
    }
}
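
/* Variant of sum_com_part for cosine-weighted pulling, where each atom is
 * weighted by cos(2*pi*x[cosdim]/L). Accumulates the mass-weighted sums of
 * cos and sin and their products along cosdim, for x and, when present, xp;
 * the COM and the atom weights are recovered from these sums in
 * pull_calc_coms().
 */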
static void sum_com_part_cosweight(const pull_group_work_t *pgrp,
                                   int ind_start, int ind_end,
                                   int cosdim, real twopi_box,
                                   const rvec *x, const rvec *xp,
                                   const real *mass,
                                   pull_sum_com_t *sum_com)
{
    /* Cosine weighting geometry */
    double sum_cm  = 0;
    double sum_sm  = 0;
    double sum_ccm = 0;
    double sum_csm = 0;
    double sum_ssm = 0;
    double sum_cmp = 0;
    double sum_smp = 0;

    for (int i = ind_start; i < ind_end; i++)
    {
        int  ii = pgrp->ind_loc[i];
        real m  = mass[ii];
        /* Determine cos and sin sums */
        real cw = std::cos(x[ii][cosdim]*twopi_box);
        real sw = std::sin(x[ii][cosdim]*twopi_box);
        sum_cm  += static_cast<double>(cw*m);
        sum_sm  += static_cast<double>(sw*m);
        sum_ccm += static_cast<double>(cw*cw*m);
        sum_csm += static_cast<double>(cw*sw*m);
        sum_ssm += static_cast<double>(sw*sw*m);

        if (xp != nullptr)
        {
            real cw = std::cos(xp[ii][cosdim]*twopi_box);
            real sw = std::sin(xp[ii][cosdim]*twopi_box);
            sum_cmp += static_cast<double>(cw*m);
            sum_smp += static_cast<double>(sw*m);
        }
    }

    sum_com->sum_cm  = sum_cm;
    sum_com->sum_sm  = sum_sm;
    sum_com->sum_ccm = sum_ccm;
    sum_com->sum_csm = sum_csm;
    sum_com->sum_ssm = sum_ssm;
    sum_com->sum_cmp = sum_cmp;
    sum_com->sum_smp = sum_smp;
}
/* calculates center of mass of selection index from all coordinates x */
void pull_calc_coms(const t_commrec *cr,
                    struct pull_t   *pull,
                    const t_mdatoms *md,
                    t_pbc           *pbc,
                    double           t,
                    const rvec       x[],
                    rvec            *xp)
{
    int          g;
    real         twopi_box = 0;
    pull_comm_t *comm;

    comm = &pull->comm;

    if (comm->rbuf == nullptr)
    {
        snew(comm->rbuf, pull->ngroup);
    }
    if (comm->dbuf == nullptr)
    {
        snew(comm->dbuf, 3*pull->ngroup);
    }

    if (pull->bRefAt && pull->bSetPBCatoms)
    {
        pull_set_pbcatoms(cr, pull, x, comm->rbuf);

        if (cr != nullptr && DOMAINDECOMP(cr))
        {
            /* We can keep these PBC reference coordinates fixed for nstlist
             * steps, since atoms won't jump over PBC.
             * This avoids a global reduction at the next nstlist-1 steps.
             * Note that the exact values of the pbc reference coordinates
             * are irrelevant, as long as all atoms in the group are within
             * half a box distance of the reference coordinate.
             */
            pull->bSetPBCatoms = FALSE;
        }
    }
    if (pull->cosdim >= 0)
    {
        int m;

        assert(pull->npbcdim <= DIM);

        for (m = pull->cosdim+1; m < pull->npbcdim; m++)
        {
            if (pbc->box[m][pull->cosdim] != 0)
            {
                gmx_fatal(FARGS, "Can not do cosine weighting for triclinic dimensions");
            }
        }
        twopi_box = 2.0*M_PI/pbc->box[pull->cosdim][pull->cosdim];
    }
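    /* twopi_box is the wave number of the cosine weighting function: the
     * weight spans exactly one period over the box length along cosdim,
     * which is why that dimension must be free of triclinic couplings.
     */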
    for (g = 0; g < pull->ngroup; g++)
    {
        pull_group_work_t *pgrp;

        pgrp = &pull->group[g];

        if (pgrp->bCalcCOM)
        {
            if (pgrp->epgrppbc != epgrppbcCOS)
            {
                rvec x_pbc = { 0, 0, 0 };

                if (pgrp->epgrppbc == epgrppbcREFAT)
                {
                    /* Set the pbc atom */
                    copy_rvec(comm->rbuf[g], x_pbc);
                }
                /* The final sums should end up in sum_com[0] */
                pull_sum_com_t *sum_com = &pull->sum_com[0];

                /* If we have a single-atom group the mass is irrelevant, so
                 * we can remove the mass factor to avoid division by zero.
                 * Note that with constraint pulling the mass does matter, but
                 * in that case a check group mass != 0 has been done before.
                 */
                if (pgrp->params.nat == 1 &&
                    pgrp->nat_loc == 1 &&
                    md->massT[pgrp->ind_loc[0]] == 0)
                {
                    GMX_ASSERT(xp == nullptr, "We should not have groups with zero mass with constraints, i.e. xp != nullptr");

                    /* Copy the single atom coordinate */
                    for (int d = 0; d < DIM; d++)
                    {
                        sum_com->sum_wmx[d] = x[pgrp->ind_loc[0]][d];
                    }
                    /* Set all mass factors to 1 to get the correct COM */
                    sum_com->sum_wm  = 1;
                    sum_com->sum_wwm = 1;
                }
                else if (pgrp->nat_loc <= c_pullMaxNumLocalAtomsSingleThreaded)
                {
                    sum_com_part(pgrp, 0, pgrp->nat_loc,
                                 x, xp, md->massT,
                                 pbc, x_pbc,
                                 sum_com);
                }
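                /* Larger groups are split evenly over pull->nthreads below;
                 * the single-threaded path above presumably avoids OpenMP
                 * overhead for groups with only a few local atoms.
                 */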
                else
                {
#pragma omp parallel for num_threads(pull->nthreads) schedule(static)
                    for (int t = 0; t < pull->nthreads; t++)
                    {
                        int ind_start = (pgrp->nat_loc*(t + 0))/pull->nthreads;
                        int ind_end   = (pgrp->nat_loc*(t + 1))/pull->nthreads;
                        sum_com_part(pgrp, ind_start, ind_end,
                                     x, xp, md->massT,
                                     pbc, x_pbc,
                                     &pull->sum_com[t]);
                    }
                    /* Reduce the thread contributions to sum_com[0] */
                    for (int t = 1; t < pull->nthreads; t++)
                    {
                        sum_com->sum_wm  += pull->sum_com[t].sum_wm;
                        sum_com->sum_wwm += pull->sum_com[t].sum_wwm;
                        dvec_inc(sum_com->sum_wmx, pull->sum_com[t].sum_wmx);
                        dvec_inc(sum_com->sum_wmxp, pull->sum_com[t].sum_wmxp);
                    }
                }
                if (pgrp->weight_loc == nullptr)
                {
                    sum_com->sum_wwm = sum_com->sum_wm;
                }

                /* Copy local sums to a buffer for global summing */
                copy_dvec(sum_com->sum_wmx, comm->dbuf[g*3]);
                copy_dvec(sum_com->sum_wmxp, comm->dbuf[g*3 + 1]);
                comm->dbuf[g*3 + 2][0] = sum_com->sum_wm;
                comm->dbuf[g*3 + 2][1] = sum_com->sum_wwm;
                comm->dbuf[g*3 + 2][2] = 0;
            }
            else
            {
                /* Cosine weighting geometry.
                 * This uses a slab of the system, thus we always have many
                 * atoms in the pull groups. Therefore, always use threads.
                 */
#pragma omp parallel for num_threads(pull->nthreads) schedule(static)
                for (int t = 0; t < pull->nthreads; t++)
                {
                    int ind_start = (pgrp->nat_loc*(t + 0))/pull->nthreads;
                    int ind_end   = (pgrp->nat_loc*(t + 1))/pull->nthreads;
                    sum_com_part_cosweight(pgrp, ind_start, ind_end,
                                           pull->cosdim, twopi_box,
                                           x, xp, md->massT,
                                           &pull->sum_com[t]);
                }

                /* Reduce the thread contributions to sum_com[0] */
                pull_sum_com_t *sum_com = &pull->sum_com[0];
                for (int t = 1; t < pull->nthreads; t++)
                {
                    sum_com->sum_cm  += pull->sum_com[t].sum_cm;
                    sum_com->sum_sm  += pull->sum_com[t].sum_sm;
                    sum_com->sum_ccm += pull->sum_com[t].sum_ccm;
                    sum_com->sum_csm += pull->sum_com[t].sum_csm;
                    sum_com->sum_ssm += pull->sum_com[t].sum_ssm;
                    sum_com->sum_cmp += pull->sum_com[t].sum_cmp;
                    sum_com->sum_smp += pull->sum_com[t].sum_smp;
                }

                /* Copy local sums to a buffer for global summing */
                comm->dbuf[g*3    ][0] = sum_com->sum_cm;
                comm->dbuf[g*3    ][1] = sum_com->sum_sm;
                comm->dbuf[g*3    ][2] = 0;
                comm->dbuf[g*3 + 1][0] = sum_com->sum_ccm;
                comm->dbuf[g*3 + 1][1] = sum_com->sum_csm;
                comm->dbuf[g*3 + 1][2] = sum_com->sum_ssm;
                comm->dbuf[g*3 + 2][0] = sum_com->sum_cmp;
                comm->dbuf[g*3 + 2][1] = sum_com->sum_smp;
                comm->dbuf[g*3 + 2][2] = 0;
            }
        }
    }
    pullAllReduce(cr, comm, pull->ngroup*3*DIM, comm->dbuf[0]);
    for (g = 0; g < pull->ngroup; g++)
    {
        pull_group_work_t *pgrp;

        pgrp = &pull->group[g];

        if (pgrp->bCalcCOM)
        {
            GMX_ASSERT(pgrp->params.nat > 0, "Normal pull groups should have atoms, only group 0, which should have bCalcCom=FALSE, has nat=0");

            if (pgrp->epgrppbc != epgrppbcCOS)
            {
                double wmass, wwmass;
                int    m;

                /* Determine the inverse mass */
                wmass         = comm->dbuf[g*3+2][0];
                wwmass        = comm->dbuf[g*3+2][1];
                pgrp->mwscale = 1.0/wmass;
                /* invtm==0 signals a frozen group, so then we should keep it zero */
                if (pgrp->invtm != 0)
                {
                    pgrp->wscale = wmass/wwmass;
                    pgrp->invtm  = wwmass/(wmass*wmass);
                }
                /* Divide by the total mass */
                for (m = 0; m < DIM; m++)
                {
                    pgrp->x[m] = comm->dbuf[g*3][m]*pgrp->mwscale;
                    if (xp)
                    {
                        pgrp->xp[m] = comm->dbuf[g*3+1][m]*pgrp->mwscale;
                    }
                    if (pgrp->epgrppbc == epgrppbcREFAT)
                    {
                        pgrp->x[m] += comm->rbuf[g][m];
                        if (xp)
                        {
                            pgrp->xp[m] += comm->rbuf[g][m];
                        }
                    }
                }
            }
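            /* With cosine weighting, the COM position along cosdim follows
             * from the phase of the mass-weighted cosine and sine sums:
             * atan2_0_2pi(sum_sm, sum_cm)/twopi_box maps this phase back to
             * a coordinate inside the box.
             */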
            else
            {
                /* Cosine weighting geometry */
                double csw, snw, wmass, wwmass;
                int    i, ii;

                /* Determine the optimal location of the cosine weight */
                csw                   = comm->dbuf[g*3][0];
                snw                   = comm->dbuf[g*3][1];
                pgrp->x[pull->cosdim] = atan2_0_2pi(snw, csw)/twopi_box;
                /* Determine the weighted and weight-squared masses */
                wmass  = sqrt(csw*csw + snw*snw);
                wwmass = (comm->dbuf[g*3+1][0]*csw*csw +
                          comm->dbuf[g*3+1][1]*csw*snw +
                          comm->dbuf[g*3+1][2]*snw*snw)/(wmass*wmass);

                pgrp->mwscale = 1.0/wmass;
                pgrp->wscale  = wmass/wwmass;
                pgrp->invtm   = wwmass/(wmass*wmass);
                /* Set the weights for the local atoms */
                csw *= pgrp->invtm;
                snw *= pgrp->invtm;
                for (i = 0; i < pgrp->nat_loc; i++)
                {
                    ii                  = pgrp->ind_loc[i];
                    pgrp->weight_loc[i] = csw*cos(twopi_box*x[ii][pull->cosdim]) +
                                          snw*sin(twopi_box*x[ii][pull->cosdim]);
                }
                if (xp)
                {
                    csw                    = comm->dbuf[g*3+2][0];
                    snw                    = comm->dbuf[g*3+2][1];
                    pgrp->xp[pull->cosdim] = atan2_0_2pi(snw, csw)/twopi_box;
                }
            }
            if (debug)
            {
                fprintf(debug, "Pull group %d wmass %f invtm %f\n",
                        g, 1.0/pgrp->mwscale, pgrp->invtm);
            }
        }
    }
    if (pull->bCylinder)
    {
        /* Calculate the COMs for the cylinder reference groups */
        make_cyl_refgrps(cr, pull, md, pbc, t, x);
    }
}