/*
 * This source code is part of
 *
 * GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * Gromacs Runs On Most of All Computer Systems
 */
/*
  collection of in-line ready operations:

  lookup-table optimized scalar operations:
  real gmx_invsqrt(real x)
  void vecinvsqrt(real in[],real out[],int n)
  void vecrecip(real in[],real out[],int n)

  vector operations:
  void rvec_add(const rvec a,const rvec b,rvec c)   c = a + b
  void dvec_add(const dvec a,const dvec b,dvec c)   c = a + b
  void ivec_add(const ivec a,const ivec b,ivec c)   c = a + b
  void rvec_inc(rvec a,const rvec b)                a += b
  void dvec_inc(dvec a,const dvec b)                a += b
  void ivec_inc(ivec a,const ivec b)                a += b
  void rvec_sub(const rvec a,const rvec b,rvec c)   c = a - b
  void dvec_sub(const dvec a,const dvec b,dvec c)   c = a - b
  void rvec_dec(rvec a,const rvec b)                a -= b
  void copy_rvec(const rvec a,rvec b)               b = a (reals)
  void copy_dvec(const dvec a,dvec b)               b = a (reals)
  void copy_ivec(const ivec a,ivec b)               b = a (integers)
  void ivec_sub(const ivec a,const ivec b,ivec c)   c = a - b
  void svmul(real a,rvec v1,rvec v2)                v2 = a * v1
  void dsvmul(double a,dvec v1,dvec v2)             v2 = a * v1
  void clear_rvec(rvec a)                           a = 0
  void clear_dvec(dvec a)                           a = 0
  void clear_ivec(ivec a)                           a = 0
  void clear_rvecs(int n,rvec v[])
  real iprod(rvec a,rvec b)                         = a . b (inner product)
  double diprod(dvec a,dvec b)                      = a . b (inner product)
  int iiprod(ivec a,ivec b)                         = a . b (integers)
  real norm2(rvec a)                                = | a |^2 ( = x*x + y*y + z*z )
  double dnorm2(dvec a)                             = | a |^2 ( = x*x + y*y + z*z )
  real norm(rvec a)                                 = | a |
  double dnorm(dvec a)                              = | a |
  void cprod(rvec a,rvec b,rvec c)                  c = a x b (cross product)
  void dcprod(dvec a,dvec b,dvec c)                 c = a x b (cross product)
  void dprod(rvec a,rvec b,rvec c)                  c = a * b (direct product)
  real cos_angle(rvec a,rvec b)
  real cos_angle_no_table(rvec a,rvec b)
  real distance2(rvec v1,rvec v2)                   = | v2 - v1 |^2
  void unitv(rvec src,rvec dest)                    dest = src / |src|
  void unitv_no_table(rvec src,rvec dest)           dest = src / |src|

  matrix (3x3) operations:
  ! indicates that dest should not be the same as a, b or src.
  The _ur0 variants work on matrices that have only zeros in the upper
  right part, such as box matrices. These variants may produce smaller
  rounding errors, not because of the operations themselves, but because
  the compiler can recombine the operations more easily.
  void copy_mat(matrix a,matrix b)                  b = a
  void clear_mat(matrix a)                          a = 0
  void mmul(matrix a,matrix b,matrix dest)        ! dest = a . b
  void mmul_ur0(matrix a,matrix b,matrix dest)      dest = a . b
  void transpose(matrix src,matrix dest)          ! dest = src*
  void tmmul(matrix a,matrix b,matrix dest)       ! dest = a* . b
  void mtmul(matrix a,matrix b,matrix dest)       ! dest = a . b*
  real det(matrix a)                                = det(a)
  void m_add(matrix a,matrix b,matrix dest)         dest = a + b
  void m_sub(matrix a,matrix b,matrix dest)         dest = a - b
  void msmul(matrix m1,real r1,matrix dest)         dest = r1 * m1
  void m_inv_ur0(matrix src,matrix dest)            dest = src^-1
  void m_inv(matrix src,matrix dest)              ! dest = src^-1
  void mvmul(matrix a,rvec src,rvec dest)         ! dest = a . src
  void mvmul_ur0(matrix a,rvec src,rvec dest)       dest = a . src
  void tmvmul_ur0(matrix a,rvec src,rvec dest)      dest = a* . src
  real trace(matrix m)                              = trace(m)
*/
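
/* Illustrative usage sketch (not part of the original header): how a few of
 * the operations listed above combine in practice. The block is compiled out
 * with #if 0 and all variable values are made up for the example.
 */
#if 0
{
  rvec   a = {1.0, 2.0, 3.0}, b = {0.0, 1.0, 0.0}, sum, cross;
  matrix box = {{4.0, 0.0, 0.0}, {0.0, 4.0, 0.0}, {2.0, 2.0, 3.0}}, inv;
  real   ip;

  rvec_add(a, b, sum);       /* sum = a + b                                  */
  cprod(a, b, cross);        /* cross = a x b                                */
  ip = iprod(a, b);          /* ip = a . b                                   */
  m_inv_ur0(box, inv);       /* inverse of a box matrix (upper right zeros)  */
}
#endif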
#include "types/simple.h"
#include "typedefs.h"
#include "sysstuff.h"
#include "gmx_fatal.h"
#include "mpelogging.h"
#ifdef __cplusplus
extern "C" {
#elif 0
} /* avoid screwing up indentation */
#endif
#define EXP_LSB         0x00800000
#define EXP_MASK        0x7f800000
#define EXP_SHIFT       23              /* exponent position in an IEEE single */
#define FRACT_MASK      0x007fffff
#define FRACT_SIZE      11              /* significant part of fraction */
#define FRACT_SHIFT     (EXP_SHIFT-FRACT_SIZE)
#define EXP_ADDR(val)   (((val)&EXP_MASK)>>EXP_SHIFT)
#define FRACT_ADDR(val) (((val)&(FRACT_MASK|EXP_LSB))>>FRACT_SHIFT)

#define PR_VEC(a)       a[XX],a[YY],a[ZZ]
#ifdef GMX_SOFTWARE_INVSQRT
extern const unsigned int *gmx_invsqrt_exptab;
extern const unsigned int *gmx_invsqrt_fracttab;
#endif

/* Union used to reinterpret the bits of a single-precision float
 * in the software invsqrt below.
 */
typedef union
{
  unsigned int bval;
  float        fval;
} t_convert;
#ifdef GMX_SOFTWARE_INVSQRT
static real gmx_invsqrt(real x)
{
  const real   half  = 0.5;
  const real   three = 3.0;
  t_convert    result,bit_pattern;
  unsigned int exp,fract;
  real         lu;
  real         y;
#ifdef GMX_DOUBLE
  real         y2;
#endif

  bit_pattern.fval = x;
  exp   = EXP_ADDR(bit_pattern.bval);
  fract = FRACT_ADDR(bit_pattern.bval);
  result.bval = gmx_invsqrt_exptab[exp] | gmx_invsqrt_fracttab[fract];
  lu    = result.fval;

  y=(half*lu*(three-((x*lu)*lu)));
#ifdef GMX_DOUBLE
  y2=(half*y*(three-((x*y)*y)));

  return y2;                    /* 10 Flops */
#else
  return y;                     /* 5  Flops */
#endif
}
#define INVSQRT_DONE
#endif /* gmx_invsqrt */
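
/* Note on the software invsqrt above (and the PowerPC variant below): the
 * table lookup / __frsqrte provides a first approximation lu to 1/sqrt(x),
 * and each statement of the form
 *   y = half*lu*(three - (x*lu)*lu)
 * is one Newton-Raphson iteration for 1/sqrt(x), roughly doubling the number
 * of correct bits; double precision therefore performs a second iteration.
 */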
#ifdef GMX_POWERPC_SQRT
static real gmx_invsqrt(real x)
{
  const real   half  = 0.5;
  const real   three = 3.0;
  t_convert    result,bit_pattern;
  unsigned int exp,fract;
  real         lu;
  real         y;
#ifdef GMX_DOUBLE
  real         y2;
#endif

  lu = __frsqrte((double)x);

  y=(half*lu*(three-((x*lu)*lu)));

#if (GMX_POWERPC_SQRT==2)
  /* Extra iteration required */
  y=(half*y*(three-((x*y)*y)));
#endif

#ifdef GMX_DOUBLE
  y2=(half*y*(three-((x*y)*y)));

  return y2;                    /* 10 Flops */
#else
  return y;                     /* 5  Flops */
#endif
}
#define INVSQRT_DONE
#endif /* powerpc_invsqrt */
#ifndef INVSQRT_DONE
#define gmx_invsqrt(x) (1.0f/sqrt(x))
#endif
static real sqr(real x)
{
  return (x*x);
}

static gmx_inline double dsqr(double x)
{
  return (x*x);
}
/* Maclaurin series for sinh(x)/x, useful for NH chains and MTTK pressure control.
   Here, we compute it to 10th order, which might be overkill; 8th is probably enough,
   but it's not very much more expensive. */
static gmx_inline real series_sinhx(real x)
{
  real x2 = x*x;

  return (1 + (x2/6.0)*(1 + (x2/20.0)*(1 + (x2/42.0)*(1 + (x2/72.0)*(1 + (x2/110.0))))));
}
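
/* The nested coefficients above come from the Maclaurin series
 *   sinh(x)/x = 1 + x^2/3! + x^4/5! + x^6/7! + x^8/9! + x^10/11! + ...
 * evaluated in Horner form: 6 = 2*3, 20 = 4*5, 42 = 6*7, 72 = 8*9, 110 = 10*11.
 */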
void vecinvsqrt(real in[],real out[],int n);
/* Perform out[i]=1.0/sqrt(in[i]) for n elements */

void vecrecip(real in[],real out[],int n);
/* Perform out[i]=1.0/(in[i]) for n elements */

/* Note: If you need a fast version of vecinvsqrt
 * and/or vecrecip, call detectcpu() and run the SSE/3DNow/SSE2/Altivec
 * versions if your hardware supports it.
 *
 * To use those routines, your memory HAS TO BE CACHE-ALIGNED.
 * Use snew_aligned(ptr,size,32) to allocate and sfree_aligned to free.
 */
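
/* Minimal sketch of the note above: calling vecinvsqrt on cache-aligned
 * buffers. Assumes snew_aligned/sfree_aligned behave as described in the
 * note; the block is compiled out with #if 0 and n and the fill loop are
 * made up for the example.
 */
#if 0
{
  real *in, *out;
  int   i, n = 1024;

  snew_aligned(in, n, 32);          /* cache-aligned allocation */
  snew_aligned(out, n, 32);
  for (i = 0; i < n; i++)
    in[i] = 1.0 + i;
  vecinvsqrt(in, out, n);           /* out[i] = 1.0/sqrt(in[i]) */
  sfree_aligned(in);
  sfree_aligned(out);
}
#endif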
static gmx_inline void rvec_add(const rvec a,const rvec b,rvec c)
{
  c[XX]=a[XX]+b[XX];
  c[YY]=a[YY]+b[YY];
  c[ZZ]=a[ZZ]+b[ZZ];
}

static gmx_inline void dvec_add(const dvec a,const dvec b,dvec c)
{
  c[XX]=a[XX]+b[XX];
  c[YY]=a[YY]+b[YY];
  c[ZZ]=a[ZZ]+b[ZZ];
}

static gmx_inline void ivec_add(const ivec a,const ivec b,ivec c)
{
  c[XX]=a[XX]+b[XX];
  c[YY]=a[YY]+b[YY];
  c[ZZ]=a[ZZ]+b[ZZ];
}

static gmx_inline void rvec_inc(rvec a,const rvec b)
{
  a[XX]+=b[XX];
  a[YY]+=b[YY];
  a[ZZ]+=b[ZZ];
}

static gmx_inline void dvec_inc(dvec a,const dvec b)
{
  a[XX]+=b[XX];
  a[YY]+=b[YY];
  a[ZZ]+=b[ZZ];
}

static gmx_inline void rvec_sub(const rvec a,const rvec b,rvec c)
{
  c[XX]=a[XX]-b[XX];
  c[YY]=a[YY]-b[YY];
  c[ZZ]=a[ZZ]-b[ZZ];
}

static gmx_inline void dvec_sub(const dvec a,const dvec b,dvec c)
{
  c[XX]=a[XX]-b[XX];
  c[YY]=a[YY]-b[YY];
  c[ZZ]=a[ZZ]-b[ZZ];
}

static gmx_inline void rvec_dec(rvec a,const rvec b)
{
  a[XX]-=b[XX];
  a[YY]-=b[YY];
  a[ZZ]-=b[ZZ];
}

static gmx_inline void copy_rvec(const rvec a,rvec b)
{
  b[XX]=a[XX];
  b[YY]=a[YY];
  b[ZZ]=a[ZZ];
}

static gmx_inline void copy_rvecn(rvec *a,rvec *b,int startn,int endn)
{
  int i;

  for (i=startn;i<endn;i++) {
    copy_rvec(a[i],b[i]);
  }
}

static gmx_inline void copy_dvec(const dvec a,dvec b)
{
  b[XX]=a[XX];
  b[YY]=a[YY];
  b[ZZ]=a[ZZ];
}

static gmx_inline void copy_ivec(const ivec a,ivec b)
{
  b[XX]=a[XX];
  b[YY]=a[YY];
  b[ZZ]=a[ZZ];
}

static gmx_inline void ivec_sub(const ivec a,const ivec b,ivec c)
{
  c[XX]=a[XX]-b[XX];
  c[YY]=a[YY]-b[YY];
  c[ZZ]=a[ZZ]-b[ZZ];
}
static gmx_inline void copy_mat(matrix a,matrix b)
{
  copy_rvec(a[XX],b[XX]);
  copy_rvec(a[YY],b[YY]);
  copy_rvec(a[ZZ],b[ZZ]);
}
static gmx_inline void svmul(real a,const rvec v1,rvec v2)
{
  v2[XX]=a*v1[XX];
  v2[YY]=a*v1[YY];
  v2[ZZ]=a*v1[ZZ];
}

static gmx_inline void dsvmul(double a,const dvec v1,dvec v2)
{
  v2[XX]=a*v1[XX];
  v2[YY]=a*v1[YY];
  v2[ZZ]=a*v1[ZZ];
}
static gmx_inline real distance2(const rvec v1,const rvec v2)
{
  return sqr(v2[XX]-v1[XX]) + sqr(v2[YY]-v1[YY]) + sqr(v2[ZZ]-v1[ZZ]);
}
static gmx_inline void clear_rvec(rvec a)
{
  /* The ibm compiler has problems with inlining this
   * when we use a const real variable
   */
  a[XX]=0.0;
  a[YY]=0.0;
  a[ZZ]=0.0;
}

static gmx_inline void clear_dvec(dvec a)
{
  /* The ibm compiler has problems with inlining this
   * when we use a const real variable
   */
  a[XX]=0.0;
  a[YY]=0.0;
  a[ZZ]=0.0;
}

static gmx_inline void clear_ivec(ivec a)
{
  a[XX]=0;
  a[YY]=0;
  a[ZZ]=0;
}

static gmx_inline void clear_rvecs(int n,rvec v[])
{
  /* memset(v[0],0,DIM*n*sizeof(v[0][0])); */
  int i;

  GMX_MPE_LOG(ev_clear_rvecs_start);

  for(i=0; (i<n); i++)
    clear_rvec(v[i]);

  GMX_MPE_LOG(ev_clear_rvecs_finish);
}
static gmx_inline void clear_mat(matrix a)
{
  /* memset(a[0],0,DIM*DIM*sizeof(a[0][0])); */
  const real nul=0.0;

  a[XX][XX]=a[XX][YY]=a[XX][ZZ]=nul;
  a[YY][XX]=a[YY][YY]=a[YY][ZZ]=nul;
  a[ZZ][XX]=a[ZZ][YY]=a[ZZ][ZZ]=nul;
}
static gmx_inline real iprod(const rvec a,const rvec b)
{
  return (a[XX]*b[XX]+a[YY]*b[YY]+a[ZZ]*b[ZZ]);
}

static gmx_inline double diprod(const dvec a,const dvec b)
{
  return (a[XX]*b[XX]+a[YY]*b[YY]+a[ZZ]*b[ZZ]);
}

static gmx_inline int iiprod(const ivec a,const ivec b)
{
  return (a[XX]*b[XX]+a[YY]*b[YY]+a[ZZ]*b[ZZ]);
}

static gmx_inline real norm2(const rvec a)
{
  return a[XX]*a[XX]+a[YY]*a[YY]+a[ZZ]*a[ZZ];
}

static gmx_inline double dnorm2(const dvec a)
{
  return a[XX]*a[XX]+a[YY]*a[YY]+a[ZZ]*a[ZZ];
}

static gmx_inline real norm(const rvec a)
{
  return (real)sqrt(a[XX]*a[XX]+a[YY]*a[YY]+a[ZZ]*a[ZZ]);
}

static gmx_inline double dnorm(const dvec a)
{
  return sqrt(a[XX]*a[XX]+a[YY]*a[YY]+a[ZZ]*a[ZZ]);
}
/* WARNING:
 * Do _not_ use these routines to calculate the angle between two vectors
 * as acos(cos_angle(u,v)). While it might seem obvious, the acos function
 * is very flat close to -1 and 1, which will lead to accuracy loss.
 * Instead, use the gmx_angle() function directly.
 */
static gmx_inline real cos_angle(const rvec a,const rvec b)
{
  /*
   *                  ax*bx + ay*by + az*bz
   * cos-vec (a,b) =  ---------------------
   *                      ||a|| * ||b||
   */
  real   cosval;
  int    m;
  double aa,bb,ip,ipa,ipb,ipab; /* For accuracy these must be double! */

  ip=ipa=ipb=0.0;
  for(m=0; (m<DIM); m++) {              /* 18 */
    aa   = a[m];
    bb   = b[m];
    ip  += aa*bb;
    ipa += aa*aa;
    ipb += bb*bb;
  }
  ipab = ipa*ipb;
  if (ipab > 0)
    cosval = ip*gmx_invsqrt(ipab);      /*  7 */
  else
    cosval = 1;
  /* 25 TOTAL */
  if (cosval > 1.0)
    return  1.0;
  if (cosval < -1.0)
    return -1.0;

  return cosval;
}
/* WARNING:
 * Do _not_ use these routines to calculate the angle between two vectors
 * as acos(cos_angle(u,v)). While it might seem obvious, the acos function
 * is very flat close to -1 and 1, which will lead to accuracy loss.
 * Instead, use the gmx_angle() function directly.
 */
static gmx_inline real cos_angle_no_table(const rvec a,const rvec b)
{
  /* This version does not need the invsqrt lookup table */
  real   cosval;
  int    m;
  double aa,bb,ip,ipa,ipb; /* For accuracy these must be double! */

  ip=ipa=ipb=0.0;
  for(m=0; (m<DIM); m++) {              /* 18 */
    aa   = a[m];
    bb   = b[m];
    ip  += aa*bb;
    ipa += aa*aa;
    ipb += bb*bb;
  }
  cosval=ip/sqrt(ipa*ipb);              /* 12 */
  /* 30 TOTAL */
  if (cosval > 1.0)
    return  1.0;
  if (cosval < -1.0)
    return -1.0;

  return cosval;
}
static gmx_inline void cprod(const rvec a,const rvec b,rvec c)
{
  c[XX]=a[YY]*b[ZZ]-a[ZZ]*b[YY];
  c[YY]=a[ZZ]*b[XX]-a[XX]*b[ZZ];
  c[ZZ]=a[XX]*b[YY]-a[YY]*b[XX];
}

static gmx_inline void dcprod(const dvec a,const dvec b,dvec c)
{
  c[XX]=a[YY]*b[ZZ]-a[ZZ]*b[YY];
  c[YY]=a[ZZ]*b[XX]-a[XX]*b[ZZ];
  c[ZZ]=a[XX]*b[YY]-a[YY]*b[XX];
}
/* This routine calculates the angle between a & b without any loss of accuracy close to 0/PI.
 * If you only need cos(theta), use the cos_angle() routines to save a few cycles.
 * This routine is faster than it might appear, since atan2 is accelerated on many CPUs (e.g. x86).
 */
static gmx_inline real gmx_angle(const rvec a, const rvec b)
{
  rvec w;
  real wlen,s;

  cprod(a,b,w);

  wlen = norm(w);
  s    = iprod(a,b);

  return atan2(wlen,s);
}
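
/* Preferred usage: theta = gmx_angle(u,v) rather than acos(cos_angle(u,v)).
 * gmx_angle() evaluates atan2(|u x v|, u . v), which remains accurate for
 * angles close to 0 and pi, exactly where acos() of a near-(+/-)1 cosine
 * loses precision (see the warnings above).
 */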
static gmx_inline void mmul_ur0(matrix a,matrix b,matrix dest)
{
  dest[XX][XX]=a[XX][XX]*b[XX][XX];
  dest[XX][YY]=0.0;
  dest[XX][ZZ]=0.0;
  dest[YY][XX]=a[YY][XX]*b[XX][XX]+a[YY][YY]*b[YY][XX];
  dest[YY][YY]=                    a[YY][YY]*b[YY][YY];
  dest[YY][ZZ]=0.0;
  dest[ZZ][XX]=a[ZZ][XX]*b[XX][XX]+a[ZZ][YY]*b[YY][XX]+a[ZZ][ZZ]*b[ZZ][XX];
  dest[ZZ][YY]=                    a[ZZ][YY]*b[YY][YY]+a[ZZ][ZZ]*b[ZZ][YY];
  dest[ZZ][ZZ]=                                        a[ZZ][ZZ]*b[ZZ][ZZ];
}
static gmx_inline void mmul(matrix a,matrix b,matrix dest)
{
  dest[XX][XX]=a[XX][XX]*b[XX][XX]+a[XX][YY]*b[YY][XX]+a[XX][ZZ]*b[ZZ][XX];
  dest[YY][XX]=a[YY][XX]*b[XX][XX]+a[YY][YY]*b[YY][XX]+a[YY][ZZ]*b[ZZ][XX];
  dest[ZZ][XX]=a[ZZ][XX]*b[XX][XX]+a[ZZ][YY]*b[YY][XX]+a[ZZ][ZZ]*b[ZZ][XX];
  dest[XX][YY]=a[XX][XX]*b[XX][YY]+a[XX][YY]*b[YY][YY]+a[XX][ZZ]*b[ZZ][YY];
  dest[YY][YY]=a[YY][XX]*b[XX][YY]+a[YY][YY]*b[YY][YY]+a[YY][ZZ]*b[ZZ][YY];
  dest[ZZ][YY]=a[ZZ][XX]*b[XX][YY]+a[ZZ][YY]*b[YY][YY]+a[ZZ][ZZ]*b[ZZ][YY];
  dest[XX][ZZ]=a[XX][XX]*b[XX][ZZ]+a[XX][YY]*b[YY][ZZ]+a[XX][ZZ]*b[ZZ][ZZ];
  dest[YY][ZZ]=a[YY][XX]*b[XX][ZZ]+a[YY][YY]*b[YY][ZZ]+a[YY][ZZ]*b[ZZ][ZZ];
  dest[ZZ][ZZ]=a[ZZ][XX]*b[XX][ZZ]+a[ZZ][YY]*b[YY][ZZ]+a[ZZ][ZZ]*b[ZZ][ZZ];
}
static gmx_inline void transpose(matrix src,matrix dest)
{
  dest[XX][XX]=src[XX][XX];
  dest[YY][XX]=src[XX][YY];
  dest[ZZ][XX]=src[XX][ZZ];
  dest[XX][YY]=src[YY][XX];
  dest[YY][YY]=src[YY][YY];
  dest[ZZ][YY]=src[YY][ZZ];
  dest[XX][ZZ]=src[ZZ][XX];
  dest[YY][ZZ]=src[ZZ][YY];
  dest[ZZ][ZZ]=src[ZZ][ZZ];
}
static gmx_inline void tmmul(matrix a,matrix b,matrix dest)
{
  /* Computes dest=mmul(transpose(a),b,dest) - used in do_pr_pcoupl */
  dest[XX][XX]=a[XX][XX]*b[XX][XX]+a[YY][XX]*b[YY][XX]+a[ZZ][XX]*b[ZZ][XX];
  dest[XX][YY]=a[XX][XX]*b[XX][YY]+a[YY][XX]*b[YY][YY]+a[ZZ][XX]*b[ZZ][YY];
  dest[XX][ZZ]=a[XX][XX]*b[XX][ZZ]+a[YY][XX]*b[YY][ZZ]+a[ZZ][XX]*b[ZZ][ZZ];
  dest[YY][XX]=a[XX][YY]*b[XX][XX]+a[YY][YY]*b[YY][XX]+a[ZZ][YY]*b[ZZ][XX];
  dest[YY][YY]=a[XX][YY]*b[XX][YY]+a[YY][YY]*b[YY][YY]+a[ZZ][YY]*b[ZZ][YY];
  dest[YY][ZZ]=a[XX][YY]*b[XX][ZZ]+a[YY][YY]*b[YY][ZZ]+a[ZZ][YY]*b[ZZ][ZZ];
  dest[ZZ][XX]=a[XX][ZZ]*b[XX][XX]+a[YY][ZZ]*b[YY][XX]+a[ZZ][ZZ]*b[ZZ][XX];
  dest[ZZ][YY]=a[XX][ZZ]*b[XX][YY]+a[YY][ZZ]*b[YY][YY]+a[ZZ][ZZ]*b[ZZ][YY];
  dest[ZZ][ZZ]=a[XX][ZZ]*b[XX][ZZ]+a[YY][ZZ]*b[YY][ZZ]+a[ZZ][ZZ]*b[ZZ][ZZ];
}
static gmx_inline void mtmul(matrix a,matrix b,matrix dest)
{
  /* Computes dest=mmul(a,transpose(b),dest) - used in do_pr_pcoupl */
  dest[XX][XX]=a[XX][XX]*b[XX][XX]+a[XX][YY]*b[XX][YY]+a[XX][ZZ]*b[XX][ZZ];
  dest[XX][YY]=a[XX][XX]*b[YY][XX]+a[XX][YY]*b[YY][YY]+a[XX][ZZ]*b[YY][ZZ];
  dest[XX][ZZ]=a[XX][XX]*b[ZZ][XX]+a[XX][YY]*b[ZZ][YY]+a[XX][ZZ]*b[ZZ][ZZ];
  dest[YY][XX]=a[YY][XX]*b[XX][XX]+a[YY][YY]*b[XX][YY]+a[YY][ZZ]*b[XX][ZZ];
  dest[YY][YY]=a[YY][XX]*b[YY][XX]+a[YY][YY]*b[YY][YY]+a[YY][ZZ]*b[YY][ZZ];
  dest[YY][ZZ]=a[YY][XX]*b[ZZ][XX]+a[YY][YY]*b[ZZ][YY]+a[YY][ZZ]*b[ZZ][ZZ];
  dest[ZZ][XX]=a[ZZ][XX]*b[XX][XX]+a[ZZ][YY]*b[XX][YY]+a[ZZ][ZZ]*b[XX][ZZ];
  dest[ZZ][YY]=a[ZZ][XX]*b[YY][XX]+a[ZZ][YY]*b[YY][YY]+a[ZZ][ZZ]*b[YY][ZZ];
  dest[ZZ][ZZ]=a[ZZ][XX]*b[ZZ][XX]+a[ZZ][YY]*b[ZZ][YY]+a[ZZ][ZZ]*b[ZZ][ZZ];
}
static gmx_inline real det(matrix a)
{
  return ( a[XX][XX]*(a[YY][YY]*a[ZZ][ZZ]-a[ZZ][YY]*a[YY][ZZ])
          -a[YY][XX]*(a[XX][YY]*a[ZZ][ZZ]-a[ZZ][YY]*a[XX][ZZ])
          +a[ZZ][XX]*(a[XX][YY]*a[YY][ZZ]-a[YY][YY]*a[XX][ZZ]));
}
static gmx_inline void m_add(matrix a,matrix b,matrix dest)
{
  dest[XX][XX]=a[XX][XX]+b[XX][XX];
  dest[XX][YY]=a[XX][YY]+b[XX][YY];
  dest[XX][ZZ]=a[XX][ZZ]+b[XX][ZZ];
  dest[YY][XX]=a[YY][XX]+b[YY][XX];
  dest[YY][YY]=a[YY][YY]+b[YY][YY];
  dest[YY][ZZ]=a[YY][ZZ]+b[YY][ZZ];
  dest[ZZ][XX]=a[ZZ][XX]+b[ZZ][XX];
  dest[ZZ][YY]=a[ZZ][YY]+b[ZZ][YY];
  dest[ZZ][ZZ]=a[ZZ][ZZ]+b[ZZ][ZZ];
}
static gmx_inline void m_sub(matrix a,matrix b,matrix dest)
{
  dest[XX][XX]=a[XX][XX]-b[XX][XX];
  dest[XX][YY]=a[XX][YY]-b[XX][YY];
  dest[XX][ZZ]=a[XX][ZZ]-b[XX][ZZ];
  dest[YY][XX]=a[YY][XX]-b[YY][XX];
  dest[YY][YY]=a[YY][YY]-b[YY][YY];
  dest[YY][ZZ]=a[YY][ZZ]-b[YY][ZZ];
  dest[ZZ][XX]=a[ZZ][XX]-b[ZZ][XX];
  dest[ZZ][YY]=a[ZZ][YY]-b[ZZ][YY];
  dest[ZZ][ZZ]=a[ZZ][ZZ]-b[ZZ][ZZ];
}
static gmx_inline void msmul(matrix m1,real r1,matrix dest)
{
  dest[XX][XX]=r1*m1[XX][XX];
  dest[XX][YY]=r1*m1[XX][YY];
  dest[XX][ZZ]=r1*m1[XX][ZZ];
  dest[YY][XX]=r1*m1[YY][XX];
  dest[YY][YY]=r1*m1[YY][YY];
  dest[YY][ZZ]=r1*m1[YY][ZZ];
  dest[ZZ][XX]=r1*m1[ZZ][XX];
  dest[ZZ][YY]=r1*m1[ZZ][YY];
  dest[ZZ][ZZ]=r1*m1[ZZ][ZZ];
}
static gmx_inline void m_inv_ur0(matrix src,matrix dest)
{
  double tmp = src[XX][XX]*src[YY][YY]*src[ZZ][ZZ];
  if (fabs(tmp) <= 100*GMX_REAL_MIN)
    gmx_fatal(FARGS,"Can not invert matrix, determinant is zero");

  dest[XX][XX] = 1/src[XX][XX];
  dest[YY][YY] = 1/src[YY][YY];
  dest[ZZ][ZZ] = 1/src[ZZ][ZZ];
  dest[ZZ][XX] = (src[YY][XX]*src[ZZ][YY]*dest[YY][YY]
                  - src[ZZ][XX])*dest[XX][XX]*dest[ZZ][ZZ];
  dest[YY][XX] = -src[YY][XX]*dest[XX][XX]*dest[YY][YY];
  dest[ZZ][YY] = -src[ZZ][YY]*dest[YY][YY]*dest[ZZ][ZZ];
  dest[XX][YY] = 0.0;
  dest[XX][ZZ] = 0.0;
  dest[YY][ZZ] = 0.0;
}
static gmx_inline void m_inv(matrix src,matrix dest)
{
  const real smallreal = (real)1.0e-24;
  const real largereal = (real)1.0e24;
  real       deter,c,fc;

  deter = det(src);
  c     = (real)1.0/deter;
  fc    = (real)fabs(c);

  if ((fc <= smallreal) || (fc >= largereal))
    gmx_fatal(FARGS,"Can not invert matrix, determinant = %e",deter);

  dest[XX][XX]= c*(src[YY][YY]*src[ZZ][ZZ]-src[ZZ][YY]*src[YY][ZZ]);
  dest[XX][YY]=-c*(src[XX][YY]*src[ZZ][ZZ]-src[ZZ][YY]*src[XX][ZZ]);
  dest[XX][ZZ]= c*(src[XX][YY]*src[YY][ZZ]-src[YY][YY]*src[XX][ZZ]);
  dest[YY][XX]=-c*(src[YY][XX]*src[ZZ][ZZ]-src[ZZ][XX]*src[YY][ZZ]);
  dest[YY][YY]= c*(src[XX][XX]*src[ZZ][ZZ]-src[ZZ][XX]*src[XX][ZZ]);
  dest[YY][ZZ]=-c*(src[XX][XX]*src[YY][ZZ]-src[YY][XX]*src[XX][ZZ]);
  dest[ZZ][XX]= c*(src[YY][XX]*src[ZZ][YY]-src[ZZ][XX]*src[YY][YY]);
  dest[ZZ][YY]=-c*(src[XX][XX]*src[ZZ][YY]-src[ZZ][XX]*src[XX][YY]);
  dest[ZZ][ZZ]= c*(src[XX][XX]*src[YY][YY]-src[YY][XX]*src[XX][YY]);
}
static gmx_inline void mvmul(matrix a,const rvec src,rvec dest)
{
  dest[XX]=a[XX][XX]*src[XX]+a[XX][YY]*src[YY]+a[XX][ZZ]*src[ZZ];
  dest[YY]=a[YY][XX]*src[XX]+a[YY][YY]*src[YY]+a[YY][ZZ]*src[ZZ];
  dest[ZZ]=a[ZZ][XX]*src[XX]+a[ZZ][YY]*src[YY]+a[ZZ][ZZ]*src[ZZ];
}
static gmx_inline void mvmul_ur0(matrix a,const rvec src,rvec dest)
{
  dest[ZZ]=a[ZZ][XX]*src[XX]+a[ZZ][YY]*src[YY]+a[ZZ][ZZ]*src[ZZ];
  dest[YY]=a[YY][XX]*src[XX]+a[YY][YY]*src[YY];
  dest[XX]=a[XX][XX]*src[XX];
}
static gmx_inline void tmvmul_ur0(matrix a,const rvec src,rvec dest)
{
  dest[XX]=a[XX][XX]*src[XX]+a[YY][XX]*src[YY]+a[ZZ][XX]*src[ZZ];
  dest[YY]=                  a[YY][YY]*src[YY]+a[ZZ][YY]*src[ZZ];
  dest[ZZ]=                                    a[ZZ][ZZ]*src[ZZ];
}
static gmx_inline void unitv(const rvec src,rvec dest)
{
  real linv;

  linv=gmx_invsqrt(norm2(src));
  dest[XX]=linv*src[XX];
  dest[YY]=linv*src[YY];
  dest[ZZ]=linv*src[ZZ];
}

static gmx_inline void unitv_no_table(const rvec src,rvec dest)
{
  real linv;

  linv=1.0/sqrt(norm2(src));
  dest[XX]=linv*src[XX];
  dest[YY]=linv*src[YY];
  dest[ZZ]=linv*src[ZZ];
}
static void calc_lll(rvec box,rvec lll)
{
  lll[XX] = 2.0*M_PI/box[XX];
  lll[YY] = 2.0*M_PI/box[YY];
  lll[ZZ] = 2.0*M_PI/box[ZZ];
}
static gmx_inline real trace(matrix m)
{
  return (m[XX][XX]+m[YY][YY]+m[ZZ][ZZ]);
}
static gmx_inline real _divide_err(real a,real b,const char *file,int line)
{
  if (fabs(b) <= GMX_REAL_MIN)
    gmx_fatal(FARGS,"Dividing by zero, file %s, line %d",file,line);
  return a/b;
}
static gmx_inline int _mod(int a,int b,char *file,int line)
{
  if (b == 0)
    gmx_fatal(FARGS,"Modulo zero, file %s, line %d",file,line);
  return a % b;
}
/* Operations on multidimensional rvecs, used e.g. in edsam.c */
static void m_rveccopy(int dim, rvec *a, rvec *b)
{
  /* b = a */
  int i;

  for (i=0; i<dim; i++)
    copy_rvec(a[i],b[i]);
}
/* Compute the box matrix vectors from base vector lengths and angles */
static void matrix_convert(matrix box, rvec vec, rvec angle)
{
  svmul(DEG2RAD,angle,angle);
  box[XX][XX] = vec[XX];
  box[YY][XX] = vec[YY]*cos(angle[ZZ]);
  box[YY][YY] = vec[YY]*sin(angle[ZZ]);
  box[ZZ][XX] = vec[ZZ]*cos(angle[YY]);
  box[ZZ][YY] = vec[ZZ]
    *(cos(angle[XX])-cos(angle[YY])*cos(angle[ZZ]))/sin(angle[ZZ]);
  box[ZZ][ZZ] = sqrt(sqr(vec[ZZ])
                     -box[ZZ][XX]*box[ZZ][XX]-box[ZZ][YY]*box[ZZ][YY]);
}
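
/* Illustrative sketch (not part of the original header) of matrix_convert:
 * building a triclinic box matrix from edge lengths and angles in degrees.
 * Note that matrix_convert converts the angles to radians in place via
 * svmul(DEG2RAD,...). The block is compiled out with #if 0 and the numbers
 * are made up for the example.
 */
#if 0
{
  matrix box;
  rvec   lengths = {4.0, 4.0, 4.0};     /* box vector lengths          */
  rvec   angles  = {90.0, 90.0, 60.0};  /* alpha, beta, gamma, degrees */

  matrix_convert(box, lengths, angles);
}
#endif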
#define divide_err(a,b) _divide_err((a),(b),__FILE__,__LINE__)
#define mod(a,b)        _mod((a),(b),__FILE__,__LINE__)
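
/* divide_err(a,b) and mod(a,b) behave like a/b and a%b, but abort via
 * gmx_fatal() with the caller's file and line when b is (numerically) zero.
 */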