/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 * This file is part of GROMACS.
 *
 * Written by the Gromacs development team under coordination of
 * David van der Spoel, Berk Hess, and Erik Lindahl.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org
 *
 * And Hey:
 * Gnomes, ROck Monsters And Chili Sauce
 */
#ifndef _gmx_math_x86_sse4_1_double_h_
#define _gmx_math_x86_sse4_1_double_h_

#include <math.h>

#include "gmx_x86_sse4_1.h"

#ifndef M_PI
#  define M_PI 3.14159265358979323846264338327950288
#endif
/************************
 *                      *
 * Simple math routines *
 *                      *
 ************************/
/* 1.0/sqrt(x): single-precision rsqrt lookup refined by two Newton-Raphson steps */
static gmx_inline __m128d
gmx_mm_invsqrt_pd(__m128d x)
{
    const __m128d half  = _mm_set1_pd(0.5);
    const __m128d three = _mm_set1_pd(3.0);

    /* Lookup instruction only exists in single precision, convert back and forth... */
    __m128d lu = _mm_cvtps_pd(_mm_rsqrt_ps( _mm_cvtpd_ps(x)));

    lu = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu,lu),x)),lu));
    return _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu,lu),x)),lu));
}
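
/* Added note (illustrative sketch, not part of the original header): each refinement line
 * above is one Newton-Raphson iteration for 1/sqrt(x), applied twice to lift the
 * single-precision lookup to double precision accuracy. Scalar form of one step:
 */
static gmx_inline double
gmx_mm_invsqrt_nr_step_sketch(double lu, double x)
{
    return 0.5*lu*(3.0 - x*lu*lu);   /* y' = 0.5*y*(3 - x*y^2) */
}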
/* 1.0/sqrt(x), done for a pair of arguments to improve throughput */
static gmx_inline void
gmx_mm_invsqrt_pair_pd(__m128d x1, __m128d x2, __m128d *invsqrt1, __m128d *invsqrt2)
{
    const __m128d half   = _mm_set1_pd(0.5);
    const __m128d three  = _mm_set1_pd(3.0);
    const __m128  halff  = _mm_set1_ps(0.5f);
    const __m128  threef = _mm_set1_ps(3.0f);

    __m128  xf,luf;
    __m128d lu1,lu2;

    /* Do first N-R step in float for 2x throughput */
    xf  = _mm_shuffle_ps(_mm_cvtpd_ps(x1),_mm_cvtpd_ps(x2),_MM_SHUFFLE(1,0,1,0));
    luf = _mm_rsqrt_ps(xf);
    luf = _mm_mul_ps(halff,_mm_mul_ps(_mm_sub_ps(threef,_mm_mul_ps(_mm_mul_ps(luf,luf),xf)),luf));

    lu2 = _mm_cvtps_pd(_mm_shuffle_ps(luf,luf,_MM_SHUFFLE(3,2,3,2)));
    lu1 = _mm_cvtps_pd(luf);

    /* Second N-R step in double precision for each argument */
    *invsqrt1 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu1,lu1),x1)),lu1));
    *invsqrt2 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu2,lu2),x2)),lu2));
}
/* sqrt(x) - Do NOT use this (but rather invsqrt) if you actually need 1.0/sqrt(x) */
static gmx_inline __m128d
gmx_mm_sqrt_pd(__m128d x)
{
    __m128d mask;
    __m128d res;

    mask = _mm_cmpeq_pd(x,_mm_setzero_pd());
    res  = _mm_andnot_pd(mask,gmx_mm_invsqrt_pd(x));

    res  = _mm_mul_pd(x,res);

    return res;
}
static gmx_inline __m128d
gmx_mm_inv_pd(__m128d x)
{
    const __m128d two = _mm_set1_pd(2.0);

    /* Lookup instruction only exists in single precision, convert back and forth... */
    __m128d lu = _mm_cvtps_pd(_mm_rcp_ps( _mm_cvtpd_ps(x)));

    /* Perform two N-R steps for double precision */
    lu = _mm_mul_pd(lu,_mm_sub_pd(two,_mm_mul_pd(x,lu)));
    return _mm_mul_pd(lu,_mm_sub_pd(two,_mm_mul_pd(x,lu)));
}
static gmx_inline __m128d
gmx_mm_abs_pd(__m128d x)
{
    const __m128d signmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );

    return _mm_and_pd(x,signmask);
}
/* Exponential function, 2^x.
 *
 * The 2^w term is calculated from a (6,0)-th order (no denominator) minimax polynomial
 * approximation on the interval [-0.5,0.5].
 *
 * The approximation on [-0.5,0.5] is a rational Padé approximation, 1+2*P(x^2)/(Q(x^2)-P(x^2)),
 * according to the same algorithm as used in the Cephes/netlib math routines.
 */
static gmx_inline __m128d
gmx_mm_exp2_pd(__m128d x)
{
    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
    const __m128d arglimit = _mm_set1_pd(1022.0);
    const __m128i expbase  = _mm_set1_epi32(1023);

    const __m128d P2       = _mm_set1_pd(2.30933477057345225087e-2);
    const __m128d P1       = _mm_set1_pd(2.02020656693165307700e1);
    const __m128d P0       = _mm_set1_pd(1.51390680115615096133e3);
    const __m128d Q1       = _mm_set1_pd(2.33184211722314911771e2);
    const __m128d Q0       = _mm_set1_pd(4.36821166879210612817e3);
    const __m128d one      = _mm_set1_pd(1.0);
    const __m128d two      = _mm_set1_pd(2.0);

    __m128d       valuemask;
    __m128i       iexppart;
    __m128d       fexppart;
    __m128d       intpart;
    __m128d       z,z2;
    __m128d       PolyP,PolyQ;

    iexppart  = _mm_cvtpd_epi32(x);
    intpart   = _mm_round_pd(x,_MM_FROUND_TO_NEAREST_INT);

    /* The two lowest elements of iexppart now contain 32-bit numbers with a correctly biased exponent.
     * To be able to shift it into the exponent for a double precision number we first need to
     * shuffle so that the lower half contains the first element, and the upper half the second.
     * This should really be done as a zero-extension, but since the next instructions will shift
     * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
     * (thus we just use element 2 from iexppart).
     */
    iexppart  = _mm_shuffle_epi32(iexppart,_MM_SHUFFLE(2,1,2,0));

    /* Do the shift operation on the 64-bit registers */
    iexppart  = _mm_add_epi32(iexppart,expbase);
    iexppart  = _mm_slli_epi64(iexppart,52);

    valuemask = _mm_cmpge_pd(arglimit,gmx_mm_abs_pd(x));
    fexppart  = _mm_and_pd(valuemask,gmx_mm_castsi128_pd(iexppart));

    z         = _mm_sub_pd(x,intpart);
    z2        = _mm_mul_pd(z,z);

    PolyP     = _mm_mul_pd(P2,z2);
    PolyP     = _mm_add_pd(PolyP,P1);
    PolyQ     = _mm_add_pd(z2,Q1);
    PolyP     = _mm_mul_pd(PolyP,z2);
    PolyQ     = _mm_mul_pd(PolyQ,z2);
    PolyP     = _mm_add_pd(PolyP,P0);
    PolyQ     = _mm_add_pd(PolyQ,Q0);
    PolyP     = _mm_mul_pd(PolyP,z);

    /* Assemble 1+2*P/(Q-P) for the fractional part, then scale by 2^intpart */
    z         = _mm_mul_pd(PolyP,gmx_mm_inv_pd(_mm_sub_pd(PolyQ,PolyP)));
    z         = _mm_add_pd(one,_mm_mul_pd(two,z));

    z         = _mm_mul_pd(z,fexppart);

    return z;
}
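
/* Added illustrative scalar sketch (not part of the original header): the SIMD code above
 * splits x into a nearest integer n and a remainder w in [-0.5,0.5], builds 2^n by writing
 * the biased exponent n+1023 into bits 52..62 of a double, and evaluates 2^w with the Padé
 * approximation. The reference below reproduces the same split using library calls only.
 */
static gmx_inline double
gmx_mm_exp2_scalar_sketch(double x)
{
    double n = floor(x+0.5);                          /* nearest integer (SIMD code: _mm_round_pd) */
    double w = x-n;                                   /* remainder in [-0.5,0.5]                   */
    return ldexp(exp(w*0.69314718055994531), (int)n); /* 2^n * 2^w                                 */
}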
/* Exponential function. This could be calculated from 2^x as Exp(x)=2^(y), where y=log2(e)*x,
 * but there will then be a small rounding error since we lose some precision due to the
 * multiplication. This will then be magnified a lot by the exponential.
 *
 * Instead, we calculate the fractional part directly as a Padé approximation of
 * Exp(z) on [-0.5,0.5]. We use extended precision arithmetics to calculate the fraction
 * remaining after 2^y, which avoids the precision-loss.
 */
static gmx_inline __m128d
gmx_mm_exp_pd(__m128d exparg)
{
    const __m128d argscale     = _mm_set1_pd(1.4426950408889634073599);
    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
    const __m128d arglimit     = _mm_set1_pd(1022.0);
    const __m128i expbase      = _mm_set1_epi32(1023);

    const __m128d invargscale0 = _mm_set1_pd(6.93145751953125e-1);
    const __m128d invargscale1 = _mm_set1_pd(1.42860682030941723212e-6);

    const __m128d P2           = _mm_set1_pd(1.26177193074810590878e-4);
    const __m128d P1           = _mm_set1_pd(3.02994407707441961300e-2);
    /* P0 == 1.0 (added to PolyP as 'one' below) */
    const __m128d Q3           = _mm_set1_pd(3.00198505138664455042E-6);
    const __m128d Q2           = _mm_set1_pd(2.52448340349684104192E-3);
    const __m128d Q1           = _mm_set1_pd(2.27265548208155028766E-1);
    /* Q0 == 2.0 (added to PolyQ as 'two' below) */
    const __m128d one          = _mm_set1_pd(1.0);
    const __m128d two          = _mm_set1_pd(2.0);

    __m128d       valuemask;
    __m128i       iexppart;
    __m128d       fexppart;
    __m128d       intpart;
    __m128d       x,z,z2;
    __m128d       PolyP,PolyQ;

    x         = _mm_mul_pd(exparg,argscale);

    iexppart  = _mm_cvtpd_epi32(x);
    intpart   = _mm_round_pd(x,_MM_FROUND_TO_NEAREST_INT);

    /* The two lowest elements of iexppart now contain 32-bit numbers with a correctly biased exponent.
     * To be able to shift it into the exponent for a double precision number we first need to
     * shuffle so that the lower half contains the first element, and the upper half the second.
     * This should really be done as a zero-extension, but since the next instructions will shift
     * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
     * (thus we just use element 2 from iexppart).
     */
    iexppart  = _mm_shuffle_epi32(iexppart,_MM_SHUFFLE(2,1,2,0));

    /* Do the shift operation on the 64-bit registers */
    iexppart  = _mm_add_epi32(iexppart,expbase);
    iexppart  = _mm_slli_epi64(iexppart,52);

    valuemask = _mm_cmpge_pd(arglimit,gmx_mm_abs_pd(x));
    fexppart  = _mm_and_pd(valuemask,gmx_mm_castsi128_pd(iexppart));

    /* Extended precision arithmetics: subtract intpart*ln(2) in two steps */
    z         = _mm_sub_pd(exparg,_mm_mul_pd(invargscale0,intpart));
    z         = _mm_sub_pd(z,_mm_mul_pd(invargscale1,intpart));

    z2        = _mm_mul_pd(z,z);

    PolyQ     = _mm_mul_pd(Q3,z2);
    PolyQ     = _mm_add_pd(PolyQ,Q2);
    PolyP     = _mm_mul_pd(P2,z2);
    PolyQ     = _mm_mul_pd(PolyQ,z2);
    PolyP     = _mm_add_pd(PolyP,P1);
    PolyQ     = _mm_add_pd(PolyQ,Q1);
    PolyP     = _mm_mul_pd(PolyP,z2);
    PolyQ     = _mm_mul_pd(PolyQ,z2);
    PolyP     = _mm_add_pd(PolyP,one);
    PolyQ     = _mm_add_pd(PolyQ,two);

    PolyP     = _mm_mul_pd(PolyP,z);

    z         = _mm_mul_pd(PolyP,gmx_mm_inv_pd(_mm_sub_pd(PolyQ,PolyP)));
    z         = _mm_add_pd(one,_mm_mul_pd(two,z));

    z         = _mm_mul_pd(z,fexppart);

    return z;
}
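
/* Added illustrative scalar sketch (not part of the original header): the two invargscale
 * constants above split ln(2) into a high part and a low part, so subtracting n*ln2_hi and
 * then n*ln2_lo keeps the remainder accurate even though n*ln(2) itself is not exactly
 * representable. The same values are reused below for reference only.
 */
static gmx_inline double
gmx_mm_exp_argreduction_sketch(double x)
{
    const double ln2hi = 6.93145751953125e-1;           /* same value as invargscale0 above */
    const double ln2lo = 1.42860682030941723212e-6;     /* same value as invargscale1 above */
    double n = floor(x*1.4426950408889634073599 + 0.5); /* nearest multiple of ln(2)        */
    double z = (x - n*ln2hi) - n*ln2lo;                 /* two-step remainder               */
    return ldexp(exp(z),(int)n);                        /* reassemble exp(x) for reference  */
}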
static gmx_inline __m128d
gmx_mm_log_pd(__m128d x)
{
    /* Same algorithm as cephes library */
    const __m128d expmask    = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000) );

    const __m128i expbase_m1 = _mm_set1_epi32(1023-1); /* We want non-IEEE format */

    const __m128d half       = _mm_set1_pd(0.5);
    const __m128d one        = _mm_set1_pd(1.0);
    const __m128d two        = _mm_set1_pd(2.0);
    const __m128d invsq2     = _mm_set1_pd(1.0/sqrt(2.0));

    const __m128d corr1      = _mm_set1_pd(-2.121944400546905827679e-4);
    const __m128d corr2      = _mm_set1_pd(0.693359375);

    const __m128d P5         = _mm_set1_pd(1.01875663804580931796e-4);
    const __m128d P4         = _mm_set1_pd(4.97494994976747001425e-1);
    const __m128d P3         = _mm_set1_pd(4.70579119878881725854e0);
    const __m128d P2         = _mm_set1_pd(1.44989225341610930846e1);
    const __m128d P1         = _mm_set1_pd(1.79368678507819816313e1);
    const __m128d P0         = _mm_set1_pd(7.70838733755885391666e0);

    const __m128d Q4         = _mm_set1_pd(1.12873587189167450590e1);
    const __m128d Q3         = _mm_set1_pd(4.52279145837532221105e1);
    const __m128d Q2         = _mm_set1_pd(8.29875266912776603211e1);
    const __m128d Q1         = _mm_set1_pd(7.11544750618563894466e1);
    const __m128d Q0         = _mm_set1_pd(2.31251620126765340583e1);

    const __m128d R2         = _mm_set1_pd(-7.89580278884799154124e-1);
    const __m128d R1         = _mm_set1_pd(1.63866645699558079767e1);
    const __m128d R0         = _mm_set1_pd(-6.41409952958715622951e1);

    const __m128d S2         = _mm_set1_pd(-3.56722798256324312549E1);
    const __m128d S1         = _mm_set1_pd(3.12093766372244180303E2);
    const __m128d S0         = _mm_set1_pd(-7.69691943550460008604E2);

    __m128d       fexp;
    __m128i       iexp;

    __m128d       mask1,mask2;
    __m128d       corr,t1,t2,q;
    __m128d       zA,yA,xA,zB,yB,xB,z;
    __m128d       polyR,polyS;
    __m128d       polyP1,polyP2,polyQ1,polyQ2;

    /* Separate x into exponent and mantissa, with a mantissa in the range [0.5..1[ (not IEEE754 standard!) */
    fexp   = _mm_and_pd(x,expmask);
    iexp   = gmx_mm_castpd_si128(fexp);
    iexp   = _mm_srli_epi64(iexp,52);
    iexp   = _mm_sub_epi32(iexp,expbase_m1);
    iexp   = _mm_shuffle_epi32(iexp, _MM_SHUFFLE(1,1,2,0) );
    fexp   = _mm_cvtepi32_pd(iexp);

    x      = _mm_andnot_pd(expmask,x);
    x      = _mm_or_pd(x,one);
    x      = _mm_mul_pd(x,half);

    mask1  = _mm_cmpgt_pd(gmx_mm_abs_pd(fexp),two);
    mask2  = _mm_cmplt_pd(x,invsq2);

    fexp   = _mm_sub_pd(fexp,_mm_and_pd(mask2,one));

    /* If mask1 is set ('A') */
    zA     = _mm_sub_pd(x,half);
    t1     = _mm_blendv_pd( zA,x,mask2 );
    zA     = _mm_sub_pd(t1,half);
    t2     = _mm_blendv_pd( x,zA,mask2 );
    yA     = _mm_mul_pd(half,_mm_add_pd(t2,one));

    xA     = _mm_mul_pd(zA,gmx_mm_inv_pd(yA));
    zA     = _mm_mul_pd(xA,xA);

    polyR  = _mm_mul_pd(R2,zA);
    polyR  = _mm_add_pd(polyR,R1);
    polyR  = _mm_mul_pd(polyR,zA);
    polyR  = _mm_add_pd(polyR,R0);

    polyS  = _mm_add_pd(zA,S2);
    polyS  = _mm_mul_pd(polyS,zA);
    polyS  = _mm_add_pd(polyS,S1);
    polyS  = _mm_mul_pd(polyS,zA);
    polyS  = _mm_add_pd(polyS,S0);

    q      = _mm_mul_pd(polyR,gmx_mm_inv_pd(polyS));
    zA     = _mm_mul_pd(_mm_mul_pd(xA,zA),q);

    zA     = _mm_add_pd(zA,_mm_mul_pd(corr1,fexp));
    zA     = _mm_add_pd(zA,xA);
    zA     = _mm_add_pd(zA,_mm_mul_pd(corr2,fexp));

    /* If mask1 is not set ('B') */
    corr   = _mm_and_pd(mask2,x);
    xB     = _mm_add_pd(x,corr);
    xB     = _mm_sub_pd(xB,one);
    zB     = _mm_mul_pd(xB,xB);

    polyP1 = _mm_mul_pd(P5,zB);
    polyP2 = _mm_mul_pd(P4,zB);
    polyP1 = _mm_add_pd(polyP1,P3);
    polyP2 = _mm_add_pd(polyP2,P2);
    polyP1 = _mm_mul_pd(polyP1,zB);
    polyP2 = _mm_mul_pd(polyP2,zB);
    polyP1 = _mm_add_pd(polyP1,P1);
    polyP2 = _mm_add_pd(polyP2,P0);
    polyP1 = _mm_mul_pd(polyP1,xB);
    polyP1 = _mm_add_pd(polyP1,polyP2);

    polyQ2 = _mm_mul_pd(Q4,zB);
    polyQ1 = _mm_add_pd(zB,Q3);
    polyQ2 = _mm_add_pd(polyQ2,Q2);
    polyQ1 = _mm_mul_pd(polyQ1,zB);
    polyQ2 = _mm_mul_pd(polyQ2,zB);
    polyQ1 = _mm_add_pd(polyQ1,Q1);
    polyQ2 = _mm_add_pd(polyQ2,Q0);
    polyQ1 = _mm_mul_pd(polyQ1,xB);
    polyQ1 = _mm_add_pd(polyQ1,polyQ2);

    fexp   = _mm_and_pd(fexp,_mm_cmpneq_pd(fexp,_mm_setzero_pd()));

    q      = _mm_mul_pd(polyP1,gmx_mm_inv_pd(polyQ1));
    yB     = _mm_mul_pd(_mm_mul_pd(xB,zB),q);

    yB     = _mm_add_pd(yB,_mm_mul_pd(corr1,fexp));
    yB     = _mm_sub_pd(yB,_mm_mul_pd(half,zB));
    zB     = _mm_add_pd(xB,yB);
    zB     = _mm_add_pd(zB,_mm_mul_pd(corr2,fexp));

    z      = _mm_blendv_pd( zB,zA,mask1 );

    return z;
}
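
/* Added illustrative scalar sketch (not part of the original header): the SIMD code above
 * separates x into a mantissa m in [0.5,1) and an exponent e, evaluates log(m) with the
 * rational polynomials, and re-adds e*ln(2) via the split constants corr1/corr2. The
 * reference below performs the same decomposition with library calls only.
 */
static gmx_inline double
gmx_mm_log_scalar_sketch(double x)
{
    int    e;
    double m = frexp(x,&e);                 /* m in [0.5,1), x = m * 2^e                  */
    return log(m) + e*0.69314718055994531;  /* library log(m) stands in for the P/Q fits  */
}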
static gmx_inline __m128d
gmx_mm_erf_pd(__m128d x)
{
    /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
    const __m128d CAP4     = _mm_set1_pd(-0.431780540597889301512e-4);
    const __m128d CAP3     = _mm_set1_pd(-0.00578562306260059236059);
    const __m128d CAP2     = _mm_set1_pd(-0.028593586920219752446);
    const __m128d CAP1     = _mm_set1_pd(-0.315924962948621698209);
    const __m128d CAP0     = _mm_set1_pd(0.14952975608477029151);

    const __m128d CAQ5     = _mm_set1_pd(-0.374089300177174709737e-5);
    const __m128d CAQ4     = _mm_set1_pd(0.00015126584532155383535);
    const __m128d CAQ3     = _mm_set1_pd(0.00536692680669480725423);
    const __m128d CAQ2     = _mm_set1_pd(0.0668686825594046122636);
    const __m128d CAQ1     = _mm_set1_pd(0.402604990869284362773);

    const __m128d CAoffset = _mm_set1_pd(0.9788494110107421875);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
    const __m128d CBP6     = _mm_set1_pd(2.49650423685462752497647637088e-10);
    const __m128d CBP5     = _mm_set1_pd(0.00119770193298159629350136085658);
    const __m128d CBP4     = _mm_set1_pd(0.0164944422378370965881008942733);
    const __m128d CBP3     = _mm_set1_pd(0.0984581468691775932063932439252);
    const __m128d CBP2     = _mm_set1_pd(0.317364595806937763843589437418);
    const __m128d CBP1     = _mm_set1_pd(0.554167062641455850932670067075);
    const __m128d CBP0     = _mm_set1_pd(0.427583576155807163756925301060);
    const __m128d CBQ7     = _mm_set1_pd(0.00212288829699830145976198384930);
    const __m128d CBQ6     = _mm_set1_pd(0.0334810979522685300554606393425);
    const __m128d CBQ5     = _mm_set1_pd(0.2361713785181450957579508850717);
    const __m128d CBQ4     = _mm_set1_pd(0.955364736493055670530981883072);
    const __m128d CBQ3     = _mm_set1_pd(2.36815675631420037315349279199);
    const __m128d CBQ2     = _mm_set1_pd(3.55261649184083035537184223542);
    const __m128d CBQ1     = _mm_set1_pd(2.93501136050160872574376997993);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
    const __m128d CCP6     = _mm_set1_pd(-2.8175401114513378771);
    const __m128d CCP5     = _mm_set1_pd(-3.22729451764143718517);
    const __m128d CCP4     = _mm_set1_pd(-2.5518551727311523996);
    const __m128d CCP3     = _mm_set1_pd(-0.687717681153649930619);
    const __m128d CCP2     = _mm_set1_pd(-0.212652252872804219852);
    const __m128d CCP1     = _mm_set1_pd(0.0175389834052493308818);
    const __m128d CCP0     = _mm_set1_pd(0.00628057170626964891937);

    const __m128d CCQ6     = _mm_set1_pd(5.48409182238641741584);
    const __m128d CCQ5     = _mm_set1_pd(13.5064170191802889145);
    const __m128d CCQ4     = _mm_set1_pd(22.9367376522880577224);
    const __m128d CCQ3     = _mm_set1_pd(15.930646027911794143);
    const __m128d CCQ2     = _mm_set1_pd(11.0567237927800161565);
    const __m128d CCQ1     = _mm_set1_pd(2.79257750980575282228);

    const __m128d CCoffset = _mm_set1_pd(0.5579090118408203125);

    const __m128d one      = _mm_set1_pd(1.0);
    const __m128d two      = _mm_set1_pd(2.0);

    const __m128d signbit  = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000) );

    __m128d xabs,x2,x4,t,t2,w,w2;
    __m128d PolyAP0,PolyAP1,PolyAQ0,PolyAQ1;
    __m128d PolyBP0,PolyBP1,PolyBQ0,PolyBQ1;
    __m128d PolyCP0,PolyCP1,PolyCQ0,PolyCQ1;
    __m128d res_erf,res_erfcB,res_erfcC,res_erfc,res;
    __m128d mask,expmx2;

    /* Calculate erf() */
    xabs      = gmx_mm_abs_pd(x);
    x2        = _mm_mul_pd(x,x);
    x4        = _mm_mul_pd(x2,x2);

    PolyAP0   = _mm_mul_pd(CAP4,x4);
    PolyAP1   = _mm_mul_pd(CAP3,x4);
    PolyAP0   = _mm_add_pd(PolyAP0,CAP2);
    PolyAP1   = _mm_add_pd(PolyAP1,CAP1);
    PolyAP0   = _mm_mul_pd(PolyAP0,x4);
    PolyAP1   = _mm_mul_pd(PolyAP1,x2);
    PolyAP0   = _mm_add_pd(PolyAP0,CAP0);
    PolyAP0   = _mm_add_pd(PolyAP0,PolyAP1);

    PolyAQ1   = _mm_mul_pd(CAQ5,x4);
    PolyAQ0   = _mm_mul_pd(CAQ4,x4);
    PolyAQ1   = _mm_add_pd(PolyAQ1,CAQ3);
    PolyAQ0   = _mm_add_pd(PolyAQ0,CAQ2);
    PolyAQ1   = _mm_mul_pd(PolyAQ1,x4);
    PolyAQ0   = _mm_mul_pd(PolyAQ0,x4);
    PolyAQ1   = _mm_add_pd(PolyAQ1,CAQ1);
    PolyAQ0   = _mm_add_pd(PolyAQ0,one);
    PolyAQ1   = _mm_mul_pd(PolyAQ1,x2);
    PolyAQ0   = _mm_add_pd(PolyAQ0,PolyAQ1);

    res_erf   = _mm_mul_pd(PolyAP0,gmx_mm_inv_pd(PolyAQ0));
    res_erf   = _mm_add_pd(CAoffset,res_erf);
    res_erf   = _mm_mul_pd(x,res_erf);

    /* Calculate erfc() in range [1,4.5] */
    t         = _mm_sub_pd(xabs,one);
    t2        = _mm_mul_pd(t,t);

    PolyBP0   = _mm_mul_pd(CBP6,t2);
    PolyBP1   = _mm_mul_pd(CBP5,t2);
    PolyBP0   = _mm_add_pd(PolyBP0,CBP4);
    PolyBP1   = _mm_add_pd(PolyBP1,CBP3);
    PolyBP0   = _mm_mul_pd(PolyBP0,t2);
    PolyBP1   = _mm_mul_pd(PolyBP1,t2);
    PolyBP0   = _mm_add_pd(PolyBP0,CBP2);
    PolyBP1   = _mm_add_pd(PolyBP1,CBP1);
    PolyBP0   = _mm_mul_pd(PolyBP0,t2);
    PolyBP1   = _mm_mul_pd(PolyBP1,t);
    PolyBP0   = _mm_add_pd(PolyBP0,CBP0);
    PolyBP0   = _mm_add_pd(PolyBP0,PolyBP1);

    PolyBQ1   = _mm_mul_pd(CBQ7,t2);
    PolyBQ0   = _mm_mul_pd(CBQ6,t2);
    PolyBQ1   = _mm_add_pd(PolyBQ1,CBQ5);
    PolyBQ0   = _mm_add_pd(PolyBQ0,CBQ4);
    PolyBQ1   = _mm_mul_pd(PolyBQ1,t2);
    PolyBQ0   = _mm_mul_pd(PolyBQ0,t2);
    PolyBQ1   = _mm_add_pd(PolyBQ1,CBQ3);
    PolyBQ0   = _mm_add_pd(PolyBQ0,CBQ2);
    PolyBQ1   = _mm_mul_pd(PolyBQ1,t2);
    PolyBQ0   = _mm_mul_pd(PolyBQ0,t2);
    PolyBQ1   = _mm_add_pd(PolyBQ1,CBQ1);
    PolyBQ0   = _mm_add_pd(PolyBQ0,one);
    PolyBQ1   = _mm_mul_pd(PolyBQ1,t);
    PolyBQ0   = _mm_add_pd(PolyBQ0,PolyBQ1);

    res_erfcB = _mm_mul_pd(PolyBP0,gmx_mm_inv_pd(PolyBQ0));

    res_erfcB = _mm_mul_pd(res_erfcB,xabs);

    /* Calculate erfc() in range [4.5,inf] */
    w         = gmx_mm_inv_pd(xabs);
    w2        = _mm_mul_pd(w,w);

    PolyCP0   = _mm_mul_pd(CCP6,w2);
    PolyCP1   = _mm_mul_pd(CCP5,w2);
    PolyCP0   = _mm_add_pd(PolyCP0,CCP4);
    PolyCP1   = _mm_add_pd(PolyCP1,CCP3);
    PolyCP0   = _mm_mul_pd(PolyCP0,w2);
    PolyCP1   = _mm_mul_pd(PolyCP1,w2);
    PolyCP0   = _mm_add_pd(PolyCP0,CCP2);
    PolyCP1   = _mm_add_pd(PolyCP1,CCP1);
    PolyCP0   = _mm_mul_pd(PolyCP0,w2);
    PolyCP1   = _mm_mul_pd(PolyCP1,w);
    PolyCP0   = _mm_add_pd(PolyCP0,CCP0);
    PolyCP0   = _mm_add_pd(PolyCP0,PolyCP1);

    PolyCQ0   = _mm_mul_pd(CCQ6,w2);
    PolyCQ1   = _mm_mul_pd(CCQ5,w2);
    PolyCQ0   = _mm_add_pd(PolyCQ0,CCQ4);
    PolyCQ1   = _mm_add_pd(PolyCQ1,CCQ3);
    PolyCQ0   = _mm_mul_pd(PolyCQ0,w2);
    PolyCQ1   = _mm_mul_pd(PolyCQ1,w2);
    PolyCQ0   = _mm_add_pd(PolyCQ0,CCQ2);
    PolyCQ1   = _mm_add_pd(PolyCQ1,CCQ1);
    PolyCQ0   = _mm_mul_pd(PolyCQ0,w2);
    PolyCQ1   = _mm_mul_pd(PolyCQ1,w);
    PolyCQ0   = _mm_add_pd(PolyCQ0,one);
    PolyCQ0   = _mm_add_pd(PolyCQ0,PolyCQ1);

    /* Calculate exp(-x^2) */
    expmx2    = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );

    res_erfcC = _mm_mul_pd(PolyCP0,gmx_mm_inv_pd(PolyCQ0));
    res_erfcC = _mm_add_pd(res_erfcC,CCoffset);
    res_erfcC = _mm_mul_pd(res_erfcC,w);

    mask      = _mm_cmpgt_pd(xabs,_mm_set1_pd(4.5));
    res_erfc  = _mm_blendv_pd(res_erfcB,res_erfcC,mask);

    res_erfc  = _mm_mul_pd(res_erfc,expmx2);

    /* erfc(x<0) = 2-erfc(|x|) */
    mask      = _mm_cmplt_pd(x,_mm_setzero_pd());
    res_erfc  = _mm_blendv_pd(res_erfc,_mm_sub_pd(two,res_erfc),mask);

    /* Select erf() or erfc() */
    mask      = _mm_cmplt_pd(xabs,one);
    res       = _mm_blendv_pd(_mm_sub_pd(one,res_erfc),res_erf,mask);

    return res;
}
static gmx_inline __m128d
gmx_mm_erfc_pd(__m128d x)
{
    /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
    const __m128d CAP4     = _mm_set1_pd(-0.431780540597889301512e-4);
    const __m128d CAP3     = _mm_set1_pd(-0.00578562306260059236059);
    const __m128d CAP2     = _mm_set1_pd(-0.028593586920219752446);
    const __m128d CAP1     = _mm_set1_pd(-0.315924962948621698209);
    const __m128d CAP0     = _mm_set1_pd(0.14952975608477029151);

    const __m128d CAQ5     = _mm_set1_pd(-0.374089300177174709737e-5);
    const __m128d CAQ4     = _mm_set1_pd(0.00015126584532155383535);
    const __m128d CAQ3     = _mm_set1_pd(0.00536692680669480725423);
    const __m128d CAQ2     = _mm_set1_pd(0.0668686825594046122636);
    const __m128d CAQ1     = _mm_set1_pd(0.402604990869284362773);

    const __m128d CAoffset = _mm_set1_pd(0.9788494110107421875);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
    const __m128d CBP6     = _mm_set1_pd(2.49650423685462752497647637088e-10);
    const __m128d CBP5     = _mm_set1_pd(0.00119770193298159629350136085658);
    const __m128d CBP4     = _mm_set1_pd(0.0164944422378370965881008942733);
    const __m128d CBP3     = _mm_set1_pd(0.0984581468691775932063932439252);
    const __m128d CBP2     = _mm_set1_pd(0.317364595806937763843589437418);
    const __m128d CBP1     = _mm_set1_pd(0.554167062641455850932670067075);
    const __m128d CBP0     = _mm_set1_pd(0.427583576155807163756925301060);
    const __m128d CBQ7     = _mm_set1_pd(0.00212288829699830145976198384930);
    const __m128d CBQ6     = _mm_set1_pd(0.0334810979522685300554606393425);
    const __m128d CBQ5     = _mm_set1_pd(0.2361713785181450957579508850717);
    const __m128d CBQ4     = _mm_set1_pd(0.955364736493055670530981883072);
    const __m128d CBQ3     = _mm_set1_pd(2.36815675631420037315349279199);
    const __m128d CBQ2     = _mm_set1_pd(3.55261649184083035537184223542);
    const __m128d CBQ1     = _mm_set1_pd(2.93501136050160872574376997993);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
    const __m128d CCP6     = _mm_set1_pd(-2.8175401114513378771);
    const __m128d CCP5     = _mm_set1_pd(-3.22729451764143718517);
    const __m128d CCP4     = _mm_set1_pd(-2.5518551727311523996);
    const __m128d CCP3     = _mm_set1_pd(-0.687717681153649930619);
    const __m128d CCP2     = _mm_set1_pd(-0.212652252872804219852);
    const __m128d CCP1     = _mm_set1_pd(0.0175389834052493308818);
    const __m128d CCP0     = _mm_set1_pd(0.00628057170626964891937);

    const __m128d CCQ6     = _mm_set1_pd(5.48409182238641741584);
    const __m128d CCQ5     = _mm_set1_pd(13.5064170191802889145);
    const __m128d CCQ4     = _mm_set1_pd(22.9367376522880577224);
    const __m128d CCQ3     = _mm_set1_pd(15.930646027911794143);
    const __m128d CCQ2     = _mm_set1_pd(11.0567237927800161565);
    const __m128d CCQ1     = _mm_set1_pd(2.79257750980575282228);

    const __m128d CCoffset = _mm_set1_pd(0.5579090118408203125);

    const __m128d one      = _mm_set1_pd(1.0);
    const __m128d two      = _mm_set1_pd(2.0);

    const __m128d signbit  = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000) );

    __m128d xabs,x2,x4,t,t2,w,w2;
    __m128d PolyAP0,PolyAP1,PolyAQ0,PolyAQ1;
    __m128d PolyBP0,PolyBP1,PolyBQ0,PolyBQ1;
    __m128d PolyCP0,PolyCP1,PolyCQ0,PolyCQ1;
    __m128d res_erf,res_erfcB,res_erfcC,res_erfc,res;
    __m128d mask,expmx2;

    /* Calculate erf() */
    xabs      = gmx_mm_abs_pd(x);
    x2        = _mm_mul_pd(x,x);
    x4        = _mm_mul_pd(x2,x2);

    PolyAP0   = _mm_mul_pd(CAP4,x4);
    PolyAP1   = _mm_mul_pd(CAP3,x4);
    PolyAP0   = _mm_add_pd(PolyAP0,CAP2);
    PolyAP1   = _mm_add_pd(PolyAP1,CAP1);
    PolyAP0   = _mm_mul_pd(PolyAP0,x4);
    PolyAP1   = _mm_mul_pd(PolyAP1,x2);
    PolyAP0   = _mm_add_pd(PolyAP0,CAP0);
    PolyAP0   = _mm_add_pd(PolyAP0,PolyAP1);

    PolyAQ1   = _mm_mul_pd(CAQ5,x4);
    PolyAQ0   = _mm_mul_pd(CAQ4,x4);
    PolyAQ1   = _mm_add_pd(PolyAQ1,CAQ3);
    PolyAQ0   = _mm_add_pd(PolyAQ0,CAQ2);
    PolyAQ1   = _mm_mul_pd(PolyAQ1,x4);
    PolyAQ0   = _mm_mul_pd(PolyAQ0,x4);
    PolyAQ1   = _mm_add_pd(PolyAQ1,CAQ1);
    PolyAQ0   = _mm_add_pd(PolyAQ0,one);
    PolyAQ1   = _mm_mul_pd(PolyAQ1,x2);
    PolyAQ0   = _mm_add_pd(PolyAQ0,PolyAQ1);

    res_erf   = _mm_mul_pd(PolyAP0,gmx_mm_inv_pd(PolyAQ0));
    res_erf   = _mm_add_pd(CAoffset,res_erf);
    res_erf   = _mm_mul_pd(x,res_erf);

    /* Calculate erfc() in range [1,4.5] */
    t         = _mm_sub_pd(xabs,one);
    t2        = _mm_mul_pd(t,t);

    PolyBP0   = _mm_mul_pd(CBP6,t2);
    PolyBP1   = _mm_mul_pd(CBP5,t2);
    PolyBP0   = _mm_add_pd(PolyBP0,CBP4);
    PolyBP1   = _mm_add_pd(PolyBP1,CBP3);
    PolyBP0   = _mm_mul_pd(PolyBP0,t2);
    PolyBP1   = _mm_mul_pd(PolyBP1,t2);
    PolyBP0   = _mm_add_pd(PolyBP0,CBP2);
    PolyBP1   = _mm_add_pd(PolyBP1,CBP1);
    PolyBP0   = _mm_mul_pd(PolyBP0,t2);
    PolyBP1   = _mm_mul_pd(PolyBP1,t);
    PolyBP0   = _mm_add_pd(PolyBP0,CBP0);
    PolyBP0   = _mm_add_pd(PolyBP0,PolyBP1);

    PolyBQ1   = _mm_mul_pd(CBQ7,t2);
    PolyBQ0   = _mm_mul_pd(CBQ6,t2);
    PolyBQ1   = _mm_add_pd(PolyBQ1,CBQ5);
    PolyBQ0   = _mm_add_pd(PolyBQ0,CBQ4);
    PolyBQ1   = _mm_mul_pd(PolyBQ1,t2);
    PolyBQ0   = _mm_mul_pd(PolyBQ0,t2);
    PolyBQ1   = _mm_add_pd(PolyBQ1,CBQ3);
    PolyBQ0   = _mm_add_pd(PolyBQ0,CBQ2);
    PolyBQ1   = _mm_mul_pd(PolyBQ1,t2);
    PolyBQ0   = _mm_mul_pd(PolyBQ0,t2);
    PolyBQ1   = _mm_add_pd(PolyBQ1,CBQ1);
    PolyBQ0   = _mm_add_pd(PolyBQ0,one);
    PolyBQ1   = _mm_mul_pd(PolyBQ1,t);
    PolyBQ0   = _mm_add_pd(PolyBQ0,PolyBQ1);

    res_erfcB = _mm_mul_pd(PolyBP0,gmx_mm_inv_pd(PolyBQ0));

    res_erfcB = _mm_mul_pd(res_erfcB,xabs);

    /* Calculate erfc() in range [4.5,inf] */
    w         = gmx_mm_inv_pd(xabs);
    w2        = _mm_mul_pd(w,w);

    PolyCP0   = _mm_mul_pd(CCP6,w2);
    PolyCP1   = _mm_mul_pd(CCP5,w2);
    PolyCP0   = _mm_add_pd(PolyCP0,CCP4);
    PolyCP1   = _mm_add_pd(PolyCP1,CCP3);
    PolyCP0   = _mm_mul_pd(PolyCP0,w2);
    PolyCP1   = _mm_mul_pd(PolyCP1,w2);
    PolyCP0   = _mm_add_pd(PolyCP0,CCP2);
    PolyCP1   = _mm_add_pd(PolyCP1,CCP1);
    PolyCP0   = _mm_mul_pd(PolyCP0,w2);
    PolyCP1   = _mm_mul_pd(PolyCP1,w);
    PolyCP0   = _mm_add_pd(PolyCP0,CCP0);
    PolyCP0   = _mm_add_pd(PolyCP0,PolyCP1);

    PolyCQ0   = _mm_mul_pd(CCQ6,w2);
    PolyCQ1   = _mm_mul_pd(CCQ5,w2);
    PolyCQ0   = _mm_add_pd(PolyCQ0,CCQ4);
    PolyCQ1   = _mm_add_pd(PolyCQ1,CCQ3);
    PolyCQ0   = _mm_mul_pd(PolyCQ0,w2);
    PolyCQ1   = _mm_mul_pd(PolyCQ1,w2);
    PolyCQ0   = _mm_add_pd(PolyCQ0,CCQ2);
    PolyCQ1   = _mm_add_pd(PolyCQ1,CCQ1);
    PolyCQ0   = _mm_mul_pd(PolyCQ0,w2);
    PolyCQ1   = _mm_mul_pd(PolyCQ1,w);
    PolyCQ0   = _mm_add_pd(PolyCQ0,one);
    PolyCQ0   = _mm_add_pd(PolyCQ0,PolyCQ1);

    /* Calculate exp(-x^2) */
    expmx2    = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );

    res_erfcC = _mm_mul_pd(PolyCP0,gmx_mm_inv_pd(PolyCQ0));
    res_erfcC = _mm_add_pd(res_erfcC,CCoffset);
    res_erfcC = _mm_mul_pd(res_erfcC,w);

    mask      = _mm_cmpgt_pd(xabs,_mm_set1_pd(4.5));
    res_erfc  = _mm_blendv_pd(res_erfcB,res_erfcC,mask);

    res_erfc  = _mm_mul_pd(res_erfc,expmx2);

    /* erfc(x<0) = 2-erfc(|x|) */
    mask      = _mm_cmplt_pd(x,_mm_setzero_pd());
    res_erfc  = _mm_blendv_pd(res_erfc,_mm_sub_pd(two,res_erfc),mask);

    /* Select erf() or erfc() */
    mask      = _mm_cmplt_pd(xabs,one);
    res       = _mm_blendv_pd(res_erfc,_mm_sub_pd(one,res_erf),mask);

    return res;
}
/* Calculate the force correction due to PME analytically.
 *
 * This routine is meant to enable analytical evaluation of the
 * direct-space PME electrostatic force to avoid tables.
 *
 * The direct-space potential should be Erfc(beta*r)/r, but there
 * are some problems evaluating that:
 *
 * First, the error function is difficult (read: expensive) to
 * approximate accurately for intermediate to large arguments, and
 * this happens already in ranges of beta*r that occur in simulations.
 * Second, we now try to avoid calculating potentials in Gromacs but
 * use forces directly.
 *
 * We can simplify things slightly by noting that the PME part is really
 * a correction to the normal Coulomb force since Erfc(z)=1-Erf(z), i.e.
 *
 * V = 1/r - Erf(beta*r)/r
 *
 * The first term we already have from the inverse square root, so
 * that we can leave out of this routine.
 *
 * For pme tolerances of 1e-3 to 1e-8 and cutoffs of 0.5nm to 1.8nm,
 * the argument beta*r will be in the range 0.15 to ~4. Use your
 * favorite plotting program to realize how well-behaved Erf(z)/z is
 * in this range!
 *
 * We approximate f(z)=erf(z)/z with a rational minimax polynomial.
 * However, it turns out it is more efficient to approximate f(z)/z and
 * then only use even powers. This is another minor optimization, since
 * we actually WANT f(z)/z, because it is going to be multiplied by
 * the vector between the two atoms to get the vectorial force. The
 * fastest flops are the ones we can avoid calculating!
 *
 * So, here's how it should be used:
 *
 * 1. Calculate r^2.
 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
 * 3. Evaluate this routine with z^2 as the argument.
 * 4. The return value is the expression:
 *
 *       2*exp(-z^2)     erf(z)
 *       ------------ - --------
 *       sqrt(Pi)*z^2      z^3
 *
 * 5. Multiply the entire expression by beta^3. This will get you
 *
 *       beta^3*2*exp(-z^2)     beta^3*erf(z)
 *       ------------------  - ---------------
 *          sqrt(Pi)*z^2             z^3
 *
 *    or, switching back to r (z=r*beta):
 *
 *       2*beta*exp(-r^2*beta^2)   erf(r*beta)
 *       ----------------------- - -----------
 *          sqrt(Pi)*r^2               r^3
 *
 *    With a bit of math exercise you should be able to confirm that
 *    this is exactly D[Erf[beta*r]/r,r] divided by r another time.
 *
 * 6. Add the result to 1/r^3, multiply by the product of the charges,
 *    and you have your force (divided by r). A final multiplication
 *    with the vector connecting the two particles and you have your
 *    vectorial force to add to the particles.
 */
static gmx_inline __m128d
gmx_mm_pmecorrF_pd(__m128d z2)
{
    const __m128d  FN10     = _mm_set1_pd(-8.0072854618360083154e-14);
    const __m128d  FN9      = _mm_set1_pd(1.1859116242260148027e-11);
    const __m128d  FN8      = _mm_set1_pd(-8.1490406329798423616e-10);
    const __m128d  FN7      = _mm_set1_pd(3.4404793543907847655e-8);
    const __m128d  FN6      = _mm_set1_pd(-9.9471420832602741006e-7);
    const __m128d  FN5      = _mm_set1_pd(0.000020740315999115847456);
    const __m128d  FN4      = _mm_set1_pd(-0.00031991745139313364005);
    const __m128d  FN3      = _mm_set1_pd(0.0035074449373659008203);
    const __m128d  FN2      = _mm_set1_pd(-0.031750380176100813405);
    const __m128d  FN1      = _mm_set1_pd(0.13884101728898463426);
    const __m128d  FN0      = _mm_set1_pd(-0.75225277815249618847);

    const __m128d  FD5      = _mm_set1_pd(0.000016009278224355026701);
    const __m128d  FD4      = _mm_set1_pd(0.00051055686934806966046);
    const __m128d  FD3      = _mm_set1_pd(0.0081803507497974289008);
    const __m128d  FD2      = _mm_set1_pd(0.077181146026670287235);
    const __m128d  FD1      = _mm_set1_pd(0.41543303143712535988);
    const __m128d  FD0      = _mm_set1_pd(1.0);

    __m128d z4;
    __m128d polyFN0,polyFN1,polyFD0,polyFD1;

    z4        = _mm_mul_pd(z2,z2);

    polyFD1   = _mm_mul_pd(FD5,z4);
    polyFD0   = _mm_mul_pd(FD4,z4);
    polyFD1   = _mm_add_pd(polyFD1,FD3);
    polyFD0   = _mm_add_pd(polyFD0,FD2);
    polyFD1   = _mm_mul_pd(polyFD1,z4);
    polyFD0   = _mm_mul_pd(polyFD0,z4);
    polyFD1   = _mm_add_pd(polyFD1,FD1);
    polyFD0   = _mm_add_pd(polyFD0,FD0);
    polyFD1   = _mm_mul_pd(polyFD1,z2);
    polyFD0   = _mm_add_pd(polyFD0,polyFD1);

    polyFD0   = gmx_mm_inv_pd(polyFD0);

    polyFN0   = _mm_mul_pd(FN10,z4);
    polyFN1   = _mm_mul_pd(FN9,z4);
    polyFN0   = _mm_add_pd(polyFN0,FN8);
    polyFN1   = _mm_add_pd(polyFN1,FN7);
    polyFN0   = _mm_mul_pd(polyFN0,z4);
    polyFN1   = _mm_mul_pd(polyFN1,z4);
    polyFN0   = _mm_add_pd(polyFN0,FN6);
    polyFN1   = _mm_add_pd(polyFN1,FN5);
    polyFN0   = _mm_mul_pd(polyFN0,z4);
    polyFN1   = _mm_mul_pd(polyFN1,z4);
    polyFN0   = _mm_add_pd(polyFN0,FN4);
    polyFN1   = _mm_add_pd(polyFN1,FN3);
    polyFN0   = _mm_mul_pd(polyFN0,z4);
    polyFN1   = _mm_mul_pd(polyFN1,z4);
    polyFN0   = _mm_add_pd(polyFN0,FN2);
    polyFN1   = _mm_add_pd(polyFN1,FN1);
    polyFN0   = _mm_mul_pd(polyFN0,z4);
    polyFN1   = _mm_mul_pd(polyFN1,z2);
    polyFN0   = _mm_add_pd(polyFN0,FN0);
    polyFN0   = _mm_add_pd(polyFN0,polyFN1);

    return _mm_mul_pd(polyFN0,polyFD0);
}
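
/* Added usage sketch following the numbered steps in the comment above. Illustrative only:
 * the helper name, the variable names and the idea of passing beta as a scalar are
 * assumptions, not part of the original header.
 */
static gmx_inline __m128d
gmx_mm_pmecorrF_usage_sketch(__m128d rsq, __m128d rinv, __m128d qq, double beta)
{
    const __m128d beta2 = _mm_set1_pd(beta*beta);
    const __m128d beta3 = _mm_set1_pd(beta*beta*beta);
    __m128d       zsq,corr,rinv3;

    zsq   = _mm_mul_pd(rsq,beta2);                  /* steps 1-2: z^2 = beta^2*r^2                        */
    corr  = gmx_mm_pmecorrF_pd(zsq);                /* steps 3-4: 2*exp(-z^2)/(sqrt(Pi)*z^2) - erf(z)/z^3 */
    corr  = _mm_mul_pd(corr,beta3);                 /* step 5: scale by beta^3                            */
    rinv3 = _mm_mul_pd(rinv,_mm_mul_pd(rinv,rinv)); /* 1/r^3 from the plain Coulomb part                  */
    return _mm_mul_pd(qq,_mm_add_pd(rinv3,corr));   /* step 6: scalar force divided by r                  */
}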
/* Calculate the potential correction due to PME analytically.
 *
 * See gmx_mm_pmecorrF_pd() for details about the approximation.
 *
 * This routine calculates Erf(z)/z, although you should provide z^2
 * as the input argument.
 *
 * Here's how it should be used:
 *
 * 1. Calculate r^2.
 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
 * 3. Evaluate this routine with z^2 as the argument.
 * 4. The return value is the expression:
 *
 *        erf(z)
 *       --------
 *          z
 *
 * 5. Multiply the entire expression by beta and switching back to r (z=r*beta):
 *
 *       erf(r*beta)
 *       -----------
 *            r
 *
 * 6. Add the result to 1/r, multiply by the product of the charges,
 *    and you have your potential.
 */
static gmx_inline __m128d
gmx_mm_pmecorrV_pd(__m128d z2)
{
    const __m128d  VN9      = _mm_set1_pd(-9.3723776169321855475e-13);
    const __m128d  VN8      = _mm_set1_pd(1.2280156762674215741e-10);
    const __m128d  VN7      = _mm_set1_pd(-7.3562157912251309487e-9);
    const __m128d  VN6      = _mm_set1_pd(2.6215886208032517509e-7);
    const __m128d  VN5      = _mm_set1_pd(-4.9532491651265819499e-6);
    const __m128d  VN4      = _mm_set1_pd(0.00025907400778966060389);
    const __m128d  VN3      = _mm_set1_pd(0.0010585044856156469792);
    const __m128d  VN2      = _mm_set1_pd(0.045247661136833092885);
    const __m128d  VN1      = _mm_set1_pd(0.11643931522926034421);
    const __m128d  VN0      = _mm_set1_pd(1.1283791671726767970);

    const __m128d  VD5      = _mm_set1_pd(0.000021784709867336150342);
    const __m128d  VD4      = _mm_set1_pd(0.00064293662010911388448);
    const __m128d  VD3      = _mm_set1_pd(0.0096311444822588683504);
    const __m128d  VD2      = _mm_set1_pd(0.085608012351550627051);
    const __m128d  VD1      = _mm_set1_pd(0.43652499166614811084);
    const __m128d  VD0      = _mm_set1_pd(1.0);

    __m128d z4;
    __m128d polyVN0,polyVN1,polyVD0,polyVD1;

    z4        = _mm_mul_pd(z2,z2);

    polyVD1   = _mm_mul_pd(VD5,z4);
    polyVD0   = _mm_mul_pd(VD4,z4);
    polyVD1   = _mm_add_pd(polyVD1,VD3);
    polyVD0   = _mm_add_pd(polyVD0,VD2);
    polyVD1   = _mm_mul_pd(polyVD1,z4);
    polyVD0   = _mm_mul_pd(polyVD0,z4);
    polyVD1   = _mm_add_pd(polyVD1,VD1);
    polyVD0   = _mm_add_pd(polyVD0,VD0);
    polyVD1   = _mm_mul_pd(polyVD1,z2);
    polyVD0   = _mm_add_pd(polyVD0,polyVD1);

    polyVD0   = gmx_mm_inv_pd(polyVD0);

    polyVN1   = _mm_mul_pd(VN9,z4);
    polyVN0   = _mm_mul_pd(VN8,z4);
    polyVN1   = _mm_add_pd(polyVN1,VN7);
    polyVN0   = _mm_add_pd(polyVN0,VN6);
    polyVN1   = _mm_mul_pd(polyVN1,z4);
    polyVN0   = _mm_mul_pd(polyVN0,z4);
    polyVN1   = _mm_add_pd(polyVN1,VN5);
    polyVN0   = _mm_add_pd(polyVN0,VN4);
    polyVN1   = _mm_mul_pd(polyVN1,z4);
    polyVN0   = _mm_mul_pd(polyVN0,z4);
    polyVN1   = _mm_add_pd(polyVN1,VN3);
    polyVN0   = _mm_add_pd(polyVN0,VN2);
    polyVN1   = _mm_mul_pd(polyVN1,z4);
    polyVN0   = _mm_mul_pd(polyVN0,z4);
    polyVN1   = _mm_add_pd(polyVN1,VN1);
    polyVN0   = _mm_add_pd(polyVN0,VN0);
    polyVN1   = _mm_mul_pd(polyVN1,z2);
    polyVN0   = _mm_add_pd(polyVN0,polyVN1);

    return _mm_mul_pd(polyVN0,polyVD0);
}
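
/* Added usage sketch mirroring the numbered steps above (illustrative; names are
 * assumptions). It returns the erf(beta*r)/r term of step 5, which is then combined with
 * the plain 1/r Coulomb term and the charge product as described in step 6.
 */
static gmx_inline __m128d
gmx_mm_pmecorrV_usage_sketch(__m128d rsq, double beta)
{
    const __m128d beta2     = _mm_set1_pd(beta*beta);
    const __m128d beta_simd = _mm_set1_pd(beta);
    __m128d       zsq,vcorr;

    zsq   = _mm_mul_pd(rsq,beta2);          /* steps 1-2: z^2 = beta^2*r^2           */
    vcorr = gmx_mm_pmecorrV_pd(zsq);        /* steps 3-4: erf(z)/z                   */
    return _mm_mul_pd(vcorr,beta_simd);     /* step 5: beta*erf(z)/z = erf(beta*r)/r */
}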
static gmx_inline void
gmx_mm_sincos_pd(__m128d x,
                 __m128d *sinval,
                 __m128d *cosval)
{
    /* On MSVC the lookup table is stored as an aligned array of doubles; elsewhere it is
     * built directly as __m128d constants from sin()/cos() of the 16 table points.
     */
#ifdef _MSC_VER
    __declspec(align(16))
    const double sintable[34] =
    {
        1.00000000000000000e+00 , 0.00000000000000000e+00 ,
        9.95184726672196929e-01 , 9.80171403295606036e-02 ,
        9.80785280403230431e-01 , 1.95090322016128248e-01 ,
        9.56940335732208824e-01 , 2.90284677254462331e-01 ,
        9.23879532511286738e-01 , 3.82683432365089782e-01 ,
        8.81921264348355050e-01 , 4.71396736825997642e-01 ,
        8.31469612302545236e-01 , 5.55570233019602178e-01 ,
        7.73010453362736993e-01 , 6.34393284163645488e-01 ,
        7.07106781186547573e-01 , 7.07106781186547462e-01 ,
        6.34393284163645599e-01 , 7.73010453362736882e-01 ,
        5.55570233019602289e-01 , 8.31469612302545125e-01 ,
        4.71396736825997809e-01 , 8.81921264348354939e-01 ,
        3.82683432365089837e-01 , 9.23879532511286738e-01 ,
        2.90284677254462276e-01 , 9.56940335732208935e-01 ,
        1.95090322016128304e-01 , 9.80785280403230431e-01 ,
        9.80171403295607702e-02 , 9.95184726672196818e-01 ,
        0.0 , 1.00000000000000000e+00
    };
#else
    const __m128d sintable[17] =
    {
        _mm_set_pd( 0.0 , 1.0 ),
        _mm_set_pd( sin(  1.0 * (M_PI/2.0) / 16.0) , cos(  1.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  2.0 * (M_PI/2.0) / 16.0) , cos(  2.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  3.0 * (M_PI/2.0) / 16.0) , cos(  3.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  4.0 * (M_PI/2.0) / 16.0) , cos(  4.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  5.0 * (M_PI/2.0) / 16.0) , cos(  5.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  6.0 * (M_PI/2.0) / 16.0) , cos(  6.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  7.0 * (M_PI/2.0) / 16.0) , cos(  7.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  8.0 * (M_PI/2.0) / 16.0) , cos(  8.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  9.0 * (M_PI/2.0) / 16.0) , cos(  9.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 10.0 * (M_PI/2.0) / 16.0) , cos( 10.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 11.0 * (M_PI/2.0) / 16.0) , cos( 11.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 12.0 * (M_PI/2.0) / 16.0) , cos( 12.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 13.0 * (M_PI/2.0) / 16.0) , cos( 13.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 14.0 * (M_PI/2.0) / 16.0) , cos( 14.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 15.0 * (M_PI/2.0) / 16.0) , cos( 15.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( 1.0 , 0.0 )
    };
#endif

    const __m128d signmask      = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m128i signbit_epi32 = _mm_set1_epi32(0x80000000);

    const __m128d tabscale      = _mm_set1_pd(32.0/M_PI);
    const __m128d invtabscale0  = _mm_set1_pd(9.81747508049011230469e-02);
    const __m128d invtabscale1  = _mm_set1_pd(1.96197799156550576057e-08);
    const __m128i ione          = _mm_set1_epi32(1);
    const __m128i i32           = _mm_set1_epi32(32);
    const __m128i i16           = _mm_set1_epi32(16);
    const __m128i tabmask       = _mm_set1_epi32(0x3F);
    const __m128d sinP7         = _mm_set1_pd(-1.0/5040.0);
    const __m128d sinP5         = _mm_set1_pd(1.0/120.0);
    const __m128d sinP3         = _mm_set1_pd(-1.0/6.0);
    const __m128d sinP1         = _mm_set1_pd(1.0);

    const __m128d cosP6         = _mm_set1_pd(-1.0/720.0);
    const __m128d cosP4         = _mm_set1_pd(1.0/24.0);
    const __m128d cosP2         = _mm_set1_pd(-1.0/2.0);
    const __m128d cosP0         = _mm_set1_pd(1.0);

    __m128d scalex;
    __m128i tabidx,corridx;
    __m128d xabs,z,z2,polySin,polyCos;
    __m128d xpoint;
    __m128d ypoint0,ypoint1;

    __m128d sinpoint,cospoint;
    __m128d xsign,ssign,csign;
    __m128i imask,sswapsign,cswapsign;

    xsign     = _mm_andnot_pd(signmask,x);
    xabs      = _mm_and_pd(x,signmask);

    scalex    = _mm_mul_pd(tabscale,xabs);
    tabidx    = _mm_cvtpd_epi32(scalex);

    xpoint    = _mm_round_pd(scalex,_MM_FROUND_TO_NEAREST_INT);

    /* Extended precision arithmetics */
    z         = _mm_sub_pd(xabs,_mm_mul_pd(invtabscale0,xpoint));
    z         = _mm_sub_pd(z,_mm_mul_pd(invtabscale1,xpoint));

    /* Range reduction to 0..2*Pi */
    tabidx    = _mm_and_si128(tabidx,tabmask);

    /* tabidx is now in range [0,..,64] */
    imask     = _mm_cmpgt_epi32(tabidx,i32);
    sswapsign = imask;   /* remember quadrant sign flips; needed before the xor below */
    cswapsign = imask;
    corridx   = _mm_and_si128(imask,i32);
    tabidx    = _mm_sub_epi32(tabidx,corridx);

    /* tabidx is now in range [0..32] */
    imask     = _mm_cmpgt_epi32(tabidx,i16);
    cswapsign = _mm_xor_si128(cswapsign,imask);
    corridx   = _mm_sub_epi32(i32,tabidx);
    tabidx    = _mm_blendv_epi8(tabidx,corridx,imask);
    /* tabidx is now in range [0..16] */
    ssign     = _mm_cvtepi32_pd( _mm_or_si128( sswapsign , ione ) );
    csign     = _mm_cvtepi32_pd( _mm_or_si128( cswapsign , ione ) );

#ifdef _MSC_VER
    ypoint0   = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,0));
    ypoint1   = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,1));
#else
    ypoint0   = sintable[_mm_extract_epi32(tabidx,0)];
    ypoint1   = sintable[_mm_extract_epi32(tabidx,1)];
#endif
    sinpoint  = _mm_unpackhi_pd(ypoint0,ypoint1);
    cospoint  = _mm_unpacklo_pd(ypoint0,ypoint1);

    sinpoint  = _mm_mul_pd(sinpoint,ssign);
    cospoint  = _mm_mul_pd(cospoint,csign);

    z2        = _mm_mul_pd(z,z);

    polySin   = _mm_mul_pd(sinP7,z2);
    polySin   = _mm_add_pd(polySin,sinP5);
    polySin   = _mm_mul_pd(polySin,z2);
    polySin   = _mm_add_pd(polySin,sinP3);
    polySin   = _mm_mul_pd(polySin,z2);
    polySin   = _mm_add_pd(polySin,sinP1);
    polySin   = _mm_mul_pd(polySin,z);

    polyCos   = _mm_mul_pd(cosP6,z2);
    polyCos   = _mm_add_pd(polyCos,cosP4);
    polyCos   = _mm_mul_pd(polyCos,z2);
    polyCos   = _mm_add_pd(polyCos,cosP2);
    polyCos   = _mm_mul_pd(polyCos,z2);
    polyCos   = _mm_add_pd(polyCos,cosP0);

    *sinval   = _mm_xor_pd(_mm_add_pd( _mm_mul_pd(sinpoint,polyCos) , _mm_mul_pd(cospoint,polySin) ),xsign);
    *cosval   = _mm_sub_pd( _mm_mul_pd(cospoint,polyCos) , _mm_mul_pd(sinpoint,polySin) );
}
/*
 * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
 * will then call the sincos() routine and waste a factor 2 in performance!
 */
static gmx_inline __m128d
gmx_mm_sin_pd(__m128d x)
{
    __m128d s,c;
    gmx_mm_sincos_pd(x,&s,&c);
    return s;
}

/*
 * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
 * will then call the sincos() routine and waste a factor 2 in performance!
 */
static gmx_inline __m128d
gmx_mm_cos_pd(__m128d x)
{
    __m128d s,c;
    gmx_mm_sincos_pd(x,&s,&c);
    return c;
}
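
/* Added usage sketch (illustrative, not part of the original header): when both values are
 * needed, call gmx_mm_sincos_pd() once instead of gmx_mm_sin_pd() plus gmx_mm_cos_pd(),
 * which would run the table lookup and polynomial evaluation twice.
 */
static gmx_inline __m128d
gmx_mm_sincos_usage_sketch(__m128d phi)
{
    __m128d s,c;
    gmx_mm_sincos_pd(phi,&s,&c);                          /* one call yields both           */
    return _mm_add_pd(_mm_mul_pd(s,s),_mm_mul_pd(c,c));   /* e.g. sin^2+cos^2, should be ~1 */
}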
static gmx_inline __m128d
gmx_mm_tan_pd(__m128d x)
{
    __m128d sinval,cosval;
    __m128d tanval;

    gmx_mm_sincos_pd(x,&sinval,&cosval);

    tanval = _mm_mul_pd(sinval,gmx_mm_inv_pd(cosval));

    return tanval;
}
static gmx_inline __m128d
gmx_mm_asin_pd(__m128d x)
{
    /* Same algorithm as cephes library */
    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m128d limit1    = _mm_set1_pd(0.625);
    const __m128d limit2    = _mm_set1_pd(1e-8);
    const __m128d one       = _mm_set1_pd(1.0);
    const __m128d halfpi    = _mm_set1_pd(M_PI/2.0);
    const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
    const __m128d morebits  = _mm_set1_pd(6.123233995736765886130e-17);

    const __m128d P5        = _mm_set1_pd(4.253011369004428248960e-3);
    const __m128d P4        = _mm_set1_pd(-6.019598008014123785661e-1);
    const __m128d P3        = _mm_set1_pd(5.444622390564711410273e0);
    const __m128d P2        = _mm_set1_pd(-1.626247967210700244449e1);
    const __m128d P1        = _mm_set1_pd(1.956261983317594739197e1);
    const __m128d P0        = _mm_set1_pd(-8.198089802484824371615e0);

    const __m128d Q4        = _mm_set1_pd(-1.474091372988853791896e1);
    const __m128d Q3        = _mm_set1_pd(7.049610280856842141659e1);
    const __m128d Q2        = _mm_set1_pd(-1.471791292232726029859e2);
    const __m128d Q1        = _mm_set1_pd(1.395105614657485689735e2);
    const __m128d Q0        = _mm_set1_pd(-4.918853881490881290097e1);

    const __m128d R4        = _mm_set1_pd(2.967721961301243206100e-3);
    const __m128d R3        = _mm_set1_pd(-5.634242780008963776856e-1);
    const __m128d R2        = _mm_set1_pd(6.968710824104713396794e0);
    const __m128d R1        = _mm_set1_pd(-2.556901049652824852289e1);
    const __m128d R0        = _mm_set1_pd(2.853665548261061424989e1);

    const __m128d S3        = _mm_set1_pd(-2.194779531642920639778e1);
    const __m128d S2        = _mm_set1_pd(1.470656354026814941758e2);
    const __m128d S1        = _mm_set1_pd(-3.838770957603691357202e2);
    const __m128d S0        = _mm_set1_pd(3.424398657913078477438e2);

    __m128d sign;
    __m128d mask;
    __m128d xabs;
    __m128d zz,ww,z,q,w,y,zz2,ww2;
    __m128d PA,PB;
    __m128d QA,QB;
    __m128d RA,RB;
    __m128d SA,SB;
    __m128d nom,denom;

    sign  = _mm_andnot_pd(signmask,x);
    xabs  = _mm_and_pd(x,signmask);

    mask  = _mm_cmpgt_pd(xabs,limit1);

    zz    = _mm_sub_pd(one,xabs);
    ww    = _mm_mul_pd(xabs,xabs);
    zz2   = _mm_mul_pd(zz,zz);
    ww2   = _mm_mul_pd(ww,ww);

    /* R */
    RA    = _mm_mul_pd(R4,zz2);
    RB    = _mm_mul_pd(R3,zz2);
    RA    = _mm_add_pd(RA,R2);
    RB    = _mm_add_pd(RB,R1);
    RA    = _mm_mul_pd(RA,zz2);
    RB    = _mm_mul_pd(RB,zz);
    RA    = _mm_add_pd(RA,R0);
    RA    = _mm_add_pd(RA,RB);

    /* S, SA = zz2 */
    SB    = _mm_mul_pd(S3,zz2);
    SA    = _mm_add_pd(zz2,S2);
    SB    = _mm_add_pd(SB,S1);
    SA    = _mm_mul_pd(SA,zz2);
    SB    = _mm_mul_pd(SB,zz);
    SA    = _mm_add_pd(SA,S0);
    SA    = _mm_add_pd(SA,SB);

    /* P */
    PA    = _mm_mul_pd(P5,ww2);
    PB    = _mm_mul_pd(P4,ww2);
    PA    = _mm_add_pd(PA,P3);
    PB    = _mm_add_pd(PB,P2);
    PA    = _mm_mul_pd(PA,ww2);
    PB    = _mm_mul_pd(PB,ww2);
    PA    = _mm_add_pd(PA,P1);
    PB    = _mm_add_pd(PB,P0);
    PA    = _mm_mul_pd(PA,ww);
    PA    = _mm_add_pd(PA,PB);

    /* Q, QA = ww2 */
    QB    = _mm_mul_pd(Q4,ww2);
    QA    = _mm_add_pd(ww2,Q3);
    QB    = _mm_add_pd(QB,Q2);
    QA    = _mm_mul_pd(QA,ww2);
    QB    = _mm_mul_pd(QB,ww2);
    QA    = _mm_add_pd(QA,Q1);
    QB    = _mm_add_pd(QB,Q0);
    QA    = _mm_mul_pd(QA,ww);
    QA    = _mm_add_pd(QA,QB);

    RA    = _mm_mul_pd(RA,zz);
    PA    = _mm_mul_pd(PA,ww);

    nom   = _mm_blendv_pd( PA,RA,mask );
    denom = _mm_blendv_pd( QA,SA,mask );

    q     = _mm_mul_pd( nom, gmx_mm_inv_pd(denom) );

    zz    = _mm_add_pd(zz,zz);
    zz    = gmx_mm_sqrt_pd(zz);
    z     = _mm_sub_pd(quarterpi,zz);
    zz    = _mm_mul_pd(zz,q);
    zz    = _mm_sub_pd(zz,morebits);
    z     = _mm_sub_pd(z,zz);
    z     = _mm_add_pd(z,quarterpi);

    w     = _mm_mul_pd(xabs,q);
    w     = _mm_add_pd(w,xabs);

    z     = _mm_blendv_pd( w,z,mask );

    mask  = _mm_cmpgt_pd(xabs,limit2);
    z     = _mm_blendv_pd( xabs,z,mask );

    z     = _mm_xor_pd(z,sign);

    return z;
}
static gmx_inline __m128d
gmx_mm_acos_pd(__m128d x)
{
    const __m128d signmask   = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m128d one        = _mm_set1_pd(1.0);
    const __m128d half       = _mm_set1_pd(0.5);
    const __m128d pi         = _mm_set1_pd(M_PI);
    const __m128d quarterpi0 = _mm_set1_pd(7.85398163397448309616e-1);
    const __m128d quarterpi1 = _mm_set1_pd(6.123233995736765886130e-17);

    __m128d mask1;
    __m128d z,z1,z2;

    mask1 = _mm_cmpgt_pd(x,half);
    z1    = _mm_mul_pd(half,_mm_sub_pd(one,x));
    z1    = gmx_mm_sqrt_pd(z1);
    z     = _mm_blendv_pd( x,z1,mask1 );

    z     = gmx_mm_asin_pd(z);

    z1    = _mm_add_pd(z,z);

    z2    = _mm_sub_pd(quarterpi0,z);
    z2    = _mm_add_pd(z2,quarterpi1);
    z2    = _mm_add_pd(z2,quarterpi0);

    z     = _mm_blendv_pd(z2,z1,mask1);

    return z;
}
static gmx_inline __m128d
gmx_mm_atan_pd(__m128d x)
{
    /* Same algorithm as cephes library */
    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m128d limit1    = _mm_set1_pd(0.66);
    const __m128d limit2    = _mm_set1_pd(2.41421356237309504880);
    const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
    const __m128d halfpi    = _mm_set1_pd(M_PI/2.0);
    const __m128d mone      = _mm_set1_pd(-1.0);
    const __m128d morebits1 = _mm_set1_pd(0.5*6.123233995736765886130E-17);
    const __m128d morebits2 = _mm_set1_pd(6.123233995736765886130E-17);

    const __m128d P4        = _mm_set1_pd(-8.750608600031904122785E-1);
    const __m128d P3        = _mm_set1_pd(-1.615753718733365076637E1);
    const __m128d P2        = _mm_set1_pd(-7.500855792314704667340E1);
    const __m128d P1        = _mm_set1_pd(-1.228866684490136173410E2);
    const __m128d P0        = _mm_set1_pd(-6.485021904942025371773E1);

    const __m128d Q4        = _mm_set1_pd(2.485846490142306297962E1);
    const __m128d Q3        = _mm_set1_pd(1.650270098316988542046E2);
    const __m128d Q2        = _mm_set1_pd(4.328810604912902668951E2);
    const __m128d Q1        = _mm_set1_pd(4.853903996359136964868E2);
    const __m128d Q0        = _mm_set1_pd(1.945506571482613964425E2);

    __m128d sign;
    __m128d mask1,mask2;
    __m128d y,z,z2;
    __m128d t1,t2;
    __m128d P_A,P_B,Q_A,Q_B;

    sign  = _mm_andnot_pd(signmask,x);
    x     = _mm_and_pd(x,signmask);

    mask1 = _mm_cmpgt_pd(x,limit1);
    mask2 = _mm_cmpgt_pd(x,limit2);

    t1    = _mm_mul_pd(_mm_add_pd(x,mone),gmx_mm_inv_pd(_mm_sub_pd(x,mone)));
    t2    = _mm_mul_pd(mone,gmx_mm_inv_pd(x));

    y     = _mm_and_pd(mask1,quarterpi);
    y     = _mm_or_pd( _mm_and_pd(mask2,halfpi) , _mm_andnot_pd(mask2,y) );

    x     = _mm_or_pd( _mm_and_pd(mask1,t1) , _mm_andnot_pd(mask1,x) );
    x     = _mm_or_pd( _mm_and_pd(mask2,t2) , _mm_andnot_pd(mask2,x) );

    z     = _mm_mul_pd(x,x);
    z2    = _mm_mul_pd(z,z);

    P_A   = _mm_mul_pd(P4,z2);
    P_B   = _mm_mul_pd(P3,z2);
    P_A   = _mm_add_pd(P_A,P2);
    P_B   = _mm_add_pd(P_B,P1);
    P_A   = _mm_mul_pd(P_A,z2);
    P_B   = _mm_mul_pd(P_B,z);
    P_A   = _mm_add_pd(P_A,P0);
    P_A   = _mm_add_pd(P_A,P_B);

    /* Q_A = z2 */
    Q_B   = _mm_mul_pd(Q4,z2);
    Q_A   = _mm_add_pd(z2,Q3);
    Q_B   = _mm_add_pd(Q_B,Q2);
    Q_A   = _mm_mul_pd(Q_A,z2);
    Q_B   = _mm_mul_pd(Q_B,z2);
    Q_A   = _mm_add_pd(Q_A,Q1);
    Q_B   = _mm_add_pd(Q_B,Q0);
    Q_A   = _mm_mul_pd(Q_A,z);
    Q_A   = _mm_add_pd(Q_A,Q_B);

    z     = _mm_mul_pd(z,P_A);
    z     = _mm_mul_pd(z,gmx_mm_inv_pd(Q_A));
    z     = _mm_mul_pd(z,x);
    z     = _mm_add_pd(z,x);

    t1    = _mm_and_pd(mask1,morebits1);
    t1    = _mm_or_pd( _mm_and_pd(mask2,morebits2) , _mm_andnot_pd(mask2,t1) );

    z     = _mm_add_pd(z,t1);
    y     = _mm_add_pd(y,z);

    y     = _mm_xor_pd(y,sign);

    return y;
}
static gmx_inline __m128d
gmx_mm_atan2_pd(__m128d y, __m128d x)
{
    const __m128d pi          = _mm_set1_pd(M_PI);
    const __m128d minuspi     = _mm_set1_pd(-M_PI);
    const __m128d halfpi      = _mm_set1_pd(M_PI/2.0);
    const __m128d minushalfpi = _mm_set1_pd(-M_PI/2.0);

    __m128d z,z1,z3,z4;
    __m128d w;
    __m128d maskx_lt,maskx_eq;
    __m128d masky_lt,masky_eq;
    __m128d mask1,mask2,mask3,mask4,maskall;

    maskx_lt = _mm_cmplt_pd(x,_mm_setzero_pd());
    masky_lt = _mm_cmplt_pd(y,_mm_setzero_pd());
    maskx_eq = _mm_cmpeq_pd(x,_mm_setzero_pd());
    masky_eq = _mm_cmpeq_pd(y,_mm_setzero_pd());

    z        = _mm_mul_pd(y,gmx_mm_inv_pd(x));
    z        = gmx_mm_atan_pd(z);

    /* Special cases: x==0 and/or y==0 and the x<0 half-plane */
    mask1    = _mm_and_pd(maskx_eq,masky_lt);
    mask2    = _mm_andnot_pd(maskx_lt,masky_eq);
    mask3    = _mm_andnot_pd( _mm_or_pd(masky_lt,masky_eq) , maskx_eq);
    mask4    = _mm_and_pd(masky_eq,maskx_lt);

    maskall  = _mm_or_pd( _mm_or_pd(mask1,mask2), _mm_or_pd(mask3,mask4) );

    z        = _mm_andnot_pd(maskall,z);
    z1       = _mm_and_pd(mask1,minushalfpi);
    z3       = _mm_and_pd(mask3,halfpi);
    z4       = _mm_and_pd(mask4,pi);

    z        = _mm_or_pd( _mm_or_pd(z,z1), _mm_or_pd(z3,z4) );

    w        = _mm_blendv_pd(pi,minuspi,masky_lt);
    w        = _mm_and_pd(w,maskx_lt);

    w        = _mm_andnot_pd(maskall,w);

    z        = _mm_add_pd(z,w);

    return z;
}

#endif /*_gmx_math_x86_sse4_1_double_h_ */