[gromacs.git] / include / gmx_math_x86_sse4_1_double.h
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013, by the GROMACS development team, led by
5 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
6 * others, as listed in the AUTHORS file in the top-level source
7 * directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
35 #ifndef _gmx_math_x86_sse4_1_double_h_
36 #define _gmx_math_x86_sse4_1_double_h_
38 #include <stdio.h>
39 #include <math.h>
41 #include "gmx_x86_sse4_1.h"
45 #ifndef M_PI
46 # define M_PI 3.14159265358979323846264338327950288
47 #endif
49 /************************
50 * *
51 * Simple math routines *
52 * *
53 ************************/
55 /* 1.0/sqrt(x) */
56 static gmx_inline __m128d
57 gmx_mm_invsqrt_pd(__m128d x)
59 const __m128d half = _mm_set1_pd(0.5);
60 const __m128d three = _mm_set1_pd(3.0);
62 /* Lookup instruction only exists in single precision, convert back and forth... */
63 __m128d lu = _mm_cvtps_pd(_mm_rsqrt_ps( _mm_cvtpd_ps(x)));
65 lu = _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu, lu), x)), lu));
66 return _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu, lu), x)), lu));
69 /* 1.0/sqrt(x), done for a pair of arguments to improve throughput */
70 static void
71 gmx_mm_invsqrt_pair_pd(__m128d x1, __m128d x2, __m128d *invsqrt1, __m128d *invsqrt2)
73 const __m128d half = _mm_set1_pd(0.5);
74 const __m128d three = _mm_set1_pd(3.0);
75 const __m128 halff = _mm_set1_ps(0.5f);
76 const __m128 threef = _mm_set1_ps(3.0f);
78 __m128 xf, luf;
79 __m128d lu1, lu2;
81 /* Do first N-R step in float for 2x throughput */
82 xf = _mm_shuffle_ps(_mm_cvtpd_ps(x1), _mm_cvtpd_ps(x2), _MM_SHUFFLE(1, 0, 1, 0));
83 luf = _mm_rsqrt_ps(xf);
84 luf = _mm_mul_ps(halff, _mm_mul_ps(_mm_sub_ps(threef, _mm_mul_ps(_mm_mul_ps(luf, luf), xf)), luf));
86 lu2 = _mm_cvtps_pd(_mm_shuffle_ps(luf, luf, _MM_SHUFFLE(3, 2, 3, 2)));
87 lu1 = _mm_cvtps_pd(luf);
89 *invsqrt1 = _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu1, lu1), x1)), lu1));
90 *invsqrt2 = _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu2, lu2), x2)), lu2));
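/*
 * Illustrative usage sketch (not part of the original header): computes 1/sqrt(x) for two
 * pairs of doubles with the paired routine above, which shares the single-precision
 * lookup and first N-R step for better throughput. Function name and values are hypothetical.
 */
static void
gmx_mm_example_invsqrt_usage(void)
{
    __m128d x1 = _mm_set_pd(4.0, 2.0);    /* elements {2.0, 4.0} in memory order */
    __m128d x2 = _mm_set_pd(9.0, 0.25);
    __m128d is1, is2;
    double  out[4];

    gmx_mm_invsqrt_pair_pd(x1, x2, &is1, &is2);

    _mm_storeu_pd(out,     is1);
    _mm_storeu_pd(out + 2, is2);
    printf("1/sqrt: %g %g %g %g\n", out[0], out[1], out[2], out[3]);
}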
93 /* sqrt(x) - Do NOT use this (but rather invsqrt) if you actually need 1.0/sqrt(x) */
94 static gmx_inline __m128d
95 gmx_mm_sqrt_pd(__m128d x)
97 __m128d mask;
98 __m128d res;
100 mask = _mm_cmpeq_pd(x, _mm_setzero_pd());
101 res = _mm_andnot_pd(mask, gmx_mm_invsqrt_pd(x));
103 res = _mm_mul_pd(x, res);
105 return res;
108 /* 1.0/x */
109 static gmx_inline __m128d
110 gmx_mm_inv_pd(__m128d x)
112 const __m128d two = _mm_set1_pd(2.0);
114 /* Lookup instruction only exists in single precision, convert back and forth... */
115 __m128d lu = _mm_cvtps_pd(_mm_rcp_ps( _mm_cvtpd_ps(x)));
117 /* Perform two N-R steps for double precision */
118 lu = _mm_mul_pd(lu, _mm_sub_pd(two, _mm_mul_pd(x, lu)));
119 return _mm_mul_pd(lu, _mm_sub_pd(two, _mm_mul_pd(x, lu)));
122 static gmx_inline __m128d
123 gmx_mm_abs_pd(__m128d x)
125 const __m128d signmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
127 return _mm_and_pd(x, signmask);
132 * 2^x function.
134 * The 2^w term for the fractional part w in [-0.5,0.5] is calculated as a rational
135 * Padé approximation, 1+2*P(x^2)/(Q(x^2)-P(x^2)), according to the same algorithm
137 * as used in the Cephes/netlib math routines.
140 static __m128d
141 gmx_mm_exp2_pd(__m128d x)
143 /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
144 const __m128d arglimit = _mm_set1_pd(1022.0);
145 const __m128i expbase = _mm_set1_epi32(1023);
147 const __m128d P2 = _mm_set1_pd(2.30933477057345225087e-2);
148 const __m128d P1 = _mm_set1_pd(2.02020656693165307700e1);
149 const __m128d P0 = _mm_set1_pd(1.51390680115615096133e3);
150 /* Q2 == 1.0 */
151 const __m128d Q1 = _mm_set1_pd(2.33184211722314911771e2);
152 const __m128d Q0 = _mm_set1_pd(4.36821166879210612817e3);
153 const __m128d one = _mm_set1_pd(1.0);
154 const __m128d two = _mm_set1_pd(2.0);
156 __m128d valuemask;
157 __m128i iexppart;
158 __m128d fexppart;
159 __m128d intpart;
160 __m128d z, z2;
161 __m128d PolyP, PolyQ;
163 iexppart = _mm_cvtpd_epi32(x);
164 intpart = _mm_round_pd(x, _MM_FROUND_TO_NEAREST_INT);
166 /* The two lowest elements of iexppart now contain 32-bit numbers with a correctly biased exponent.
167 * To be able to shift it into the exponent for a double precision number we first need to
168 * shuffle so that the lower half contains the first element, and the upper half the second.
169 * This should really be done as a zero-extension, but since the next instructions will shift
170 * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
171 * (thus we just use element 2 from iexppart).
173 iexppart = _mm_shuffle_epi32(iexppart, _MM_SHUFFLE(2, 1, 2, 0));
175 /* Do the shift operation on the 64-bit registers */
176 iexppart = _mm_add_epi32(iexppart, expbase);
177 iexppart = _mm_slli_epi64(iexppart, 52);
179 valuemask = _mm_cmpge_pd(arglimit, gmx_mm_abs_pd(x));
180 fexppart = _mm_and_pd(valuemask, gmx_mm_castsi128_pd(iexppart));
182 z = _mm_sub_pd(x, intpart);
183 z2 = _mm_mul_pd(z, z);
185 PolyP = _mm_mul_pd(P2, z2);
186 PolyP = _mm_add_pd(PolyP, P1);
187 PolyQ = _mm_add_pd(z2, Q1);
188 PolyP = _mm_mul_pd(PolyP, z2);
189 PolyQ = _mm_mul_pd(PolyQ, z2);
190 PolyP = _mm_add_pd(PolyP, P0);
191 PolyQ = _mm_add_pd(PolyQ, Q0);
192 PolyP = _mm_mul_pd(PolyP, z);
194 z = _mm_mul_pd(PolyP, gmx_mm_inv_pd(_mm_sub_pd(PolyQ, PolyP)));
195 z = _mm_add_pd(one, _mm_mul_pd(two, z));
197 z = _mm_mul_pd(z, fexppart);
199 return z;
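/*
 * Illustrative sketch (not part of the original header): evaluates gmx_mm_exp2_pd() for two
 * arguments and compares against pow(2.0,x) from math.h. Function name and values are
 * hypothetical example choices.
 */
static void
gmx_mm_example_exp2_usage(void)
{
    __m128d x = _mm_set_pd(-3.5, 7.25);   /* elements {7.25, -3.5} in memory order */
    double  res[2];

    _mm_storeu_pd(res, gmx_mm_exp2_pd(x));
    printf("2^x: %.15g (ref %.15g)  %.15g (ref %.15g)\n",
           res[0], pow(2.0, 7.25), res[1], pow(2.0, -3.5));
}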
202 /* Exponential function. This could be calculated from 2^x as Exp(x)=2^(y), where y=log2(e)*x,
203 * but there will then be a small rounding error since we lose some precision due to the
204 * multiplication. This will then be magnified a lot by the exponential.
206 * Instead, we calculate the fractional part directly as a Padé approximation of
207 * Exp(z) on [-0.5,0.5]. We use extended precision arithmetic to calculate the fraction
208 * remaining after 2^y, which avoids the precision loss.
210 static __m128d
211 gmx_mm_exp_pd(__m128d exparg)
213 const __m128d argscale = _mm_set1_pd(1.4426950408889634073599);
214 /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
215 const __m128d arglimit = _mm_set1_pd(1022.0);
216 const __m128i expbase = _mm_set1_epi32(1023);
218 const __m128d invargscale0 = _mm_set1_pd(6.93145751953125e-1);
219 const __m128d invargscale1 = _mm_set1_pd(1.42860682030941723212e-6);
221 const __m128d P2 = _mm_set1_pd(1.26177193074810590878e-4);
222 const __m128d P1 = _mm_set1_pd(3.02994407707441961300e-2);
223 /* P0 == 1.0 */
224 const __m128d Q3 = _mm_set1_pd(3.00198505138664455042E-6);
225 const __m128d Q2 = _mm_set1_pd(2.52448340349684104192E-3);
226 const __m128d Q1 = _mm_set1_pd(2.27265548208155028766E-1);
227 /* Q0 == 2.0 */
228 const __m128d one = _mm_set1_pd(1.0);
229 const __m128d two = _mm_set1_pd(2.0);
231 __m128d valuemask;
232 __m128i iexppart;
233 __m128d fexppart;
234 __m128d intpart;
235 __m128d x, z, z2;
236 __m128d PolyP, PolyQ;
238 x = _mm_mul_pd(exparg, argscale);
240 iexppart = _mm_cvtpd_epi32(x);
241 intpart = _mm_round_pd(x, _MM_FROUND_TO_NEAREST_INT);
243 /* The two lowest elements of iexppart now contain 32-bit numbers with a correctly biased exponent.
244 * To be able to shift it into the exponent for a double precision number we first need to
245 * shuffle so that the lower half contains the first element, and the upper half the second.
246 * This should really be done as a zero-extension, but since the next instructions will shift
247 * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
248 * (thus we just use element 2 from iexppart).
250 iexppart = _mm_shuffle_epi32(iexppart, _MM_SHUFFLE(2, 1, 2, 0));
252 /* Do the shift operation on the 64-bit registers */
253 iexppart = _mm_add_epi32(iexppart, expbase);
254 iexppart = _mm_slli_epi64(iexppart, 52);
256 valuemask = _mm_cmpge_pd(arglimit, gmx_mm_abs_pd(x));
257 fexppart = _mm_and_pd(valuemask, gmx_mm_castsi128_pd(iexppart));
259 z = _mm_sub_pd(exparg, _mm_mul_pd(invargscale0, intpart));
260 z = _mm_sub_pd(z, _mm_mul_pd(invargscale1, intpart));
262 z2 = _mm_mul_pd(z, z);
264 PolyQ = _mm_mul_pd(Q3, z2);
265 PolyQ = _mm_add_pd(PolyQ, Q2);
266 PolyP = _mm_mul_pd(P2, z2);
267 PolyQ = _mm_mul_pd(PolyQ, z2);
268 PolyP = _mm_add_pd(PolyP, P1);
269 PolyQ = _mm_add_pd(PolyQ, Q1);
270 PolyP = _mm_mul_pd(PolyP, z2);
271 PolyQ = _mm_mul_pd(PolyQ, z2);
272 PolyP = _mm_add_pd(PolyP, one);
273 PolyQ = _mm_add_pd(PolyQ, two);
275 PolyP = _mm_mul_pd(PolyP, z);
277 z = _mm_mul_pd(PolyP, gmx_mm_inv_pd(_mm_sub_pd(PolyQ, PolyP)));
278 z = _mm_add_pd(one, _mm_mul_pd(two, z));
280 z = _mm_mul_pd(z, fexppart);
282 return z;
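/*
 * Illustrative sketch (not part of the original header): thanks to the extended-precision
 * argument reduction above, gmx_mm_exp_pd() stays close to exp() from math.h also for
 * larger arguments. Function name and values are hypothetical.
 */
static void
gmx_mm_example_exp_usage(void)
{
    __m128d x = _mm_set_pd(-20.0, 20.0);  /* elements {20.0, -20.0} in memory order */
    double  res[2];

    _mm_storeu_pd(res, gmx_mm_exp_pd(x));
    printf("exp: %.15g (ref %.15g)  %.15g (ref %.15g)\n",
           res[0], exp(20.0), res[1], exp(-20.0));
}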
287 static __m128d
288 gmx_mm_log_pd(__m128d x)
290 /* Same algorithm as cephes library */
291 const __m128d expmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000) );
293 const __m128i expbase_m1 = _mm_set1_epi32(1023-1); /* We want non-IEEE format */
295 const __m128d half = _mm_set1_pd(0.5);
296 const __m128d one = _mm_set1_pd(1.0);
297 const __m128d two = _mm_set1_pd(2.0);
298 const __m128d invsq2 = _mm_set1_pd(1.0/sqrt(2.0));
300 const __m128d corr1 = _mm_set1_pd(-2.121944400546905827679e-4);
301 const __m128d corr2 = _mm_set1_pd(0.693359375);
303 const __m128d P5 = _mm_set1_pd(1.01875663804580931796e-4);
304 const __m128d P4 = _mm_set1_pd(4.97494994976747001425e-1);
305 const __m128d P3 = _mm_set1_pd(4.70579119878881725854e0);
306 const __m128d P2 = _mm_set1_pd(1.44989225341610930846e1);
307 const __m128d P1 = _mm_set1_pd(1.79368678507819816313e1);
308 const __m128d P0 = _mm_set1_pd(7.70838733755885391666e0);
310 const __m128d Q4 = _mm_set1_pd(1.12873587189167450590e1);
311 const __m128d Q3 = _mm_set1_pd(4.52279145837532221105e1);
312 const __m128d Q2 = _mm_set1_pd(8.29875266912776603211e1);
313 const __m128d Q1 = _mm_set1_pd(7.11544750618563894466e1);
314 const __m128d Q0 = _mm_set1_pd(2.31251620126765340583e1);
316 const __m128d R2 = _mm_set1_pd(-7.89580278884799154124e-1);
317 const __m128d R1 = _mm_set1_pd(1.63866645699558079767e1);
318 const __m128d R0 = _mm_set1_pd(-6.41409952958715622951e1);
320 const __m128d S2 = _mm_set1_pd(-3.56722798256324312549E1);
321 const __m128d S1 = _mm_set1_pd(3.12093766372244180303E2);
322 const __m128d S0 = _mm_set1_pd(-7.69691943550460008604E2);
324 __m128d fexp;
325 __m128i iexp;
327 __m128d mask1, mask2;
328 __m128d corr, t1, t2, q;
329 __m128d zA, yA, xA, zB, yB, xB, z;
330 __m128d polyR, polyS;
331 __m128d polyP1, polyP2, polyQ1, polyQ2;
333 /* Separate x into exponent and mantissa, with a mantissa in the range [0.5..1[ (not IEEE754 standard!) */
334 fexp = _mm_and_pd(x, expmask);
335 iexp = gmx_mm_castpd_si128(fexp);
336 iexp = _mm_srli_epi64(iexp, 52);
337 iexp = _mm_sub_epi32(iexp, expbase_m1);
338 iexp = _mm_shuffle_epi32(iexp, _MM_SHUFFLE(1, 1, 2, 0) );
339 fexp = _mm_cvtepi32_pd(iexp);
341 x = _mm_andnot_pd(expmask, x);
342 x = _mm_or_pd(x, one);
343 x = _mm_mul_pd(x, half);
345 mask1 = _mm_cmpgt_pd(gmx_mm_abs_pd(fexp), two);
346 mask2 = _mm_cmplt_pd(x, invsq2);
348 fexp = _mm_sub_pd(fexp, _mm_and_pd(mask2, one));
350 /* If mask1 is set ('A') */
351 zA = _mm_sub_pd(x, half);
352 t1 = _mm_blendv_pd( zA, x, mask2 );
353 zA = _mm_sub_pd(t1, half);
354 t2 = _mm_blendv_pd( x, zA, mask2 );
355 yA = _mm_mul_pd(half, _mm_add_pd(t2, one));
357 xA = _mm_mul_pd(zA, gmx_mm_inv_pd(yA));
358 zA = _mm_mul_pd(xA, xA);
360 /* EVALUATE POLY */
361 polyR = _mm_mul_pd(R2, zA);
362 polyR = _mm_add_pd(polyR, R1);
363 polyR = _mm_mul_pd(polyR, zA);
364 polyR = _mm_add_pd(polyR, R0);
366 polyS = _mm_add_pd(zA, S2);
367 polyS = _mm_mul_pd(polyS, zA);
368 polyS = _mm_add_pd(polyS, S1);
369 polyS = _mm_mul_pd(polyS, zA);
370 polyS = _mm_add_pd(polyS, S0);
372 q = _mm_mul_pd(polyR, gmx_mm_inv_pd(polyS));
373 zA = _mm_mul_pd(_mm_mul_pd(xA, zA), q);
375 zA = _mm_add_pd(zA, _mm_mul_pd(corr1, fexp));
376 zA = _mm_add_pd(zA, xA);
377 zA = _mm_add_pd(zA, _mm_mul_pd(corr2, fexp));
379 /* If mask1 is not set ('B') */
380 corr = _mm_and_pd(mask2, x);
381 xB = _mm_add_pd(x, corr);
382 xB = _mm_sub_pd(xB, one);
383 zB = _mm_mul_pd(xB, xB);
385 polyP1 = _mm_mul_pd(P5, zB);
386 polyP2 = _mm_mul_pd(P4, zB);
387 polyP1 = _mm_add_pd(polyP1, P3);
388 polyP2 = _mm_add_pd(polyP2, P2);
389 polyP1 = _mm_mul_pd(polyP1, zB);
390 polyP2 = _mm_mul_pd(polyP2, zB);
391 polyP1 = _mm_add_pd(polyP1, P1);
392 polyP2 = _mm_add_pd(polyP2, P0);
393 polyP1 = _mm_mul_pd(polyP1, xB);
394 polyP1 = _mm_add_pd(polyP1, polyP2);
396 polyQ2 = _mm_mul_pd(Q4, zB);
397 polyQ1 = _mm_add_pd(zB, Q3);
398 polyQ2 = _mm_add_pd(polyQ2, Q2);
399 polyQ1 = _mm_mul_pd(polyQ1, zB);
400 polyQ2 = _mm_mul_pd(polyQ2, zB);
401 polyQ1 = _mm_add_pd(polyQ1, Q1);
402 polyQ2 = _mm_add_pd(polyQ2, Q0);
403 polyQ1 = _mm_mul_pd(polyQ1, xB);
404 polyQ1 = _mm_add_pd(polyQ1, polyQ2);
406 fexp = _mm_and_pd(fexp, _mm_cmpneq_pd(fexp, _mm_setzero_pd()));
408 q = _mm_mul_pd(polyP1, gmx_mm_inv_pd(polyQ1));
409 yB = _mm_mul_pd(_mm_mul_pd(xB, zB), q);
411 yB = _mm_add_pd(yB, _mm_mul_pd(corr1, fexp));
412 yB = _mm_sub_pd(yB, _mm_mul_pd(half, zB));
413 zB = _mm_add_pd(xB, yB);
414 zB = _mm_add_pd(zB, _mm_mul_pd(corr2, fexp));
416 z = _mm_blendv_pd( zB, zA, mask1 );
418 return z;
422 static __m128d
423 gmx_mm_erf_pd(__m128d x)
425 /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
426 const __m128d CAP4 = _mm_set1_pd(-0.431780540597889301512e-4);
427 const __m128d CAP3 = _mm_set1_pd(-0.00578562306260059236059);
428 const __m128d CAP2 = _mm_set1_pd(-0.028593586920219752446);
429 const __m128d CAP1 = _mm_set1_pd(-0.315924962948621698209);
430 const __m128d CAP0 = _mm_set1_pd(0.14952975608477029151);
432 const __m128d CAQ5 = _mm_set1_pd(-0.374089300177174709737e-5);
433 const __m128d CAQ4 = _mm_set1_pd(0.00015126584532155383535);
434 const __m128d CAQ3 = _mm_set1_pd(0.00536692680669480725423);
435 const __m128d CAQ2 = _mm_set1_pd(0.0668686825594046122636);
436 const __m128d CAQ1 = _mm_set1_pd(0.402604990869284362773);
437 /* CAQ0 == 1.0 */
438 const __m128d CAoffset = _mm_set1_pd(0.9788494110107421875);
440 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
441 const __m128d CBP6 = _mm_set1_pd(2.49650423685462752497647637088e-10);
442 const __m128d CBP5 = _mm_set1_pd(0.00119770193298159629350136085658);
443 const __m128d CBP4 = _mm_set1_pd(0.0164944422378370965881008942733);
444 const __m128d CBP3 = _mm_set1_pd(0.0984581468691775932063932439252);
445 const __m128d CBP2 = _mm_set1_pd(0.317364595806937763843589437418);
446 const __m128d CBP1 = _mm_set1_pd(0.554167062641455850932670067075);
447 const __m128d CBP0 = _mm_set1_pd(0.427583576155807163756925301060);
448 const __m128d CBQ7 = _mm_set1_pd(0.00212288829699830145976198384930);
449 const __m128d CBQ6 = _mm_set1_pd(0.0334810979522685300554606393425);
450 const __m128d CBQ5 = _mm_set1_pd(0.2361713785181450957579508850717);
451 const __m128d CBQ4 = _mm_set1_pd(0.955364736493055670530981883072);
452 const __m128d CBQ3 = _mm_set1_pd(2.36815675631420037315349279199);
453 const __m128d CBQ2 = _mm_set1_pd(3.55261649184083035537184223542);
454 const __m128d CBQ1 = _mm_set1_pd(2.93501136050160872574376997993);
455 /* CBQ0 == 1.0 */
457 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
458 const __m128d CCP6 = _mm_set1_pd(-2.8175401114513378771);
459 const __m128d CCP5 = _mm_set1_pd(-3.22729451764143718517);
460 const __m128d CCP4 = _mm_set1_pd(-2.5518551727311523996);
461 const __m128d CCP3 = _mm_set1_pd(-0.687717681153649930619);
462 const __m128d CCP2 = _mm_set1_pd(-0.212652252872804219852);
463 const __m128d CCP1 = _mm_set1_pd(0.0175389834052493308818);
464 const __m128d CCP0 = _mm_set1_pd(0.00628057170626964891937);
466 const __m128d CCQ6 = _mm_set1_pd(5.48409182238641741584);
467 const __m128d CCQ5 = _mm_set1_pd(13.5064170191802889145);
468 const __m128d CCQ4 = _mm_set1_pd(22.9367376522880577224);
469 const __m128d CCQ3 = _mm_set1_pd(15.930646027911794143);
470 const __m128d CCQ2 = _mm_set1_pd(11.0567237927800161565);
471 const __m128d CCQ1 = _mm_set1_pd(2.79257750980575282228);
472 /* CCQ0 == 1.0 */
473 const __m128d CCoffset = _mm_set1_pd(0.5579090118408203125);
475 const __m128d one = _mm_set1_pd(1.0);
476 const __m128d two = _mm_set1_pd(2.0);
478 const __m128d signbit = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000, 0x00000000, 0x80000000, 0x00000000) );
480 __m128d xabs, x2, x4, t, t2, w, w2;
481 __m128d PolyAP0, PolyAP1, PolyAQ0, PolyAQ1;
482 __m128d PolyBP0, PolyBP1, PolyBQ0, PolyBQ1;
483 __m128d PolyCP0, PolyCP1, PolyCQ0, PolyCQ1;
484 __m128d res_erf, res_erfcB, res_erfcC, res_erfc, res;
485 __m128d mask, expmx2;
487 /* Calculate erf() */
488 xabs = gmx_mm_abs_pd(x);
489 x2 = _mm_mul_pd(x, x);
490 x4 = _mm_mul_pd(x2, x2);
492 PolyAP0 = _mm_mul_pd(CAP4, x4);
493 PolyAP1 = _mm_mul_pd(CAP3, x4);
494 PolyAP0 = _mm_add_pd(PolyAP0, CAP2);
495 PolyAP1 = _mm_add_pd(PolyAP1, CAP1);
496 PolyAP0 = _mm_mul_pd(PolyAP0, x4);
497 PolyAP1 = _mm_mul_pd(PolyAP1, x2);
498 PolyAP0 = _mm_add_pd(PolyAP0, CAP0);
499 PolyAP0 = _mm_add_pd(PolyAP0, PolyAP1);
501 PolyAQ1 = _mm_mul_pd(CAQ5, x4);
502 PolyAQ0 = _mm_mul_pd(CAQ4, x4);
503 PolyAQ1 = _mm_add_pd(PolyAQ1, CAQ3);
504 PolyAQ0 = _mm_add_pd(PolyAQ0, CAQ2);
505 PolyAQ1 = _mm_mul_pd(PolyAQ1, x4);
506 PolyAQ0 = _mm_mul_pd(PolyAQ0, x4);
507 PolyAQ1 = _mm_add_pd(PolyAQ1, CAQ1);
508 PolyAQ0 = _mm_add_pd(PolyAQ0, one);
509 PolyAQ1 = _mm_mul_pd(PolyAQ1, x2);
510 PolyAQ0 = _mm_add_pd(PolyAQ0, PolyAQ1);
512 res_erf = _mm_mul_pd(PolyAP0, gmx_mm_inv_pd(PolyAQ0));
513 res_erf = _mm_add_pd(CAoffset, res_erf);
514 res_erf = _mm_mul_pd(x, res_erf);
516 /* Calculate erfc() in range [1,4.5] */
517 t = _mm_sub_pd(xabs, one);
518 t2 = _mm_mul_pd(t, t);
520 PolyBP0 = _mm_mul_pd(CBP6, t2);
521 PolyBP1 = _mm_mul_pd(CBP5, t2);
522 PolyBP0 = _mm_add_pd(PolyBP0, CBP4);
523 PolyBP1 = _mm_add_pd(PolyBP1, CBP3);
524 PolyBP0 = _mm_mul_pd(PolyBP0, t2);
525 PolyBP1 = _mm_mul_pd(PolyBP1, t2);
526 PolyBP0 = _mm_add_pd(PolyBP0, CBP2);
527 PolyBP1 = _mm_add_pd(PolyBP1, CBP1);
528 PolyBP0 = _mm_mul_pd(PolyBP0, t2);
529 PolyBP1 = _mm_mul_pd(PolyBP1, t);
530 PolyBP0 = _mm_add_pd(PolyBP0, CBP0);
531 PolyBP0 = _mm_add_pd(PolyBP0, PolyBP1);
533 PolyBQ1 = _mm_mul_pd(CBQ7, t2);
534 PolyBQ0 = _mm_mul_pd(CBQ6, t2);
535 PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ5);
536 PolyBQ0 = _mm_add_pd(PolyBQ0, CBQ4);
537 PolyBQ1 = _mm_mul_pd(PolyBQ1, t2);
538 PolyBQ0 = _mm_mul_pd(PolyBQ0, t2);
539 PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ3);
540 PolyBQ0 = _mm_add_pd(PolyBQ0, CBQ2);
541 PolyBQ1 = _mm_mul_pd(PolyBQ1, t2);
542 PolyBQ0 = _mm_mul_pd(PolyBQ0, t2);
543 PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ1);
544 PolyBQ0 = _mm_add_pd(PolyBQ0, one);
545 PolyBQ1 = _mm_mul_pd(PolyBQ1, t);
546 PolyBQ0 = _mm_add_pd(PolyBQ0, PolyBQ1);
548 res_erfcB = _mm_mul_pd(PolyBP0, gmx_mm_inv_pd(PolyBQ0));
550 res_erfcB = _mm_mul_pd(res_erfcB, xabs);
552 /* Calculate erfc() in range [4.5,inf] */
553 w = gmx_mm_inv_pd(xabs);
554 w2 = _mm_mul_pd(w, w);
556 PolyCP0 = _mm_mul_pd(CCP6, w2);
557 PolyCP1 = _mm_mul_pd(CCP5, w2);
558 PolyCP0 = _mm_add_pd(PolyCP0, CCP4);
559 PolyCP1 = _mm_add_pd(PolyCP1, CCP3);
560 PolyCP0 = _mm_mul_pd(PolyCP0, w2);
561 PolyCP1 = _mm_mul_pd(PolyCP1, w2);
562 PolyCP0 = _mm_add_pd(PolyCP0, CCP2);
563 PolyCP1 = _mm_add_pd(PolyCP1, CCP1);
564 PolyCP0 = _mm_mul_pd(PolyCP0, w2);
565 PolyCP1 = _mm_mul_pd(PolyCP1, w);
566 PolyCP0 = _mm_add_pd(PolyCP0, CCP0);
567 PolyCP0 = _mm_add_pd(PolyCP0, PolyCP1);
569 PolyCQ0 = _mm_mul_pd(CCQ6, w2);
570 PolyCQ1 = _mm_mul_pd(CCQ5, w2);
571 PolyCQ0 = _mm_add_pd(PolyCQ0, CCQ4);
572 PolyCQ1 = _mm_add_pd(PolyCQ1, CCQ3);
573 PolyCQ0 = _mm_mul_pd(PolyCQ0, w2);
574 PolyCQ1 = _mm_mul_pd(PolyCQ1, w2);
575 PolyCQ0 = _mm_add_pd(PolyCQ0, CCQ2);
576 PolyCQ1 = _mm_add_pd(PolyCQ1, CCQ1);
577 PolyCQ0 = _mm_mul_pd(PolyCQ0, w2);
578 PolyCQ1 = _mm_mul_pd(PolyCQ1, w);
579 PolyCQ0 = _mm_add_pd(PolyCQ0, one);
580 PolyCQ0 = _mm_add_pd(PolyCQ0, PolyCQ1);
582 expmx2 = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );
584 res_erfcC = _mm_mul_pd(PolyCP0, gmx_mm_inv_pd(PolyCQ0));
585 res_erfcC = _mm_add_pd(res_erfcC, CCoffset);
586 res_erfcC = _mm_mul_pd(res_erfcC, w);
588 mask = _mm_cmpgt_pd(xabs, _mm_set1_pd(4.5));
589 res_erfc = _mm_blendv_pd(res_erfcB, res_erfcC, mask);
591 res_erfc = _mm_mul_pd(res_erfc, expmx2);
593 /* erfc(x<0) = 2-erfc(|x|) */
594 mask = _mm_cmplt_pd(x, _mm_setzero_pd());
595 res_erfc = _mm_blendv_pd(res_erfc, _mm_sub_pd(two, res_erfc), mask);
597 /* Select erf() or erfc() */
598 mask = _mm_cmplt_pd(xabs, one);
599 res = _mm_blendv_pd(_mm_sub_pd(one, res_erfc), res_erf, mask);
601 return res;
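/*
 * Illustrative sketch (not part of the original header): evaluates gmx_mm_erf_pd() for one
 * small and one intermediate argument, compared against the C99 erf() from math.h.
 * Function name and values are hypothetical.
 */
static void
gmx_mm_example_erf_usage(void)
{
    __m128d x = _mm_set_pd(2.5, 0.3);     /* elements {0.3, 2.5} in memory order */
    double  res[2];

    _mm_storeu_pd(res, gmx_mm_erf_pd(x));
    printf("erf: %.15g (ref %.15g)  %.15g (ref %.15g)\n",
           res[0], erf(0.3), res[1], erf(2.5));
}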
605 static __m128d
606 gmx_mm_erfc_pd(__m128d x)
608 /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
609 const __m128d CAP4 = _mm_set1_pd(-0.431780540597889301512e-4);
610 const __m128d CAP3 = _mm_set1_pd(-0.00578562306260059236059);
611 const __m128d CAP2 = _mm_set1_pd(-0.028593586920219752446);
612 const __m128d CAP1 = _mm_set1_pd(-0.315924962948621698209);
613 const __m128d CAP0 = _mm_set1_pd(0.14952975608477029151);
615 const __m128d CAQ5 = _mm_set1_pd(-0.374089300177174709737e-5);
616 const __m128d CAQ4 = _mm_set1_pd(0.00015126584532155383535);
617 const __m128d CAQ3 = _mm_set1_pd(0.00536692680669480725423);
618 const __m128d CAQ2 = _mm_set1_pd(0.0668686825594046122636);
619 const __m128d CAQ1 = _mm_set1_pd(0.402604990869284362773);
620 /* CAQ0 == 1.0 */
621 const __m128d CAoffset = _mm_set1_pd(0.9788494110107421875);
623 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
624 const __m128d CBP6 = _mm_set1_pd(2.49650423685462752497647637088e-10);
625 const __m128d CBP5 = _mm_set1_pd(0.00119770193298159629350136085658);
626 const __m128d CBP4 = _mm_set1_pd(0.0164944422378370965881008942733);
627 const __m128d CBP3 = _mm_set1_pd(0.0984581468691775932063932439252);
628 const __m128d CBP2 = _mm_set1_pd(0.317364595806937763843589437418);
629 const __m128d CBP1 = _mm_set1_pd(0.554167062641455850932670067075);
630 const __m128d CBP0 = _mm_set1_pd(0.427583576155807163756925301060);
631 const __m128d CBQ7 = _mm_set1_pd(0.00212288829699830145976198384930);
632 const __m128d CBQ6 = _mm_set1_pd(0.0334810979522685300554606393425);
633 const __m128d CBQ5 = _mm_set1_pd(0.2361713785181450957579508850717);
634 const __m128d CBQ4 = _mm_set1_pd(0.955364736493055670530981883072);
635 const __m128d CBQ3 = _mm_set1_pd(2.36815675631420037315349279199);
636 const __m128d CBQ2 = _mm_set1_pd(3.55261649184083035537184223542);
637 const __m128d CBQ1 = _mm_set1_pd(2.93501136050160872574376997993);
638 /* CBQ0 == 1.0 */
640 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
641 const __m128d CCP6 = _mm_set1_pd(-2.8175401114513378771);
642 const __m128d CCP5 = _mm_set1_pd(-3.22729451764143718517);
643 const __m128d CCP4 = _mm_set1_pd(-2.5518551727311523996);
644 const __m128d CCP3 = _mm_set1_pd(-0.687717681153649930619);
645 const __m128d CCP2 = _mm_set1_pd(-0.212652252872804219852);
646 const __m128d CCP1 = _mm_set1_pd(0.0175389834052493308818);
647 const __m128d CCP0 = _mm_set1_pd(0.00628057170626964891937);
649 const __m128d CCQ6 = _mm_set1_pd(5.48409182238641741584);
650 const __m128d CCQ5 = _mm_set1_pd(13.5064170191802889145);
651 const __m128d CCQ4 = _mm_set1_pd(22.9367376522880577224);
652 const __m128d CCQ3 = _mm_set1_pd(15.930646027911794143);
653 const __m128d CCQ2 = _mm_set1_pd(11.0567237927800161565);
654 const __m128d CCQ1 = _mm_set1_pd(2.79257750980575282228);
655 /* CCQ0 == 1.0 */
656 const __m128d CCoffset = _mm_set1_pd(0.5579090118408203125);
658 const __m128d one = _mm_set1_pd(1.0);
659 const __m128d two = _mm_set1_pd(2.0);
661 const __m128d signbit = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000, 0x00000000, 0x80000000, 0x00000000) );
663 __m128d xabs, x2, x4, t, t2, w, w2;
664 __m128d PolyAP0, PolyAP1, PolyAQ0, PolyAQ1;
665 __m128d PolyBP0, PolyBP1, PolyBQ0, PolyBQ1;
666 __m128d PolyCP0, PolyCP1, PolyCQ0, PolyCQ1;
667 __m128d res_erf, res_erfcB, res_erfcC, res_erfc, res;
668 __m128d mask, expmx2;
670 /* Calculate erf() */
671 xabs = gmx_mm_abs_pd(x);
672 x2 = _mm_mul_pd(x, x);
673 x4 = _mm_mul_pd(x2, x2);
675 PolyAP0 = _mm_mul_pd(CAP4, x4);
676 PolyAP1 = _mm_mul_pd(CAP3, x4);
677 PolyAP0 = _mm_add_pd(PolyAP0, CAP2);
678 PolyAP1 = _mm_add_pd(PolyAP1, CAP1);
679 PolyAP0 = _mm_mul_pd(PolyAP0, x4);
680 PolyAP1 = _mm_mul_pd(PolyAP1, x2);
681 PolyAP0 = _mm_add_pd(PolyAP0, CAP0);
682 PolyAP0 = _mm_add_pd(PolyAP0, PolyAP1);
684 PolyAQ1 = _mm_mul_pd(CAQ5, x4);
685 PolyAQ0 = _mm_mul_pd(CAQ4, x4);
686 PolyAQ1 = _mm_add_pd(PolyAQ1, CAQ3);
687 PolyAQ0 = _mm_add_pd(PolyAQ0, CAQ2);
688 PolyAQ1 = _mm_mul_pd(PolyAQ1, x4);
689 PolyAQ0 = _mm_mul_pd(PolyAQ0, x4);
690 PolyAQ1 = _mm_add_pd(PolyAQ1, CAQ1);
691 PolyAQ0 = _mm_add_pd(PolyAQ0, one);
692 PolyAQ1 = _mm_mul_pd(PolyAQ1, x2);
693 PolyAQ0 = _mm_add_pd(PolyAQ0, PolyAQ1);
695 res_erf = _mm_mul_pd(PolyAP0, gmx_mm_inv_pd(PolyAQ0));
696 res_erf = _mm_add_pd(CAoffset, res_erf);
697 res_erf = _mm_mul_pd(x, res_erf);
699 /* Calculate erfc() in range [1,4.5] */
700 t = _mm_sub_pd(xabs, one);
701 t2 = _mm_mul_pd(t, t);
703 PolyBP0 = _mm_mul_pd(CBP6, t2);
704 PolyBP1 = _mm_mul_pd(CBP5, t2);
705 PolyBP0 = _mm_add_pd(PolyBP0, CBP4);
706 PolyBP1 = _mm_add_pd(PolyBP1, CBP3);
707 PolyBP0 = _mm_mul_pd(PolyBP0, t2);
708 PolyBP1 = _mm_mul_pd(PolyBP1, t2);
709 PolyBP0 = _mm_add_pd(PolyBP0, CBP2);
710 PolyBP1 = _mm_add_pd(PolyBP1, CBP1);
711 PolyBP0 = _mm_mul_pd(PolyBP0, t2);
712 PolyBP1 = _mm_mul_pd(PolyBP1, t);
713 PolyBP0 = _mm_add_pd(PolyBP0, CBP0);
714 PolyBP0 = _mm_add_pd(PolyBP0, PolyBP1);
716 PolyBQ1 = _mm_mul_pd(CBQ7, t2);
717 PolyBQ0 = _mm_mul_pd(CBQ6, t2);
718 PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ5);
719 PolyBQ0 = _mm_add_pd(PolyBQ0, CBQ4);
720 PolyBQ1 = _mm_mul_pd(PolyBQ1, t2);
721 PolyBQ0 = _mm_mul_pd(PolyBQ0, t2);
722 PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ3);
723 PolyBQ0 = _mm_add_pd(PolyBQ0, CBQ2);
724 PolyBQ1 = _mm_mul_pd(PolyBQ1, t2);
725 PolyBQ0 = _mm_mul_pd(PolyBQ0, t2);
726 PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ1);
727 PolyBQ0 = _mm_add_pd(PolyBQ0, one);
728 PolyBQ1 = _mm_mul_pd(PolyBQ1, t);
729 PolyBQ0 = _mm_add_pd(PolyBQ0, PolyBQ1);
731 res_erfcB = _mm_mul_pd(PolyBP0, gmx_mm_inv_pd(PolyBQ0));
733 res_erfcB = _mm_mul_pd(res_erfcB, xabs);
735 /* Calculate erfc() in range [4.5,inf] */
736 w = gmx_mm_inv_pd(xabs);
737 w2 = _mm_mul_pd(w, w);
739 PolyCP0 = _mm_mul_pd(CCP6, w2);
740 PolyCP1 = _mm_mul_pd(CCP5, w2);
741 PolyCP0 = _mm_add_pd(PolyCP0, CCP4);
742 PolyCP1 = _mm_add_pd(PolyCP1, CCP3);
743 PolyCP0 = _mm_mul_pd(PolyCP0, w2);
744 PolyCP1 = _mm_mul_pd(PolyCP1, w2);
745 PolyCP0 = _mm_add_pd(PolyCP0, CCP2);
746 PolyCP1 = _mm_add_pd(PolyCP1, CCP1);
747 PolyCP0 = _mm_mul_pd(PolyCP0, w2);
748 PolyCP1 = _mm_mul_pd(PolyCP1, w);
749 PolyCP0 = _mm_add_pd(PolyCP0, CCP0);
750 PolyCP0 = _mm_add_pd(PolyCP0, PolyCP1);
752 PolyCQ0 = _mm_mul_pd(CCQ6, w2);
753 PolyCQ1 = _mm_mul_pd(CCQ5, w2);
754 PolyCQ0 = _mm_add_pd(PolyCQ0, CCQ4);
755 PolyCQ1 = _mm_add_pd(PolyCQ1, CCQ3);
756 PolyCQ0 = _mm_mul_pd(PolyCQ0, w2);
757 PolyCQ1 = _mm_mul_pd(PolyCQ1, w2);
758 PolyCQ0 = _mm_add_pd(PolyCQ0, CCQ2);
759 PolyCQ1 = _mm_add_pd(PolyCQ1, CCQ1);
760 PolyCQ0 = _mm_mul_pd(PolyCQ0, w2);
761 PolyCQ1 = _mm_mul_pd(PolyCQ1, w);
762 PolyCQ0 = _mm_add_pd(PolyCQ0, one);
763 PolyCQ0 = _mm_add_pd(PolyCQ0, PolyCQ1);
765 expmx2 = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );
767 res_erfcC = _mm_mul_pd(PolyCP0, gmx_mm_inv_pd(PolyCQ0));
768 res_erfcC = _mm_add_pd(res_erfcC, CCoffset);
769 res_erfcC = _mm_mul_pd(res_erfcC, w);
771 mask = _mm_cmpgt_pd(xabs, _mm_set1_pd(4.5));
772 res_erfc = _mm_blendv_pd(res_erfcB, res_erfcC, mask);
774 res_erfc = _mm_mul_pd(res_erfc, expmx2);
776 /* erfc(x<0) = 2-erfc(|x|) */
777 mask = _mm_cmplt_pd(x, _mm_setzero_pd());
778 res_erfc = _mm_blendv_pd(res_erfc, _mm_sub_pd(two, res_erfc), mask);
780 /* Select erf() or erfc() */
781 mask = _mm_cmplt_pd(xabs, one);
782 res = _mm_blendv_pd(res_erfc, _mm_sub_pd(one, res_erf), mask);
784 return res;
788 /* Calculate the force correction due to PME analytically.
790 * This routine is meant to enable analytical evaluation of the
791 * direct-space PME electrostatic force to avoid tables.
793 * The direct-space potential should be Erfc(beta*r)/r, but there
794 * are some problems evaluating that:
796 * First, the error function is difficult (read: expensive) to
797 * approximate accurately for intermediate to large arguments, and
798 * this happens already in ranges of beta*r that occur in simulations.
799 * Second, we now try to avoid calculating potentials in Gromacs but
800 * use forces directly.
802 * We can simplify things slightly by noting that the PME part is really
803 * a correction to the normal Coulomb force since Erfc(z)=1-Erf(z), i.e.
805 * V= 1/r - Erf(beta*r)/r
807 * The first term we already have from the inverse square root, so
808 * we can leave it out of this routine.
810 * For PME tolerances of 1e-3 to 1e-8 and cutoffs of 0.5nm to 1.8nm,
811 * the argument beta*r will be in the range 0.15 to ~4. Use your
812 * favorite plotting program to realize how well-behaved Erf(z)/z is
813 * in this range!
815 * We approximate f(z)=erf(z)/z with a rational minimax polynomial.
816 * However, it turns out it is more efficient to approximate f(z)/z and
817 * then only use even powers. This is another minor optimization, since
818 * we actually WANT f(z)/z, because it is going to be multiplied by
819 * the vector between the two atoms to get the vectorial force. The
820 * fastest flops are the ones we can avoid calculating!
822 * So, here's how it should be used (see also the usage sketch after this routine):
824 * 1. Calculate r^2.
825 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
826 * 3. Evaluate this routine with z^2 as the argument.
827 * 4. The return value is the expression:
830 * 2*exp(-z^2) erf(z)
831 * ------------ - --------
832 * sqrt(Pi)*z^2 z^3
834 * 5. Multiply the entire expression by beta^3. This will get you
836 * beta^3*2*exp(-z^2) beta^3*erf(z)
837 * ------------------ - ---------------
838 * sqrt(Pi)*z^2 z^3
840 * or, switching back to r (z=r*beta):
842 * 2*beta*exp(-r^2*beta^2) erf(r*beta)
843 * ----------------------- - -----------
844 * sqrt(Pi)*r^2 r^3
847 * With a bit of math exercise you should be able to confirm that
848 * this is exactly D[Erf[beta*r]/r,r] divided by r another time.
850 * 6. Add the result to 1/r^3, multiply by the product of the charges,
851 * and you have your force (divided by r). A final multiplication
852 * with the vector connecting the two particles and you have your
853 * vectorial force to add to the particles.
856 static __m128d
857 gmx_mm_pmecorrF_pd(__m128d z2)
859 const __m128d FN10 = _mm_set1_pd(-8.0072854618360083154e-14);
860 const __m128d FN9 = _mm_set1_pd(1.1859116242260148027e-11);
861 const __m128d FN8 = _mm_set1_pd(-8.1490406329798423616e-10);
862 const __m128d FN7 = _mm_set1_pd(3.4404793543907847655e-8);
863 const __m128d FN6 = _mm_set1_pd(-9.9471420832602741006e-7);
864 const __m128d FN5 = _mm_set1_pd(0.000020740315999115847456);
865 const __m128d FN4 = _mm_set1_pd(-0.00031991745139313364005);
866 const __m128d FN3 = _mm_set1_pd(0.0035074449373659008203);
867 const __m128d FN2 = _mm_set1_pd(-0.031750380176100813405);
868 const __m128d FN1 = _mm_set1_pd(0.13884101728898463426);
869 const __m128d FN0 = _mm_set1_pd(-0.75225277815249618847);
871 const __m128d FD5 = _mm_set1_pd(0.000016009278224355026701);
872 const __m128d FD4 = _mm_set1_pd(0.00051055686934806966046);
873 const __m128d FD3 = _mm_set1_pd(0.0081803507497974289008);
874 const __m128d FD2 = _mm_set1_pd(0.077181146026670287235);
875 const __m128d FD1 = _mm_set1_pd(0.41543303143712535988);
876 const __m128d FD0 = _mm_set1_pd(1.0);
878 __m128d z4;
879 __m128d polyFN0, polyFN1, polyFD0, polyFD1;
881 z4 = _mm_mul_pd(z2, z2);
883 polyFD1 = _mm_mul_pd(FD5, z4);
884 polyFD0 = _mm_mul_pd(FD4, z4);
885 polyFD1 = _mm_add_pd(polyFD1, FD3);
886 polyFD0 = _mm_add_pd(polyFD0, FD2);
887 polyFD1 = _mm_mul_pd(polyFD1, z4);
888 polyFD0 = _mm_mul_pd(polyFD0, z4);
889 polyFD1 = _mm_add_pd(polyFD1, FD1);
890 polyFD0 = _mm_add_pd(polyFD0, FD0);
891 polyFD1 = _mm_mul_pd(polyFD1, z2);
892 polyFD0 = _mm_add_pd(polyFD0, polyFD1);
894 polyFD0 = gmx_mm_inv_pd(polyFD0);
896 polyFN0 = _mm_mul_pd(FN10, z4);
897 polyFN1 = _mm_mul_pd(FN9, z4);
898 polyFN0 = _mm_add_pd(polyFN0, FN8);
899 polyFN1 = _mm_add_pd(polyFN1, FN7);
900 polyFN0 = _mm_mul_pd(polyFN0, z4);
901 polyFN1 = _mm_mul_pd(polyFN1, z4);
902 polyFN0 = _mm_add_pd(polyFN0, FN6);
903 polyFN1 = _mm_add_pd(polyFN1, FN5);
904 polyFN0 = _mm_mul_pd(polyFN0, z4);
905 polyFN1 = _mm_mul_pd(polyFN1, z4);
906 polyFN0 = _mm_add_pd(polyFN0, FN4);
907 polyFN1 = _mm_add_pd(polyFN1, FN3);
908 polyFN0 = _mm_mul_pd(polyFN0, z4);
909 polyFN1 = _mm_mul_pd(polyFN1, z4);
910 polyFN0 = _mm_add_pd(polyFN0, FN2);
911 polyFN1 = _mm_add_pd(polyFN1, FN1);
912 polyFN0 = _mm_mul_pd(polyFN0, z4);
913 polyFN1 = _mm_mul_pd(polyFN1, z2);
914 polyFN0 = _mm_add_pd(polyFN0, FN0);
915 polyFN0 = _mm_add_pd(polyFN0, polyFN1);
917 return _mm_mul_pd(polyFN0, polyFD0);
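/*
 * Illustrative usage sketch (not part of the original header): follows the numbered recipe
 * in the comment above for one pair of distances. The values of beta, r and the charge
 * product qq are hypothetical, as is the function name.
 */
static void
gmx_mm_example_pmecorrF_usage(void)
{
    const double beta = 3.12;                                /* Ewald splitting parameter     */
    const double qq   = 1.0;                                 /* product of the two charges    */
    __m128d      r    = _mm_set_pd(0.9, 0.3);                /* the two distances             */
    __m128d      r2, z2, corr, rinv, rinv3, fscal;
    double       f[2];

    r2    = _mm_mul_pd(r, r);                                /* step 1: r^2                   */
    z2    = _mm_mul_pd(_mm_set1_pd(beta*beta), r2);          /* step 2: z^2 = beta^2*r^2      */
    corr  = gmx_mm_pmecorrF_pd(z2);                          /* steps 3-4                     */
    corr  = _mm_mul_pd(corr, _mm_set1_pd(beta*beta*beta));   /* step 5: multiply by beta^3    */
    rinv  = gmx_mm_invsqrt_pd(r2);
    rinv3 = _mm_mul_pd(rinv, _mm_mul_pd(rinv, rinv));
    fscal = _mm_mul_pd(_mm_set1_pd(qq),
                       _mm_add_pd(corr, rinv3));             /* step 6: force divided by r    */

    _mm_storeu_pd(f, fscal);
    printf("scalar force/r: %g %g\n", f[0], f[1]);
}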
923 /* Calculate the potential correction due to PME analytically.
925 * See gmx_mm_pmecorrF_pd() above for details about the approximation.
927 * This routine calculates Erf(z)/z, although you should provide z^2
928 * as the input argument.
930 * Here's how it should be used (see also the usage sketch after this routine):
932 * 1. Calculate r^2.
933 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
934 * 3. Evaluate this routine with z^2 as the argument.
935 * 4. The return value is the expression:
938 *        erf(z)
939 *       --------
940 *           z
942 * 5. Multiply the entire expression by beta and switch back to r (z=r*beta):
944 *       erf(r*beta)
945 *       -----------
946 *            r
948 * 6. Subtract the result from 1/r, multiply by the product of the charges,
949 * and you have your potential.
952 static __m128d
953 gmx_mm_pmecorrV_pd(__m128d z2)
955 const __m128d VN9 = _mm_set1_pd(-9.3723776169321855475e-13);
956 const __m128d VN8 = _mm_set1_pd(1.2280156762674215741e-10);
957 const __m128d VN7 = _mm_set1_pd(-7.3562157912251309487e-9);
958 const __m128d VN6 = _mm_set1_pd(2.6215886208032517509e-7);
959 const __m128d VN5 = _mm_set1_pd(-4.9532491651265819499e-6);
960 const __m128d VN4 = _mm_set1_pd(0.00025907400778966060389);
961 const __m128d VN3 = _mm_set1_pd(0.0010585044856156469792);
962 const __m128d VN2 = _mm_set1_pd(0.045247661136833092885);
963 const __m128d VN1 = _mm_set1_pd(0.11643931522926034421);
964 const __m128d VN0 = _mm_set1_pd(1.1283791671726767970);
966 const __m128d VD5 = _mm_set1_pd(0.000021784709867336150342);
967 const __m128d VD4 = _mm_set1_pd(0.00064293662010911388448);
968 const __m128d VD3 = _mm_set1_pd(0.0096311444822588683504);
969 const __m128d VD2 = _mm_set1_pd(0.085608012351550627051);
970 const __m128d VD1 = _mm_set1_pd(0.43652499166614811084);
971 const __m128d VD0 = _mm_set1_pd(1.0);
973 __m128d z4;
974 __m128d polyVN0, polyVN1, polyVD0, polyVD1;
976 z4 = _mm_mul_pd(z2, z2);
978 polyVD1 = _mm_mul_pd(VD5, z4);
979 polyVD0 = _mm_mul_pd(VD4, z4);
980 polyVD1 = _mm_add_pd(polyVD1, VD3);
981 polyVD0 = _mm_add_pd(polyVD0, VD2);
982 polyVD1 = _mm_mul_pd(polyVD1, z4);
983 polyVD0 = _mm_mul_pd(polyVD0, z4);
984 polyVD1 = _mm_add_pd(polyVD1, VD1);
985 polyVD0 = _mm_add_pd(polyVD0, VD0);
986 polyVD1 = _mm_mul_pd(polyVD1, z2);
987 polyVD0 = _mm_add_pd(polyVD0, polyVD1);
989 polyVD0 = gmx_mm_inv_pd(polyVD0);
991 polyVN1 = _mm_mul_pd(VN9, z4);
992 polyVN0 = _mm_mul_pd(VN8, z4);
993 polyVN1 = _mm_add_pd(polyVN1, VN7);
994 polyVN0 = _mm_add_pd(polyVN0, VN6);
995 polyVN1 = _mm_mul_pd(polyVN1, z4);
996 polyVN0 = _mm_mul_pd(polyVN0, z4);
997 polyVN1 = _mm_add_pd(polyVN1, VN5);
998 polyVN0 = _mm_add_pd(polyVN0, VN4);
999 polyVN1 = _mm_mul_pd(polyVN1, z4);
1000 polyVN0 = _mm_mul_pd(polyVN0, z4);
1001 polyVN1 = _mm_add_pd(polyVN1, VN3);
1002 polyVN0 = _mm_add_pd(polyVN0, VN2);
1003 polyVN1 = _mm_mul_pd(polyVN1, z4);
1004 polyVN0 = _mm_mul_pd(polyVN0, z4);
1005 polyVN1 = _mm_add_pd(polyVN1, VN1);
1006 polyVN0 = _mm_add_pd(polyVN0, VN0);
1007 polyVN1 = _mm_mul_pd(polyVN1, z2);
1008 polyVN0 = _mm_add_pd(polyVN0, polyVN1);
1010 return _mm_mul_pd(polyVN0, polyVD0);
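/*
 * Illustrative usage sketch (not part of the original header): the potential recipe from
 * the comment above, for the same hypothetical beta, distances and charge product as in
 * the force sketch. Function name is hypothetical.
 */
static void
gmx_mm_example_pmecorrV_usage(void)
{
    const double beta = 3.12;
    const double qq   = 1.0;
    __m128d      r    = _mm_set_pd(0.9, 0.3);
    __m128d      r2, z2, vcorr, rinv, vcoul;
    double       v[2];

    r2    = _mm_mul_pd(r, r);                                /* step 1: r^2                      */
    z2    = _mm_mul_pd(_mm_set1_pd(beta*beta), r2);          /* step 2: z^2 = beta^2*r^2         */
    vcorr = gmx_mm_pmecorrV_pd(z2);                          /* steps 3-4: erf(z)/z              */
    vcorr = _mm_mul_pd(vcorr, _mm_set1_pd(beta));            /* step 5: erf(beta*r)/r            */
    rinv  = gmx_mm_invsqrt_pd(r2);
    vcoul = _mm_mul_pd(_mm_set1_pd(qq),
                       _mm_sub_pd(rinv, vcorr));             /* step 6: qq*(1/r - erf(beta*r)/r) */

    _mm_storeu_pd(v, vcoul);
    printf("direct-space potential: %g %g\n", v[0], v[1]);
}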
1014 static int
1015 gmx_mm_sincos_pd(__m128d x,
1016 __m128d *sinval,
1017 __m128d *cosval)
1019 #ifdef _MSC_VER
1020 __declspec(align(16))
1021 const double sintable[34] =
1023 1.00000000000000000e+00, 0.00000000000000000e+00,
1024 9.95184726672196929e-01, 9.80171403295606036e-02,
1025 9.80785280403230431e-01, 1.95090322016128248e-01,
1026 9.56940335732208824e-01, 2.90284677254462331e-01,
1027 9.23879532511286738e-01, 3.82683432365089782e-01,
1028 8.81921264348355050e-01, 4.71396736825997642e-01,
1029 8.31469612302545236e-01, 5.55570233019602178e-01,
1030 7.73010453362736993e-01, 6.34393284163645488e-01,
1031 7.07106781186547573e-01, 7.07106781186547462e-01,
1032 6.34393284163645599e-01, 7.73010453362736882e-01,
1033 5.55570233019602289e-01, 8.31469612302545125e-01,
1034 4.71396736825997809e-01, 8.81921264348354939e-01,
1035 3.82683432365089837e-01, 9.23879532511286738e-01,
1036 2.90284677254462276e-01, 9.56940335732208935e-01,
1037 1.95090322016128304e-01, 9.80785280403230431e-01,
1038 9.80171403295607702e-02, 9.95184726672196818e-01,
1039 0.0, 1.00000000000000000e+00
1041 #else
1042 const __m128d sintable[17] =
1044 _mm_set_pd( 0.0, 1.0 ),
1045 _mm_set_pd( sin( 1.0 * (M_PI/2.0) / 16.0), cos( 1.0 * (M_PI/2.0) / 16.0) ),
1046 _mm_set_pd( sin( 2.0 * (M_PI/2.0) / 16.0), cos( 2.0 * (M_PI/2.0) / 16.0) ),
1047 _mm_set_pd( sin( 3.0 * (M_PI/2.0) / 16.0), cos( 3.0 * (M_PI/2.0) / 16.0) ),
1048 _mm_set_pd( sin( 4.0 * (M_PI/2.0) / 16.0), cos( 4.0 * (M_PI/2.0) / 16.0) ),
1049 _mm_set_pd( sin( 5.0 * (M_PI/2.0) / 16.0), cos( 5.0 * (M_PI/2.0) / 16.0) ),
1050 _mm_set_pd( sin( 6.0 * (M_PI/2.0) / 16.0), cos( 6.0 * (M_PI/2.0) / 16.0) ),
1051 _mm_set_pd( sin( 7.0 * (M_PI/2.0) / 16.0), cos( 7.0 * (M_PI/2.0) / 16.0) ),
1052 _mm_set_pd( sin( 8.0 * (M_PI/2.0) / 16.0), cos( 8.0 * (M_PI/2.0) / 16.0) ),
1053 _mm_set_pd( sin( 9.0 * (M_PI/2.0) / 16.0), cos( 9.0 * (M_PI/2.0) / 16.0) ),
1054 _mm_set_pd( sin( 10.0 * (M_PI/2.0) / 16.0), cos( 10.0 * (M_PI/2.0) / 16.0) ),
1055 _mm_set_pd( sin( 11.0 * (M_PI/2.0) / 16.0), cos( 11.0 * (M_PI/2.0) / 16.0) ),
1056 _mm_set_pd( sin( 12.0 * (M_PI/2.0) / 16.0), cos( 12.0 * (M_PI/2.0) / 16.0) ),
1057 _mm_set_pd( sin( 13.0 * (M_PI/2.0) / 16.0), cos( 13.0 * (M_PI/2.0) / 16.0) ),
1058 _mm_set_pd( sin( 14.0 * (M_PI/2.0) / 16.0), cos( 14.0 * (M_PI/2.0) / 16.0) ),
1059 _mm_set_pd( sin( 15.0 * (M_PI/2.0) / 16.0), cos( 15.0 * (M_PI/2.0) / 16.0) ),
1060 _mm_set_pd( 1.0, 0.0 )
1062 #endif
1064 const __m128d signmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
1065 const __m128i signbit_epi32 = _mm_set1_epi32(0x80000000);
1067 const __m128d tabscale = _mm_set1_pd(32.0/M_PI);
1068 const __m128d invtabscale0 = _mm_set1_pd(9.81747508049011230469e-02);
1069 const __m128d invtabscale1 = _mm_set1_pd(1.96197799156550576057e-08);
1070 const __m128i ione = _mm_set1_epi32(1);
1071 const __m128i i32 = _mm_set1_epi32(32);
1072 const __m128i i16 = _mm_set1_epi32(16);
1073 const __m128i tabmask = _mm_set1_epi32(0x3F);
1074 const __m128d sinP7 = _mm_set1_pd(-1.0/5040.0);
1075 const __m128d sinP5 = _mm_set1_pd(1.0/120.0);
1076 const __m128d sinP3 = _mm_set1_pd(-1.0/6.0);
1077 const __m128d sinP1 = _mm_set1_pd(1.0);
1079 const __m128d cosP6 = _mm_set1_pd(-1.0/720.0);
1080 const __m128d cosP4 = _mm_set1_pd(1.0/24.0);
1081 const __m128d cosP2 = _mm_set1_pd(-1.0/2.0);
1082 const __m128d cosP0 = _mm_set1_pd(1.0);
1084 __m128d scalex;
1085 __m128i tabidx, corridx;
1086 __m128d xabs, z, z2, polySin, polyCos;
1087 __m128d xpoint;
1088 __m128d ypoint0, ypoint1;
1090 __m128d sinpoint, cospoint;
1091 __m128d xsign, ssign, csign;
1092 __m128i imask, sswapsign, cswapsign;
1093 __m128d minusone;
1095 xsign = _mm_andnot_pd(signmask, x);
1096 xabs = _mm_and_pd(x, signmask);
1098 scalex = _mm_mul_pd(tabscale, xabs);
1099 tabidx = _mm_cvtpd_epi32(scalex);
1101 xpoint = _mm_round_pd(scalex, _MM_FROUND_TO_NEAREST_INT);
1103 /* Extended precision arithmetics */
1104 z = _mm_sub_pd(xabs, _mm_mul_pd(invtabscale0, xpoint));
1105 z = _mm_sub_pd(z, _mm_mul_pd(invtabscale1, xpoint));
1107 /* Range reduction to 0..2*Pi */
1108 tabidx = _mm_and_si128(tabidx, tabmask);
1110 /* tabidx is now in range [0,..,63] */
1111 imask = _mm_cmpgt_epi32(tabidx, i32);
1112 sswapsign = imask;
1113 cswapsign = imask;
1114 corridx = _mm_and_si128(imask, i32);
1115 tabidx = _mm_sub_epi32(tabidx, corridx);
1117 /* tabidx is now in range [0..32] */
1118 imask = _mm_cmpgt_epi32(tabidx, i16);
1119 cswapsign = _mm_xor_si128(cswapsign, imask);
1120 corridx = _mm_sub_epi32(i32, tabidx);
1121 tabidx = _mm_blendv_epi8(tabidx, corridx, imask);
1122 /* tabidx is now in range [0..16] */
1123 ssign = _mm_cvtepi32_pd( _mm_or_si128( sswapsign, ione ) );
1124 csign = _mm_cvtepi32_pd( _mm_or_si128( cswapsign, ione ) );
1126 #ifdef _MSC_VER
1127 ypoint0 = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx, 0));
1128 ypoint1 = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx, 1));
1129 #else
1130 ypoint0 = sintable[_mm_extract_epi32(tabidx, 0)];
1131 ypoint1 = sintable[_mm_extract_epi32(tabidx, 1)];
1132 #endif
1133 sinpoint = _mm_unpackhi_pd(ypoint0, ypoint1);
1134 cospoint = _mm_unpacklo_pd(ypoint0, ypoint1);
1136 sinpoint = _mm_mul_pd(sinpoint, ssign);
1137 cospoint = _mm_mul_pd(cospoint, csign);
1139 z2 = _mm_mul_pd(z, z);
1141 polySin = _mm_mul_pd(sinP7, z2);
1142 polySin = _mm_add_pd(polySin, sinP5);
1143 polySin = _mm_mul_pd(polySin, z2);
1144 polySin = _mm_add_pd(polySin, sinP3);
1145 polySin = _mm_mul_pd(polySin, z2);
1146 polySin = _mm_add_pd(polySin, sinP1);
1147 polySin = _mm_mul_pd(polySin, z);
1149 polyCos = _mm_mul_pd(cosP6, z2);
1150 polyCos = _mm_add_pd(polyCos, cosP4);
1151 polyCos = _mm_mul_pd(polyCos, z2);
1152 polyCos = _mm_add_pd(polyCos, cosP2);
1153 polyCos = _mm_mul_pd(polyCos, z2);
1154 polyCos = _mm_add_pd(polyCos, cosP0);
1156 *sinval = _mm_xor_pd(_mm_add_pd( _mm_mul_pd(sinpoint, polyCos), _mm_mul_pd(cospoint, polySin) ), xsign);
1157 *cosval = _mm_sub_pd( _mm_mul_pd(cospoint, polyCos), _mm_mul_pd(sinpoint, polySin) );
1159 return 0;
1163 * IMPORTANT: If you need both the sine and the cosine, call gmx_mm_sincos_pd() once rather
1164 * than both single-result wrappers, since each wrapper runs the full sincos() routine and you would waste a factor 2 in performance!
1166 static __m128d
1167 gmx_mm_sin_pd(__m128d x)
1169 __m128d s, c;
1170 gmx_mm_sincos_pd(x, &s, &c);
1171 return s;
1175 * IMPORTANT: If you need both the sine and the cosine, call gmx_mm_sincos_pd() once rather
1176 * than both single-result wrappers, since each wrapper runs the full sincos() routine and you would waste a factor 2 in performance!
1178 static __m128d
1179 gmx_mm_cos_pd(__m128d x)
1181 __m128d s, c;
1182 gmx_mm_sincos_pd(x, &s, &c);
1183 return c;
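/*
 * Illustrative sketch (not part of the original header): when both the sine and the cosine
 * are needed, call gmx_mm_sincos_pd() once, as the comments above advise, instead of the
 * two single-result wrappers. Function name and arguments are hypothetical.
 */
static void
gmx_mm_example_sincos_usage(void)
{
    __m128d x = _mm_set_pd(M_PI/3.0, M_PI/6.0);
    __m128d s, c;
    double  sv[2], cv[2];

    gmx_mm_sincos_pd(x, &s, &c);          /* one call yields both results */
    _mm_storeu_pd(sv, s);
    _mm_storeu_pd(cv, c);
    printf("sin: %g %g   cos: %g %g\n", sv[0], sv[1], cv[0], cv[1]);
}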
1188 static __m128d
1189 gmx_mm_tan_pd(__m128d x)
1191 __m128d sinval, cosval;
1192 __m128d tanval;
1194 gmx_mm_sincos_pd(x, &sinval, &cosval);
1196 tanval = _mm_mul_pd(sinval, gmx_mm_inv_pd(cosval));
1198 return tanval;
1203 static __m128d
1204 gmx_mm_asin_pd(__m128d x)
1206 /* Same algorithm as cephes library */
1207 const __m128d signmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
1208 const __m128d limit1 = _mm_set1_pd(0.625);
1209 const __m128d limit2 = _mm_set1_pd(1e-8);
1210 const __m128d one = _mm_set1_pd(1.0);
1211 const __m128d halfpi = _mm_set1_pd(M_PI/2.0);
1212 const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
1213 const __m128d morebits = _mm_set1_pd(6.123233995736765886130e-17);
1215 const __m128d P5 = _mm_set1_pd(4.253011369004428248960e-3);
1216 const __m128d P4 = _mm_set1_pd(-6.019598008014123785661e-1);
1217 const __m128d P3 = _mm_set1_pd(5.444622390564711410273e0);
1218 const __m128d P2 = _mm_set1_pd(-1.626247967210700244449e1);
1219 const __m128d P1 = _mm_set1_pd(1.956261983317594739197e1);
1220 const __m128d P0 = _mm_set1_pd(-8.198089802484824371615e0);
1222 const __m128d Q4 = _mm_set1_pd(-1.474091372988853791896e1);
1223 const __m128d Q3 = _mm_set1_pd(7.049610280856842141659e1);
1224 const __m128d Q2 = _mm_set1_pd(-1.471791292232726029859e2);
1225 const __m128d Q1 = _mm_set1_pd(1.395105614657485689735e2);
1226 const __m128d Q0 = _mm_set1_pd(-4.918853881490881290097e1);
1228 const __m128d R4 = _mm_set1_pd(2.967721961301243206100e-3);
1229 const __m128d R3 = _mm_set1_pd(-5.634242780008963776856e-1);
1230 const __m128d R2 = _mm_set1_pd(6.968710824104713396794e0);
1231 const __m128d R1 = _mm_set1_pd(-2.556901049652824852289e1);
1232 const __m128d R0 = _mm_set1_pd(2.853665548261061424989e1);
1234 const __m128d S3 = _mm_set1_pd(-2.194779531642920639778e1);
1235 const __m128d S2 = _mm_set1_pd(1.470656354026814941758e2);
1236 const __m128d S1 = _mm_set1_pd(-3.838770957603691357202e2);
1237 const __m128d S0 = _mm_set1_pd(3.424398657913078477438e2);
1239 __m128d sign;
1240 __m128d mask;
1241 __m128d xabs;
1242 __m128d zz, ww, z, q, w, y, zz2, ww2;
1243 __m128d PA, PB;
1244 __m128d QA, QB;
1245 __m128d RA, RB;
1246 __m128d SA, SB;
1247 __m128d nom, denom;
1249 sign = _mm_andnot_pd(signmask, x);
1250 xabs = _mm_and_pd(x, signmask);
1252 mask = _mm_cmpgt_pd(xabs, limit1);
1254 zz = _mm_sub_pd(one, xabs);
1255 ww = _mm_mul_pd(xabs, xabs);
1256 zz2 = _mm_mul_pd(zz, zz);
1257 ww2 = _mm_mul_pd(ww, ww);
1259 /* R */
1260 RA = _mm_mul_pd(R4, zz2);
1261 RB = _mm_mul_pd(R3, zz2);
1262 RA = _mm_add_pd(RA, R2);
1263 RB = _mm_add_pd(RB, R1);
1264 RA = _mm_mul_pd(RA, zz2);
1265 RB = _mm_mul_pd(RB, zz);
1266 RA = _mm_add_pd(RA, R0);
1267 RA = _mm_add_pd(RA, RB);
1269 /* S, SA = zz2 */
1270 SB = _mm_mul_pd(S3, zz2);
1271 SA = _mm_add_pd(zz2, S2);
1272 SB = _mm_add_pd(SB, S1);
1273 SA = _mm_mul_pd(SA, zz2);
1274 SB = _mm_mul_pd(SB, zz);
1275 SA = _mm_add_pd(SA, S0);
1276 SA = _mm_add_pd(SA, SB);
1278 /* P */
1279 PA = _mm_mul_pd(P5, ww2);
1280 PB = _mm_mul_pd(P4, ww2);
1281 PA = _mm_add_pd(PA, P3);
1282 PB = _mm_add_pd(PB, P2);
1283 PA = _mm_mul_pd(PA, ww2);
1284 PB = _mm_mul_pd(PB, ww2);
1285 PA = _mm_add_pd(PA, P1);
1286 PB = _mm_add_pd(PB, P0);
1287 PA = _mm_mul_pd(PA, ww);
1288 PA = _mm_add_pd(PA, PB);
1290 /* Q, QA = ww2 */
1291 QB = _mm_mul_pd(Q4, ww2);
1292 QA = _mm_add_pd(ww2, Q3);
1293 QB = _mm_add_pd(QB, Q2);
1294 QA = _mm_mul_pd(QA, ww2);
1295 QB = _mm_mul_pd(QB, ww2);
1296 QA = _mm_add_pd(QA, Q1);
1297 QB = _mm_add_pd(QB, Q0);
1298 QA = _mm_mul_pd(QA, ww);
1299 QA = _mm_add_pd(QA, QB);
1301 RA = _mm_mul_pd(RA, zz);
1302 PA = _mm_mul_pd(PA, ww);
1304 nom = _mm_blendv_pd( PA, RA, mask );
1305 denom = _mm_blendv_pd( QA, SA, mask );
1307 q = _mm_mul_pd( nom, gmx_mm_inv_pd(denom) );
1309 zz = _mm_add_pd(zz, zz);
1310 zz = gmx_mm_sqrt_pd(zz);
1311 z = _mm_sub_pd(quarterpi, zz);
1312 zz = _mm_mul_pd(zz, q);
1313 zz = _mm_sub_pd(zz, morebits);
1314 z = _mm_sub_pd(z, zz);
1315 z = _mm_add_pd(z, quarterpi);
1317 w = _mm_mul_pd(xabs, q);
1318 w = _mm_add_pd(w, xabs);
1320 z = _mm_blendv_pd( w, z, mask );
1322 mask = _mm_cmpgt_pd(xabs, limit2);
1323 z = _mm_blendv_pd( xabs, z, mask );
1325 z = _mm_xor_pd(z, sign);
1327 return z;
1331 static __m128d
1332 gmx_mm_acos_pd(__m128d x)
1334 const __m128d signmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
1335 const __m128d one = _mm_set1_pd(1.0);
1336 const __m128d half = _mm_set1_pd(0.5);
1337 const __m128d pi = _mm_set1_pd(M_PI);
1338 const __m128d quarterpi0 = _mm_set1_pd(7.85398163397448309616e-1);
1339 const __m128d quarterpi1 = _mm_set1_pd(6.123233995736765886130e-17);
1342 __m128d mask1;
1344 __m128d z, z1, z2;
1346 mask1 = _mm_cmpgt_pd(x, half);
1347 z1 = _mm_mul_pd(half, _mm_sub_pd(one, x));
1348 z1 = gmx_mm_sqrt_pd(z1);
1349 z = _mm_blendv_pd( x, z1, mask1 );
1351 z = gmx_mm_asin_pd(z);
1353 z1 = _mm_add_pd(z, z);
1355 z2 = _mm_sub_pd(quarterpi0, z);
1356 z2 = _mm_add_pd(z2, quarterpi1);
1357 z2 = _mm_add_pd(z2, quarterpi0);
1359 z = _mm_blendv_pd(z2, z1, mask1);
1361 return z;
1364 static __m128d
1365 gmx_mm_atan_pd(__m128d x)
1367 /* Same algorithm as cephes library */
1368 const __m128d signmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
1369 const __m128d limit1 = _mm_set1_pd(0.66);
1370 const __m128d limit2 = _mm_set1_pd(2.41421356237309504880);
1371 const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
1372 const __m128d halfpi = _mm_set1_pd(M_PI/2.0);
1373 const __m128d mone = _mm_set1_pd(-1.0);
1374 const __m128d morebits1 = _mm_set1_pd(0.5*6.123233995736765886130E-17);
1375 const __m128d morebits2 = _mm_set1_pd(6.123233995736765886130E-17);
1377 const __m128d P4 = _mm_set1_pd(-8.750608600031904122785E-1);
1378 const __m128d P3 = _mm_set1_pd(-1.615753718733365076637E1);
1379 const __m128d P2 = _mm_set1_pd(-7.500855792314704667340E1);
1380 const __m128d P1 = _mm_set1_pd(-1.228866684490136173410E2);
1381 const __m128d P0 = _mm_set1_pd(-6.485021904942025371773E1);
1383 const __m128d Q4 = _mm_set1_pd(2.485846490142306297962E1);
1384 const __m128d Q3 = _mm_set1_pd(1.650270098316988542046E2);
1385 const __m128d Q2 = _mm_set1_pd(4.328810604912902668951E2);
1386 const __m128d Q1 = _mm_set1_pd(4.853903996359136964868E2);
1387 const __m128d Q0 = _mm_set1_pd(1.945506571482613964425E2);
1389 __m128d sign;
1390 __m128d mask1, mask2;
1391 __m128d y, t1, t2;
1392 __m128d z, z2;
1393 __m128d P_A, P_B, Q_A, Q_B;
1395 sign = _mm_andnot_pd(signmask, x);
1396 x = _mm_and_pd(x, signmask);
1398 mask1 = _mm_cmpgt_pd(x, limit1);
1399 mask2 = _mm_cmpgt_pd(x, limit2);
1401 t1 = _mm_mul_pd(_mm_add_pd(x, mone), gmx_mm_inv_pd(_mm_sub_pd(x, mone)));
1402 t2 = _mm_mul_pd(mone, gmx_mm_inv_pd(x));
1404 y = _mm_and_pd(mask1, quarterpi);
1405 y = _mm_or_pd( _mm_and_pd(mask2, halfpi), _mm_andnot_pd(mask2, y) );
1407 x = _mm_or_pd( _mm_and_pd(mask1, t1), _mm_andnot_pd(mask1, x) );
1408 x = _mm_or_pd( _mm_and_pd(mask2, t2), _mm_andnot_pd(mask2, x) );
1410 z = _mm_mul_pd(x, x);
1411 z2 = _mm_mul_pd(z, z);
1413 P_A = _mm_mul_pd(P4, z2);
1414 P_B = _mm_mul_pd(P3, z2);
1415 P_A = _mm_add_pd(P_A, P2);
1416 P_B = _mm_add_pd(P_B, P1);
1417 P_A = _mm_mul_pd(P_A, z2);
1418 P_B = _mm_mul_pd(P_B, z);
1419 P_A = _mm_add_pd(P_A, P0);
1420 P_A = _mm_add_pd(P_A, P_B);
1422 /* Q_A = z2 */
1423 Q_B = _mm_mul_pd(Q4, z2);
1424 Q_A = _mm_add_pd(z2, Q3);
1425 Q_B = _mm_add_pd(Q_B, Q2);
1426 Q_A = _mm_mul_pd(Q_A, z2);
1427 Q_B = _mm_mul_pd(Q_B, z2);
1428 Q_A = _mm_add_pd(Q_A, Q1);
1429 Q_B = _mm_add_pd(Q_B, Q0);
1430 Q_A = _mm_mul_pd(Q_A, z);
1431 Q_A = _mm_add_pd(Q_A, Q_B);
1433 z = _mm_mul_pd(z, P_A);
1434 z = _mm_mul_pd(z, gmx_mm_inv_pd(Q_A));
1435 z = _mm_mul_pd(z, x);
1436 z = _mm_add_pd(z, x);
1438 t1 = _mm_and_pd(mask1, morebits1);
1439 t1 = _mm_or_pd( _mm_and_pd(mask2, morebits2), _mm_andnot_pd(mask2, t1) );
1441 z = _mm_add_pd(z, t1);
1442 y = _mm_add_pd(y, z);
1444 y = _mm_xor_pd(y, sign);
1446 return y;
1450 static __m128d
1451 gmx_mm_atan2_pd(__m128d y, __m128d x)
1453 const __m128d pi = _mm_set1_pd(M_PI);
1454 const __m128d minuspi = _mm_set1_pd(-M_PI);
1455 const __m128d halfpi = _mm_set1_pd(M_PI/2.0);
1456 const __m128d minushalfpi = _mm_set1_pd(-M_PI/2.0);
1458 __m128d z, z1, z3, z4;
1459 __m128d w;
1460 __m128d maskx_lt, maskx_eq;
1461 __m128d masky_lt, masky_eq;
1462 __m128d mask1, mask2, mask3, mask4, maskall;
1464 maskx_lt = _mm_cmplt_pd(x, _mm_setzero_pd());
1465 masky_lt = _mm_cmplt_pd(y, _mm_setzero_pd());
1466 maskx_eq = _mm_cmpeq_pd(x, _mm_setzero_pd());
1467 masky_eq = _mm_cmpeq_pd(y, _mm_setzero_pd());
1469 z = _mm_mul_pd(y, gmx_mm_inv_pd(x));
1470 z = gmx_mm_atan_pd(z);
1472 mask1 = _mm_and_pd(maskx_eq, masky_lt);
1473 mask2 = _mm_andnot_pd(maskx_lt, masky_eq);
1474 mask3 = _mm_andnot_pd( _mm_or_pd(masky_lt, masky_eq), maskx_eq);
1475 mask4 = _mm_and_pd(masky_eq, maskx_lt);
1477 maskall = _mm_or_pd( _mm_or_pd(mask1, mask2), _mm_or_pd(mask3, mask4) );
1479 z = _mm_andnot_pd(maskall, z);
1480 z1 = _mm_and_pd(mask1, minushalfpi);
1481 z3 = _mm_and_pd(mask3, halfpi);
1482 z4 = _mm_and_pd(mask4, pi);
1484 z = _mm_or_pd( _mm_or_pd(z, z1), _mm_or_pd(z3, z4) );
1486 w = _mm_blendv_pd(pi, minuspi, masky_lt);
1487 w = _mm_and_pd(w, maskx_lt);
1489 w = _mm_andnot_pd(maskall, w);
1491 z = _mm_add_pd(z, w);
1493 return z;
1496 #endif /*_gmx_math_x86_sse4_1_double_h_ */