1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
3 *
4 * This file is part of GROMACS.
5 * Copyright (c) 2012-
7 * Written by the Gromacs development team under coordination of
8 * David van der Spoel, Berk Hess, and Erik Lindahl.
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
15 * To help us fund GROMACS development, we humbly ask that you cite
16 * the research papers on the package. Check out http://www.gromacs.org
18 * And Hey:
19 * Gnomes, ROck Monsters And Chili Sauce
21 #ifndef _gmx_math_x86_avx_256_double_h_
22 #define _gmx_math_x86_avx_256_double_h_
24 #include "gmx_x86_avx_256.h"
28 #ifndef M_PI
29 # define M_PI 3.14159265358979323846264338327950288
30 #endif
33 /************************
34 * *
35 * Simple math routines *
36 * *
37 ************************/
39 /* 1.0/sqrt(x), 256 bit wide */
40 static gmx_inline __m256d
41 gmx_mm256_invsqrt_pd(__m256d x)
43 const __m256d half = _mm256_set1_pd(0.5);
44 const __m256d three = _mm256_set1_pd(3.0);
46 /* Lookup instruction only exists in single precision, convert back and forth... */
47 __m256d lu = _mm256_cvtps_pd(_mm_rsqrt_ps( _mm256_cvtpd_ps(x)));
49 lu = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu,lu),x)),lu));
50 return _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu,lu),x)),lu));
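/* Illustrative scalar reference of the Newton-Raphson refinement used above
 * (added for clarity, not part of the original header; the helper name is
 * hypothetical). Each step maps an estimate y of 1/sqrt(x) to 0.5*y*(3 - x*y*y);
 * two such steps after the approximate single-precision rsqrt lookup (about 12
 * bits) give close to full double precision.
 */
static double
scalar_invsqrt_nr_step(double x, double y)
{
    return 0.5*y*(3.0 - x*y*y);
}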
53 /* 1.0/sqrt(x), done for a pair of arguments to improve throughput */
54 static void
55 gmx_mm256_invsqrt_pair_pd(__m256d x1, __m256d x2, __m256d *invsqrt1, __m256d *invsqrt2)
57 const __m256d half = _mm256_set1_pd(0.5);
58 const __m256d three = _mm256_set1_pd(3.0);
59 const __m256 halff = _mm256_set1_ps(0.5f);
60 const __m256 threef = _mm256_set1_ps(3.0f);
62 __m256 xf,luf;
63 __m256d lu1,lu2;
65 /* Do first N-R step in float for 2x throughput */
66 xf = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm256_cvtpd_ps(x1)),_mm256_cvtpd_ps(x2), 0x1);
67 luf = _mm256_rsqrt_ps(xf);
69 luf = _mm256_mul_ps(halff,_mm256_mul_ps(_mm256_sub_ps(threef,_mm256_mul_ps(_mm256_mul_ps(luf,luf),xf)),luf));
71 lu2 = _mm256_cvtps_pd(_mm256_extractf128_ps(luf,0x1));
72 lu1 = _mm256_cvtps_pd(_mm256_castps256_ps128(luf));
74 *invsqrt1 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu1,lu1),x1)),lu1));
75 *invsqrt2 = _mm256_mul_pd(half,_mm256_mul_pd(_mm256_sub_pd(three,_mm256_mul_pd(_mm256_mul_pd(lu2,lu2),x2)),lu2));
78 /* 1.0/sqrt(x), 128 bit wide */
79 static gmx_inline __m128d
80 gmx_mm_invsqrt_pd(__m128d x)
82 const __m128d half = _mm_set1_pd(0.5);
83 const __m128d three = _mm_set1_pd(3.0);
85 /* Lookup instruction only exists in single precision, convert back and forth... */
86 __m128d lu = _mm_cvtps_pd(_mm_rsqrt_ps( _mm_cvtpd_ps(x)));
88 lu = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu,lu),x)),lu));
89 return _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu,lu),x)),lu));
92 /* 1.0/sqrt(x), done for a pair of arguments to improve throughput */
93 static void
94 gmx_mm_invsqrt_pair_pd(__m128d x1, __m128d x2, __m128d *invsqrt1, __m128d *invsqrt2)
96 const __m128d half = _mm_set1_pd(0.5);
97 const __m128d three = _mm_set1_pd(3.0);
98 const __m128 halff = _mm_set1_ps(0.5f);
99 const __m128 threef = _mm_set1_ps(3.0f);
101 __m128 xf,luf;
102 __m128d lu1,lu2;
104 /* Do first N-R step in float for 2x throughput */
105 xf = _mm_shuffle_ps(_mm_cvtpd_ps(x1),_mm_cvtpd_ps(x2),_MM_SHUFFLE(1,0,1,0));
106 luf = _mm_rsqrt_ps(xf);
107 luf = _mm_mul_ps(halff,_mm_mul_ps(_mm_sub_ps(threef,_mm_mul_ps(_mm_mul_ps(luf,luf),xf)),luf));
109 lu2 = _mm_cvtps_pd(_mm_shuffle_ps(luf,luf,_MM_SHUFFLE(3,2,3,2)));
110 lu1 = _mm_cvtps_pd(luf);
112 *invsqrt1 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu1,lu1),x1)),lu1));
113 *invsqrt2 = _mm_mul_pd(half,_mm_mul_pd(_mm_sub_pd(three,_mm_mul_pd(_mm_mul_pd(lu2,lu2),x2)),lu2));
116 /* sqrt(x) (256 bit) - Do NOT use this (but rather invsqrt) if you actually need 1.0/sqrt(x) */
117 static gmx_inline __m256d
118 gmx_mm256_sqrt_pd(__m256d x)
120 __m256d mask;
121 __m256d res;
123 mask = _mm256_cmp_pd(x,_mm256_setzero_pd(), _CMP_EQ_OQ);
124 res = _mm256_andnot_pd(mask,gmx_mm256_invsqrt_pd(x));
126 res = _mm256_mul_pd(x,res);
128 return res;
131 /* sqrt(x) (128 bit) - Do NOT use this (but rather invsqrt) if you actually need 1.0/sqrt(x) */
132 static gmx_inline __m128d
133 gmx_mm_sqrt_pd(__m128d x)
135 __m128d mask;
136 __m128d res;
138 mask = _mm_cmpeq_pd(x,_mm_setzero_pd());
139 res = _mm_andnot_pd(mask,gmx_mm_invsqrt_pd(x));
141 res = _mm_mul_pd(x,res);
143 return res;
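/* Illustrative scalar sketch of the masking trick above (added for clarity,
 * not part of the original header; the helper name is hypothetical).
 * sqrt(x) is formed as x*(1/sqrt(x)), and zeroing the reciprocal square root
 * where x==0 is what keeps sqrt(0) from evaluating to 0*inf = NaN.
 */
static double
scalar_sqrt_via_invsqrt(double x)
{
    double invsq = (x == 0.0) ? 0.0 : 1.0/sqrt(x);
    return x*invsq;
}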
147 /* 1.0/x, 256 bit wide */
148 static gmx_inline __m256d
149 gmx_mm256_inv_pd(__m256d x)
151 const __m256d two = _mm256_set1_pd(2.0);
153 /* Lookup instruction only exists in single precision, convert back and forth... */
154 __m256d lu = _mm256_cvtps_pd(_mm_rcp_ps( _mm256_cvtpd_ps(x)));
156 /* Perform two N-R steps for double precision */
157 lu = _mm256_mul_pd(lu,_mm256_sub_pd(two,_mm256_mul_pd(x,lu)));
158 return _mm256_mul_pd(lu,_mm256_sub_pd(two,_mm256_mul_pd(x,lu)));
161 /* 1.0/x, 128 bit */
162 static gmx_inline __m128d
163 gmx_mm_inv_pd(__m128d x)
165 const __m128d two = _mm_set1_pd(2.0);
167 /* Lookup instruction only exists in single precision, convert back and forth... */
168 __m128d lu = _mm_cvtps_pd(_mm_rcp_ps( _mm_cvtpd_ps(x)));
170 /* Perform two N-R steps for double precision */
171 lu = _mm_mul_pd(lu,_mm_sub_pd(two,_mm_mul_pd(x,lu)));
172 return _mm_mul_pd(lu,_mm_sub_pd(two,_mm_mul_pd(x,lu)));
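/* Illustrative scalar reference of the reciprocal iteration above (added for
 * clarity, not part of the original header; the helper name is hypothetical).
 * Each Newton-Raphson step for 1/x maps an estimate y to y*(2 - x*y); the two
 * steps above refine the single-precision rcp lookup to double precision.
 */
static double
scalar_inv_nr_step(double x, double y)
{
    return y*(2.0 - x*y);
}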
176 static gmx_inline __m256d
177 gmx_mm256_abs_pd(__m256d x)
179 const __m256d signmask = _mm256_castsi256_pd( _mm256_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF,
180 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
182 return _mm256_and_pd(x,signmask);
185 static gmx_inline __m128d
186 gmx_mm_abs_pd(__m128d x)
188 const __m128d signmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
190 return _mm_and_pd(x,signmask);
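/* Illustrative scalar sketch of the sign-mask trick above (added for clarity,
 * not part of the original header; the helper name is hypothetical). The AND
 * with 0x7FFFFFFF,0xFFFFFFFF simply clears the IEEE-754 sign bit, which is all
 * fabs() needs to do.
 */
static double
scalar_abs_via_signmask(double x)
{
    union { double d; unsigned long long u; } v;
    v.d  = x;
    v.u &= 0x7FFFFFFFFFFFFFFFULL;
    return v.d;
}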
194 /*
195  * 2^x function, 256 bit
197  * The 2^w term is calculated with a rational Padé approximation on the interval
198  * [-0.5,0.5],
200  * 1 + 2*P(x^2)/(Q(x^2)-P(x^2)),
201  * according to the same algorithm as used in the Cephes/netlib math routines.
202  */
203 static __m256d
204 gmx_mm256_exp2_pd(__m256d x)
206 /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022 (double precision). */
207 const __m256d arglimit = _mm256_set1_pd(1022.0);
208 const __m128i expbase = _mm_set1_epi32(1023);
210 const __m256d P2 = _mm256_set1_pd(2.30933477057345225087e-2);
211 const __m256d P1 = _mm256_set1_pd(2.02020656693165307700e1);
212 const __m256d P0 = _mm256_set1_pd(1.51390680115615096133e3);
213 /* Q2 == 1.0 */
214 const __m256d Q1 = _mm256_set1_pd(2.33184211722314911771e2);
215 const __m256d Q0 = _mm256_set1_pd(4.36821166879210612817e3);
216 const __m256d one = _mm256_set1_pd(1.0);
217 const __m256d two = _mm256_set1_pd(2.0);
219 __m256d valuemask;
220 __m256i iexppart;
221 __m128i iexppart128a,iexppart128b;
222 __m256d fexppart;
223 __m256d intpart;
224 __m256d z,z2;
225 __m256d PolyP,PolyQ;
227 iexppart128a = _mm256_cvtpd_epi32(x);
228 intpart = _mm256_round_pd(x,_MM_FROUND_TO_NEAREST_INT);
230 /* Add exponent bias */
231 iexppart128a = _mm_add_epi32(iexppart128a,expbase);
233 /* We now want to shift the exponent 52 positions left, but to achieve this we need
234  * to separate the 128-bit register data into two registers (4x 64-bit does not fit in 128 bits),
235  * shift them, and then merge into a single __m256d.
236  * Elements 0/1 should end up in iexppart128a, and 2/3 in iexppart128b.
237  * It doesn't matter what we put in the 2nd/4th position, since that data will be
238  * shifted out and replaced with zeros.
239  */
240 iexppart128b = _mm_shuffle_epi32(iexppart128a,_MM_SHUFFLE(3,3,2,2));
241 iexppart128a = _mm_shuffle_epi32(iexppart128a,_MM_SHUFFLE(1,1,0,0));
243 iexppart128b = _mm_slli_epi64(iexppart128b,52);
244 iexppart128a = _mm_slli_epi64(iexppart128a,52);
246 iexppart = _mm256_castsi128_si256(iexppart128a);
247 iexppart = _mm256_insertf128_si256(iexppart,iexppart128b,0x1);
249 valuemask = _mm256_cmp_pd(arglimit,gmx_mm256_abs_pd(x),_CMP_GE_OQ);
250 fexppart = _mm256_and_pd(valuemask,_mm256_castsi256_pd(iexppart));
252 z = _mm256_sub_pd(x,intpart);
254 z2 = _mm256_mul_pd(z,z);
256 PolyP = _mm256_mul_pd(P2,z2);
257 PolyP = _mm256_add_pd(PolyP,P1);
258 PolyQ = _mm256_add_pd(z2,Q1);
259 PolyP = _mm256_mul_pd(PolyP,z2);
260 PolyQ = _mm256_mul_pd(PolyQ,z2);
261 PolyP = _mm256_add_pd(PolyP,P0);
262 PolyQ = _mm256_add_pd(PolyQ,Q0);
263 PolyP = _mm256_mul_pd(PolyP,z);
265 z = _mm256_mul_pd(PolyP,gmx_mm256_inv_pd(_mm256_sub_pd(PolyQ,PolyP)));
266 z = _mm256_add_pd(one,_mm256_mul_pd(two,z));
268 z = _mm256_mul_pd(z,fexppart);
270 return z;
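/* Illustrative scalar sketch of the exponent construction above (added for
 * clarity, not part of the original header; the helper name is hypothetical).
 * After x is split into a rounded integer n and a remainder z in [-0.5,0.5],
 * 2^n is built directly as an IEEE-754 double by shifting the biased exponent
 * n+1023 into bits 52..62, and the final result is 2^n * 2^z.
 */
static double
scalar_exp2_integer_part(int n)
{
    union { double d; unsigned long long u; } v;
    /* Valid for |n| <= 1022, matching the arglimit test above */
    v.u = ((unsigned long long)(n + 1023)) << 52;
    return v.d;
}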
273 /* 2^x, 128 bit */
274 static __m128d
275 gmx_mm_exp2_pd(__m128d x)
277 /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022 (double precision). */
278 const __m128d arglimit = _mm_set1_pd(1022.0);
279 const __m128i expbase = _mm_set1_epi32(1023);
281 const __m128d P2 = _mm_set1_pd(2.30933477057345225087e-2);
282 const __m128d P1 = _mm_set1_pd(2.02020656693165307700e1);
283 const __m128d P0 = _mm_set1_pd(1.51390680115615096133e3);
284 /* Q2 == 1.0 */
285 const __m128d Q1 = _mm_set1_pd(2.33184211722314911771e2);
286 const __m128d Q0 = _mm_set1_pd(4.36821166879210612817e3);
287 const __m128d one = _mm_set1_pd(1.0);
288 const __m128d two = _mm_set1_pd(2.0);
290 __m128d valuemask;
291 __m128i iexppart;
292 __m128d fexppart;
293 __m128d intpart;
294 __m128d z,z2;
295 __m128d PolyP,PolyQ;
297 iexppart = _mm_cvtpd_epi32(x);
298 intpart = _mm_round_pd(x,_MM_FROUND_TO_NEAREST_INT);
300 /* The two lowest elements of iexppart now contain the two rounded 32-bit integer parts (the
301  * exponent bias is added below). To be able to shift them into the exponent of a double
302  * precision number we first need to shuffle so that the lower half contains the first element,
303  * and the upper half the second. This should really be done as a zero-extension, but since the
304  * next instructions will shift the registers left by 52 bits it doesn't matter what we put
305  * there - it will be shifted out (thus we just use element 2 from iexppart).
306  */
307 iexppart = _mm_shuffle_epi32(iexppart,_MM_SHUFFLE(2,1,2,0));
309 /* Do the shift operation on the 64-bit registers */
310 iexppart = _mm_add_epi32(iexppart,expbase);
311 iexppart = _mm_slli_epi64(iexppart,52);
313 valuemask = _mm_cmpge_pd(arglimit,gmx_mm_abs_pd(x));
314 fexppart = _mm_and_pd(valuemask,gmx_mm_castsi128_pd(iexppart));
316 z = _mm_sub_pd(x,intpart);
317 z2 = _mm_mul_pd(z,z);
319 PolyP = _mm_mul_pd(P2,z2);
320 PolyP = _mm_add_pd(PolyP,P1);
321 PolyQ = _mm_add_pd(z2,Q1);
322 PolyP = _mm_mul_pd(PolyP,z2);
323 PolyQ = _mm_mul_pd(PolyQ,z2);
324 PolyP = _mm_add_pd(PolyP,P0);
325 PolyQ = _mm_add_pd(PolyQ,Q0);
326 PolyP = _mm_mul_pd(PolyP,z);
328 z = _mm_mul_pd(PolyP,gmx_mm_inv_pd(_mm_sub_pd(PolyQ,PolyP)));
329 z = _mm_add_pd(one,_mm_mul_pd(two,z));
331 z = _mm_mul_pd(z,fexppart);
333 return z;
337 /* Exponential function, 256 bit. This could be calculated from 2^x as Exp(x)=2^(y),
338 * where y=log2(e)*x, but there will then be a small rounding error since we lose
339 * some precision due to the multiplication. This will then be magnified a lot by
340 * the exponential.
342 * Instead, we calculate the fractional part directly as a Padé approximation of
343 * Exp(z) on [-0.5,0.5]. We use extended precision arithmetic to calculate the fraction
344 * remaining after 2^y, which avoids the precision loss.
345 */
346 static __m256d
347 gmx_mm256_exp_pd(__m256d exparg)
349 const __m256d argscale = _mm256_set1_pd(1.4426950408889634073599);
350 /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022 (double precision). */
351 const __m256d arglimit = _mm256_set1_pd(1022.0);
352 const __m128i expbase = _mm_set1_epi32(1023);
354 const __m256d invargscale0 = _mm256_set1_pd(6.93145751953125e-1);
355 const __m256d invargscale1 = _mm256_set1_pd(1.42860682030941723212e-6);
357 const __m256d P2 = _mm256_set1_pd(1.26177193074810590878e-4);
358 const __m256d P1 = _mm256_set1_pd(3.02994407707441961300e-2);
359 /* P0 == 1.0 */
360 const __m256d Q3 = _mm256_set1_pd(3.00198505138664455042E-6);
361 const __m256d Q2 = _mm256_set1_pd(2.52448340349684104192E-3);
362 const __m256d Q1 = _mm256_set1_pd(2.27265548208155028766E-1);
363 /* Q0 == 2.0 */
364 const __m256d one = _mm256_set1_pd(1.0);
365 const __m256d two = _mm256_set1_pd(2.0);
367 __m256d valuemask;
368 __m256i iexppart;
369 __m128i iexppart128a,iexppart128b;
370 __m256d fexppart;
371 __m256d intpart;
372 __m256d x,z,z2;
373 __m256d PolyP,PolyQ;
375 x = _mm256_mul_pd(exparg,argscale);
377 iexppart128a = _mm256_cvtpd_epi32(x);
378 intpart = _mm256_round_pd(x,_MM_FROUND_TO_NEAREST_INT);
380 /* Add exponent bias */
381 iexppart128a = _mm_add_epi32(iexppart128a,expbase);
383 /* We now want to shift the exponent 52 positions left, but to achieve this we need
384  * to separate the 128-bit register data into two registers (4x 64-bit does not fit in 128 bits),
385  * shift them, and then merge into a single __m256d.
386  * Elements 0/1 should end up in iexppart128a, and 2/3 in iexppart128b.
387  * It doesn't matter what we put in the 2nd/4th position, since that data will be
388  * shifted out and replaced with zeros.
389  */
390 iexppart128b = _mm_shuffle_epi32(iexppart128a,_MM_SHUFFLE(3,3,2,2));
391 iexppart128a = _mm_shuffle_epi32(iexppart128a,_MM_SHUFFLE(1,1,0,0));
393 iexppart128b = _mm_slli_epi64(iexppart128b,52);
394 iexppart128a = _mm_slli_epi64(iexppart128a,52);
396 iexppart = _mm256_castsi128_si256(iexppart128a);
397 iexppart = _mm256_insertf128_si256(iexppart,iexppart128b,0x1);
399 valuemask = _mm256_cmp_pd(arglimit,gmx_mm256_abs_pd(x),_CMP_GE_OQ);
400 fexppart = _mm256_and_pd(valuemask,_mm256_castsi256_pd(iexppart));
402 z = _mm256_sub_pd(exparg,_mm256_mul_pd(invargscale0,intpart));
403 z = _mm256_sub_pd(z,_mm256_mul_pd(invargscale1,intpart));
405 z2 = _mm256_mul_pd(z,z);
407 PolyQ = _mm256_mul_pd(Q3,z2);
408 PolyQ = _mm256_add_pd(PolyQ,Q2);
409 PolyP = _mm256_mul_pd(P2,z2);
410 PolyQ = _mm256_mul_pd(PolyQ,z2);
411 PolyP = _mm256_add_pd(PolyP,P1);
412 PolyQ = _mm256_add_pd(PolyQ,Q1);
413 PolyP = _mm256_mul_pd(PolyP,z2);
414 PolyQ = _mm256_mul_pd(PolyQ,z2);
415 PolyP = _mm256_add_pd(PolyP,one);
416 PolyQ = _mm256_add_pd(PolyQ,two);
418 PolyP = _mm256_mul_pd(PolyP,z);
420 z = _mm256_mul_pd(PolyP,gmx_mm256_inv_pd(_mm256_sub_pd(PolyQ,PolyP)));
421 z = _mm256_add_pd(one,_mm256_mul_pd(two,z));
423 z = _mm256_mul_pd(z,fexppart);
425 return z;
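/* Illustrative scalar sketch of the extended-precision argument reduction
 * above (added for clarity, not part of the original header; the helper name
 * is hypothetical). ln(2) is split into a coarse part C0 that is exact in a
 * few mantissa bits and a small correction C1, so the remainder
 * z = (x - n*C0) - n*C1 stays accurate even though n*ln(2) itself would round.
 */
static double
scalar_exp_argument_reduction(double x, double n)
{
    const double C0 = 6.93145751953125e-1;        /* invargscale0 above */
    const double C1 = 1.42860682030941723212e-6;  /* invargscale1 above */
    return (x - n*C0) - n*C1;
}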
428 /* exp(), 128 bit */
429 static __m128d
430 gmx_mm_exp_pd(__m128d exparg)
432 const __m128d argscale = _mm_set1_pd(1.4426950408889634073599);
433 /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022 (double precision). */
434 const __m128d arglimit = _mm_set1_pd(1022.0);
435 const __m128i expbase = _mm_set1_epi32(1023);
437 const __m128d invargscale0 = _mm_set1_pd(6.93145751953125e-1);
438 const __m128d invargscale1 = _mm_set1_pd(1.42860682030941723212e-6);
440 const __m128d P2 = _mm_set1_pd(1.26177193074810590878e-4);
441 const __m128d P1 = _mm_set1_pd(3.02994407707441961300e-2);
442 /* P0 == 1.0 */
443 const __m128d Q3 = _mm_set1_pd(3.00198505138664455042E-6);
444 const __m128d Q2 = _mm_set1_pd(2.52448340349684104192E-3);
445 const __m128d Q1 = _mm_set1_pd(2.27265548208155028766E-1);
446 /* Q0 == 2.0 */
447 const __m128d one = _mm_set1_pd(1.0);
448 const __m128d two = _mm_set1_pd(2.0);
450 __m128d valuemask;
451 __m128i iexppart;
452 __m128d fexppart;
453 __m128d intpart;
454 __m128d x,z,z2;
455 __m128d PolyP,PolyQ;
457 x = _mm_mul_pd(exparg,argscale);
459 iexppart = _mm_cvtpd_epi32(x);
460 intpart = _mm_round_pd(x,_MM_FROUND_TO_NEAREST_INT);
462 /* The two lowest elements of iexppart now contain the two rounded 32-bit integer parts (the
463  * exponent bias is added below). To be able to shift them into the exponent of a double
464  * precision number we first need to shuffle so that the lower half contains the first element,
465  * and the upper half the second. This should really be done as a zero-extension, but since the
466  * next instructions will shift the registers left by 52 bits it doesn't matter what we put
467  * there - it will be shifted out (thus we just use element 2 from iexppart).
468  */
469 iexppart = _mm_shuffle_epi32(iexppart,_MM_SHUFFLE(2,1,2,0));
471 /* Do the shift operation on the 64-bit registers */
472 iexppart = _mm_add_epi32(iexppart,expbase);
473 iexppart = _mm_slli_epi64(iexppart,52);
475 valuemask = _mm_cmpge_pd(arglimit,gmx_mm_abs_pd(x));
476 fexppart = _mm_and_pd(valuemask,gmx_mm_castsi128_pd(iexppart));
478 z = _mm_sub_pd(exparg,_mm_mul_pd(invargscale0,intpart));
479 z = _mm_sub_pd(z,_mm_mul_pd(invargscale1,intpart));
481 z2 = _mm_mul_pd(z,z);
483 PolyQ = _mm_mul_pd(Q3,z2);
484 PolyQ = _mm_add_pd(PolyQ,Q2);
485 PolyP = _mm_mul_pd(P2,z2);
486 PolyQ = _mm_mul_pd(PolyQ,z2);
487 PolyP = _mm_add_pd(PolyP,P1);
488 PolyQ = _mm_add_pd(PolyQ,Q1);
489 PolyP = _mm_mul_pd(PolyP,z2);
490 PolyQ = _mm_mul_pd(PolyQ,z2);
491 PolyP = _mm_add_pd(PolyP,one);
492 PolyQ = _mm_add_pd(PolyQ,two);
494 PolyP = _mm_mul_pd(PolyP,z);
496 z = _mm_mul_pd(PolyP,gmx_mm_inv_pd(_mm_sub_pd(PolyQ,PolyP)));
497 z = _mm_add_pd(one,_mm_mul_pd(two,z));
499 z = _mm_mul_pd(z,fexppart);
501 return z;
505 static __m256d
506 gmx_mm256_log_pd(__m256d x)
508 /* Same algorithm as the Cephes library */
509 const __m256d expmask = _mm256_castsi256_pd( _mm256_set_epi32(0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000,
510 0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000) );
512 const __m128i expbase_m1 = _mm_set1_epi32(1023-1); /* We want non-IEEE format */
514 const __m256d half = _mm256_set1_pd(0.5);
515 const __m256d one = _mm256_set1_pd(1.0);
516 const __m256d two = _mm256_set1_pd(2.0);
517 const __m256d invsq2 = _mm256_set1_pd(1.0/sqrt(2.0));
519 const __m256d corr1 = _mm256_set1_pd(-2.121944400546905827679e-4);
520 const __m256d corr2 = _mm256_set1_pd(0.693359375);
522 const __m256d P5 = _mm256_set1_pd(1.01875663804580931796e-4);
523 const __m256d P4 = _mm256_set1_pd(4.97494994976747001425e-1);
524 const __m256d P3 = _mm256_set1_pd(4.70579119878881725854e0);
525 const __m256d P2 = _mm256_set1_pd(1.44989225341610930846e1);
526 const __m256d P1 = _mm256_set1_pd(1.79368678507819816313e1);
527 const __m256d P0 = _mm256_set1_pd(7.70838733755885391666e0);
529 const __m256d Q4 = _mm256_set1_pd(1.12873587189167450590e1);
530 const __m256d Q3 = _mm256_set1_pd(4.52279145837532221105e1);
531 const __m256d Q2 = _mm256_set1_pd(8.29875266912776603211e1);
532 const __m256d Q1 = _mm256_set1_pd(7.11544750618563894466e1);
533 const __m256d Q0 = _mm256_set1_pd(2.31251620126765340583e1);
535 const __m256d R2 = _mm256_set1_pd(-7.89580278884799154124e-1);
536 const __m256d R1 = _mm256_set1_pd(1.63866645699558079767e1);
537 const __m256d R0 = _mm256_set1_pd(-6.41409952958715622951e1);
539 const __m256d S2 = _mm256_set1_pd(-3.56722798256324312549E1);
540 const __m256d S1 = _mm256_set1_pd(3.12093766372244180303E2);
541 const __m256d S0 = _mm256_set1_pd(-7.69691943550460008604E2);
543 __m256d fexp;
544 __m256i iexp;
545 __m128i iexp128a,iexp128b;
547 __m256d mask1,mask2;
548 __m256d corr,t1,t2,q;
549 __m256d zA,yA,xA,zB,yB,xB,z;
550 __m256d polyR,polyS;
551 __m256d polyP1,polyP2,polyQ1,polyQ2;
553 /* Separate x into exponent and mantissa, with a mantissa in the range [0.5..1[ (not IEEE754 standard!) */
554 fexp = _mm256_and_pd(x,expmask);
556 iexp = _mm256_castpd_si256(fexp);
557 iexp128b = _mm256_extractf128_si256(iexp,0x1);
558 iexp128a = _mm256_castsi256_si128(iexp);
560 iexp128a = _mm_srli_epi64(iexp128a,52);
561 iexp128b = _mm_srli_epi64(iexp128b,52);
562 /* Merge into a single register */
563 iexp128a = _mm_shuffle_epi32(iexp128a,_MM_SHUFFLE(1,1,2,0));
564 iexp128b = _mm_shuffle_epi32(iexp128b,_MM_SHUFFLE(2,0,1,1));
565 iexp128a = _mm_or_si128(iexp128a,iexp128b);
566 iexp128a = _mm_sub_epi32(iexp128a,expbase_m1);
568 fexp = _mm256_cvtepi32_pd(iexp128a);
570 x = _mm256_andnot_pd(expmask,x); /* Get mantissa */
571 x = _mm256_or_pd(x,one);
572 x = _mm256_mul_pd(x,half);
574 mask1 = _mm256_cmp_pd(gmx_mm256_abs_pd(fexp),two,_CMP_GT_OQ);
575 mask2 = _mm256_cmp_pd(x,invsq2,_CMP_LT_OQ);
577 fexp = _mm256_sub_pd(fexp,_mm256_and_pd(mask2,one));
579 /* If mask1 is set ('A') */
580 zA = _mm256_sub_pd(x,half);
581 t1 = _mm256_blendv_pd( zA,x,mask2 );
582 zA = _mm256_sub_pd(t1,half);
583 t2 = _mm256_blendv_pd( x,zA,mask2 );
584 yA = _mm256_mul_pd(half,_mm256_add_pd(t2,one));
586 xA = _mm256_mul_pd(zA,gmx_mm256_inv_pd(yA));
587 zA = _mm256_mul_pd(xA,xA);
589 /* EVALUATE POLY */
590 polyR = _mm256_mul_pd(R2,zA);
591 polyR = _mm256_add_pd(polyR,R1);
592 polyR = _mm256_mul_pd(polyR,zA);
593 polyR = _mm256_add_pd(polyR,R0);
595 polyS = _mm256_add_pd(zA,S2);
596 polyS = _mm256_mul_pd(polyS,zA);
597 polyS = _mm256_add_pd(polyS,S1);
598 polyS = _mm256_mul_pd(polyS,zA);
599 polyS = _mm256_add_pd(polyS,S0);
601 q = _mm256_mul_pd(polyR,gmx_mm256_inv_pd(polyS));
602 zA = _mm256_mul_pd(_mm256_mul_pd(xA,zA),q);
604 zA = _mm256_add_pd(zA,_mm256_mul_pd(corr1,fexp));
605 zA = _mm256_add_pd(zA,xA);
606 zA = _mm256_add_pd(zA,_mm256_mul_pd(corr2,fexp));
608 /* If mask1 is not set ('B') */
609 corr = _mm256_and_pd(mask2,x);
610 xB = _mm256_add_pd(x,corr);
611 xB = _mm256_sub_pd(xB,one);
612 zB = _mm256_mul_pd(xB,xB);
614 polyP1 = _mm256_mul_pd(P5,zB);
615 polyP2 = _mm256_mul_pd(P4,zB);
616 polyP1 = _mm256_add_pd(polyP1,P3);
617 polyP2 = _mm256_add_pd(polyP2,P2);
618 polyP1 = _mm256_mul_pd(polyP1,zB);
619 polyP2 = _mm256_mul_pd(polyP2,zB);
620 polyP1 = _mm256_add_pd(polyP1,P1);
621 polyP2 = _mm256_add_pd(polyP2,P0);
622 polyP1 = _mm256_mul_pd(polyP1,xB);
623 polyP1 = _mm256_add_pd(polyP1,polyP2);
625 polyQ2 = _mm256_mul_pd(Q4,zB);
626 polyQ1 = _mm256_add_pd(zB,Q3);
627 polyQ2 = _mm256_add_pd(polyQ2,Q2);
628 polyQ1 = _mm256_mul_pd(polyQ1,zB);
629 polyQ2 = _mm256_mul_pd(polyQ2,zB);
630 polyQ1 = _mm256_add_pd(polyQ1,Q1);
631 polyQ2 = _mm256_add_pd(polyQ2,Q0);
632 polyQ1 = _mm256_mul_pd(polyQ1,xB);
633 polyQ1 = _mm256_add_pd(polyQ1,polyQ2);
635 fexp = _mm256_and_pd(fexp,_mm256_cmp_pd(fexp,_mm256_setzero_pd(),_CMP_NEQ_OQ));
637 q = _mm256_mul_pd(polyP1,gmx_mm256_inv_pd(polyQ1));
638 yB = _mm256_mul_pd(_mm256_mul_pd(xB,zB),q);
640 yB = _mm256_add_pd(yB,_mm256_mul_pd(corr1,fexp));
641 yB = _mm256_sub_pd(yB,_mm256_mul_pd(half,zB));
642 zB = _mm256_add_pd(xB,yB);
643 zB = _mm256_add_pd(zB,_mm256_mul_pd(corr2,fexp));
645 z = _mm256_blendv_pd( zB,zA,mask1 );
647 return z;
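/* Illustrative scalar sketch of the exponent/mantissa split above (added for
 * clarity, not part of the original header; the helper name is hypothetical).
 * The bit manipulation is the SIMD counterpart of frexp(): write x = m * 2^e
 * with m in [0.5,1), then evaluate log(x) as log(m) + e*log(2).
 */
static double
scalar_log_via_frexp(double x)
{
    int    e;
    double m = frexp(x, &e);
    return log(m) + (double)e*0.69314718055994530942;
}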
650 static __m128d
651 gmx_mm_log_pd(__m128d x)
653 /* Same algorithm as the Cephes library */
654 const __m128d expmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000) );
656 const __m128i expbase_m1 = _mm_set1_epi32(1023-1); /* We want non-IEEE format */
658 const __m128d half = _mm_set1_pd(0.5);
659 const __m128d one = _mm_set1_pd(1.0);
660 const __m128d two = _mm_set1_pd(2.0);
661 const __m128d invsq2 = _mm_set1_pd(1.0/sqrt(2.0));
663 const __m128d corr1 = _mm_set1_pd(-2.121944400546905827679e-4);
664 const __m128d corr2 = _mm_set1_pd(0.693359375);
666 const __m128d P5 = _mm_set1_pd(1.01875663804580931796e-4);
667 const __m128d P4 = _mm_set1_pd(4.97494994976747001425e-1);
668 const __m128d P3 = _mm_set1_pd(4.70579119878881725854e0);
669 const __m128d P2 = _mm_set1_pd(1.44989225341610930846e1);
670 const __m128d P1 = _mm_set1_pd(1.79368678507819816313e1);
671 const __m128d P0 = _mm_set1_pd(7.70838733755885391666e0);
673 const __m128d Q4 = _mm_set1_pd(1.12873587189167450590e1);
674 const __m128d Q3 = _mm_set1_pd(4.52279145837532221105e1);
675 const __m128d Q2 = _mm_set1_pd(8.29875266912776603211e1);
676 const __m128d Q1 = _mm_set1_pd(7.11544750618563894466e1);
677 const __m128d Q0 = _mm_set1_pd(2.31251620126765340583e1);
679 const __m128d R2 = _mm_set1_pd(-7.89580278884799154124e-1);
680 const __m128d R1 = _mm_set1_pd(1.63866645699558079767e1);
681 const __m128d R0 = _mm_set1_pd(-6.41409952958715622951e1);
683 const __m128d S2 = _mm_set1_pd(-3.56722798256324312549E1);
684 const __m128d S1 = _mm_set1_pd(3.12093766372244180303E2);
685 const __m128d S0 = _mm_set1_pd(-7.69691943550460008604E2);
687 __m128d fexp;
688 __m128i iexp;
690 __m128d mask1,mask2;
691 __m128d corr,t1,t2,q;
692 __m128d zA,yA,xA,zB,yB,xB,z;
693 __m128d polyR,polyS;
694 __m128d polyP1,polyP2,polyQ1,polyQ2;
696 /* Separate x into exponent and mantissa, with a mantissa in the range [0.5..1[ (not IEEE754 standard!) */
697 fexp = _mm_and_pd(x,expmask);
698 iexp = gmx_mm_castpd_si128(fexp);
699 iexp = _mm_srli_epi64(iexp,52);
700 iexp = _mm_sub_epi32(iexp,expbase_m1);
701 iexp = _mm_shuffle_epi32(iexp, _MM_SHUFFLE(1,1,2,0) );
702 fexp = _mm_cvtepi32_pd(iexp);
704 x = _mm_andnot_pd(expmask,x);
705 x = _mm_or_pd(x,one);
706 x = _mm_mul_pd(x,half);
708 mask1 = _mm_cmpgt_pd(gmx_mm_abs_pd(fexp),two);
709 mask2 = _mm_cmplt_pd(x,invsq2);
711 fexp = _mm_sub_pd(fexp,_mm_and_pd(mask2,one));
713 /* If mask1 is set ('A') */
714 zA = _mm_sub_pd(x,half);
715 t1 = _mm_blendv_pd( zA,x,mask2 );
716 zA = _mm_sub_pd(t1,half);
717 t2 = _mm_blendv_pd( x,zA,mask2 );
718 yA = _mm_mul_pd(half,_mm_add_pd(t2,one));
720 xA = _mm_mul_pd(zA,gmx_mm_inv_pd(yA));
721 zA = _mm_mul_pd(xA,xA);
723 /* EVALUATE POLY */
724 polyR = _mm_mul_pd(R2,zA);
725 polyR = _mm_add_pd(polyR,R1);
726 polyR = _mm_mul_pd(polyR,zA);
727 polyR = _mm_add_pd(polyR,R0);
729 polyS = _mm_add_pd(zA,S2);
730 polyS = _mm_mul_pd(polyS,zA);
731 polyS = _mm_add_pd(polyS,S1);
732 polyS = _mm_mul_pd(polyS,zA);
733 polyS = _mm_add_pd(polyS,S0);
735 q = _mm_mul_pd(polyR,gmx_mm_inv_pd(polyS));
736 zA = _mm_mul_pd(_mm_mul_pd(xA,zA),q);
738 zA = _mm_add_pd(zA,_mm_mul_pd(corr1,fexp));
739 zA = _mm_add_pd(zA,xA);
740 zA = _mm_add_pd(zA,_mm_mul_pd(corr2,fexp));
742 /* If mask1 is not set ('B') */
743 corr = _mm_and_pd(mask2,x);
744 xB = _mm_add_pd(x,corr);
745 xB = _mm_sub_pd(xB,one);
746 zB = _mm_mul_pd(xB,xB);
748 polyP1 = _mm_mul_pd(P5,zB);
749 polyP2 = _mm_mul_pd(P4,zB);
750 polyP1 = _mm_add_pd(polyP1,P3);
751 polyP2 = _mm_add_pd(polyP2,P2);
752 polyP1 = _mm_mul_pd(polyP1,zB);
753 polyP2 = _mm_mul_pd(polyP2,zB);
754 polyP1 = _mm_add_pd(polyP1,P1);
755 polyP2 = _mm_add_pd(polyP2,P0);
756 polyP1 = _mm_mul_pd(polyP1,xB);
757 polyP1 = _mm_add_pd(polyP1,polyP2);
759 polyQ2 = _mm_mul_pd(Q4,zB);
760 polyQ1 = _mm_add_pd(zB,Q3);
761 polyQ2 = _mm_add_pd(polyQ2,Q2);
762 polyQ1 = _mm_mul_pd(polyQ1,zB);
763 polyQ2 = _mm_mul_pd(polyQ2,zB);
764 polyQ1 = _mm_add_pd(polyQ1,Q1);
765 polyQ2 = _mm_add_pd(polyQ2,Q0);
766 polyQ1 = _mm_mul_pd(polyQ1,xB);
767 polyQ1 = _mm_add_pd(polyQ1,polyQ2);
769 fexp = _mm_and_pd(fexp,_mm_cmpneq_pd(fexp,_mm_setzero_pd()));
771 q = _mm_mul_pd(polyP1,gmx_mm_inv_pd(polyQ1));
772 yB = _mm_mul_pd(_mm_mul_pd(xB,zB),q);
774 yB = _mm_add_pd(yB,_mm_mul_pd(corr1,fexp));
775 yB = _mm_sub_pd(yB,_mm_mul_pd(half,zB));
776 zB = _mm_add_pd(xB,yB);
777 zB = _mm_add_pd(zB,_mm_mul_pd(corr2,fexp));
779 z = _mm_blendv_pd( zB,zA,mask1 );
781 return z;
785 static __m256d
786 gmx_mm256_erf_pd(__m256d x)
788 /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
789 const __m256d CAP4 = _mm256_set1_pd(-0.431780540597889301512e-4);
790 const __m256d CAP3 = _mm256_set1_pd(-0.00578562306260059236059);
791 const __m256d CAP2 = _mm256_set1_pd(-0.028593586920219752446);
792 const __m256d CAP1 = _mm256_set1_pd(-0.315924962948621698209);
793 const __m256d CAP0 = _mm256_set1_pd(0.14952975608477029151);
795 const __m256d CAQ5 = _mm256_set1_pd(-0.374089300177174709737e-5);
796 const __m256d CAQ4 = _mm256_set1_pd(0.00015126584532155383535);
797 const __m256d CAQ3 = _mm256_set1_pd(0.00536692680669480725423);
798 const __m256d CAQ2 = _mm256_set1_pd(0.0668686825594046122636);
799 const __m256d CAQ1 = _mm256_set1_pd(0.402604990869284362773);
800 /* CAQ0 == 1.0 */
801 const __m256d CAoffset = _mm256_set1_pd(0.9788494110107421875);
803 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
804 const __m256d CBP6 = _mm256_set1_pd(2.49650423685462752497647637088e-10);
805 const __m256d CBP5 = _mm256_set1_pd(0.00119770193298159629350136085658);
806 const __m256d CBP4 = _mm256_set1_pd(0.0164944422378370965881008942733);
807 const __m256d CBP3 = _mm256_set1_pd(0.0984581468691775932063932439252);
808 const __m256d CBP2 = _mm256_set1_pd(0.317364595806937763843589437418);
809 const __m256d CBP1 = _mm256_set1_pd(0.554167062641455850932670067075);
810 const __m256d CBP0 = _mm256_set1_pd(0.427583576155807163756925301060);
811 const __m256d CBQ7 = _mm256_set1_pd(0.00212288829699830145976198384930);
812 const __m256d CBQ6 = _mm256_set1_pd(0.0334810979522685300554606393425);
813 const __m256d CBQ5 = _mm256_set1_pd(0.2361713785181450957579508850717);
814 const __m256d CBQ4 = _mm256_set1_pd(0.955364736493055670530981883072);
815 const __m256d CBQ3 = _mm256_set1_pd(2.36815675631420037315349279199);
816 const __m256d CBQ2 = _mm256_set1_pd(3.55261649184083035537184223542);
817 const __m256d CBQ1 = _mm256_set1_pd(2.93501136050160872574376997993);
818 /* CBQ0 == 1.0 */
820 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
821 const __m256d CCP6 = _mm256_set1_pd(-2.8175401114513378771);
822 const __m256d CCP5 = _mm256_set1_pd(-3.22729451764143718517);
823 const __m256d CCP4 = _mm256_set1_pd(-2.5518551727311523996);
824 const __m256d CCP3 = _mm256_set1_pd(-0.687717681153649930619);
825 const __m256d CCP2 = _mm256_set1_pd(-0.212652252872804219852);
826 const __m256d CCP1 = _mm256_set1_pd(0.0175389834052493308818);
827 const __m256d CCP0 = _mm256_set1_pd(0.00628057170626964891937);
829 const __m256d CCQ6 = _mm256_set1_pd(5.48409182238641741584);
830 const __m256d CCQ5 = _mm256_set1_pd(13.5064170191802889145);
831 const __m256d CCQ4 = _mm256_set1_pd(22.9367376522880577224);
832 const __m256d CCQ3 = _mm256_set1_pd(15.930646027911794143);
833 const __m256d CCQ2 = _mm256_set1_pd(11.0567237927800161565);
834 const __m256d CCQ1 = _mm256_set1_pd(2.79257750980575282228);
835 /* CCQ0 == 1.0 */
836 const __m256d CCoffset = _mm256_set1_pd(0.5579090118408203125);
838 const __m256d one = _mm256_set1_pd(1.0);
839 const __m256d two = _mm256_set1_pd(2.0);
841 const __m256d signbit = _mm256_castsi256_pd( _mm256_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000,
842 0x80000000,0x00000000,0x80000000,0x00000000) );
844 __m256d xabs,x2,x4,t,t2,w,w2;
845 __m256d PolyAP0,PolyAP1,PolyAQ0,PolyAQ1;
846 __m256d PolyBP0,PolyBP1,PolyBQ0,PolyBQ1;
847 __m256d PolyCP0,PolyCP1,PolyCQ0,PolyCQ1;
848 __m256d res_erf,res_erfcB,res_erfcC,res_erfc,res;
849 __m256d mask,expmx2;
851 /* Calculate erf() */
852 xabs = gmx_mm256_abs_pd(x);
853 x2 = _mm256_mul_pd(x,x);
854 x4 = _mm256_mul_pd(x2,x2);
856 PolyAP0 = _mm256_mul_pd(CAP4,x4);
857 PolyAP1 = _mm256_mul_pd(CAP3,x4);
858 PolyAP0 = _mm256_add_pd(PolyAP0,CAP2);
859 PolyAP1 = _mm256_add_pd(PolyAP1,CAP1);
860 PolyAP0 = _mm256_mul_pd(PolyAP0,x4);
861 PolyAP1 = _mm256_mul_pd(PolyAP1,x2);
862 PolyAP0 = _mm256_add_pd(PolyAP0,CAP0);
863 PolyAP0 = _mm256_add_pd(PolyAP0,PolyAP1);
865 PolyAQ1 = _mm256_mul_pd(CAQ5,x4);
866 PolyAQ0 = _mm256_mul_pd(CAQ4,x4);
867 PolyAQ1 = _mm256_add_pd(PolyAQ1,CAQ3);
868 PolyAQ0 = _mm256_add_pd(PolyAQ0,CAQ2);
869 PolyAQ1 = _mm256_mul_pd(PolyAQ1,x4);
870 PolyAQ0 = _mm256_mul_pd(PolyAQ0,x4);
871 PolyAQ1 = _mm256_add_pd(PolyAQ1,CAQ1);
872 PolyAQ0 = _mm256_add_pd(PolyAQ0,one);
873 PolyAQ1 = _mm256_mul_pd(PolyAQ1,x2);
874 PolyAQ0 = _mm256_add_pd(PolyAQ0,PolyAQ1);
876 res_erf = _mm256_mul_pd(PolyAP0,gmx_mm256_inv_pd(PolyAQ0));
877 res_erf = _mm256_add_pd(CAoffset,res_erf);
878 res_erf = _mm256_mul_pd(x,res_erf);
880 /* Calculate erfc() in range [1,4.5] */
881 t = _mm256_sub_pd(xabs,one);
882 t2 = _mm256_mul_pd(t,t);
884 PolyBP0 = _mm256_mul_pd(CBP6,t2);
885 PolyBP1 = _mm256_mul_pd(CBP5,t2);
886 PolyBP0 = _mm256_add_pd(PolyBP0,CBP4);
887 PolyBP1 = _mm256_add_pd(PolyBP1,CBP3);
888 PolyBP0 = _mm256_mul_pd(PolyBP0,t2);
889 PolyBP1 = _mm256_mul_pd(PolyBP1,t2);
890 PolyBP0 = _mm256_add_pd(PolyBP0,CBP2);
891 PolyBP1 = _mm256_add_pd(PolyBP1,CBP1);
892 PolyBP0 = _mm256_mul_pd(PolyBP0,t2);
893 PolyBP1 = _mm256_mul_pd(PolyBP1,t);
894 PolyBP0 = _mm256_add_pd(PolyBP0,CBP0);
895 PolyBP0 = _mm256_add_pd(PolyBP0,PolyBP1);
897 PolyBQ1 = _mm256_mul_pd(CBQ7,t2);
898 PolyBQ0 = _mm256_mul_pd(CBQ6,t2);
899 PolyBQ1 = _mm256_add_pd(PolyBQ1,CBQ5);
900 PolyBQ0 = _mm256_add_pd(PolyBQ0,CBQ4);
901 PolyBQ1 = _mm256_mul_pd(PolyBQ1,t2);
902 PolyBQ0 = _mm256_mul_pd(PolyBQ0,t2);
903 PolyBQ1 = _mm256_add_pd(PolyBQ1,CBQ3);
904 PolyBQ0 = _mm256_add_pd(PolyBQ0,CBQ2);
905 PolyBQ1 = _mm256_mul_pd(PolyBQ1,t2);
906 PolyBQ0 = _mm256_mul_pd(PolyBQ0,t2);
907 PolyBQ1 = _mm256_add_pd(PolyBQ1,CBQ1);
908 PolyBQ0 = _mm256_add_pd(PolyBQ0,one);
909 PolyBQ1 = _mm256_mul_pd(PolyBQ1,t);
910 PolyBQ0 = _mm256_add_pd(PolyBQ0,PolyBQ1);
912 res_erfcB = _mm256_mul_pd(PolyBP0,gmx_mm256_inv_pd(PolyBQ0));
914 res_erfcB = _mm256_mul_pd(res_erfcB,xabs);
916 /* Calculate erfc() in range [4.5,inf] */
917 w = gmx_mm256_inv_pd(xabs);
918 w2 = _mm256_mul_pd(w,w);
920 PolyCP0 = _mm256_mul_pd(CCP6,w2);
921 PolyCP1 = _mm256_mul_pd(CCP5,w2);
922 PolyCP0 = _mm256_add_pd(PolyCP0,CCP4);
923 PolyCP1 = _mm256_add_pd(PolyCP1,CCP3);
924 PolyCP0 = _mm256_mul_pd(PolyCP0,w2);
925 PolyCP1 = _mm256_mul_pd(PolyCP1,w2);
926 PolyCP0 = _mm256_add_pd(PolyCP0,CCP2);
927 PolyCP1 = _mm256_add_pd(PolyCP1,CCP1);
928 PolyCP0 = _mm256_mul_pd(PolyCP0,w2);
929 PolyCP1 = _mm256_mul_pd(PolyCP1,w);
930 PolyCP0 = _mm256_add_pd(PolyCP0,CCP0);
931 PolyCP0 = _mm256_add_pd(PolyCP0,PolyCP1);
933 PolyCQ0 = _mm256_mul_pd(CCQ6,w2);
934 PolyCQ1 = _mm256_mul_pd(CCQ5,w2);
935 PolyCQ0 = _mm256_add_pd(PolyCQ0,CCQ4);
936 PolyCQ1 = _mm256_add_pd(PolyCQ1,CCQ3);
937 PolyCQ0 = _mm256_mul_pd(PolyCQ0,w2);
938 PolyCQ1 = _mm256_mul_pd(PolyCQ1,w2);
939 PolyCQ0 = _mm256_add_pd(PolyCQ0,CCQ2);
940 PolyCQ1 = _mm256_add_pd(PolyCQ1,CCQ1);
941 PolyCQ0 = _mm256_mul_pd(PolyCQ0,w2);
942 PolyCQ1 = _mm256_mul_pd(PolyCQ1,w);
943 PolyCQ0 = _mm256_add_pd(PolyCQ0,one);
944 PolyCQ0 = _mm256_add_pd(PolyCQ0,PolyCQ1);
946 expmx2 = gmx_mm256_exp_pd( _mm256_or_pd(signbit, x2) );
948 res_erfcC = _mm256_mul_pd(PolyCP0,gmx_mm256_inv_pd(PolyCQ0));
949 res_erfcC = _mm256_add_pd(res_erfcC,CCoffset);
950 res_erfcC = _mm256_mul_pd(res_erfcC,w);
952 mask = _mm256_cmp_pd(xabs,_mm256_set1_pd(4.5),_CMP_GT_OQ);
953 res_erfc = _mm256_blendv_pd(res_erfcB,res_erfcC,mask);
955 res_erfc = _mm256_mul_pd(res_erfc,expmx2);
957 /* erfc(x<0) = 2-erfc(|x|) */
958 mask = _mm256_cmp_pd(x,_mm256_setzero_pd(),_CMP_LT_OQ);
959 res_erfc = _mm256_blendv_pd(res_erfc,_mm256_sub_pd(two,res_erfc),mask);
961 /* Select erf() or erfc() */
962 mask = _mm256_cmp_pd(xabs,one,_CMP_LT_OQ);
963 res = _mm256_blendv_pd(_mm256_sub_pd(one,res_erfc),res_erf,mask);
965 return res;
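/* Illustrative usage sketch (added for clarity, not part of the original
 * header; the helper name is hypothetical). The routine above evaluates four
 * packed doubles at a time.
 */
static void
example_erf_four_doubles(const double in[4], double out[4])
{
    __m256d v = _mm256_loadu_pd(in);
    _mm256_storeu_pd(out, gmx_mm256_erf_pd(v));
}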
968 static __m128d
969 gmx_mm_erf_pd(__m128d x)
971 /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
972 const __m128d CAP4 = _mm_set1_pd(-0.431780540597889301512e-4);
973 const __m128d CAP3 = _mm_set1_pd(-0.00578562306260059236059);
974 const __m128d CAP2 = _mm_set1_pd(-0.028593586920219752446);
975 const __m128d CAP1 = _mm_set1_pd(-0.315924962948621698209);
976 const __m128d CAP0 = _mm_set1_pd(0.14952975608477029151);
978 const __m128d CAQ5 = _mm_set1_pd(-0.374089300177174709737e-5);
979 const __m128d CAQ4 = _mm_set1_pd(0.00015126584532155383535);
980 const __m128d CAQ3 = _mm_set1_pd(0.00536692680669480725423);
981 const __m128d CAQ2 = _mm_set1_pd(0.0668686825594046122636);
982 const __m128d CAQ1 = _mm_set1_pd(0.402604990869284362773);
983 /* CAQ0 == 1.0 */
984 const __m128d CAoffset = _mm_set1_pd(0.9788494110107421875);
986 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
987 const __m128d CBP6 = _mm_set1_pd(2.49650423685462752497647637088e-10);
988 const __m128d CBP5 = _mm_set1_pd(0.00119770193298159629350136085658);
989 const __m128d CBP4 = _mm_set1_pd(0.0164944422378370965881008942733);
990 const __m128d CBP3 = _mm_set1_pd(0.0984581468691775932063932439252);
991 const __m128d CBP2 = _mm_set1_pd(0.317364595806937763843589437418);
992 const __m128d CBP1 = _mm_set1_pd(0.554167062641455850932670067075);
993 const __m128d CBP0 = _mm_set1_pd(0.427583576155807163756925301060);
994 const __m128d CBQ7 = _mm_set1_pd(0.00212288829699830145976198384930);
995 const __m128d CBQ6 = _mm_set1_pd(0.0334810979522685300554606393425);
996 const __m128d CBQ5 = _mm_set1_pd(0.2361713785181450957579508850717);
997 const __m128d CBQ4 = _mm_set1_pd(0.955364736493055670530981883072);
998 const __m128d CBQ3 = _mm_set1_pd(2.36815675631420037315349279199);
999 const __m128d CBQ2 = _mm_set1_pd(3.55261649184083035537184223542);
1000 const __m128d CBQ1 = _mm_set1_pd(2.93501136050160872574376997993);
1001 /* CBQ0 == 1.0 */
1003 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
1004 const __m128d CCP6 = _mm_set1_pd(-2.8175401114513378771);
1005 const __m128d CCP5 = _mm_set1_pd(-3.22729451764143718517);
1006 const __m128d CCP4 = _mm_set1_pd(-2.5518551727311523996);
1007 const __m128d CCP3 = _mm_set1_pd(-0.687717681153649930619);
1008 const __m128d CCP2 = _mm_set1_pd(-0.212652252872804219852);
1009 const __m128d CCP1 = _mm_set1_pd(0.0175389834052493308818);
1010 const __m128d CCP0 = _mm_set1_pd(0.00628057170626964891937);
1012 const __m128d CCQ6 = _mm_set1_pd(5.48409182238641741584);
1013 const __m128d CCQ5 = _mm_set1_pd(13.5064170191802889145);
1014 const __m128d CCQ4 = _mm_set1_pd(22.9367376522880577224);
1015 const __m128d CCQ3 = _mm_set1_pd(15.930646027911794143);
1016 const __m128d CCQ2 = _mm_set1_pd(11.0567237927800161565);
1017 const __m128d CCQ1 = _mm_set1_pd(2.79257750980575282228);
1018 /* CCQ0 == 1.0 */
1019 const __m128d CCoffset = _mm_set1_pd(0.5579090118408203125);
1021 const __m128d one = _mm_set1_pd(1.0);
1022 const __m128d two = _mm_set1_pd(2.0);
1024 const __m128d signbit = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000) );
1026 __m128d xabs,x2,x4,t,t2,w,w2;
1027 __m128d PolyAP0,PolyAP1,PolyAQ0,PolyAQ1;
1028 __m128d PolyBP0,PolyBP1,PolyBQ0,PolyBQ1;
1029 __m128d PolyCP0,PolyCP1,PolyCQ0,PolyCQ1;
1030 __m128d res_erf,res_erfcB,res_erfcC,res_erfc,res;
1031 __m128d mask,expmx2;
1033 /* Calculate erf() */
1034 xabs = gmx_mm_abs_pd(x);
1035 x2 = _mm_mul_pd(x,x);
1036 x4 = _mm_mul_pd(x2,x2);
1038 PolyAP0 = _mm_mul_pd(CAP4,x4);
1039 PolyAP1 = _mm_mul_pd(CAP3,x4);
1040 PolyAP0 = _mm_add_pd(PolyAP0,CAP2);
1041 PolyAP1 = _mm_add_pd(PolyAP1,CAP1);
1042 PolyAP0 = _mm_mul_pd(PolyAP0,x4);
1043 PolyAP1 = _mm_mul_pd(PolyAP1,x2);
1044 PolyAP0 = _mm_add_pd(PolyAP0,CAP0);
1045 PolyAP0 = _mm_add_pd(PolyAP0,PolyAP1);
1047 PolyAQ1 = _mm_mul_pd(CAQ5,x4);
1048 PolyAQ0 = _mm_mul_pd(CAQ4,x4);
1049 PolyAQ1 = _mm_add_pd(PolyAQ1,CAQ3);
1050 PolyAQ0 = _mm_add_pd(PolyAQ0,CAQ2);
1051 PolyAQ1 = _mm_mul_pd(PolyAQ1,x4);
1052 PolyAQ0 = _mm_mul_pd(PolyAQ0,x4);
1053 PolyAQ1 = _mm_add_pd(PolyAQ1,CAQ1);
1054 PolyAQ0 = _mm_add_pd(PolyAQ0,one);
1055 PolyAQ1 = _mm_mul_pd(PolyAQ1,x2);
1056 PolyAQ0 = _mm_add_pd(PolyAQ0,PolyAQ1);
1058 res_erf = _mm_mul_pd(PolyAP0,gmx_mm_inv_pd(PolyAQ0));
1059 res_erf = _mm_add_pd(CAoffset,res_erf);
1060 res_erf = _mm_mul_pd(x,res_erf);
1062 /* Calculate erfc() in range [1,4.5] */
1063 t = _mm_sub_pd(xabs,one);
1064 t2 = _mm_mul_pd(t,t);
1066 PolyBP0 = _mm_mul_pd(CBP6,t2);
1067 PolyBP1 = _mm_mul_pd(CBP5,t2);
1068 PolyBP0 = _mm_add_pd(PolyBP0,CBP4);
1069 PolyBP1 = _mm_add_pd(PolyBP1,CBP3);
1070 PolyBP0 = _mm_mul_pd(PolyBP0,t2);
1071 PolyBP1 = _mm_mul_pd(PolyBP1,t2);
1072 PolyBP0 = _mm_add_pd(PolyBP0,CBP2);
1073 PolyBP1 = _mm_add_pd(PolyBP1,CBP1);
1074 PolyBP0 = _mm_mul_pd(PolyBP0,t2);
1075 PolyBP1 = _mm_mul_pd(PolyBP1,t);
1076 PolyBP0 = _mm_add_pd(PolyBP0,CBP0);
1077 PolyBP0 = _mm_add_pd(PolyBP0,PolyBP1);
1079 PolyBQ1 = _mm_mul_pd(CBQ7,t2);
1080 PolyBQ0 = _mm_mul_pd(CBQ6,t2);
1081 PolyBQ1 = _mm_add_pd(PolyBQ1,CBQ5);
1082 PolyBQ0 = _mm_add_pd(PolyBQ0,CBQ4);
1083 PolyBQ1 = _mm_mul_pd(PolyBQ1,t2);
1084 PolyBQ0 = _mm_mul_pd(PolyBQ0,t2);
1085 PolyBQ1 = _mm_add_pd(PolyBQ1,CBQ3);
1086 PolyBQ0 = _mm_add_pd(PolyBQ0,CBQ2);
1087 PolyBQ1 = _mm_mul_pd(PolyBQ1,t2);
1088 PolyBQ0 = _mm_mul_pd(PolyBQ0,t2);
1089 PolyBQ1 = _mm_add_pd(PolyBQ1,CBQ1);
1090 PolyBQ0 = _mm_add_pd(PolyBQ0,one);
1091 PolyBQ1 = _mm_mul_pd(PolyBQ1,t);
1092 PolyBQ0 = _mm_add_pd(PolyBQ0,PolyBQ1);
1094 res_erfcB = _mm_mul_pd(PolyBP0,gmx_mm_inv_pd(PolyBQ0));
1096 res_erfcB = _mm_mul_pd(res_erfcB,xabs);
1098 /* Calculate erfc() in range [4.5,inf] */
1099 w = gmx_mm_inv_pd(xabs);
1100 w2 = _mm_mul_pd(w,w);
1102 PolyCP0 = _mm_mul_pd(CCP6,w2);
1103 PolyCP1 = _mm_mul_pd(CCP5,w2);
1104 PolyCP0 = _mm_add_pd(PolyCP0,CCP4);
1105 PolyCP1 = _mm_add_pd(PolyCP1,CCP3);
1106 PolyCP0 = _mm_mul_pd(PolyCP0,w2);
1107 PolyCP1 = _mm_mul_pd(PolyCP1,w2);
1108 PolyCP0 = _mm_add_pd(PolyCP0,CCP2);
1109 PolyCP1 = _mm_add_pd(PolyCP1,CCP1);
1110 PolyCP0 = _mm_mul_pd(PolyCP0,w2);
1111 PolyCP1 = _mm_mul_pd(PolyCP1,w);
1112 PolyCP0 = _mm_add_pd(PolyCP0,CCP0);
1113 PolyCP0 = _mm_add_pd(PolyCP0,PolyCP1);
1115 PolyCQ0 = _mm_mul_pd(CCQ6,w2);
1116 PolyCQ1 = _mm_mul_pd(CCQ5,w2);
1117 PolyCQ0 = _mm_add_pd(PolyCQ0,CCQ4);
1118 PolyCQ1 = _mm_add_pd(PolyCQ1,CCQ3);
1119 PolyCQ0 = _mm_mul_pd(PolyCQ0,w2);
1120 PolyCQ1 = _mm_mul_pd(PolyCQ1,w2);
1121 PolyCQ0 = _mm_add_pd(PolyCQ0,CCQ2);
1122 PolyCQ1 = _mm_add_pd(PolyCQ1,CCQ1);
1123 PolyCQ0 = _mm_mul_pd(PolyCQ0,w2);
1124 PolyCQ1 = _mm_mul_pd(PolyCQ1,w);
1125 PolyCQ0 = _mm_add_pd(PolyCQ0,one);
1126 PolyCQ0 = _mm_add_pd(PolyCQ0,PolyCQ1);
1128 expmx2 = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );
1130 res_erfcC = _mm_mul_pd(PolyCP0,gmx_mm_inv_pd(PolyCQ0));
1131 res_erfcC = _mm_add_pd(res_erfcC,CCoffset);
1132 res_erfcC = _mm_mul_pd(res_erfcC,w);
1134 mask = _mm_cmpgt_pd(xabs,_mm_set1_pd(4.5));
1135 res_erfc = _mm_blendv_pd(res_erfcB,res_erfcC,mask);
1137 res_erfc = _mm_mul_pd(res_erfc,expmx2);
1139 /* erfc(x<0) = 2-erfc(|x|) */
1140 mask = _mm_cmplt_pd(x,_mm_setzero_pd());
1141 res_erfc = _mm_blendv_pd(res_erfc,_mm_sub_pd(two,res_erfc),mask);
1143 /* Select erf() or erfc() */
1144 mask = _mm_cmplt_pd(xabs,one);
1145 res = _mm_blendv_pd(_mm_sub_pd(one,res_erfc),res_erf,mask);
1147 return res;
1151 static __m256d
1152 gmx_mm256_erfc_pd(__m256d x)
1154 /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
1155 const __m256d CAP4 = _mm256_set1_pd(-0.431780540597889301512e-4);
1156 const __m256d CAP3 = _mm256_set1_pd(-0.00578562306260059236059);
1157 const __m256d CAP2 = _mm256_set1_pd(-0.028593586920219752446);
1158 const __m256d CAP1 = _mm256_set1_pd(-0.315924962948621698209);
1159 const __m256d CAP0 = _mm256_set1_pd(0.14952975608477029151);
1161 const __m256d CAQ5 = _mm256_set1_pd(-0.374089300177174709737e-5);
1162 const __m256d CAQ4 = _mm256_set1_pd(0.00015126584532155383535);
1163 const __m256d CAQ3 = _mm256_set1_pd(0.00536692680669480725423);
1164 const __m256d CAQ2 = _mm256_set1_pd(0.0668686825594046122636);
1165 const __m256d CAQ1 = _mm256_set1_pd(0.402604990869284362773);
1166 /* CAQ0 == 1.0 */
1167 const __m256d CAoffset = _mm256_set1_pd(0.9788494110107421875);
1169 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
1170 const __m256d CBP6 = _mm256_set1_pd(2.49650423685462752497647637088e-10);
1171 const __m256d CBP5 = _mm256_set1_pd(0.00119770193298159629350136085658);
1172 const __m256d CBP4 = _mm256_set1_pd(0.0164944422378370965881008942733);
1173 const __m256d CBP3 = _mm256_set1_pd(0.0984581468691775932063932439252);
1174 const __m256d CBP2 = _mm256_set1_pd(0.317364595806937763843589437418);
1175 const __m256d CBP1 = _mm256_set1_pd(0.554167062641455850932670067075);
1176 const __m256d CBP0 = _mm256_set1_pd(0.427583576155807163756925301060);
1177 const __m256d CBQ7 = _mm256_set1_pd(0.00212288829699830145976198384930);
1178 const __m256d CBQ6 = _mm256_set1_pd(0.0334810979522685300554606393425);
1179 const __m256d CBQ5 = _mm256_set1_pd(0.2361713785181450957579508850717);
1180 const __m256d CBQ4 = _mm256_set1_pd(0.955364736493055670530981883072);
1181 const __m256d CBQ3 = _mm256_set1_pd(2.36815675631420037315349279199);
1182 const __m256d CBQ2 = _mm256_set1_pd(3.55261649184083035537184223542);
1183 const __m256d CBQ1 = _mm256_set1_pd(2.93501136050160872574376997993);
1184 /* CBQ0 == 1.0 */
1186 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
1187 const __m256d CCP6 = _mm256_set1_pd(-2.8175401114513378771);
1188 const __m256d CCP5 = _mm256_set1_pd(-3.22729451764143718517);
1189 const __m256d CCP4 = _mm256_set1_pd(-2.5518551727311523996);
1190 const __m256d CCP3 = _mm256_set1_pd(-0.687717681153649930619);
1191 const __m256d CCP2 = _mm256_set1_pd(-0.212652252872804219852);
1192 const __m256d CCP1 = _mm256_set1_pd(0.0175389834052493308818);
1193 const __m256d CCP0 = _mm256_set1_pd(0.00628057170626964891937);
1195 const __m256d CCQ6 = _mm256_set1_pd(5.48409182238641741584);
1196 const __m256d CCQ5 = _mm256_set1_pd(13.5064170191802889145);
1197 const __m256d CCQ4 = _mm256_set1_pd(22.9367376522880577224);
1198 const __m256d CCQ3 = _mm256_set1_pd(15.930646027911794143);
1199 const __m256d CCQ2 = _mm256_set1_pd(11.0567237927800161565);
1200 const __m256d CCQ1 = _mm256_set1_pd(2.79257750980575282228);
1201 /* CCQ0 == 1.0 */
1202 const __m256d CCoffset = _mm256_set1_pd(0.5579090118408203125);
1204 const __m256d one = _mm256_set1_pd(1.0);
1205 const __m256d two = _mm256_set1_pd(2.0);
1207 const __m256d signbit = _mm256_castsi256_pd( _mm256_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000,
1208 0x80000000,0x00000000,0x80000000,0x00000000) );
1210 __m256d xabs,x2,x4,t,t2,w,w2;
1211 __m256d PolyAP0,PolyAP1,PolyAQ0,PolyAQ1;
1212 __m256d PolyBP0,PolyBP1,PolyBQ0,PolyBQ1;
1213 __m256d PolyCP0,PolyCP1,PolyCQ0,PolyCQ1;
1214 __m256d res_erf,res_erfcB,res_erfcC,res_erfc,res;
1215 __m256d mask,expmx2;
1217 /* Calculate erf() */
1218 xabs = gmx_mm256_abs_pd(x);
1219 x2 = _mm256_mul_pd(x,x);
1220 x4 = _mm256_mul_pd(x2,x2);
1222 PolyAP0 = _mm256_mul_pd(CAP4,x4);
1223 PolyAP1 = _mm256_mul_pd(CAP3,x4);
1224 PolyAP0 = _mm256_add_pd(PolyAP0,CAP2);
1225 PolyAP1 = _mm256_add_pd(PolyAP1,CAP1);
1226 PolyAP0 = _mm256_mul_pd(PolyAP0,x4);
1227 PolyAP1 = _mm256_mul_pd(PolyAP1,x2);
1228 PolyAP0 = _mm256_add_pd(PolyAP0,CAP0);
1229 PolyAP0 = _mm256_add_pd(PolyAP0,PolyAP1);
1231 PolyAQ1 = _mm256_mul_pd(CAQ5,x4);
1232 PolyAQ0 = _mm256_mul_pd(CAQ4,x4);
1233 PolyAQ1 = _mm256_add_pd(PolyAQ1,CAQ3);
1234 PolyAQ0 = _mm256_add_pd(PolyAQ0,CAQ2);
1235 PolyAQ1 = _mm256_mul_pd(PolyAQ1,x4);
1236 PolyAQ0 = _mm256_mul_pd(PolyAQ0,x4);
1237 PolyAQ1 = _mm256_add_pd(PolyAQ1,CAQ1);
1238 PolyAQ0 = _mm256_add_pd(PolyAQ0,one);
1239 PolyAQ1 = _mm256_mul_pd(PolyAQ1,x2);
1240 PolyAQ0 = _mm256_add_pd(PolyAQ0,PolyAQ1);
1242 res_erf = _mm256_mul_pd(PolyAP0,gmx_mm256_inv_pd(PolyAQ0));
1243 res_erf = _mm256_add_pd(CAoffset,res_erf);
1244 res_erf = _mm256_mul_pd(x,res_erf);
1246 /* Calculate erfc() in range [1,4.5] */
1247 t = _mm256_sub_pd(xabs,one);
1248 t2 = _mm256_mul_pd(t,t);
1250 PolyBP0 = _mm256_mul_pd(CBP6,t2);
1251 PolyBP1 = _mm256_mul_pd(CBP5,t2);
1252 PolyBP0 = _mm256_add_pd(PolyBP0,CBP4);
1253 PolyBP1 = _mm256_add_pd(PolyBP1,CBP3);
1254 PolyBP0 = _mm256_mul_pd(PolyBP0,t2);
1255 PolyBP1 = _mm256_mul_pd(PolyBP1,t2);
1256 PolyBP0 = _mm256_add_pd(PolyBP0,CBP2);
1257 PolyBP1 = _mm256_add_pd(PolyBP1,CBP1);
1258 PolyBP0 = _mm256_mul_pd(PolyBP0,t2);
1259 PolyBP1 = _mm256_mul_pd(PolyBP1,t);
1260 PolyBP0 = _mm256_add_pd(PolyBP0,CBP0);
1261 PolyBP0 = _mm256_add_pd(PolyBP0,PolyBP1);
1263 PolyBQ1 = _mm256_mul_pd(CBQ7,t2);
1264 PolyBQ0 = _mm256_mul_pd(CBQ6,t2);
1265 PolyBQ1 = _mm256_add_pd(PolyBQ1,CBQ5);
1266 PolyBQ0 = _mm256_add_pd(PolyBQ0,CBQ4);
1267 PolyBQ1 = _mm256_mul_pd(PolyBQ1,t2);
1268 PolyBQ0 = _mm256_mul_pd(PolyBQ0,t2);
1269 PolyBQ1 = _mm256_add_pd(PolyBQ1,CBQ3);
1270 PolyBQ0 = _mm256_add_pd(PolyBQ0,CBQ2);
1271 PolyBQ1 = _mm256_mul_pd(PolyBQ1,t2);
1272 PolyBQ0 = _mm256_mul_pd(PolyBQ0,t2);
1273 PolyBQ1 = _mm256_add_pd(PolyBQ1,CBQ1);
1274 PolyBQ0 = _mm256_add_pd(PolyBQ0,one);
1275 PolyBQ1 = _mm256_mul_pd(PolyBQ1,t);
1276 PolyBQ0 = _mm256_add_pd(PolyBQ0,PolyBQ1);
1278 res_erfcB = _mm256_mul_pd(PolyBP0,gmx_mm256_inv_pd(PolyBQ0));
1280 res_erfcB = _mm256_mul_pd(res_erfcB,xabs);
1282 /* Calculate erfc() in range [4.5,inf] */
1283 w = gmx_mm256_inv_pd(xabs);
1284 w2 = _mm256_mul_pd(w,w);
1286 PolyCP0 = _mm256_mul_pd(CCP6,w2);
1287 PolyCP1 = _mm256_mul_pd(CCP5,w2);
1288 PolyCP0 = _mm256_add_pd(PolyCP0,CCP4);
1289 PolyCP1 = _mm256_add_pd(PolyCP1,CCP3);
1290 PolyCP0 = _mm256_mul_pd(PolyCP0,w2);
1291 PolyCP1 = _mm256_mul_pd(PolyCP1,w2);
1292 PolyCP0 = _mm256_add_pd(PolyCP0,CCP2);
1293 PolyCP1 = _mm256_add_pd(PolyCP1,CCP1);
1294 PolyCP0 = _mm256_mul_pd(PolyCP0,w2);
1295 PolyCP1 = _mm256_mul_pd(PolyCP1,w);
1296 PolyCP0 = _mm256_add_pd(PolyCP0,CCP0);
1297 PolyCP0 = _mm256_add_pd(PolyCP0,PolyCP1);
1299 PolyCQ0 = _mm256_mul_pd(CCQ6,w2);
1300 PolyCQ1 = _mm256_mul_pd(CCQ5,w2);
1301 PolyCQ0 = _mm256_add_pd(PolyCQ0,CCQ4);
1302 PolyCQ1 = _mm256_add_pd(PolyCQ1,CCQ3);
1303 PolyCQ0 = _mm256_mul_pd(PolyCQ0,w2);
1304 PolyCQ1 = _mm256_mul_pd(PolyCQ1,w2);
1305 PolyCQ0 = _mm256_add_pd(PolyCQ0,CCQ2);
1306 PolyCQ1 = _mm256_add_pd(PolyCQ1,CCQ1);
1307 PolyCQ0 = _mm256_mul_pd(PolyCQ0,w2);
1308 PolyCQ1 = _mm256_mul_pd(PolyCQ1,w);
1309 PolyCQ0 = _mm256_add_pd(PolyCQ0,one);
1310 PolyCQ0 = _mm256_add_pd(PolyCQ0,PolyCQ1);
1312 expmx2 = gmx_mm256_exp_pd( _mm256_or_pd(signbit, x2) );
1314 res_erfcC = _mm256_mul_pd(PolyCP0,gmx_mm256_inv_pd(PolyCQ0));
1315 res_erfcC = _mm256_add_pd(res_erfcC,CCoffset);
1316 res_erfcC = _mm256_mul_pd(res_erfcC,w);
1318 mask = _mm256_cmp_pd(xabs,_mm256_set1_pd(4.5),_CMP_GT_OQ);
1319 res_erfc = _mm256_blendv_pd(res_erfcB,res_erfcC,mask);
1321 res_erfc = _mm256_mul_pd(res_erfc,expmx2);
1323 /* erfc(x<0) = 2-erfc(|x|) */
1324 mask = _mm256_cmp_pd(x,_mm256_setzero_pd(),_CMP_LT_OQ);
1325 res_erfc = _mm256_blendv_pd(res_erfc,_mm256_sub_pd(two,res_erfc),mask);
1327 /* Select erf() or erfc() */
1328 mask = _mm256_cmp_pd(xabs,one,_CMP_LT_OQ);
1329 res = _mm256_blendv_pd(res_erfc,_mm256_sub_pd(one,res_erf),mask);
1331 return res;
1335 static __m128d
1336 gmx_mm_erfc_pd(__m128d x)
1338 /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
1339 const __m128d CAP4 = _mm_set1_pd(-0.431780540597889301512e-4);
1340 const __m128d CAP3 = _mm_set1_pd(-0.00578562306260059236059);
1341 const __m128d CAP2 = _mm_set1_pd(-0.028593586920219752446);
1342 const __m128d CAP1 = _mm_set1_pd(-0.315924962948621698209);
1343 const __m128d CAP0 = _mm_set1_pd(0.14952975608477029151);
1345 const __m128d CAQ5 = _mm_set1_pd(-0.374089300177174709737e-5);
1346 const __m128d CAQ4 = _mm_set1_pd(0.00015126584532155383535);
1347 const __m128d CAQ3 = _mm_set1_pd(0.00536692680669480725423);
1348 const __m128d CAQ2 = _mm_set1_pd(0.0668686825594046122636);
1349 const __m128d CAQ1 = _mm_set1_pd(0.402604990869284362773);
1350 /* CAQ0 == 1.0 */
1351 const __m128d CAoffset = _mm_set1_pd(0.9788494110107421875);
1353 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
1354 const __m128d CBP6 = _mm_set1_pd(2.49650423685462752497647637088e-10);
1355 const __m128d CBP5 = _mm_set1_pd(0.00119770193298159629350136085658);
1356 const __m128d CBP4 = _mm_set1_pd(0.0164944422378370965881008942733);
1357 const __m128d CBP3 = _mm_set1_pd(0.0984581468691775932063932439252);
1358 const __m128d CBP2 = _mm_set1_pd(0.317364595806937763843589437418);
1359 const __m128d CBP1 = _mm_set1_pd(0.554167062641455850932670067075);
1360 const __m128d CBP0 = _mm_set1_pd(0.427583576155807163756925301060);
1361 const __m128d CBQ7 = _mm_set1_pd(0.00212288829699830145976198384930);
1362 const __m128d CBQ6 = _mm_set1_pd(0.0334810979522685300554606393425);
1363 const __m128d CBQ5 = _mm_set1_pd(0.2361713785181450957579508850717);
1364 const __m128d CBQ4 = _mm_set1_pd(0.955364736493055670530981883072);
1365 const __m128d CBQ3 = _mm_set1_pd(2.36815675631420037315349279199);
1366 const __m128d CBQ2 = _mm_set1_pd(3.55261649184083035537184223542);
1367 const __m128d CBQ1 = _mm_set1_pd(2.93501136050160872574376997993);
1368 /* CBQ0 == 1.0 */
1370 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
1371 const __m128d CCP6 = _mm_set1_pd(-2.8175401114513378771);
1372 const __m128d CCP5 = _mm_set1_pd(-3.22729451764143718517);
1373 const __m128d CCP4 = _mm_set1_pd(-2.5518551727311523996);
1374 const __m128d CCP3 = _mm_set1_pd(-0.687717681153649930619);
1375 const __m128d CCP2 = _mm_set1_pd(-0.212652252872804219852);
1376 const __m128d CCP1 = _mm_set1_pd(0.0175389834052493308818);
1377 const __m128d CCP0 = _mm_set1_pd(0.00628057170626964891937);
1379 const __m128d CCQ6 = _mm_set1_pd(5.48409182238641741584);
1380 const __m128d CCQ5 = _mm_set1_pd(13.5064170191802889145);
1381 const __m128d CCQ4 = _mm_set1_pd(22.9367376522880577224);
1382 const __m128d CCQ3 = _mm_set1_pd(15.930646027911794143);
1383 const __m128d CCQ2 = _mm_set1_pd(11.0567237927800161565);
1384 const __m128d CCQ1 = _mm_set1_pd(2.79257750980575282228);
1385 /* CCQ0 == 1.0 */
1386 const __m128d CCoffset = _mm_set1_pd(0.5579090118408203125);
1388 const __m128d one = _mm_set1_pd(1.0);
1389 const __m128d two = _mm_set1_pd(2.0);
1391 const __m128d signbit = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000) );
1393 __m128d xabs,x2,x4,t,t2,w,w2;
1394 __m128d PolyAP0,PolyAP1,PolyAQ0,PolyAQ1;
1395 __m128d PolyBP0,PolyBP1,PolyBQ0,PolyBQ1;
1396 __m128d PolyCP0,PolyCP1,PolyCQ0,PolyCQ1;
1397 __m128d res_erf,res_erfcB,res_erfcC,res_erfc,res;
1398 __m128d mask,expmx2;
1400 /* Calculate erf() */
1401 xabs = gmx_mm_abs_pd(x);
1402 x2 = _mm_mul_pd(x,x);
1403 x4 = _mm_mul_pd(x2,x2);
1405 PolyAP0 = _mm_mul_pd(CAP4,x4);
1406 PolyAP1 = _mm_mul_pd(CAP3,x4);
1407 PolyAP0 = _mm_add_pd(PolyAP0,CAP2);
1408 PolyAP1 = _mm_add_pd(PolyAP1,CAP1);
1409 PolyAP0 = _mm_mul_pd(PolyAP0,x4);
1410 PolyAP1 = _mm_mul_pd(PolyAP1,x2);
1411 PolyAP0 = _mm_add_pd(PolyAP0,CAP0);
1412 PolyAP0 = _mm_add_pd(PolyAP0,PolyAP1);
1414 PolyAQ1 = _mm_mul_pd(CAQ5,x4);
1415 PolyAQ0 = _mm_mul_pd(CAQ4,x4);
1416 PolyAQ1 = _mm_add_pd(PolyAQ1,CAQ3);
1417 PolyAQ0 = _mm_add_pd(PolyAQ0,CAQ2);
1418 PolyAQ1 = _mm_mul_pd(PolyAQ1,x4);
1419 PolyAQ0 = _mm_mul_pd(PolyAQ0,x4);
1420 PolyAQ1 = _mm_add_pd(PolyAQ1,CAQ1);
1421 PolyAQ0 = _mm_add_pd(PolyAQ0,one);
1422 PolyAQ1 = _mm_mul_pd(PolyAQ1,x2);
1423 PolyAQ0 = _mm_add_pd(PolyAQ0,PolyAQ1);
1425 res_erf = _mm_mul_pd(PolyAP0,gmx_mm_inv_pd(PolyAQ0));
1426 res_erf = _mm_add_pd(CAoffset,res_erf);
1427 res_erf = _mm_mul_pd(x,res_erf);
1429 /* Calculate erfc() in range [1,4.5] */
1430 t = _mm_sub_pd(xabs,one);
1431 t2 = _mm_mul_pd(t,t);
1433 PolyBP0 = _mm_mul_pd(CBP6,t2);
1434 PolyBP1 = _mm_mul_pd(CBP5,t2);
1435 PolyBP0 = _mm_add_pd(PolyBP0,CBP4);
1436 PolyBP1 = _mm_add_pd(PolyBP1,CBP3);
1437 PolyBP0 = _mm_mul_pd(PolyBP0,t2);
1438 PolyBP1 = _mm_mul_pd(PolyBP1,t2);
1439 PolyBP0 = _mm_add_pd(PolyBP0,CBP2);
1440 PolyBP1 = _mm_add_pd(PolyBP1,CBP1);
1441 PolyBP0 = _mm_mul_pd(PolyBP0,t2);
1442 PolyBP1 = _mm_mul_pd(PolyBP1,t);
1443 PolyBP0 = _mm_add_pd(PolyBP0,CBP0);
1444 PolyBP0 = _mm_add_pd(PolyBP0,PolyBP1);
1446 PolyBQ1 = _mm_mul_pd(CBQ7,t2);
1447 PolyBQ0 = _mm_mul_pd(CBQ6,t2);
1448 PolyBQ1 = _mm_add_pd(PolyBQ1,CBQ5);
1449 PolyBQ0 = _mm_add_pd(PolyBQ0,CBQ4);
1450 PolyBQ1 = _mm_mul_pd(PolyBQ1,t2);
1451 PolyBQ0 = _mm_mul_pd(PolyBQ0,t2);
1452 PolyBQ1 = _mm_add_pd(PolyBQ1,CBQ3);
1453 PolyBQ0 = _mm_add_pd(PolyBQ0,CBQ2);
1454 PolyBQ1 = _mm_mul_pd(PolyBQ1,t2);
1455 PolyBQ0 = _mm_mul_pd(PolyBQ0,t2);
1456 PolyBQ1 = _mm_add_pd(PolyBQ1,CBQ1);
1457 PolyBQ0 = _mm_add_pd(PolyBQ0,one);
1458 PolyBQ1 = _mm_mul_pd(PolyBQ1,t);
1459 PolyBQ0 = _mm_add_pd(PolyBQ0,PolyBQ1);
1461 res_erfcB = _mm_mul_pd(PolyBP0,gmx_mm_inv_pd(PolyBQ0));
1463 res_erfcB = _mm_mul_pd(res_erfcB,xabs);
1465 /* Calculate erfc() in range [4.5,inf] */
1466 w = gmx_mm_inv_pd(xabs);
1467 w2 = _mm_mul_pd(w,w);
1469 PolyCP0 = _mm_mul_pd(CCP6,w2);
1470 PolyCP1 = _mm_mul_pd(CCP5,w2);
1471 PolyCP0 = _mm_add_pd(PolyCP0,CCP4);
1472 PolyCP1 = _mm_add_pd(PolyCP1,CCP3);
1473 PolyCP0 = _mm_mul_pd(PolyCP0,w2);
1474 PolyCP1 = _mm_mul_pd(PolyCP1,w2);
1475 PolyCP0 = _mm_add_pd(PolyCP0,CCP2);
1476 PolyCP1 = _mm_add_pd(PolyCP1,CCP1);
1477 PolyCP0 = _mm_mul_pd(PolyCP0,w2);
1478 PolyCP1 = _mm_mul_pd(PolyCP1,w);
1479 PolyCP0 = _mm_add_pd(PolyCP0,CCP0);
1480 PolyCP0 = _mm_add_pd(PolyCP0,PolyCP1);
1482 PolyCQ0 = _mm_mul_pd(CCQ6,w2);
1483 PolyCQ1 = _mm_mul_pd(CCQ5,w2);
1484 PolyCQ0 = _mm_add_pd(PolyCQ0,CCQ4);
1485 PolyCQ1 = _mm_add_pd(PolyCQ1,CCQ3);
1486 PolyCQ0 = _mm_mul_pd(PolyCQ0,w2);
1487 PolyCQ1 = _mm_mul_pd(PolyCQ1,w2);
1488 PolyCQ0 = _mm_add_pd(PolyCQ0,CCQ2);
1489 PolyCQ1 = _mm_add_pd(PolyCQ1,CCQ1);
1490 PolyCQ0 = _mm_mul_pd(PolyCQ0,w2);
1491 PolyCQ1 = _mm_mul_pd(PolyCQ1,w);
1492 PolyCQ0 = _mm_add_pd(PolyCQ0,one);
1493 PolyCQ0 = _mm_add_pd(PolyCQ0,PolyCQ1);
1495 expmx2 = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );
1497 res_erfcC = _mm_mul_pd(PolyCP0,gmx_mm_inv_pd(PolyCQ0));
1498 res_erfcC = _mm_add_pd(res_erfcC,CCoffset);
1499 res_erfcC = _mm_mul_pd(res_erfcC,w);
1501 mask = _mm_cmpgt_pd(xabs,_mm_set1_pd(4.5));
1502 res_erfc = _mm_blendv_pd(res_erfcB,res_erfcC,mask);
1504 res_erfc = _mm_mul_pd(res_erfc,expmx2);
1506 /* erfc(x<0) = 2-erfc(|x|) */
1507 mask = _mm_cmplt_pd(x,_mm_setzero_pd());
1508 res_erfc = _mm_blendv_pd(res_erfc,_mm_sub_pd(two,res_erfc),mask);
1510 /* Select erf() or erfc() */
1511 mask = _mm_cmplt_pd(xabs,one);
1512 res = _mm_blendv_pd(res_erfc,_mm_sub_pd(one,res_erf),mask);
1514 return res;
1518 /* Calculate the force correction due to PME analytically.
1520 * This routine is meant to enable analytical evaluation of the
1521 * direct-space PME electrostatic force to avoid tables.
1523 * The direct-space potential should be Erfc(beta*r)/r, but there
1524 * are some problems evaluating that:
1526 * First, the error function is difficult (read: expensive) to
1527 * approximate accurately for intermediate to large arguments, and
1528 * this happens already in ranges of beta*r that occur in simulations.
1529 * Second, we now try to avoid calculating potentials in Gromacs and
1530 * instead use forces directly.
1532 * We can simplify things slightly by noting that the PME part is really
1533 * a correction to the normal Coulomb force since Erfc(z)=1-Erf(z), i.e.
1535 * V= 1/r - Erf(beta*r)/r
1537 * The first term we already have from the inverse square root, so
1538 * we can leave that part out of this routine.
1540 * For pme tolerances of 1e-3 to 1e-8 and cutoffs of 0.5nm to 1.8nm,
1541 * the argument beta*r will be in the range 0.15 to ~4. Use your
1542 * favorite plotting program to realize how well-behaved Erf(z)/z is
1543 * in this range!
1545 * We approximate f(z)=erf(z)/z with a rational minimax polynomial.
1546 * However, it turns out it is more efficient to approximate f(z)/z and
1547 * then only use even powers. This is another minor optimization, since
1548 * we actually WANT f(z)/z, because it is going to be multiplied by
1549 * the vector between the two atoms to get the vectorial force. The
1550 * fastest flops are the ones we can avoid calculating!
1552 * So, here's how it should be used:
1554 * 1. Calculate r^2.
1555 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
1556 * 3. Evaluate this routine with z^2 as the argument.
1557 * 4. The return value is the expression:
1560 *    2*exp(-z^2)        erf(z)
1561 *    ------------  -  --------
1562 *    sqrt(Pi)*z^2        z^3
1564 * 5. Multiply the entire expression by beta^3. This will get you
1566 *    beta^3*2*exp(-z^2)     beta^3*erf(z)
1567 *    ------------------  -  -------------
1568 *       sqrt(Pi)*z^2              z^3
1570 * or, switching back to r (z=r*beta):
1572 *    2*beta*exp(-r^2*beta^2)     erf(r*beta)
1573 *    -----------------------  -  -----------
1574 *         sqrt(Pi)*r^2                r^3
1577 * With a bit of math you should be able to confirm that
1578 * this is exactly D[Erf[beta*r]/r,r] divided by r one more time.
1580 * 6. Add the result to 1/r^3, multiply by the product of the charges,
1581 * and you have your force (divided by r). A final multiplication
1582 * with the vector connecting the two particles and you have your
1583 *    vectorial force to add to the particles. (A usage sketch follows gmx_mm256_pmecorrF_pd() below.)
1586 static __m256d
1587 gmx_mm256_pmecorrF_pd(__m256d z2)
1589 const __m256d FN10 = _mm256_set1_pd(-8.0072854618360083154e-14);
1590 const __m256d FN9 = _mm256_set1_pd(1.1859116242260148027e-11);
1591 const __m256d FN8 = _mm256_set1_pd(-8.1490406329798423616e-10);
1592 const __m256d FN7 = _mm256_set1_pd(3.4404793543907847655e-8);
1593 const __m256d FN6 = _mm256_set1_pd(-9.9471420832602741006e-7);
1594 const __m256d FN5 = _mm256_set1_pd(0.000020740315999115847456);
1595 const __m256d FN4 = _mm256_set1_pd(-0.00031991745139313364005);
1596 const __m256d FN3 = _mm256_set1_pd(0.0035074449373659008203);
1597 const __m256d FN2 = _mm256_set1_pd(-0.031750380176100813405);
1598 const __m256d FN1 = _mm256_set1_pd(0.13884101728898463426);
1599 const __m256d FN0 = _mm256_set1_pd(-0.75225277815249618847);
1601 const __m256d FD5 = _mm256_set1_pd(0.000016009278224355026701);
1602 const __m256d FD4 = _mm256_set1_pd(0.00051055686934806966046);
1603 const __m256d FD3 = _mm256_set1_pd(0.0081803507497974289008);
1604 const __m256d FD2 = _mm256_set1_pd(0.077181146026670287235);
1605 const __m256d FD1 = _mm256_set1_pd(0.41543303143712535988);
1606 const __m256d FD0 = _mm256_set1_pd(1.0);
1608 __m256d z4;
1609 __m256d polyFN0,polyFN1,polyFD0,polyFD1;
1611 z4 = _mm256_mul_pd(z2,z2);
1613 polyFD1 = _mm256_mul_pd(FD5,z4);
1614 polyFD0 = _mm256_mul_pd(FD4,z4);
1615 polyFD1 = _mm256_add_pd(polyFD1,FD3);
1616 polyFD0 = _mm256_add_pd(polyFD0,FD2);
1617 polyFD1 = _mm256_mul_pd(polyFD1,z4);
1618 polyFD0 = _mm256_mul_pd(polyFD0,z4);
1619 polyFD1 = _mm256_add_pd(polyFD1,FD1);
1620 polyFD0 = _mm256_add_pd(polyFD0,FD0);
1621 polyFD1 = _mm256_mul_pd(polyFD1,z2);
1622 polyFD0 = _mm256_add_pd(polyFD0,polyFD1);
1624 polyFD0 = gmx_mm256_inv_pd(polyFD0);
1626 polyFN0 = _mm256_mul_pd(FN10,z4);
1627 polyFN1 = _mm256_mul_pd(FN9,z4);
1628 polyFN0 = _mm256_add_pd(polyFN0,FN8);
1629 polyFN1 = _mm256_add_pd(polyFN1,FN7);
1630 polyFN0 = _mm256_mul_pd(polyFN0,z4);
1631 polyFN1 = _mm256_mul_pd(polyFN1,z4);
1632 polyFN0 = _mm256_add_pd(polyFN0,FN6);
1633 polyFN1 = _mm256_add_pd(polyFN1,FN5);
1634 polyFN0 = _mm256_mul_pd(polyFN0,z4);
1635 polyFN1 = _mm256_mul_pd(polyFN1,z4);
1636 polyFN0 = _mm256_add_pd(polyFN0,FN4);
1637 polyFN1 = _mm256_add_pd(polyFN1,FN3);
1638 polyFN0 = _mm256_mul_pd(polyFN0,z4);
1639 polyFN1 = _mm256_mul_pd(polyFN1,z4);
1640 polyFN0 = _mm256_add_pd(polyFN0,FN2);
1641 polyFN1 = _mm256_add_pd(polyFN1,FN1);
1642 polyFN0 = _mm256_mul_pd(polyFN0,z4);
1643 polyFN1 = _mm256_mul_pd(polyFN1,z2);
1644 polyFN0 = _mm256_add_pd(polyFN0,FN0);
1645 polyFN0 = _mm256_add_pd(polyFN0,polyFN1);
1647 return _mm256_mul_pd(polyFN0,polyFD0);
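/* Illustrative usage sketch, not part of the original API: it simply walks
 * through the numbered steps in the comment above for the 256-bit force
 * correction. The argument names rsq (r^2 for four pairs), beta and qq
 * (charge product) are hypothetical and chosen for this example only.
 */
static __m256d
gmx_mm256_pmecorrF_usage_sketch(__m256d rsq, double beta, double qq)
{
    const __m256d beta2  = _mm256_set1_pd(beta*beta);
    const __m256d beta3  = _mm256_set1_pd(beta*beta*beta);
    const __m256d qqv    = _mm256_set1_pd(qq);
    __m256d       rinv   = gmx_mm256_invsqrt_pd(rsq);      /* 1/r              */
    __m256d       rinvsq = _mm256_mul_pd(rinv,rinv);       /* 1/r^2            */
    __m256d       z2     = _mm256_mul_pd(beta2,rsq);       /* z^2 = beta^2*r^2 */
    __m256d       fcorr  = gmx_mm256_pmecorrF_pd(z2);      /* steps 3-4        */
    /* Steps 5-6: scale the correction by beta^3, add 1/r^3 and multiply by
     * the charge product. The result is the scalar force divided by r;
     * multiplying by the distance vector gives the vectorial force.
     */
    return _mm256_mul_pd(qqv,
                         _mm256_add_pd(_mm256_mul_pd(fcorr,beta3),
                                       _mm256_mul_pd(rinv,rinvsq)));
}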
1651 static __m128d
1652 gmx_mm_pmecorrF_pd(__m128d z2)
1654 const __m128d FN10 = _mm_set1_pd(-8.0072854618360083154e-14);
1655 const __m128d FN9 = _mm_set1_pd(1.1859116242260148027e-11);
1656 const __m128d FN8 = _mm_set1_pd(-8.1490406329798423616e-10);
1657 const __m128d FN7 = _mm_set1_pd(3.4404793543907847655e-8);
1658 const __m128d FN6 = _mm_set1_pd(-9.9471420832602741006e-7);
1659 const __m128d FN5 = _mm_set1_pd(0.000020740315999115847456);
1660 const __m128d FN4 = _mm_set1_pd(-0.00031991745139313364005);
1661 const __m128d FN3 = _mm_set1_pd(0.0035074449373659008203);
1662 const __m128d FN2 = _mm_set1_pd(-0.031750380176100813405);
1663 const __m128d FN1 = _mm_set1_pd(0.13884101728898463426);
1664 const __m128d FN0 = _mm_set1_pd(-0.75225277815249618847);
1666 const __m128d FD5 = _mm_set1_pd(0.000016009278224355026701);
1667 const __m128d FD4 = _mm_set1_pd(0.00051055686934806966046);
1668 const __m128d FD3 = _mm_set1_pd(0.0081803507497974289008);
1669 const __m128d FD2 = _mm_set1_pd(0.077181146026670287235);
1670 const __m128d FD1 = _mm_set1_pd(0.41543303143712535988);
1671 const __m128d FD0 = _mm_set1_pd(1.0);
1673 __m128d z4;
1674 __m128d polyFN0,polyFN1,polyFD0,polyFD1;
1676 z4 = _mm_mul_pd(z2,z2);
1678 polyFD1 = _mm_mul_pd(FD5,z4);
1679 polyFD0 = _mm_mul_pd(FD4,z4);
1680 polyFD1 = _mm_add_pd(polyFD1,FD3);
1681 polyFD0 = _mm_add_pd(polyFD0,FD2);
1682 polyFD1 = _mm_mul_pd(polyFD1,z4);
1683 polyFD0 = _mm_mul_pd(polyFD0,z4);
1684 polyFD1 = _mm_add_pd(polyFD1,FD1);
1685 polyFD0 = _mm_add_pd(polyFD0,FD0);
1686 polyFD1 = _mm_mul_pd(polyFD1,z2);
1687 polyFD0 = _mm_add_pd(polyFD0,polyFD1);
1689 polyFD0 = gmx_mm_inv_pd(polyFD0);
1691 polyFN0 = _mm_mul_pd(FN10,z4);
1692 polyFN1 = _mm_mul_pd(FN9,z4);
1693 polyFN0 = _mm_add_pd(polyFN0,FN8);
1694 polyFN1 = _mm_add_pd(polyFN1,FN7);
1695 polyFN0 = _mm_mul_pd(polyFN0,z4);
1696 polyFN1 = _mm_mul_pd(polyFN1,z4);
1697 polyFN0 = _mm_add_pd(polyFN0,FN6);
1698 polyFN1 = _mm_add_pd(polyFN1,FN5);
1699 polyFN0 = _mm_mul_pd(polyFN0,z4);
1700 polyFN1 = _mm_mul_pd(polyFN1,z4);
1701 polyFN0 = _mm_add_pd(polyFN0,FN4);
1702 polyFN1 = _mm_add_pd(polyFN1,FN3);
1703 polyFN0 = _mm_mul_pd(polyFN0,z4);
1704 polyFN1 = _mm_mul_pd(polyFN1,z4);
1705 polyFN0 = _mm_add_pd(polyFN0,FN2);
1706 polyFN1 = _mm_add_pd(polyFN1,FN1);
1707 polyFN0 = _mm_mul_pd(polyFN0,z4);
1708 polyFN1 = _mm_mul_pd(polyFN1,z2);
1709 polyFN0 = _mm_add_pd(polyFN0,FN0);
1710 polyFN0 = _mm_add_pd(polyFN0,polyFN1);
1712 return _mm_mul_pd(polyFN0,polyFD0);
1718 /* Calculate the potential correction due to PME analytically.
1720 * See gmx_mm256_pmecorrF_pd() for details about the approximation.
1722 * This routine calculates Erf(z)/z, although you should provide z^2
1723 * as the input argument.
1725 * Here's how it should be used:
1727 * 1. Calculate r^2.
1728 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
1729 * 3. Evaluate this routine with z^2 as the argument.
1730 * 4. The return value is the expression:
1733 *        erf(z)
1734 *       --------
1735 *           z
1737 * 5. Multiply the entire expression by beta and switch back to r (z=r*beta):
1739 *      erf(r*beta)
1740 *      -----------
1741 *           r
1743 * 6. Subtract the result from 1/r, multiply by the product of the charges,
1744 *    and you have your potential. (A usage sketch follows gmx_mm256_pmecorrV_pd() below.)
1747 static __m256d
1748 gmx_mm256_pmecorrV_pd(__m256d z2)
1750 const __m256d VN9 = _mm256_set1_pd(-9.3723776169321855475e-13);
1751 const __m256d VN8 = _mm256_set1_pd(1.2280156762674215741e-10);
1752 const __m256d VN7 = _mm256_set1_pd(-7.3562157912251309487e-9);
1753 const __m256d VN6 = _mm256_set1_pd(2.6215886208032517509e-7);
1754 const __m256d VN5 = _mm256_set1_pd(-4.9532491651265819499e-6);
1755 const __m256d VN4 = _mm256_set1_pd(0.00025907400778966060389);
1756 const __m256d VN3 = _mm256_set1_pd(0.0010585044856156469792);
1757 const __m256d VN2 = _mm256_set1_pd(0.045247661136833092885);
1758 const __m256d VN1 = _mm256_set1_pd(0.11643931522926034421);
1759 const __m256d VN0 = _mm256_set1_pd(1.1283791671726767970);
1761 const __m256d VD5 = _mm256_set1_pd(0.000021784709867336150342);
1762 const __m256d VD4 = _mm256_set1_pd(0.00064293662010911388448);
1763 const __m256d VD3 = _mm256_set1_pd(0.0096311444822588683504);
1764 const __m256d VD2 = _mm256_set1_pd(0.085608012351550627051);
1765 const __m256d VD1 = _mm256_set1_pd(0.43652499166614811084);
1766 const __m256d VD0 = _mm256_set1_pd(1.0);
1768 __m256d z4;
1769 __m256d polyVN0,polyVN1,polyVD0,polyVD1;
1771 z4 = _mm256_mul_pd(z2,z2);
1773 polyVD1 = _mm256_mul_pd(VD5,z4);
1774 polyVD0 = _mm256_mul_pd(VD4,z4);
1775 polyVD1 = _mm256_add_pd(polyVD1,VD3);
1776 polyVD0 = _mm256_add_pd(polyVD0,VD2);
1777 polyVD1 = _mm256_mul_pd(polyVD1,z4);
1778 polyVD0 = _mm256_mul_pd(polyVD0,z4);
1779 polyVD1 = _mm256_add_pd(polyVD1,VD1);
1780 polyVD0 = _mm256_add_pd(polyVD0,VD0);
1781 polyVD1 = _mm256_mul_pd(polyVD1,z2);
1782 polyVD0 = _mm256_add_pd(polyVD0,polyVD1);
1784 polyVD0 = gmx_mm256_inv_pd(polyVD0);
1786 polyVN1 = _mm256_mul_pd(VN9,z4);
1787 polyVN0 = _mm256_mul_pd(VN8,z4);
1788 polyVN1 = _mm256_add_pd(polyVN1,VN7);
1789 polyVN0 = _mm256_add_pd(polyVN0,VN6);
1790 polyVN1 = _mm256_mul_pd(polyVN1,z4);
1791 polyVN0 = _mm256_mul_pd(polyVN0,z4);
1792 polyVN1 = _mm256_add_pd(polyVN1,VN5);
1793 polyVN0 = _mm256_add_pd(polyVN0,VN4);
1794 polyVN1 = _mm256_mul_pd(polyVN1,z4);
1795 polyVN0 = _mm256_mul_pd(polyVN0,z4);
1796 polyVN1 = _mm256_add_pd(polyVN1,VN3);
1797 polyVN0 = _mm256_add_pd(polyVN0,VN2);
1798 polyVN1 = _mm256_mul_pd(polyVN1,z4);
1799 polyVN0 = _mm256_mul_pd(polyVN0,z4);
1800 polyVN1 = _mm256_add_pd(polyVN1,VN1);
1801 polyVN0 = _mm256_add_pd(polyVN0,VN0);
1802 polyVN1 = _mm256_mul_pd(polyVN1,z2);
1803 polyVN0 = _mm256_add_pd(polyVN0,polyVN1);
1805 return _mm256_mul_pd(polyVN0,polyVD0);
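/* Illustrative usage sketch, not part of the original API: it follows the
 * numbered steps in the comment above for the 256-bit potential correction.
 * The argument names rsq, beta and qq (charge product) are hypothetical.
 */
static __m256d
gmx_mm256_pmecorrV_usage_sketch(__m256d rsq, double beta, double qq)
{
    const __m256d betav = _mm256_set1_pd(beta);
    const __m256d beta2 = _mm256_set1_pd(beta*beta);
    const __m256d qqv   = _mm256_set1_pd(qq);
    __m256d       rinv  = gmx_mm256_invsqrt_pd(rsq);       /* 1/r              */
    __m256d       z2    = _mm256_mul_pd(beta2,rsq);        /* z^2 = beta^2*r^2 */
    __m256d       vcorr = gmx_mm256_pmecorrV_pd(z2);       /* erf(z)/z         */
    /* Steps 5-6: beta*erf(z)/z equals erf(beta*r)/r; subtracting it from 1/r
     * and scaling by the charge product gives qq*erfc(beta*r)/r, the
     * direct-space PME potential.
     */
    return _mm256_mul_pd(qqv,
                         _mm256_sub_pd(rinv,_mm256_mul_pd(vcorr,betav)));
}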
1809 static __m128d
1810 gmx_mm_pmecorrV_pd(__m128d z2)
1812 const __m128d VN9 = _mm_set1_pd(-9.3723776169321855475e-13);
1813 const __m128d VN8 = _mm_set1_pd(1.2280156762674215741e-10);
1814 const __m128d VN7 = _mm_set1_pd(-7.3562157912251309487e-9);
1815 const __m128d VN6 = _mm_set1_pd(2.6215886208032517509e-7);
1816 const __m128d VN5 = _mm_set1_pd(-4.9532491651265819499e-6);
1817 const __m128d VN4 = _mm_set1_pd(0.00025907400778966060389);
1818 const __m128d VN3 = _mm_set1_pd(0.0010585044856156469792);
1819 const __m128d VN2 = _mm_set1_pd(0.045247661136833092885);
1820 const __m128d VN1 = _mm_set1_pd(0.11643931522926034421);
1821 const __m128d VN0 = _mm_set1_pd(1.1283791671726767970);
1823 const __m128d VD5 = _mm_set1_pd(0.000021784709867336150342);
1824 const __m128d VD4 = _mm_set1_pd(0.00064293662010911388448);
1825 const __m128d VD3 = _mm_set1_pd(0.0096311444822588683504);
1826 const __m128d VD2 = _mm_set1_pd(0.085608012351550627051);
1827 const __m128d VD1 = _mm_set1_pd(0.43652499166614811084);
1828 const __m128d VD0 = _mm_set1_pd(1.0);
1830 __m128d z4;
1831 __m128d polyVN0,polyVN1,polyVD0,polyVD1;
1833 z4 = _mm_mul_pd(z2,z2);
1835 polyVD1 = _mm_mul_pd(VD5,z4);
1836 polyVD0 = _mm_mul_pd(VD4,z4);
1837 polyVD1 = _mm_add_pd(polyVD1,VD3);
1838 polyVD0 = _mm_add_pd(polyVD0,VD2);
1839 polyVD1 = _mm_mul_pd(polyVD1,z4);
1840 polyVD0 = _mm_mul_pd(polyVD0,z4);
1841 polyVD1 = _mm_add_pd(polyVD1,VD1);
1842 polyVD0 = _mm_add_pd(polyVD0,VD0);
1843 polyVD1 = _mm_mul_pd(polyVD1,z2);
1844 polyVD0 = _mm_add_pd(polyVD0,polyVD1);
1846 polyVD0 = gmx_mm_inv_pd(polyVD0);
1848 polyVN1 = _mm_mul_pd(VN9,z4);
1849 polyVN0 = _mm_mul_pd(VN8,z4);
1850 polyVN1 = _mm_add_pd(polyVN1,VN7);
1851 polyVN0 = _mm_add_pd(polyVN0,VN6);
1852 polyVN1 = _mm_mul_pd(polyVN1,z4);
1853 polyVN0 = _mm_mul_pd(polyVN0,z4);
1854 polyVN1 = _mm_add_pd(polyVN1,VN5);
1855 polyVN0 = _mm_add_pd(polyVN0,VN4);
1856 polyVN1 = _mm_mul_pd(polyVN1,z4);
1857 polyVN0 = _mm_mul_pd(polyVN0,z4);
1858 polyVN1 = _mm_add_pd(polyVN1,VN3);
1859 polyVN0 = _mm_add_pd(polyVN0,VN2);
1860 polyVN1 = _mm_mul_pd(polyVN1,z4);
1861 polyVN0 = _mm_mul_pd(polyVN0,z4);
1862 polyVN1 = _mm_add_pd(polyVN1,VN1);
1863 polyVN0 = _mm_add_pd(polyVN0,VN0);
1864 polyVN1 = _mm_mul_pd(polyVN1,z2);
1865 polyVN0 = _mm_add_pd(polyVN0,polyVN1);
1867 return _mm_mul_pd(polyVN0,polyVD0);
1872 static int
1873 gmx_mm256_sincos_pd(__m256d x,
1874 __m256d *sinval,
1875 __m256d *cosval)
1877 #ifdef _MSC_VER
1878 __declspec(align(16))
1879 const double sintable[34] =
1881 1.00000000000000000e+00 , 0.00000000000000000e+00 ,
1882 9.95184726672196929e-01 , 9.80171403295606036e-02 ,
1883 9.80785280403230431e-01 , 1.95090322016128248e-01 ,
1884 9.56940335732208824e-01 , 2.90284677254462331e-01 ,
1885 9.23879532511286738e-01 , 3.82683432365089782e-01 ,
1886 8.81921264348355050e-01 , 4.71396736825997642e-01 ,
1887 8.31469612302545236e-01 , 5.55570233019602178e-01 ,
1888 7.73010453362736993e-01 , 6.34393284163645488e-01 ,
1889 7.07106781186547573e-01 , 7.07106781186547462e-01 ,
1890 6.34393284163645599e-01 , 7.73010453362736882e-01 ,
1891 5.55570233019602289e-01 , 8.31469612302545125e-01 ,
1892 4.71396736825997809e-01 , 8.81921264348354939e-01 ,
1893 3.82683432365089837e-01 , 9.23879532511286738e-01 ,
1894 2.90284677254462276e-01 , 9.56940335732208935e-01 ,
1895 1.95090322016128304e-01 , 9.80785280403230431e-01 ,
1896 9.80171403295607702e-02 , 9.95184726672196818e-01 ,
1897 0.0 , 1.00000000000000000e+00
1899 #else
1900 const __m128d sintable[17] =
1902 _mm_set_pd( 0.0 , 1.0 ),
1903 _mm_set_pd( sin( 1.0 * (M_PI/2.0) / 16.0) , cos( 1.0 * (M_PI/2.0) / 16.0) ),
1904 _mm_set_pd( sin( 2.0 * (M_PI/2.0) / 16.0) , cos( 2.0 * (M_PI/2.0) / 16.0) ),
1905 _mm_set_pd( sin( 3.0 * (M_PI/2.0) / 16.0) , cos( 3.0 * (M_PI/2.0) / 16.0) ),
1906 _mm_set_pd( sin( 4.0 * (M_PI/2.0) / 16.0) , cos( 4.0 * (M_PI/2.0) / 16.0) ),
1907 _mm_set_pd( sin( 5.0 * (M_PI/2.0) / 16.0) , cos( 5.0 * (M_PI/2.0) / 16.0) ),
1908 _mm_set_pd( sin( 6.0 * (M_PI/2.0) / 16.0) , cos( 6.0 * (M_PI/2.0) / 16.0) ),
1909 _mm_set_pd( sin( 7.0 * (M_PI/2.0) / 16.0) , cos( 7.0 * (M_PI/2.0) / 16.0) ),
1910 _mm_set_pd( sin( 8.0 * (M_PI/2.0) / 16.0) , cos( 8.0 * (M_PI/2.0) / 16.0) ),
1911 _mm_set_pd( sin( 9.0 * (M_PI/2.0) / 16.0) , cos( 9.0 * (M_PI/2.0) / 16.0) ),
1912 _mm_set_pd( sin( 10.0 * (M_PI/2.0) / 16.0) , cos( 10.0 * (M_PI/2.0) / 16.0) ),
1913 _mm_set_pd( sin( 11.0 * (M_PI/2.0) / 16.0) , cos( 11.0 * (M_PI/2.0) / 16.0) ),
1914 _mm_set_pd( sin( 12.0 * (M_PI/2.0) / 16.0) , cos( 12.0 * (M_PI/2.0) / 16.0) ),
1915 _mm_set_pd( sin( 13.0 * (M_PI/2.0) / 16.0) , cos( 13.0 * (M_PI/2.0) / 16.0) ),
1916 _mm_set_pd( sin( 14.0 * (M_PI/2.0) / 16.0) , cos( 14.0 * (M_PI/2.0) / 16.0) ),
1917 _mm_set_pd( sin( 15.0 * (M_PI/2.0) / 16.0) , cos( 15.0 * (M_PI/2.0) / 16.0) ),
1918 _mm_set_pd( 1.0 , 0.0 )
1920 #endif
1922 const __m256d signmask = _mm256_castsi256_pd( _mm256_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,
1923 0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
1924 const __m128i signbit_epi32 = _mm_set1_epi32(0x80000000);
1926 const __m256d tabscale = _mm256_set1_pd(32.0/M_PI);
1927 const __m256d invtabscale0 = _mm256_set1_pd(9.81747508049011230469e-02);
1928 const __m256d invtabscale1 = _mm256_set1_pd(1.96197799156550576057e-08);
1929 const __m128i ione = _mm_set1_epi32(1);
1930 const __m128i i32 = _mm_set1_epi32(32);
1931 const __m128i i16 = _mm_set1_epi32(16);
1932 const __m128i tabmask = _mm_set1_epi32(0x3F);
1933 const __m256d sinP7 = _mm256_set1_pd(-1.0/5040.0);
1934 const __m256d sinP5 = _mm256_set1_pd(1.0/120.0);
1935 const __m256d sinP3 = _mm256_set1_pd(-1.0/6.0);
1936 const __m256d sinP1 = _mm256_set1_pd(1.0);
1938 const __m256d cosP6 = _mm256_set1_pd(-1.0/720.0);
1939 const __m256d cosP4 = _mm256_set1_pd(1.0/24.0);
1940 const __m256d cosP2 = _mm256_set1_pd(-1.0/2.0);
1941 const __m256d cosP0 = _mm256_set1_pd(1.0);
1943 __m256d scalex;
1944 __m128i tabidx,corridx;
1945 __m256d xabs,z,z2,polySin,polyCos;
1946 __m256d xpoint;
1947 __m256d t1,t2,t3,t4;
1949 __m256d sinpoint,cospoint;
1950 __m256d xsign,ssign,csign;
1951 __m128i imask,sswapsign,cswapsign;
1952 __m256d minusone;
1954 xsign = _mm256_andnot_pd(signmask,x);
1955 xabs = _mm256_and_pd(x,signmask);
1957 scalex = _mm256_mul_pd(tabscale,xabs);
1958 tabidx = _mm256_cvtpd_epi32(scalex);
1960 xpoint = _mm256_round_pd(scalex,_MM_FROUND_TO_NEAREST_INT);
1962 /* Extended precision arithmetic */
1963 z = _mm256_sub_pd(xabs,_mm256_mul_pd(invtabscale0,xpoint));
1964 z = _mm256_sub_pd(z,_mm256_mul_pd(invtabscale1,xpoint));
1966 /* Range reduction to 0..2*Pi */
1967 tabidx = _mm_and_si128(tabidx,tabmask);
1969 /* tabidx is now in range [0..63] */
1970 imask = _mm_cmpgt_epi32(tabidx,i32);
1971 sswapsign = imask;
1972 cswapsign = imask;
1973 corridx = _mm_and_si128(imask,i32);
1974 tabidx = _mm_sub_epi32(tabidx,corridx);
1976 /* tabidx is now in range [0..32] */
1977 imask = _mm_cmpgt_epi32(tabidx,i16);
1978 cswapsign = _mm_xor_si128(cswapsign,imask);
1979 corridx = _mm_sub_epi32(i32,tabidx);
1980 tabidx = _mm_blendv_epi8(tabidx,corridx,imask);
1981 /* tabidx is now in range [0..16] */
1982 ssign = _mm256_cvtepi32_pd( _mm_or_si128( sswapsign , ione ) );
1983 csign = _mm256_cvtepi32_pd( _mm_or_si128( cswapsign , ione ) );
1985 /* First lookup into table */
1986 #ifdef _MSC_VER
1987 t1 = _mm256_insertf128_pd(_mm256_castpd128_pd256(_mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,0))),
1988 _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,2)), 0x1);
1989 t2 = _mm256_insertf128_pd(_mm256_castpd128_pd256(_mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,1))),
1990 _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,3)), 0x1);
1991 #else
1992 t1 = _mm256_insertf128_pd(_mm256_castpd128_pd256(sintable[_mm_extract_epi32(tabidx,0)]),
1993 sintable[_mm_extract_epi32(tabidx,2)], 0x1);
1994 t2 = _mm256_insertf128_pd(_mm256_castpd128_pd256(sintable[_mm_extract_epi32(tabidx,1)]),
1995 sintable[_mm_extract_epi32(tabidx,3)], 0x1);
1996 #endif
1998 sinpoint = _mm256_unpackhi_pd(t1,t2);
1999 cospoint = _mm256_unpacklo_pd(t1,t2);
2001 sinpoint = _mm256_mul_pd(sinpoint,ssign);
2002 cospoint = _mm256_mul_pd(cospoint,csign);
2004 z2 = _mm256_mul_pd(z,z);
2006 polySin = _mm256_mul_pd(sinP7,z2);
2007 polySin = _mm256_add_pd(polySin,sinP5);
2008 polySin = _mm256_mul_pd(polySin,z2);
2009 polySin = _mm256_add_pd(polySin,sinP3);
2010 polySin = _mm256_mul_pd(polySin,z2);
2011 polySin = _mm256_add_pd(polySin,sinP1);
2012 polySin = _mm256_mul_pd(polySin,z);
2014 polyCos = _mm256_mul_pd(cosP6,z2);
2015 polyCos = _mm256_add_pd(polyCos,cosP4);
2016 polyCos = _mm256_mul_pd(polyCos,z2);
2017 polyCos = _mm256_add_pd(polyCos,cosP2);
2018 polyCos = _mm256_mul_pd(polyCos,z2);
2019 polyCos = _mm256_add_pd(polyCos,cosP0);
2021 *sinval = _mm256_xor_pd(_mm256_add_pd( _mm256_mul_pd(sinpoint,polyCos) , _mm256_mul_pd(cospoint,polySin) ),xsign);
2022 *cosval = _mm256_sub_pd( _mm256_mul_pd(cospoint,polyCos) , _mm256_mul_pd(sinpoint,polySin) );
2024 return 0;
2027 static int
2028 gmx_mm_sincos_pd(__m128d x,
2029 __m128d *sinval,
2030 __m128d *cosval)
2032 #ifdef _MSC_VER
2033 __declspec(align(16))
2034 const double sintable[34] =
2036 1.00000000000000000e+00 , 0.00000000000000000e+00 ,
2037 9.95184726672196929e-01 , 9.80171403295606036e-02 ,
2038 9.80785280403230431e-01 , 1.95090322016128248e-01 ,
2039 9.56940335732208824e-01 , 2.90284677254462331e-01 ,
2040 9.23879532511286738e-01 , 3.82683432365089782e-01 ,
2041 8.81921264348355050e-01 , 4.71396736825997642e-01 ,
2042 8.31469612302545236e-01 , 5.55570233019602178e-01 ,
2043 7.73010453362736993e-01 , 6.34393284163645488e-01 ,
2044 7.07106781186547573e-01 , 7.07106781186547462e-01 ,
2045 6.34393284163645599e-01 , 7.73010453362736882e-01 ,
2046 5.55570233019602289e-01 , 8.31469612302545125e-01 ,
2047 4.71396736825997809e-01 , 8.81921264348354939e-01 ,
2048 3.82683432365089837e-01 , 9.23879532511286738e-01 ,
2049 2.90284677254462276e-01 , 9.56940335732208935e-01 ,
2050 1.95090322016128304e-01 , 9.80785280403230431e-01 ,
2051 9.80171403295607702e-02 , 9.95184726672196818e-01 ,
2052 0.0 , 1.00000000000000000e+00
2054 #else
2055 const __m128d sintable[17] =
2057 _mm_set_pd( 0.0 , 1.0 ),
2058 _mm_set_pd( sin( 1.0 * (M_PI/2.0) / 16.0) , cos( 1.0 * (M_PI/2.0) / 16.0) ),
2059 _mm_set_pd( sin( 2.0 * (M_PI/2.0) / 16.0) , cos( 2.0 * (M_PI/2.0) / 16.0) ),
2060 _mm_set_pd( sin( 3.0 * (M_PI/2.0) / 16.0) , cos( 3.0 * (M_PI/2.0) / 16.0) ),
2061 _mm_set_pd( sin( 4.0 * (M_PI/2.0) / 16.0) , cos( 4.0 * (M_PI/2.0) / 16.0) ),
2062 _mm_set_pd( sin( 5.0 * (M_PI/2.0) / 16.0) , cos( 5.0 * (M_PI/2.0) / 16.0) ),
2063 _mm_set_pd( sin( 6.0 * (M_PI/2.0) / 16.0) , cos( 6.0 * (M_PI/2.0) / 16.0) ),
2064 _mm_set_pd( sin( 7.0 * (M_PI/2.0) / 16.0) , cos( 7.0 * (M_PI/2.0) / 16.0) ),
2065 _mm_set_pd( sin( 8.0 * (M_PI/2.0) / 16.0) , cos( 8.0 * (M_PI/2.0) / 16.0) ),
2066 _mm_set_pd( sin( 9.0 * (M_PI/2.0) / 16.0) , cos( 9.0 * (M_PI/2.0) / 16.0) ),
2067 _mm_set_pd( sin( 10.0 * (M_PI/2.0) / 16.0) , cos( 10.0 * (M_PI/2.0) / 16.0) ),
2068 _mm_set_pd( sin( 11.0 * (M_PI/2.0) / 16.0) , cos( 11.0 * (M_PI/2.0) / 16.0) ),
2069 _mm_set_pd( sin( 12.0 * (M_PI/2.0) / 16.0) , cos( 12.0 * (M_PI/2.0) / 16.0) ),
2070 _mm_set_pd( sin( 13.0 * (M_PI/2.0) / 16.0) , cos( 13.0 * (M_PI/2.0) / 16.0) ),
2071 _mm_set_pd( sin( 14.0 * (M_PI/2.0) / 16.0) , cos( 14.0 * (M_PI/2.0) / 16.0) ),
2072 _mm_set_pd( sin( 15.0 * (M_PI/2.0) / 16.0) , cos( 15.0 * (M_PI/2.0) / 16.0) ),
2073 _mm_set_pd( 1.0 , 0.0 )
2075 #endif
2077 const __m128d signmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
2078 const __m128i signbit_epi32 = _mm_set1_epi32(0x80000000);
2080 const __m128d tabscale = _mm_set1_pd(32.0/M_PI);
2081 const __m128d invtabscale0 = _mm_set1_pd(9.81747508049011230469e-02);
2082 const __m128d invtabscale1 = _mm_set1_pd(1.96197799156550576057e-08);
2083 const __m128i ione = _mm_set1_epi32(1);
2084 const __m128i i32 = _mm_set1_epi32(32);
2085 const __m128i i16 = _mm_set1_epi32(16);
2086 const __m128i tabmask = _mm_set1_epi32(0x3F);
2087 const __m128d sinP7 = _mm_set1_pd(-1.0/5040.0);
2088 const __m128d sinP5 = _mm_set1_pd(1.0/120.0);
2089 const __m128d sinP3 = _mm_set1_pd(-1.0/6.0);
2090 const __m128d sinP1 = _mm_set1_pd(1.0);
2092 const __m128d cosP6 = _mm_set1_pd(-1.0/720.0);
2093 const __m128d cosP4 = _mm_set1_pd(1.0/24.0);
2094 const __m128d cosP2 = _mm_set1_pd(-1.0/2.0);
2095 const __m128d cosP0 = _mm_set1_pd(1.0);
2097 __m128d scalex;
2098 __m128i tabidx,corridx;
2099 __m128d xabs,z,z2,polySin,polyCos;
2100 __m128d xpoint;
2101 __m128d ypoint0,ypoint1;
2103 __m128d sinpoint,cospoint;
2104 __m128d xsign,ssign,csign;
2105 __m128i imask,sswapsign,cswapsign;
2106 __m128d minusone;
2108 xsign = _mm_andnot_pd(signmask,x);
2109 xabs = _mm_and_pd(x,signmask);
2111 scalex = _mm_mul_pd(tabscale,xabs);
2112 tabidx = _mm_cvtpd_epi32(scalex);
2114 xpoint = _mm_round_pd(scalex,_MM_FROUND_TO_NEAREST_INT);
2116 /* Extended precision arithmetic */
2117 z = _mm_sub_pd(xabs,_mm_mul_pd(invtabscale0,xpoint));
2118 z = _mm_sub_pd(z,_mm_mul_pd(invtabscale1,xpoint));
2120 /* Range reduction to 0..2*Pi */
2121 tabidx = _mm_and_si128(tabidx,tabmask);
2123 /* tabidx is now in range [0..63] */
2124 imask = _mm_cmpgt_epi32(tabidx,i32);
2125 sswapsign = imask;
2126 cswapsign = imask;
2127 corridx = _mm_and_si128(imask,i32);
2128 tabidx = _mm_sub_epi32(tabidx,corridx);
2130 /* tabidx is now in range [0..32] */
2131 imask = _mm_cmpgt_epi32(tabidx,i16);
2132 cswapsign = _mm_xor_si128(cswapsign,imask);
2133 corridx = _mm_sub_epi32(i32,tabidx);
2134 tabidx = _mm_blendv_epi8(tabidx,corridx,imask);
2135 /* tabidx is now in range [0..16] */
2136 ssign = _mm_cvtepi32_pd( _mm_or_si128( sswapsign , ione ) );
2137 csign = _mm_cvtepi32_pd( _mm_or_si128( cswapsign , ione ) );
2139 #ifdef _MSC_VER
2140 ypoint0 = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,0));
2141 ypoint1 = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,1));
2142 #else
2143 ypoint0 = sintable[_mm_extract_epi32(tabidx,0)];
2144 ypoint1 = sintable[_mm_extract_epi32(tabidx,1)];
2145 #endif
2146 sinpoint = _mm_unpackhi_pd(ypoint0,ypoint1);
2147 cospoint = _mm_unpacklo_pd(ypoint0,ypoint1);
2149 sinpoint = _mm_mul_pd(sinpoint,ssign);
2150 cospoint = _mm_mul_pd(cospoint,csign);
2152 z2 = _mm_mul_pd(z,z);
2154 polySin = _mm_mul_pd(sinP7,z2);
2155 polySin = _mm_add_pd(polySin,sinP5);
2156 polySin = _mm_mul_pd(polySin,z2);
2157 polySin = _mm_add_pd(polySin,sinP3);
2158 polySin = _mm_mul_pd(polySin,z2);
2159 polySin = _mm_add_pd(polySin,sinP1);
2160 polySin = _mm_mul_pd(polySin,z);
2162 polyCos = _mm_mul_pd(cosP6,z2);
2163 polyCos = _mm_add_pd(polyCos,cosP4);
2164 polyCos = _mm_mul_pd(polyCos,z2);
2165 polyCos = _mm_add_pd(polyCos,cosP2);
2166 polyCos = _mm_mul_pd(polyCos,z2);
2167 polyCos = _mm_add_pd(polyCos,cosP0);
2169 *sinval = _mm_xor_pd(_mm_add_pd( _mm_mul_pd(sinpoint,polyCos) , _mm_mul_pd(cospoint,polySin) ),xsign);
2170 *cosval = _mm_sub_pd( _mm_mul_pd(cospoint,polyCos) , _mm_mul_pd(sinpoint,polySin) );
2172 return 0;
2177 * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
2178 * will then call the sincos() routine and waste a factor of 2 in performance!
2180 static __m256d
2181 gmx_mm256_sin_pd(__m256d x)
2183 __m256d s,c;
2184 gmx_mm256_sincos_pd(x,&s,&c);
2185 return s;
2188 static __m128d
2189 gmx_mm_sin_pd(__m128d x)
2191 __m128d s,c;
2192 gmx_mm_sincos_pd(x,&s,&c);
2193 return s;
2197 * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
2198 * will then call the sincos() routine and waste a factor of 2 in performance! (A sketch of the single-call pattern follows gmx_mm_cos_pd() below.)
2200 static __m256d
2201 gmx_mm256_cos_pd(__m256d x)
2203 __m256d s,c;
2204 gmx_mm256_sincos_pd(x,&s,&c);
2205 return c;
2208 static __m128d
2209 gmx_mm_cos_pd(__m128d x)
2211 __m128d s,c;
2212 gmx_mm_sincos_pd(x,&s,&c);
2213 return c;
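/* Illustrative sketch, not part of the original API: when both the sine and
 * the cosine are needed, make a single call to the sincos routine rather
 * than calling gmx_mm256_sin_pd() and gmx_mm256_cos_pd() separately, which
 * would run the table lookup and polynomial evaluation twice. The sum
 * returned here is just a placeholder showing both results being used.
 */
static __m256d
gmx_mm256_sincos_usage_sketch(__m256d x)
{
    __m256d s,c;
    gmx_mm256_sincos_pd(x,&s,&c);    /* one call yields both sin(x) and cos(x) */
    return _mm256_add_pd(s,c);
}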
2217 static __m256d
2218 gmx_mm256_tan_pd(__m256d x)
2220 __m256d sinval,cosval;
2221 __m256d tanval;
2223 gmx_mm256_sincos_pd(x,&sinval,&cosval);
2225 tanval = _mm256_mul_pd(sinval,gmx_mm256_inv_pd(cosval));
2227 return tanval;
2230 static __m128d
2231 gmx_mm_tan_pd(__m128d x)
2233 __m128d sinval,cosval;
2234 __m128d tanval;
2236 gmx_mm_sincos_pd(x,&sinval,&cosval);
2238 tanval = _mm_mul_pd(sinval,gmx_mm_inv_pd(cosval));
2240 return tanval;
2244 static __m256d
2245 gmx_mm256_asin_pd(__m256d x)
2247 /* Same algorithm as cephes library */
2248 const __m256d signmask = _mm256_castsi256_pd( _mm256_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,
2249 0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
2250 const __m256d limit1 = _mm256_set1_pd(0.625);
2251 const __m256d limit2 = _mm256_set1_pd(1e-8);
2252 const __m256d one = _mm256_set1_pd(1.0);
2253 const __m256d halfpi = _mm256_set1_pd(M_PI/2.0);
2254 const __m256d quarterpi = _mm256_set1_pd(M_PI/4.0);
2255 const __m256d morebits = _mm256_set1_pd(6.123233995736765886130e-17);
2257 const __m256d P5 = _mm256_set1_pd(4.253011369004428248960e-3);
2258 const __m256d P4 = _mm256_set1_pd(-6.019598008014123785661e-1);
2259 const __m256d P3 = _mm256_set1_pd(5.444622390564711410273e0);
2260 const __m256d P2 = _mm256_set1_pd(-1.626247967210700244449e1);
2261 const __m256d P1 = _mm256_set1_pd(1.956261983317594739197e1);
2262 const __m256d P0 = _mm256_set1_pd(-8.198089802484824371615e0);
2264 const __m256d Q4 = _mm256_set1_pd(-1.474091372988853791896e1);
2265 const __m256d Q3 = _mm256_set1_pd(7.049610280856842141659e1);
2266 const __m256d Q2 = _mm256_set1_pd(-1.471791292232726029859e2);
2267 const __m256d Q1 = _mm256_set1_pd(1.395105614657485689735e2);
2268 const __m256d Q0 = _mm256_set1_pd(-4.918853881490881290097e1);
2270 const __m256d R4 = _mm256_set1_pd(2.967721961301243206100e-3);
2271 const __m256d R3 = _mm256_set1_pd(-5.634242780008963776856e-1);
2272 const __m256d R2 = _mm256_set1_pd(6.968710824104713396794e0);
2273 const __m256d R1 = _mm256_set1_pd(-2.556901049652824852289e1);
2274 const __m256d R0 = _mm256_set1_pd(2.853665548261061424989e1);
2276 const __m256d S3 = _mm256_set1_pd(-2.194779531642920639778e1);
2277 const __m256d S2 = _mm256_set1_pd(1.470656354026814941758e2);
2278 const __m256d S1 = _mm256_set1_pd(-3.838770957603691357202e2);
2279 const __m256d S0 = _mm256_set1_pd(3.424398657913078477438e2);
2281 __m256d sign;
2282 __m256d mask;
2283 __m256d xabs;
2284 __m256d zz,ww,z,q,w,y,zz2,ww2;
2285 __m256d PA,PB;
2286 __m256d QA,QB;
2287 __m256d RA,RB;
2288 __m256d SA,SB;
2289 __m256d nom,denom;
2291 sign = _mm256_andnot_pd(signmask,x);
2292 xabs = _mm256_and_pd(x,signmask);
2294 mask = _mm256_cmp_pd(xabs,limit1,_CMP_GT_OQ);
2296 zz = _mm256_sub_pd(one,xabs);
2297 ww = _mm256_mul_pd(xabs,xabs);
2298 zz2 = _mm256_mul_pd(zz,zz);
2299 ww2 = _mm256_mul_pd(ww,ww);
2301 /* R */
2302 RA = _mm256_mul_pd(R4,zz2);
2303 RB = _mm256_mul_pd(R3,zz2);
2304 RA = _mm256_add_pd(RA,R2);
2305 RB = _mm256_add_pd(RB,R1);
2306 RA = _mm256_mul_pd(RA,zz2);
2307 RB = _mm256_mul_pd(RB,zz);
2308 RA = _mm256_add_pd(RA,R0);
2309 RA = _mm256_add_pd(RA,RB);
2311 /* S, SA = zz2 */
2312 SB = _mm256_mul_pd(S3,zz2);
2313 SA = _mm256_add_pd(zz2,S2);
2314 SB = _mm256_add_pd(SB,S1);
2315 SA = _mm256_mul_pd(SA,zz2);
2316 SB = _mm256_mul_pd(SB,zz);
2317 SA = _mm256_add_pd(SA,S0);
2318 SA = _mm256_add_pd(SA,SB);
2320 /* P */
2321 PA = _mm256_mul_pd(P5,ww2);
2322 PB = _mm256_mul_pd(P4,ww2);
2323 PA = _mm256_add_pd(PA,P3);
2324 PB = _mm256_add_pd(PB,P2);
2325 PA = _mm256_mul_pd(PA,ww2);
2326 PB = _mm256_mul_pd(PB,ww2);
2327 PA = _mm256_add_pd(PA,P1);
2328 PB = _mm256_add_pd(PB,P0);
2329 PA = _mm256_mul_pd(PA,ww);
2330 PA = _mm256_add_pd(PA,PB);
2332 /* Q, QA = ww2 */
2333 QB = _mm256_mul_pd(Q4,ww2);
2334 QA = _mm256_add_pd(ww2,Q3);
2335 QB = _mm256_add_pd(QB,Q2);
2336 QA = _mm256_mul_pd(QA,ww2);
2337 QB = _mm256_mul_pd(QB,ww2);
2338 QA = _mm256_add_pd(QA,Q1);
2339 QB = _mm256_add_pd(QB,Q0);
2340 QA = _mm256_mul_pd(QA,ww);
2341 QA = _mm256_add_pd(QA,QB);
2343 RA = _mm256_mul_pd(RA,zz);
2344 PA = _mm256_mul_pd(PA,ww);
2346 nom = _mm256_blendv_pd( PA,RA,mask );
2347 denom = _mm256_blendv_pd( QA,SA,mask );
2349 q = _mm256_mul_pd( nom, gmx_mm256_inv_pd(denom) );
2351 zz = _mm256_add_pd(zz,zz);
2352 zz = gmx_mm256_sqrt_pd(zz);
2353 z = _mm256_sub_pd(quarterpi,zz);
2354 zz = _mm256_mul_pd(zz,q);
2355 zz = _mm256_sub_pd(zz,morebits);
2356 z = _mm256_sub_pd(z,zz);
2357 z = _mm256_add_pd(z,quarterpi);
2359 w = _mm256_mul_pd(xabs,q);
2360 w = _mm256_add_pd(w,xabs);
2362 z = _mm256_blendv_pd( w,z,mask );
2364 mask = _mm256_cmp_pd(xabs,limit2,_CMP_GT_OQ);
2365 z = _mm256_blendv_pd( xabs,z,mask );
2367 z = _mm256_xor_pd(z,sign);
2369 return z;
2372 static __m128d
2373 gmx_mm_asin_pd(__m128d x)
2375 /* Same algorithm as cephes library */
2376 const __m128d signmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
2377 const __m128d limit1 = _mm_set1_pd(0.625);
2378 const __m128d limit2 = _mm_set1_pd(1e-8);
2379 const __m128d one = _mm_set1_pd(1.0);
2380 const __m128d halfpi = _mm_set1_pd(M_PI/2.0);
2381 const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
2382 const __m128d morebits = _mm_set1_pd(6.123233995736765886130e-17);
2384 const __m128d P5 = _mm_set1_pd(4.253011369004428248960e-3);
2385 const __m128d P4 = _mm_set1_pd(-6.019598008014123785661e-1);
2386 const __m128d P3 = _mm_set1_pd(5.444622390564711410273e0);
2387 const __m128d P2 = _mm_set1_pd(-1.626247967210700244449e1);
2388 const __m128d P1 = _mm_set1_pd(1.956261983317594739197e1);
2389 const __m128d P0 = _mm_set1_pd(-8.198089802484824371615e0);
2391 const __m128d Q4 = _mm_set1_pd(-1.474091372988853791896e1);
2392 const __m128d Q3 = _mm_set1_pd(7.049610280856842141659e1);
2393 const __m128d Q2 = _mm_set1_pd(-1.471791292232726029859e2);
2394 const __m128d Q1 = _mm_set1_pd(1.395105614657485689735e2);
2395 const __m128d Q0 = _mm_set1_pd(-4.918853881490881290097e1);
2397 const __m128d R4 = _mm_set1_pd(2.967721961301243206100e-3);
2398 const __m128d R3 = _mm_set1_pd(-5.634242780008963776856e-1);
2399 const __m128d R2 = _mm_set1_pd(6.968710824104713396794e0);
2400 const __m128d R1 = _mm_set1_pd(-2.556901049652824852289e1);
2401 const __m128d R0 = _mm_set1_pd(2.853665548261061424989e1);
2403 const __m128d S3 = _mm_set1_pd(-2.194779531642920639778e1);
2404 const __m128d S2 = _mm_set1_pd(1.470656354026814941758e2);
2405 const __m128d S1 = _mm_set1_pd(-3.838770957603691357202e2);
2406 const __m128d S0 = _mm_set1_pd(3.424398657913078477438e2);
2408 __m128d sign;
2409 __m128d mask;
2410 __m128d xabs;
2411 __m128d zz,ww,z,q,w,y,zz2,ww2;
2412 __m128d PA,PB;
2413 __m128d QA,QB;
2414 __m128d RA,RB;
2415 __m128d SA,SB;
2416 __m128d nom,denom;
2418 sign = _mm_andnot_pd(signmask,x);
2419 xabs = _mm_and_pd(x,signmask);
2421 mask = _mm_cmpgt_pd(xabs,limit1);
2423 zz = _mm_sub_pd(one,xabs);
2424 ww = _mm_mul_pd(xabs,xabs);
2425 zz2 = _mm_mul_pd(zz,zz);
2426 ww2 = _mm_mul_pd(ww,ww);
2428 /* R */
2429 RA = _mm_mul_pd(R4,zz2);
2430 RB = _mm_mul_pd(R3,zz2);
2431 RA = _mm_add_pd(RA,R2);
2432 RB = _mm_add_pd(RB,R1);
2433 RA = _mm_mul_pd(RA,zz2);
2434 RB = _mm_mul_pd(RB,zz);
2435 RA = _mm_add_pd(RA,R0);
2436 RA = _mm_add_pd(RA,RB);
2438 /* S, SA = zz2 */
2439 SB = _mm_mul_pd(S3,zz2);
2440 SA = _mm_add_pd(zz2,S2);
2441 SB = _mm_add_pd(SB,S1);
2442 SA = _mm_mul_pd(SA,zz2);
2443 SB = _mm_mul_pd(SB,zz);
2444 SA = _mm_add_pd(SA,S0);
2445 SA = _mm_add_pd(SA,SB);
2447 /* P */
2448 PA = _mm_mul_pd(P5,ww2);
2449 PB = _mm_mul_pd(P4,ww2);
2450 PA = _mm_add_pd(PA,P3);
2451 PB = _mm_add_pd(PB,P2);
2452 PA = _mm_mul_pd(PA,ww2);
2453 PB = _mm_mul_pd(PB,ww2);
2454 PA = _mm_add_pd(PA,P1);
2455 PB = _mm_add_pd(PB,P0);
2456 PA = _mm_mul_pd(PA,ww);
2457 PA = _mm_add_pd(PA,PB);
2459 /* Q, QA = ww2 */
2460 QB = _mm_mul_pd(Q4,ww2);
2461 QA = _mm_add_pd(ww2,Q3);
2462 QB = _mm_add_pd(QB,Q2);
2463 QA = _mm_mul_pd(QA,ww2);
2464 QB = _mm_mul_pd(QB,ww2);
2465 QA = _mm_add_pd(QA,Q1);
2466 QB = _mm_add_pd(QB,Q0);
2467 QA = _mm_mul_pd(QA,ww);
2468 QA = _mm_add_pd(QA,QB);
2470 RA = _mm_mul_pd(RA,zz);
2471 PA = _mm_mul_pd(PA,ww);
2473 nom = _mm_blendv_pd( PA,RA,mask );
2474 denom = _mm_blendv_pd( QA,SA,mask );
2476 q = _mm_mul_pd( nom, gmx_mm_inv_pd(denom) );
2478 zz = _mm_add_pd(zz,zz);
2479 zz = gmx_mm_sqrt_pd(zz);
2480 z = _mm_sub_pd(quarterpi,zz);
2481 zz = _mm_mul_pd(zz,q);
2482 zz = _mm_sub_pd(zz,morebits);
2483 z = _mm_sub_pd(z,zz);
2484 z = _mm_add_pd(z,quarterpi);
2486 w = _mm_mul_pd(xabs,q);
2487 w = _mm_add_pd(w,xabs);
2489 z = _mm_blendv_pd( w,z,mask );
2491 mask = _mm_cmpgt_pd(xabs,limit2);
2492 z = _mm_blendv_pd( xabs,z,mask );
2494 z = _mm_xor_pd(z,sign);
2496 return z;
2500 static __m256d
2501 gmx_mm256_acos_pd(__m256d x)
2503 const __m256d signmask = _mm256_castsi256_pd( _mm256_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,
2504 0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
2505 const __m256d one = _mm256_set1_pd(1.0);
2506 const __m256d half = _mm256_set1_pd(0.5);
2507 const __m256d pi = _mm256_set1_pd(M_PI);
2508 const __m256d quarterpi0 = _mm256_set1_pd(7.85398163397448309616e-1);
2509 const __m256d quarterpi1 = _mm256_set1_pd(6.123233995736765886130e-17);
2512 __m256d mask1;
2514 __m256d z,z1,z2;
2516 mask1 = _mm256_cmp_pd(x,half,_CMP_GT_OQ);
2517 z1 = _mm256_mul_pd(half,_mm256_sub_pd(one,x));
2518 z1 = gmx_mm256_sqrt_pd(z1);
2519 z = _mm256_blendv_pd( x,z1,mask1 );
2521 z = gmx_mm256_asin_pd(z);
2523 z1 = _mm256_add_pd(z,z);
2525 z2 = _mm256_sub_pd(quarterpi0,z);
2526 z2 = _mm256_add_pd(z2,quarterpi1);
2527 z2 = _mm256_add_pd(z2,quarterpi0);
2529 z = _mm256_blendv_pd(z2,z1,mask1);
2531 return z;
2534 static __m128d
2535 gmx_mm_acos_pd(__m128d x)
2537 const __m128d signmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
2538 const __m128d one = _mm_set1_pd(1.0);
2539 const __m128d half = _mm_set1_pd(0.5);
2540 const __m128d pi = _mm_set1_pd(M_PI);
2541 const __m128d quarterpi0 = _mm_set1_pd(7.85398163397448309616e-1);
2542 const __m128d quarterpi1 = _mm_set1_pd(6.123233995736765886130e-17);
2545 __m128d mask1;
2547 __m128d z,z1,z2;
2549 mask1 = _mm_cmpgt_pd(x,half);
2550 z1 = _mm_mul_pd(half,_mm_sub_pd(one,x));
2551 z1 = gmx_mm_sqrt_pd(z1);
2552 z = _mm_blendv_pd( x,z1,mask1 );
2554 z = gmx_mm_asin_pd(z);
2556 z1 = _mm_add_pd(z,z);
2558 z2 = _mm_sub_pd(quarterpi0,z);
2559 z2 = _mm_add_pd(z2,quarterpi1);
2560 z2 = _mm_add_pd(z2,quarterpi0);
2562 z = _mm_blendv_pd(z2,z1,mask1);
2564 return z;
2568 static __m256d
2569 gmx_mm256_atan_pd(__m256d x)
2571 /* Same algorithm as cephes library */
2572 const __m256d signmask = _mm256_castsi256_pd( _mm256_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,
2573 0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
2574 const __m256d limit1 = _mm256_set1_pd(0.66);
2575 const __m256d limit2 = _mm256_set1_pd(2.41421356237309504880);
2576 const __m256d quarterpi = _mm256_set1_pd(M_PI/4.0);
2577 const __m256d halfpi = _mm256_set1_pd(M_PI/2.0);
2578 const __m256d mone = _mm256_set1_pd(-1.0);
2579 const __m256d morebits1 = _mm256_set1_pd(0.5*6.123233995736765886130E-17);
2580 const __m256d morebits2 = _mm256_set1_pd(6.123233995736765886130E-17);
2582 const __m256d P4 = _mm256_set1_pd(-8.750608600031904122785E-1);
2583 const __m256d P3 = _mm256_set1_pd(-1.615753718733365076637E1);
2584 const __m256d P2 = _mm256_set1_pd(-7.500855792314704667340E1);
2585 const __m256d P1 = _mm256_set1_pd(-1.228866684490136173410E2);
2586 const __m256d P0 = _mm256_set1_pd(-6.485021904942025371773E1);
2588 const __m256d Q4 = _mm256_set1_pd(2.485846490142306297962E1);
2589 const __m256d Q3 = _mm256_set1_pd(1.650270098316988542046E2);
2590 const __m256d Q2 = _mm256_set1_pd(4.328810604912902668951E2);
2591 const __m256d Q1 = _mm256_set1_pd(4.853903996359136964868E2);
2592 const __m256d Q0 = _mm256_set1_pd(1.945506571482613964425E2);
2594 __m256d sign;
2595 __m256d mask1,mask2;
2596 __m256d y,t1,t2;
2597 __m256d z,z2;
2598 __m256d P_A,P_B,Q_A,Q_B;
2600 sign = _mm256_andnot_pd(signmask,x);
2601 x = _mm256_and_pd(x,signmask);
2603 mask1 = _mm256_cmp_pd(x,limit1,_CMP_GT_OQ);
2604 mask2 = _mm256_cmp_pd(x,limit2,_CMP_GT_OQ);
2606 t1 = _mm256_mul_pd(_mm256_add_pd(x,mone),gmx_mm256_inv_pd(_mm256_sub_pd(x,mone)));
2607 t2 = _mm256_mul_pd(mone,gmx_mm256_inv_pd(x));
2609 y = _mm256_and_pd(mask1,quarterpi);
2610 y = _mm256_or_pd( _mm256_and_pd(mask2,halfpi) , _mm256_andnot_pd(mask2,y) );
2612 x = _mm256_or_pd( _mm256_and_pd(mask1,t1) , _mm256_andnot_pd(mask1,x) );
2613 x = _mm256_or_pd( _mm256_and_pd(mask2,t2) , _mm256_andnot_pd(mask2,x) );
2615 z = _mm256_mul_pd(x,x);
2616 z2 = _mm256_mul_pd(z,z);
2618 P_A = _mm256_mul_pd(P4,z2);
2619 P_B = _mm256_mul_pd(P3,z2);
2620 P_A = _mm256_add_pd(P_A,P2);
2621 P_B = _mm256_add_pd(P_B,P1);
2622 P_A = _mm256_mul_pd(P_A,z2);
2623 P_B = _mm256_mul_pd(P_B,z);
2624 P_A = _mm256_add_pd(P_A,P0);
2625 P_A = _mm256_add_pd(P_A,P_B);
2627 /* Q_A = z2 */
2628 Q_B = _mm256_mul_pd(Q4,z2);
2629 Q_A = _mm256_add_pd(z2,Q3);
2630 Q_B = _mm256_add_pd(Q_B,Q2);
2631 Q_A = _mm256_mul_pd(Q_A,z2);
2632 Q_B = _mm256_mul_pd(Q_B,z2);
2633 Q_A = _mm256_add_pd(Q_A,Q1);
2634 Q_B = _mm256_add_pd(Q_B,Q0);
2635 Q_A = _mm256_mul_pd(Q_A,z);
2636 Q_A = _mm256_add_pd(Q_A,Q_B);
2638 z = _mm256_mul_pd(z,P_A);
2639 z = _mm256_mul_pd(z,gmx_mm256_inv_pd(Q_A));
2640 z = _mm256_mul_pd(z,x);
2641 z = _mm256_add_pd(z,x);
2643 t1 = _mm256_and_pd(mask1,morebits1);
2644 t1 = _mm256_or_pd( _mm256_and_pd(mask2,morebits2) , _mm256_andnot_pd(mask2,t1) );
2646 z = _mm256_add_pd(z,t1);
2647 y = _mm256_add_pd(y,z);
2649 y = _mm256_xor_pd(y,sign);
2651 return y;
2654 static __m128d
2655 gmx_mm_atan_pd(__m128d x)
2657 /* Same algorithm as cephes library */
2658 const __m128d signmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
2659 const __m128d limit1 = _mm_set1_pd(0.66);
2660 const __m128d limit2 = _mm_set1_pd(2.41421356237309504880);
2661 const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
2662 const __m128d halfpi = _mm_set1_pd(M_PI/2.0);
2663 const __m128d mone = _mm_set1_pd(-1.0);
2664 const __m128d morebits1 = _mm_set1_pd(0.5*6.123233995736765886130E-17);
2665 const __m128d morebits2 = _mm_set1_pd(6.123233995736765886130E-17);
2667 const __m128d P4 = _mm_set1_pd(-8.750608600031904122785E-1);
2668 const __m128d P3 = _mm_set1_pd(-1.615753718733365076637E1);
2669 const __m128d P2 = _mm_set1_pd(-7.500855792314704667340E1);
2670 const __m128d P1 = _mm_set1_pd(-1.228866684490136173410E2);
2671 const __m128d P0 = _mm_set1_pd(-6.485021904942025371773E1);
2673 const __m128d Q4 = _mm_set1_pd(2.485846490142306297962E1);
2674 const __m128d Q3 = _mm_set1_pd(1.650270098316988542046E2);
2675 const __m128d Q2 = _mm_set1_pd(4.328810604912902668951E2);
2676 const __m128d Q1 = _mm_set1_pd(4.853903996359136964868E2);
2677 const __m128d Q0 = _mm_set1_pd(1.945506571482613964425E2);
2679 __m128d sign;
2680 __m128d mask1,mask2;
2681 __m128d y,t1,t2;
2682 __m128d z,z2;
2683 __m128d P_A,P_B,Q_A,Q_B;
2685 sign = _mm_andnot_pd(signmask,x);
2686 x = _mm_and_pd(x,signmask);
2688 mask1 = _mm_cmpgt_pd(x,limit1);
2689 mask2 = _mm_cmpgt_pd(x,limit2);
2691 t1 = _mm_mul_pd(_mm_add_pd(x,mone),gmx_mm_inv_pd(_mm_sub_pd(x,mone)));
2692 t2 = _mm_mul_pd(mone,gmx_mm_inv_pd(x));
2694 y = _mm_and_pd(mask1,quarterpi);
2695 y = _mm_or_pd( _mm_and_pd(mask2,halfpi) , _mm_andnot_pd(mask2,y) );
2697 x = _mm_or_pd( _mm_and_pd(mask1,t1) , _mm_andnot_pd(mask1,x) );
2698 x = _mm_or_pd( _mm_and_pd(mask2,t2) , _mm_andnot_pd(mask2,x) );
2700 z = _mm_mul_pd(x,x);
2701 z2 = _mm_mul_pd(z,z);
2703 P_A = _mm_mul_pd(P4,z2);
2704 P_B = _mm_mul_pd(P3,z2);
2705 P_A = _mm_add_pd(P_A,P2);
2706 P_B = _mm_add_pd(P_B,P1);
2707 P_A = _mm_mul_pd(P_A,z2);
2708 P_B = _mm_mul_pd(P_B,z);
2709 P_A = _mm_add_pd(P_A,P0);
2710 P_A = _mm_add_pd(P_A,P_B);
2712 /* Q_A = z2 */
2713 Q_B = _mm_mul_pd(Q4,z2);
2714 Q_A = _mm_add_pd(z2,Q3);
2715 Q_B = _mm_add_pd(Q_B,Q2);
2716 Q_A = _mm_mul_pd(Q_A,z2);
2717 Q_B = _mm_mul_pd(Q_B,z2);
2718 Q_A = _mm_add_pd(Q_A,Q1);
2719 Q_B = _mm_add_pd(Q_B,Q0);
2720 Q_A = _mm_mul_pd(Q_A,z);
2721 Q_A = _mm_add_pd(Q_A,Q_B);
2723 z = _mm_mul_pd(z,P_A);
2724 z = _mm_mul_pd(z,gmx_mm_inv_pd(Q_A));
2725 z = _mm_mul_pd(z,x);
2726 z = _mm_add_pd(z,x);
2728 t1 = _mm_and_pd(mask1,morebits1);
2729 t1 = _mm_or_pd( _mm_and_pd(mask2,morebits2) , _mm_andnot_pd(mask2,t1) );
2731 z = _mm_add_pd(z,t1);
2732 y = _mm_add_pd(y,z);
2734 y = _mm_xor_pd(y,sign);
2736 return y;
2741 static __m256d
2742 gmx_mm256_atan2_pd(__m256d y, __m256d x)
2744 const __m256d pi = _mm256_set1_pd(M_PI);
2745 const __m256d minuspi = _mm256_set1_pd(-M_PI);
2746 const __m256d halfpi = _mm256_set1_pd(M_PI/2.0);
2747 const __m256d minushalfpi = _mm256_set1_pd(-M_PI/2.0);
2749 __m256d z,z1,z3,z4;
2750 __m256d w;
2751 __m256d maskx_lt,maskx_eq;
2752 __m256d masky_lt,masky_eq;
2753 __m256d mask1,mask2,mask3,mask4,maskall;
2755 maskx_lt = _mm256_cmp_pd(x,_mm256_setzero_pd(),_CMP_LT_OQ);
2756 masky_lt = _mm256_cmp_pd(y,_mm256_setzero_pd(),_CMP_LT_OQ);
2757 maskx_eq = _mm256_cmp_pd(x,_mm256_setzero_pd(),_CMP_EQ_OQ);
2758 masky_eq = _mm256_cmp_pd(y,_mm256_setzero_pd(),_CMP_EQ_OQ);
2760 z = _mm256_mul_pd(y,gmx_mm256_inv_pd(x));
2761 z = gmx_mm256_atan_pd(z);
2763 mask1 = _mm256_and_pd(maskx_eq,masky_lt);
2764 mask2 = _mm256_andnot_pd(maskx_lt,masky_eq);
2765 mask3 = _mm256_andnot_pd( _mm256_or_pd(masky_lt,masky_eq) , maskx_eq);
2766 mask4 = _mm256_and_pd(masky_eq,maskx_lt);
2768 maskall = _mm256_or_pd( _mm256_or_pd(mask1,mask2), _mm256_or_pd(mask3,mask4) );
2770 z = _mm256_andnot_pd(maskall,z);
2771 z1 = _mm256_and_pd(mask1,minushalfpi);
2772 z3 = _mm256_and_pd(mask3,halfpi);
2773 z4 = _mm256_and_pd(mask4,pi);
2775 z = _mm256_or_pd( _mm256_or_pd(z,z1), _mm256_or_pd(z3,z4) );
2777 w = _mm256_blendv_pd(pi,minuspi,masky_lt);
2778 w = _mm256_and_pd(w,maskx_lt);
2780 w = _mm256_andnot_pd(maskall,w);
2782 z = _mm256_add_pd(z,w);
2784 return z;
2787 static __m128d
2788 gmx_mm_atan2_pd(__m128d y, __m128d x)
2790 const __m128d pi = _mm_set1_pd(M_PI);
2791 const __m128d minuspi = _mm_set1_pd(-M_PI);
2792 const __m128d halfpi = _mm_set1_pd(M_PI/2.0);
2793 const __m128d minushalfpi = _mm_set1_pd(-M_PI/2.0);
2795 __m128d z,z1,z3,z4;
2796 __m128d w;
2797 __m128d maskx_lt,maskx_eq;
2798 __m128d masky_lt,masky_eq;
2799 __m128d mask1,mask2,mask3,mask4,maskall;
2801 maskx_lt = _mm_cmplt_pd(x,_mm_setzero_pd());
2802 masky_lt = _mm_cmplt_pd(y,_mm_setzero_pd());
2803 maskx_eq = _mm_cmpeq_pd(x,_mm_setzero_pd());
2804 masky_eq = _mm_cmpeq_pd(y,_mm_setzero_pd());
2806 z = _mm_mul_pd(y,gmx_mm_inv_pd(x));
2807 z = gmx_mm_atan_pd(z);
2809 mask1 = _mm_and_pd(maskx_eq,masky_lt);
2810 mask2 = _mm_andnot_pd(maskx_lt,masky_eq);
2811 mask3 = _mm_andnot_pd( _mm_or_pd(masky_lt,masky_eq) , maskx_eq);
2812 mask4 = _mm_and_pd(masky_eq,maskx_lt);
2814 maskall = _mm_or_pd( _mm_or_pd(mask1,mask2), _mm_or_pd(mask3,mask4) );
2816 z = _mm_andnot_pd(maskall,z);
2817 z1 = _mm_and_pd(mask1,minushalfpi);
2818 z3 = _mm_and_pd(mask3,halfpi);
2819 z4 = _mm_and_pd(mask4,pi);
2821 z = _mm_or_pd( _mm_or_pd(z,z1), _mm_or_pd(z3,z4) );
2823 w = _mm_blendv_pd(pi,minuspi,masky_lt);
2824 w = _mm_and_pd(w,maskx_lt);
2826 w = _mm_andnot_pd(maskall,w);
2828 z = _mm_add_pd(z,w);
2830 return z;
2833 #endif /* _gmx_math_x86_avx_256_double_h_ */