/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 * This file is part of GROMACS.
 *
 * Written by the Gromacs development team under coordination of
 * David van der Spoel, Berk Hess, and Erik Lindahl.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org
 *
 * And Hey:
 * Gnomes, ROck Monsters And Chili Sauce
 */
#ifndef _gmx_math_x86_avx_256_double_h_
#define _gmx_math_x86_avx_256_double_h_

#include <math.h>

#include "gmx_x86_avx_256.h"

#ifndef M_PI
#  define M_PI 3.14159265358979323846264338327950288
#endif


/************************
 * Simple math routines *
 ************************/
/* 1.0/sqrt(x), 256 bit wide */
static gmx_inline __m256d
gmx_mm256_invsqrt_pd(__m256d x)
{
    const __m256d half  = _mm256_set1_pd(0.5);
    const __m256d three = _mm256_set1_pd(3.0);

    /* Lookup instruction only exists in single precision, convert back and forth... */
    __m256d lu = _mm256_cvtps_pd(_mm_rsqrt_ps( _mm256_cvtpd_ps(x)));

    lu = _mm256_mul_pd(half, _mm256_mul_pd(_mm256_sub_pd(three, _mm256_mul_pd(_mm256_mul_pd(lu, lu), x)), lu));
    return _mm256_mul_pd(half, _mm256_mul_pd(_mm256_sub_pd(three, _mm256_mul_pd(_mm256_mul_pd(lu, lu), x)), lu));
}
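
/* Illustrative scalar sketch (added; the helper name is hypothetical, not a
 * GROMACS API): each of the two statements above is one Newton-Raphson
 * refinement for y = 1/sqrt(x),
 *
 *     y' = 0.5*y*(3 - x*y*y),
 *
 * which roughly doubles the number of correct bits per step, so the crude
 * single-precision seed reaches full double precision after two steps.
 */
static gmx_inline double
gmx_scalar_invsqrt_sketch(double x)
{
    double y = (double)(1.0f / sqrtf((float)x)); /* crude seed, like _mm_rsqrt_ps */

    y = 0.5*y*(3.0 - x*y*y);    /* N-R step 1 */
    y = 0.5*y*(3.0 - x*y*y);    /* N-R step 2 */

    return y;
}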
/* 1.0/sqrt(x), done for a pair of arguments to improve throughput */
static gmx_inline void
gmx_mm256_invsqrt_pair_pd(__m256d x1, __m256d x2, __m256d *invsqrt1, __m256d *invsqrt2)
{
    const __m256d half   = _mm256_set1_pd(0.5);
    const __m256d three  = _mm256_set1_pd(3.0);
    const __m256  halff  = _mm256_set1_ps(0.5f);
    const __m256  threef = _mm256_set1_ps(3.0f);

    __m256  xf, luf;
    __m256d lu1, lu2;

    /* Do first N-R step in float for 2x throughput */
    xf  = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm256_cvtpd_ps(x1)), _mm256_cvtpd_ps(x2), 0x1);
    luf = _mm256_rsqrt_ps(xf);
    luf = _mm256_mul_ps(halff, _mm256_mul_ps(_mm256_sub_ps(threef, _mm256_mul_ps(_mm256_mul_ps(luf, luf), xf)), luf));

    lu2 = _mm256_cvtps_pd(_mm256_extractf128_ps(luf, 0x1));
    lu1 = _mm256_cvtps_pd(_mm256_castps256_ps128(luf));

    *invsqrt1 = _mm256_mul_pd(half, _mm256_mul_pd(_mm256_sub_pd(three, _mm256_mul_pd(_mm256_mul_pd(lu1, lu1), x1)), lu1));
    *invsqrt2 = _mm256_mul_pd(half, _mm256_mul_pd(_mm256_sub_pd(three, _mm256_mul_pd(_mm256_mul_pd(lu2, lu2), x2)), lu2));
}
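
/* Illustrative usage sketch (added; hypothetical helper and array arguments,
 * not a GROMACS API): the pair routine exists so two independent iterations
 * can overlap in the pipeline, with the first N-R step done once in single
 * precision for all eight values.
 */
static gmx_inline void
gmx_mm256_invsqrt_pair_usage_sketch(const double a[4], const double b[4],
                                    double out_a[4],   double out_b[4])
{
    __m256d inv_a, inv_b;

    gmx_mm256_invsqrt_pair_pd(_mm256_loadu_pd(a), _mm256_loadu_pd(b),
                              &inv_a, &inv_b);
    _mm256_storeu_pd(out_a, inv_a);
    _mm256_storeu_pd(out_b, inv_b);
}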
/* 1.0/sqrt(x), 128 bit wide */
static gmx_inline __m128d
gmx_mm_invsqrt_pd(__m128d x)
{
    const __m128d half  = _mm_set1_pd(0.5);
    const __m128d three = _mm_set1_pd(3.0);

    /* Lookup instruction only exists in single precision, convert back and forth... */
    __m128d lu = _mm_cvtps_pd(_mm_rsqrt_ps( _mm_cvtpd_ps(x)));

    lu = _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu, lu), x)), lu));
    return _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu, lu), x)), lu));
}
/* 1.0/sqrt(x), done for two pairs to improve throughput */
static gmx_inline void
gmx_mm_invsqrt_pair_pd(__m128d x1, __m128d x2, __m128d *invsqrt1, __m128d *invsqrt2)
{
    const __m128d half   = _mm_set1_pd(0.5);
    const __m128d three  = _mm_set1_pd(3.0);
    const __m128  halff  = _mm_set1_ps(0.5f);
    const __m128  threef = _mm_set1_ps(3.0f);

    __m128  xf, luf;
    __m128d lu1, lu2;

    /* Do first N-R step in float for 2x throughput */
    xf  = _mm_shuffle_ps(_mm_cvtpd_ps(x1), _mm_cvtpd_ps(x2), _MM_SHUFFLE(1, 0, 1, 0));
    luf = _mm_rsqrt_ps(xf);
    luf = _mm_mul_ps(halff, _mm_mul_ps(_mm_sub_ps(threef, _mm_mul_ps(_mm_mul_ps(luf, luf), xf)), luf));

    lu2 = _mm_cvtps_pd(_mm_shuffle_ps(luf, luf, _MM_SHUFFLE(3, 2, 3, 2)));
    lu1 = _mm_cvtps_pd(luf);

    *invsqrt1 = _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu1, lu1), x1)), lu1));
    *invsqrt2 = _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu2, lu2), x2)), lu2));
}
/* sqrt(x) (256 bit) - Do NOT use this (but rather invsqrt) if you actually need 1.0/sqrt(x) */
static gmx_inline __m256d
gmx_mm256_sqrt_pd(__m256d x)
{
    __m256d mask;
    __m256d res;

    mask = _mm256_cmp_pd(x, _mm256_setzero_pd(), _CMP_EQ_OQ);
    res  = _mm256_andnot_pd(mask, gmx_mm256_invsqrt_pd(x));

    res  = _mm256_mul_pd(x, res);

    return res;
}
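
/* Illustrative scalar sketch (added; hypothetical helper): sqrt(x) is formed
 * as x*invsqrt(x), and the compare/andnot pair zeroes the invsqrt result
 * where x==0, so sqrt(0) yields 0 rather than the NaN from 0*Inf.
 */
static gmx_inline double
gmx_scalar_sqrt_via_invsqrt_sketch(double x)
{
    double inv = (x == 0.0) ? 0.0 : 1.0/sqrt(x); /* the mask replaces this branch */

    return x*inv;
}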
/* sqrt(x) (128 bit) - Do NOT use this (but rather invsqrt) if you actually need 1.0/sqrt(x) */
static gmx_inline __m128d
gmx_mm_sqrt_pd(__m128d x)
{
    __m128d mask;
    __m128d res;

    mask = _mm_cmpeq_pd(x, _mm_setzero_pd());
    res  = _mm_andnot_pd(mask, gmx_mm_invsqrt_pd(x));

    res  = _mm_mul_pd(x, res);

    return res;
}
/* 1.0/x, 256 bit wide */
static gmx_inline __m256d
gmx_mm256_inv_pd(__m256d x)
{
    const __m256d two = _mm256_set1_pd(2.0);

    /* Lookup instruction only exists in single precision, convert back and forth... */
    __m256d lu = _mm256_cvtps_pd(_mm_rcp_ps( _mm256_cvtpd_ps(x)));

    /* Perform two N-R steps for double precision */
    lu = _mm256_mul_pd(lu, _mm256_sub_pd(two, _mm256_mul_pd(x, lu)));
    return _mm256_mul_pd(lu, _mm256_sub_pd(two, _mm256_mul_pd(x, lu)));
}
/* 1.0/x, 128 bit wide */
static gmx_inline __m128d
gmx_mm_inv_pd(__m128d x)
{
    const __m128d two = _mm_set1_pd(2.0);

    /* Lookup instruction only exists in single precision, convert back and forth... */
    __m128d lu = _mm_cvtps_pd(_mm_rcp_ps( _mm_cvtpd_ps(x)));

    /* Perform two N-R steps for double precision */
    lu = _mm_mul_pd(lu, _mm_sub_pd(two, _mm_mul_pd(x, lu)));
    return _mm_mul_pd(lu, _mm_sub_pd(two, _mm_mul_pd(x, lu)));
}
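
/* Illustrative scalar sketch (added; hypothetical helper): both reciprocal
 * routines above apply the Newton-Raphson iteration for y = 1/x,
 *
 *     y' = y*(2 - x*y),
 *
 * twice to the single-precision rcp seed.
 */
static gmx_inline double
gmx_scalar_inv_sketch(double x)
{
    double y = (double)(1.0f/(float)x);  /* crude seed, like _mm_rcp_ps */

    y = y*(2.0 - x*y);    /* N-R step 1 */
    y = y*(2.0 - x*y);    /* N-R step 2 */

    return y;
}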
static gmx_inline __m256d
gmx_mm256_abs_pd(__m256d x)
{
    const __m256d signmask = _mm256_castsi256_pd( _mm256_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF,
                                                                   0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );

    return _mm256_and_pd(x, signmask);
}
static gmx_inline __m128d
gmx_mm_abs_pd(__m128d x)
{
    const __m128d signmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );

    return _mm_and_pd(x, signmask);
}
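
/* Illustrative scalar sketch (added; hypothetical helper): the masks above
 * keep every bit of each 64-bit lane except bit 63, i.e. they clear the
 * IEEE-754 sign bit, which is all fabs() needs to do.
 */
static gmx_inline double
gmx_scalar_abs_mask_sketch(double x)
{
    union { double d; unsigned long long u; } conv;

    conv.d  = x;
    conv.u &= 0x7FFFFFFFFFFFFFFFULL; /* same bit pattern as the signmask above */

    return conv.d;
}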
/*
 * 2^x function, 256 bit.
 *
 * The fractional-part term 2^w is approximated on the interval [-0.5,0.5]
 * with a rational Padé form, 1+2*P(x^2)/(Q(x^2)-P(x^2)),
 * according to the same algorithm as used in the Cephes/netlib math routines.
 */
static gmx_inline __m256d
gmx_mm256_exp2_pd(__m256d x)
{
    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
    const __m256d arglimit = _mm256_set1_pd(1022.0);
    const __m128i expbase  = _mm_set1_epi32(1023);

    const __m256d P2       = _mm256_set1_pd(2.30933477057345225087e-2);
    const __m256d P1       = _mm256_set1_pd(2.02020656693165307700e1);
    const __m256d P0       = _mm256_set1_pd(1.51390680115615096133e3);

    const __m256d Q1       = _mm256_set1_pd(2.33184211722314911771e2);
    const __m256d Q0       = _mm256_set1_pd(4.36821166879210612817e3);
    const __m256d one      = _mm256_set1_pd(1.0);
    const __m256d two      = _mm256_set1_pd(2.0);

    __m256d       valuemask;
    __m256i       iexppart;
    __m128i       iexppart128a, iexppart128b;
    __m256d       fexppart;
    __m256d       intpart;
    __m256d       z, z2;
    __m256d       PolyP, PolyQ;

    iexppart128a = _mm256_cvtpd_epi32(x);
    intpart      = _mm256_round_pd(x, _MM_FROUND_TO_NEAREST_INT);

    /* Add exponent bias */
    iexppart128a = _mm_add_epi32(iexppart128a, expbase);

    /* We now want to shift the exponent 52 positions left, but to achieve this we need
     * to separate the 128-bit register data into two registers (each holding two 64-bit fields),
     * shift them, and then merge into a single __m256d.
     * Elements 0/1 should end up in iexppart128a, and 2/3 in iexppart128b.
     * It doesn't matter what we put in the 2nd/4th position, since that data will be
     * shifted out and replaced with zeros.
     */
    iexppart128b = _mm_shuffle_epi32(iexppart128a, _MM_SHUFFLE(3, 3, 2, 2));
    iexppart128a = _mm_shuffle_epi32(iexppart128a, _MM_SHUFFLE(1, 1, 0, 0));

    iexppart128b = _mm_slli_epi64(iexppart128b, 52);
    iexppart128a = _mm_slli_epi64(iexppart128a, 52);

    iexppart  = _mm256_castsi128_si256(iexppart128a);
    iexppart  = _mm256_insertf128_si256(iexppart, iexppart128b, 0x1);

    valuemask = _mm256_cmp_pd(arglimit, gmx_mm256_abs_pd(x), _CMP_GE_OQ);
    fexppart  = _mm256_and_pd(valuemask, _mm256_castsi256_pd(iexppart));

    z         = _mm256_sub_pd(x, intpart);

    z2        = _mm256_mul_pd(z, z);

    PolyP     = _mm256_mul_pd(P2, z2);
    PolyP     = _mm256_add_pd(PolyP, P1);
    PolyQ     = _mm256_add_pd(z2, Q1);
    PolyP     = _mm256_mul_pd(PolyP, z2);
    PolyQ     = _mm256_mul_pd(PolyQ, z2);
    PolyP     = _mm256_add_pd(PolyP, P0);
    PolyQ     = _mm256_add_pd(PolyQ, Q0);
    PolyP     = _mm256_mul_pd(PolyP, z);

    z         = _mm256_mul_pd(PolyP, gmx_mm256_inv_pd(_mm256_sub_pd(PolyQ, PolyP)));
    z         = _mm256_add_pd(one, _mm256_mul_pd(two, z));

    z         = _mm256_mul_pd(z, fexppart);

    return z;
}
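
/* Illustrative scalar sketch (added; hypothetical helper, with the Padé
 * fraction stubbed by a short Taylor series - the real routine uses the P/Q
 * coefficients above): split x into integer part n and remainder w in
 * [-0.5,0.5], build 2^n by writing the biased exponent n+1023 into bits 62-52
 * of a double, and multiply by the 2^w approximation.
 */
static gmx_inline double
gmx_scalar_exp2_structure_sketch(double x)
{
    union { double d; unsigned long long u; } pow2n;
    double n  = floor(x + 0.5);                  /* round to nearest integer */
    double w  = x - n;                           /* remainder in [-0.5,0.5]  */
    double tw = 1.0 + 0.6931471805599453*w
                    + 0.2402265069591007*w*w;    /* crude 2^w approximation  */

    pow2n.u = (unsigned long long)((long long)n + 1023) << 52;  /* exact 2^n */

    return pow2n.d*tw;
}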
/* 2^x function, 128 bit */
static gmx_inline __m128d
gmx_mm_exp2_pd(__m128d x)
{
    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
    const __m128d arglimit = _mm_set1_pd(1022.0);
    const __m128i expbase  = _mm_set1_epi32(1023);

    const __m128d P2       = _mm_set1_pd(2.30933477057345225087e-2);
    const __m128d P1       = _mm_set1_pd(2.02020656693165307700e1);
    const __m128d P0       = _mm_set1_pd(1.51390680115615096133e3);

    const __m128d Q1       = _mm_set1_pd(2.33184211722314911771e2);
    const __m128d Q0       = _mm_set1_pd(4.36821166879210612817e3);
    const __m128d one      = _mm_set1_pd(1.0);
    const __m128d two      = _mm_set1_pd(2.0);

    __m128d       valuemask;
    __m128i       iexppart;
    __m128d       fexppart;
    __m128d       intpart;
    __m128d       z, z2;
    __m128d       PolyP, PolyQ;

    iexppart = _mm_cvtpd_epi32(x);
    intpart  = _mm_round_pd(x, _MM_FROUND_TO_NEAREST_INT);

    /* The two lowest elements of iexppart now contain 32-bit numbers with a correctly biased exponent.
     * To be able to shift it into the exponent for a double precision number we first need to
     * shuffle so that the lower half contains the first element, and the upper half the second.
     * This should really be done as a zero-extension, but since the next instructions will shift
     * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
     * (thus we just use element 2 from iexppart).
     */
    iexppart = _mm_shuffle_epi32(iexppart, _MM_SHUFFLE(2, 1, 2, 0));

    /* Do the shift operation on the 64-bit registers */
    iexppart = _mm_add_epi32(iexppart, expbase);
    iexppart = _mm_slli_epi64(iexppart, 52);

    valuemask = _mm_cmpge_pd(arglimit, gmx_mm_abs_pd(x));
    fexppart  = _mm_and_pd(valuemask, gmx_mm_castsi128_pd(iexppart));

    z         = _mm_sub_pd(x, intpart);
    z2        = _mm_mul_pd(z, z);

    PolyP     = _mm_mul_pd(P2, z2);
    PolyP     = _mm_add_pd(PolyP, P1);
    PolyQ     = _mm_add_pd(z2, Q1);
    PolyP     = _mm_mul_pd(PolyP, z2);
    PolyQ     = _mm_mul_pd(PolyQ, z2);
    PolyP     = _mm_add_pd(PolyP, P0);
    PolyQ     = _mm_add_pd(PolyQ, Q0);
    PolyP     = _mm_mul_pd(PolyP, z);

    z         = _mm_mul_pd(PolyP, gmx_mm_inv_pd(_mm_sub_pd(PolyQ, PolyP)));
    z         = _mm_add_pd(one, _mm_mul_pd(two, z));

    z         = _mm_mul_pd(z, fexppart);

    return z;
}
/* Exponential function, 256 bit. This could be calculated from 2^x as Exp(x)=2^(y),
 * where y=log2(e)*x, but there will then be a small rounding error since we lose
 * some precision due to the multiplication. This will then be magnified a lot by
 * the exponential.
 *
 * Instead, we calculate the fractional part directly as a Padé approximation of
 * Exp(z) on [-0.5,0.5]. We use extended precision arithmetics to calculate the fraction
 * remaining after 2^y, which avoids the precision-loss.
 */
static gmx_inline __m256d
gmx_mm256_exp_pd(__m256d exparg)
{
    const __m256d argscale     = _mm256_set1_pd(1.4426950408889634073599);
    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
    const __m256d arglimit     = _mm256_set1_pd(1022.0);
    const __m128i expbase      = _mm_set1_epi32(1023);

    const __m256d invargscale0 = _mm256_set1_pd(6.93145751953125e-1);
    const __m256d invargscale1 = _mm256_set1_pd(1.42860682030941723212e-6);

    const __m256d P2           = _mm256_set1_pd(1.26177193074810590878e-4);
    const __m256d P1           = _mm256_set1_pd(3.02994407707441961300e-2);

    const __m256d Q3           = _mm256_set1_pd(3.00198505138664455042E-6);
    const __m256d Q2           = _mm256_set1_pd(2.52448340349684104192E-3);
    const __m256d Q1           = _mm256_set1_pd(2.27265548208155028766E-1);

    const __m256d one          = _mm256_set1_pd(1.0);
    const __m256d two          = _mm256_set1_pd(2.0);

    __m256d       valuemask;
    __m256i       iexppart;
    __m128i       iexppart128a, iexppart128b;
    __m256d       fexppart;
    __m256d       intpart;
    __m256d       x, z, z2;
    __m256d       PolyP, PolyQ;

    x            = _mm256_mul_pd(exparg, argscale);

    iexppart128a = _mm256_cvtpd_epi32(x);
    intpart      = _mm256_round_pd(x, _MM_FROUND_TO_NEAREST_INT);

    /* Add exponent bias */
    iexppart128a = _mm_add_epi32(iexppart128a, expbase);

    /* We now want to shift the exponent 52 positions left, but to achieve this we need
     * to separate the 128-bit register data into two registers (each holding two 64-bit fields),
     * shift them, and then merge into a single __m256d.
     * Elements 0/1 should end up in iexppart128a, and 2/3 in iexppart128b.
     * It doesn't matter what we put in the 2nd/4th position, since that data will be
     * shifted out and replaced with zeros.
     */
    iexppart128b = _mm_shuffle_epi32(iexppart128a, _MM_SHUFFLE(3, 3, 2, 2));
    iexppart128a = _mm_shuffle_epi32(iexppart128a, _MM_SHUFFLE(1, 1, 0, 0));

    iexppart128b = _mm_slli_epi64(iexppart128b, 52);
    iexppart128a = _mm_slli_epi64(iexppart128a, 52);

    iexppart  = _mm256_castsi128_si256(iexppart128a);
    iexppart  = _mm256_insertf128_si256(iexppart, iexppart128b, 0x1);

    valuemask = _mm256_cmp_pd(arglimit, gmx_mm256_abs_pd(x), _CMP_GE_OQ);
    fexppart  = _mm256_and_pd(valuemask, _mm256_castsi256_pd(iexppart));

    z         = _mm256_sub_pd(exparg, _mm256_mul_pd(invargscale0, intpart));
    z         = _mm256_sub_pd(z, _mm256_mul_pd(invargscale1, intpart));

    z2        = _mm256_mul_pd(z, z);

    PolyQ     = _mm256_mul_pd(Q3, z2);
    PolyQ     = _mm256_add_pd(PolyQ, Q2);
    PolyP     = _mm256_mul_pd(P2, z2);
    PolyQ     = _mm256_mul_pd(PolyQ, z2);
    PolyP     = _mm256_add_pd(PolyP, P1);
    PolyQ     = _mm256_add_pd(PolyQ, Q1);
    PolyP     = _mm256_mul_pd(PolyP, z2);
    PolyQ     = _mm256_mul_pd(PolyQ, z2);
    PolyP     = _mm256_add_pd(PolyP, one);
    PolyQ     = _mm256_add_pd(PolyQ, two);

    PolyP     = _mm256_mul_pd(PolyP, z);

    z         = _mm256_mul_pd(PolyP, gmx_mm256_inv_pd(_mm256_sub_pd(PolyQ, PolyP)));
    z         = _mm256_add_pd(one, _mm256_mul_pd(two, z));

    z         = _mm256_mul_pd(z, fexppart);

    return z;
}
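
/* Illustrative scalar sketch (added; hypothetical helper): the two
 * subtractions forming z above are extended-precision argument reduction.
 * ln(2) is stored as invargscale0+invargscale1, where the high part is
 * exactly representable in a handful of mantissa bits, so n*invargscale0
 * subtracts without rounding and the tiny n*invargscale1 term removes the
 * residual error.
 */
static gmx_inline double
gmx_scalar_exp_reduction_sketch(double exparg)
{
    const double ln2hi = 6.93145751953125e-1;        /* high bits of ln(2) */
    const double ln2lo = 1.42860682030941723212e-6;  /* ln(2) - ln2hi      */
    double       n     = floor(exparg*1.4426950408889634 + 0.5); /* round(x/ln2) */

    return (exparg - n*ln2hi) - n*ln2lo;             /* z with |z| <= ln(2)/2 */
}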
/* Exponential function, 128 bit */
static gmx_inline __m128d
gmx_mm_exp_pd(__m128d exparg)
{
    const __m128d argscale     = _mm_set1_pd(1.4426950408889634073599);
    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
    const __m128d arglimit     = _mm_set1_pd(1022.0);
    const __m128i expbase      = _mm_set1_epi32(1023);

    const __m128d invargscale0 = _mm_set1_pd(6.93145751953125e-1);
    const __m128d invargscale1 = _mm_set1_pd(1.42860682030941723212e-6);

    const __m128d P2           = _mm_set1_pd(1.26177193074810590878e-4);
    const __m128d P1           = _mm_set1_pd(3.02994407707441961300e-2);

    const __m128d Q3           = _mm_set1_pd(3.00198505138664455042E-6);
    const __m128d Q2           = _mm_set1_pd(2.52448340349684104192E-3);
    const __m128d Q1           = _mm_set1_pd(2.27265548208155028766E-1);

    const __m128d one          = _mm_set1_pd(1.0);
    const __m128d two          = _mm_set1_pd(2.0);

    __m128d       valuemask;
    __m128i       iexppart;
    __m128d       fexppart;
    __m128d       intpart;
    __m128d       x, z, z2;
    __m128d       PolyP, PolyQ;

    x        = _mm_mul_pd(exparg, argscale);

    iexppart = _mm_cvtpd_epi32(x);
    intpart  = _mm_round_pd(x, _MM_FROUND_TO_NEAREST_INT);

    /* The two lowest elements of iexppart now contain 32-bit numbers with a correctly biased exponent.
     * To be able to shift it into the exponent for a double precision number we first need to
     * shuffle so that the lower half contains the first element, and the upper half the second.
     * This should really be done as a zero-extension, but since the next instructions will shift
     * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
     * (thus we just use element 2 from iexppart).
     */
    iexppart = _mm_shuffle_epi32(iexppart, _MM_SHUFFLE(2, 1, 2, 0));

    /* Do the shift operation on the 64-bit registers */
    iexppart = _mm_add_epi32(iexppart, expbase);
    iexppart = _mm_slli_epi64(iexppart, 52);

    valuemask = _mm_cmpge_pd(arglimit, gmx_mm_abs_pd(x));
    fexppart  = _mm_and_pd(valuemask, gmx_mm_castsi128_pd(iexppart));

    z         = _mm_sub_pd(exparg, _mm_mul_pd(invargscale0, intpart));
    z         = _mm_sub_pd(z, _mm_mul_pd(invargscale1, intpart));

    z2        = _mm_mul_pd(z, z);

    PolyQ     = _mm_mul_pd(Q3, z2);
    PolyQ     = _mm_add_pd(PolyQ, Q2);
    PolyP     = _mm_mul_pd(P2, z2);
    PolyQ     = _mm_mul_pd(PolyQ, z2);
    PolyP     = _mm_add_pd(PolyP, P1);
    PolyQ     = _mm_add_pd(PolyQ, Q1);
    PolyP     = _mm_mul_pd(PolyP, z2);
    PolyQ     = _mm_mul_pd(PolyQ, z2);
    PolyP     = _mm_add_pd(PolyP, one);
    PolyQ     = _mm_add_pd(PolyQ, two);

    PolyP     = _mm_mul_pd(PolyP, z);

    z         = _mm_mul_pd(PolyP, gmx_mm_inv_pd(_mm_sub_pd(PolyQ, PolyP)));
    z         = _mm_add_pd(one, _mm_mul_pd(two, z));

    z         = _mm_mul_pd(z, fexppart);

    return z;
}
static gmx_inline __m256d
gmx_mm256_log_pd(__m256d x)
{
    /* Same algorithm as cephes library */
    const __m256d expmask    = _mm256_castsi256_pd( _mm256_set_epi32(0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000,
                                                                     0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000) );

    const __m128i expbase_m1 = _mm_set1_epi32(1023-1); /* We want non-IEEE format */

    const __m256d half       = _mm256_set1_pd(0.5);
    const __m256d one        = _mm256_set1_pd(1.0);
    const __m256d two        = _mm256_set1_pd(2.0);
    const __m256d invsq2     = _mm256_set1_pd(1.0/sqrt(2.0));

    const __m256d corr1      = _mm256_set1_pd(-2.121944400546905827679e-4);
    const __m256d corr2      = _mm256_set1_pd(0.693359375);

    const __m256d P5         = _mm256_set1_pd(1.01875663804580931796e-4);
    const __m256d P4         = _mm256_set1_pd(4.97494994976747001425e-1);
    const __m256d P3         = _mm256_set1_pd(4.70579119878881725854e0);
    const __m256d P2         = _mm256_set1_pd(1.44989225341610930846e1);
    const __m256d P1         = _mm256_set1_pd(1.79368678507819816313e1);
    const __m256d P0         = _mm256_set1_pd(7.70838733755885391666e0);

    const __m256d Q4         = _mm256_set1_pd(1.12873587189167450590e1);
    const __m256d Q3         = _mm256_set1_pd(4.52279145837532221105e1);
    const __m256d Q2         = _mm256_set1_pd(8.29875266912776603211e1);
    const __m256d Q1         = _mm256_set1_pd(7.11544750618563894466e1);
    const __m256d Q0         = _mm256_set1_pd(2.31251620126765340583e1);

    const __m256d R2         = _mm256_set1_pd(-7.89580278884799154124e-1);
    const __m256d R1         = _mm256_set1_pd(1.63866645699558079767e1);
    const __m256d R0         = _mm256_set1_pd(-6.41409952958715622951e1);

    const __m256d S2         = _mm256_set1_pd(-3.56722798256324312549E1);
    const __m256d S1         = _mm256_set1_pd(3.12093766372244180303E2);
    const __m256d S0         = _mm256_set1_pd(-7.69691943550460008604E2);

    __m256d fexp;
    __m256i iexp;
    __m128i iexp128a, iexp128b;
    __m256d mask1, mask2;
    __m256d corr, t1, t2, q;
    __m256d zA, yA, xA, zB, yB, xB, z;
    __m256d polyR, polyS;
    __m256d polyP1, polyP2, polyQ1, polyQ2;

    /* Separate x into exponent and mantissa, with a mantissa in the range [0.5..1[ (not IEEE754 standard!) */
    fexp     = _mm256_and_pd(x, expmask);

    iexp     = _mm256_castpd_si256(fexp);
    iexp128b = _mm256_extractf128_si256(iexp, 0x1);
    iexp128a = _mm256_castsi256_si128(iexp);

    iexp128a = _mm_srli_epi64(iexp128a, 52);
    iexp128b = _mm_srli_epi64(iexp128b, 52);
    /* Merge into a single register */
    iexp128a = _mm_shuffle_epi32(iexp128a, _MM_SHUFFLE(1, 1, 2, 0));
    iexp128b = _mm_shuffle_epi32(iexp128b, _MM_SHUFFLE(2, 0, 1, 1));
    iexp128a = _mm_or_si128(iexp128a, iexp128b);
    iexp128a = _mm_sub_epi32(iexp128a, expbase_m1);

    fexp     = _mm256_cvtepi32_pd(iexp128a);

    x        = _mm256_andnot_pd(expmask, x); /* Get mantissa */
    x        = _mm256_or_pd(x, one);
    x        = _mm256_mul_pd(x, half);

    mask1    = _mm256_cmp_pd(gmx_mm256_abs_pd(fexp), two, _CMP_GT_OQ);
    mask2    = _mm256_cmp_pd(x, invsq2, _CMP_LT_OQ);

    fexp     = _mm256_sub_pd(fexp, _mm256_and_pd(mask2, one));

    /* If mask1 is set ('A') */
    zA     = _mm256_sub_pd(x, half);
    t1     = _mm256_blendv_pd( zA, x, mask2 );
    zA     = _mm256_sub_pd(t1, half);
    t2     = _mm256_blendv_pd( x, zA, mask2 );
    yA     = _mm256_mul_pd(half, _mm256_add_pd(t2, one));

    xA     = _mm256_mul_pd(zA, gmx_mm256_inv_pd(yA));
    zA     = _mm256_mul_pd(xA, xA);

    polyR  = _mm256_mul_pd(R2, zA);
    polyR  = _mm256_add_pd(polyR, R1);
    polyR  = _mm256_mul_pd(polyR, zA);
    polyR  = _mm256_add_pd(polyR, R0);

    polyS  = _mm256_add_pd(zA, S2);
    polyS  = _mm256_mul_pd(polyS, zA);
    polyS  = _mm256_add_pd(polyS, S1);
    polyS  = _mm256_mul_pd(polyS, zA);
    polyS  = _mm256_add_pd(polyS, S0);

    q      = _mm256_mul_pd(polyR, gmx_mm256_inv_pd(polyS));
    zA     = _mm256_mul_pd(_mm256_mul_pd(xA, zA), q);

    zA     = _mm256_add_pd(zA, _mm256_mul_pd(corr1, fexp));
    zA     = _mm256_add_pd(zA, xA);
    zA     = _mm256_add_pd(zA, _mm256_mul_pd(corr2, fexp));

    /* If mask1 is not set ('B') */
    corr   = _mm256_and_pd(mask2, x);
    xB     = _mm256_add_pd(x, corr);
    xB     = _mm256_sub_pd(xB, one);
    zB     = _mm256_mul_pd(xB, xB);

    polyP1 = _mm256_mul_pd(P5, zB);
    polyP2 = _mm256_mul_pd(P4, zB);
    polyP1 = _mm256_add_pd(polyP1, P3);
    polyP2 = _mm256_add_pd(polyP2, P2);
    polyP1 = _mm256_mul_pd(polyP1, zB);
    polyP2 = _mm256_mul_pd(polyP2, zB);
    polyP1 = _mm256_add_pd(polyP1, P1);
    polyP2 = _mm256_add_pd(polyP2, P0);
    polyP1 = _mm256_mul_pd(polyP1, xB);
    polyP1 = _mm256_add_pd(polyP1, polyP2);

    polyQ2 = _mm256_mul_pd(Q4, zB);
    polyQ1 = _mm256_add_pd(zB, Q3);
    polyQ2 = _mm256_add_pd(polyQ2, Q2);
    polyQ1 = _mm256_mul_pd(polyQ1, zB);
    polyQ2 = _mm256_mul_pd(polyQ2, zB);
    polyQ1 = _mm256_add_pd(polyQ1, Q1);
    polyQ2 = _mm256_add_pd(polyQ2, Q0);
    polyQ1 = _mm256_mul_pd(polyQ1, xB);
    polyQ1 = _mm256_add_pd(polyQ1, polyQ2);

    fexp   = _mm256_and_pd(fexp, _mm256_cmp_pd(fexp, _mm256_setzero_pd(), _CMP_NEQ_OQ));

    q      = _mm256_mul_pd(polyP1, gmx_mm256_inv_pd(polyQ1));
    yB     = _mm256_mul_pd(_mm256_mul_pd(xB, zB), q);

    yB     = _mm256_add_pd(yB, _mm256_mul_pd(corr1, fexp));
    yB     = _mm256_sub_pd(yB, _mm256_mul_pd(half, zB));
    zB     = _mm256_add_pd(xB, yB);
    zB     = _mm256_add_pd(zB, _mm256_mul_pd(corr2, fexp));

    z      = _mm256_blendv_pd( zB, zA, mask1 );

    return z;
}
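
/* Illustrative scalar sketch (added; hypothetical helper): the integer
 * manipulation at the top of gmx_mm256_log_pd is a vectorized frexp() -
 * extract the biased exponent, force the mantissa m into [0.5,1), and then
 * compute log(x) = log(m) + fexp*log(2), with log(2) split into corr2+corr1
 * for accuracy.
 */
static gmx_inline double
gmx_scalar_log_split_sketch(double x, int *fexp)
{
    /* frexp() returns m in [0.5,1) with x = m * 2^(*fexp), the same
     * deliberately non-IEEE mantissa convention used by the vector code. */
    return frexp(x, fexp);
}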
static gmx_inline __m128d
gmx_mm_log_pd(__m128d x)
{
    /* Same algorithm as cephes library */
    const __m128d expmask    = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000) );

    const __m128i expbase_m1 = _mm_set1_epi32(1023-1); /* We want non-IEEE format */

    const __m128d half       = _mm_set1_pd(0.5);
    const __m128d one        = _mm_set1_pd(1.0);
    const __m128d two        = _mm_set1_pd(2.0);
    const __m128d invsq2     = _mm_set1_pd(1.0/sqrt(2.0));

    const __m128d corr1      = _mm_set1_pd(-2.121944400546905827679e-4);
    const __m128d corr2      = _mm_set1_pd(0.693359375);

    const __m128d P5         = _mm_set1_pd(1.01875663804580931796e-4);
    const __m128d P4         = _mm_set1_pd(4.97494994976747001425e-1);
    const __m128d P3         = _mm_set1_pd(4.70579119878881725854e0);
    const __m128d P2         = _mm_set1_pd(1.44989225341610930846e1);
    const __m128d P1         = _mm_set1_pd(1.79368678507819816313e1);
    const __m128d P0         = _mm_set1_pd(7.70838733755885391666e0);

    const __m128d Q4         = _mm_set1_pd(1.12873587189167450590e1);
    const __m128d Q3         = _mm_set1_pd(4.52279145837532221105e1);
    const __m128d Q2         = _mm_set1_pd(8.29875266912776603211e1);
    const __m128d Q1         = _mm_set1_pd(7.11544750618563894466e1);
    const __m128d Q0         = _mm_set1_pd(2.31251620126765340583e1);

    const __m128d R2         = _mm_set1_pd(-7.89580278884799154124e-1);
    const __m128d R1         = _mm_set1_pd(1.63866645699558079767e1);
    const __m128d R0         = _mm_set1_pd(-6.41409952958715622951e1);

    const __m128d S2         = _mm_set1_pd(-3.56722798256324312549E1);
    const __m128d S1         = _mm_set1_pd(3.12093766372244180303E2);
    const __m128d S0         = _mm_set1_pd(-7.69691943550460008604E2);

    __m128d fexp;
    __m128i iexp;
    __m128d mask1, mask2;
    __m128d corr, t1, t2, q;
    __m128d zA, yA, xA, zB, yB, xB, z;
    __m128d polyR, polyS;
    __m128d polyP1, polyP2, polyQ1, polyQ2;

    /* Separate x into exponent and mantissa, with a mantissa in the range [0.5..1[ (not IEEE754 standard!) */
    fexp   = _mm_and_pd(x, expmask);
    iexp   = gmx_mm_castpd_si128(fexp);
    iexp   = _mm_srli_epi64(iexp, 52);
    iexp   = _mm_sub_epi32(iexp, expbase_m1);
    iexp   = _mm_shuffle_epi32(iexp, _MM_SHUFFLE(1, 1, 2, 0) );
    fexp   = _mm_cvtepi32_pd(iexp);

    x      = _mm_andnot_pd(expmask, x); /* Get mantissa */
    x      = _mm_or_pd(x, one);
    x      = _mm_mul_pd(x, half);

    mask1  = _mm_cmpgt_pd(gmx_mm_abs_pd(fexp), two);
    mask2  = _mm_cmplt_pd(x, invsq2);

    fexp   = _mm_sub_pd(fexp, _mm_and_pd(mask2, one));

    /* If mask1 is set ('A') */
    zA     = _mm_sub_pd(x, half);
    t1     = _mm_blendv_pd( zA, x, mask2 );
    zA     = _mm_sub_pd(t1, half);
    t2     = _mm_blendv_pd( x, zA, mask2 );
    yA     = _mm_mul_pd(half, _mm_add_pd(t2, one));

    xA     = _mm_mul_pd(zA, gmx_mm_inv_pd(yA));
    zA     = _mm_mul_pd(xA, xA);

    polyR  = _mm_mul_pd(R2, zA);
    polyR  = _mm_add_pd(polyR, R1);
    polyR  = _mm_mul_pd(polyR, zA);
    polyR  = _mm_add_pd(polyR, R0);

    polyS  = _mm_add_pd(zA, S2);
    polyS  = _mm_mul_pd(polyS, zA);
    polyS  = _mm_add_pd(polyS, S1);
    polyS  = _mm_mul_pd(polyS, zA);
    polyS  = _mm_add_pd(polyS, S0);

    q      = _mm_mul_pd(polyR, gmx_mm_inv_pd(polyS));
    zA     = _mm_mul_pd(_mm_mul_pd(xA, zA), q);

    zA     = _mm_add_pd(zA, _mm_mul_pd(corr1, fexp));
    zA     = _mm_add_pd(zA, xA);
    zA     = _mm_add_pd(zA, _mm_mul_pd(corr2, fexp));

    /* If mask1 is not set ('B') */
    corr   = _mm_and_pd(mask2, x);
    xB     = _mm_add_pd(x, corr);
    xB     = _mm_sub_pd(xB, one);
    zB     = _mm_mul_pd(xB, xB);

    polyP1 = _mm_mul_pd(P5, zB);
    polyP2 = _mm_mul_pd(P4, zB);
    polyP1 = _mm_add_pd(polyP1, P3);
    polyP2 = _mm_add_pd(polyP2, P2);
    polyP1 = _mm_mul_pd(polyP1, zB);
    polyP2 = _mm_mul_pd(polyP2, zB);
    polyP1 = _mm_add_pd(polyP1, P1);
    polyP2 = _mm_add_pd(polyP2, P0);
    polyP1 = _mm_mul_pd(polyP1, xB);
    polyP1 = _mm_add_pd(polyP1, polyP2);

    polyQ2 = _mm_mul_pd(Q4, zB);
    polyQ1 = _mm_add_pd(zB, Q3);
    polyQ2 = _mm_add_pd(polyQ2, Q2);
    polyQ1 = _mm_mul_pd(polyQ1, zB);
    polyQ2 = _mm_mul_pd(polyQ2, zB);
    polyQ1 = _mm_add_pd(polyQ1, Q1);
    polyQ2 = _mm_add_pd(polyQ2, Q0);
    polyQ1 = _mm_mul_pd(polyQ1, xB);
    polyQ1 = _mm_add_pd(polyQ1, polyQ2);

    fexp   = _mm_and_pd(fexp, _mm_cmpneq_pd(fexp, _mm_setzero_pd()));

    q      = _mm_mul_pd(polyP1, gmx_mm_inv_pd(polyQ1));
    yB     = _mm_mul_pd(_mm_mul_pd(xB, zB), q);

    yB     = _mm_add_pd(yB, _mm_mul_pd(corr1, fexp));
    yB     = _mm_sub_pd(yB, _mm_mul_pd(half, zB));
    zB     = _mm_add_pd(xB, yB);
    zB     = _mm_add_pd(zB, _mm_mul_pd(corr2, fexp));

    z      = _mm_blendv_pd( zB, zA, mask1 );

    return z;
}
static gmx_inline __m256d
gmx_mm256_erf_pd(__m256d x)
{
    /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
    const __m256d CAP4     = _mm256_set1_pd(-0.431780540597889301512e-4);
    const __m256d CAP3     = _mm256_set1_pd(-0.00578562306260059236059);
    const __m256d CAP2     = _mm256_set1_pd(-0.028593586920219752446);
    const __m256d CAP1     = _mm256_set1_pd(-0.315924962948621698209);
    const __m256d CAP0     = _mm256_set1_pd(0.14952975608477029151);

    const __m256d CAQ5     = _mm256_set1_pd(-0.374089300177174709737e-5);
    const __m256d CAQ4     = _mm256_set1_pd(0.00015126584532155383535);
    const __m256d CAQ3     = _mm256_set1_pd(0.00536692680669480725423);
    const __m256d CAQ2     = _mm256_set1_pd(0.0668686825594046122636);
    const __m256d CAQ1     = _mm256_set1_pd(0.402604990869284362773);

    const __m256d CAoffset = _mm256_set1_pd(0.9788494110107421875);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
    const __m256d CBP6     = _mm256_set1_pd(2.49650423685462752497647637088e-10);
    const __m256d CBP5     = _mm256_set1_pd(0.00119770193298159629350136085658);
    const __m256d CBP4     = _mm256_set1_pd(0.0164944422378370965881008942733);
    const __m256d CBP3     = _mm256_set1_pd(0.0984581468691775932063932439252);
    const __m256d CBP2     = _mm256_set1_pd(0.317364595806937763843589437418);
    const __m256d CBP1     = _mm256_set1_pd(0.554167062641455850932670067075);
    const __m256d CBP0     = _mm256_set1_pd(0.427583576155807163756925301060);
    const __m256d CBQ7     = _mm256_set1_pd(0.00212288829699830145976198384930);
    const __m256d CBQ6     = _mm256_set1_pd(0.0334810979522685300554606393425);
    const __m256d CBQ5     = _mm256_set1_pd(0.2361713785181450957579508850717);
    const __m256d CBQ4     = _mm256_set1_pd(0.955364736493055670530981883072);
    const __m256d CBQ3     = _mm256_set1_pd(2.36815675631420037315349279199);
    const __m256d CBQ2     = _mm256_set1_pd(3.55261649184083035537184223542);
    const __m256d CBQ1     = _mm256_set1_pd(2.93501136050160872574376997993);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
    const __m256d CCP6     = _mm256_set1_pd(-2.8175401114513378771);
    const __m256d CCP5     = _mm256_set1_pd(-3.22729451764143718517);
    const __m256d CCP4     = _mm256_set1_pd(-2.5518551727311523996);
    const __m256d CCP3     = _mm256_set1_pd(-0.687717681153649930619);
    const __m256d CCP2     = _mm256_set1_pd(-0.212652252872804219852);
    const __m256d CCP1     = _mm256_set1_pd(0.0175389834052493308818);
    const __m256d CCP0     = _mm256_set1_pd(0.00628057170626964891937);

    const __m256d CCQ6     = _mm256_set1_pd(5.48409182238641741584);
    const __m256d CCQ5     = _mm256_set1_pd(13.5064170191802889145);
    const __m256d CCQ4     = _mm256_set1_pd(22.9367376522880577224);
    const __m256d CCQ3     = _mm256_set1_pd(15.930646027911794143);
    const __m256d CCQ2     = _mm256_set1_pd(11.0567237927800161565);
    const __m256d CCQ1     = _mm256_set1_pd(2.79257750980575282228);

    const __m256d CCoffset = _mm256_set1_pd(0.5579090118408203125);

    const __m256d one      = _mm256_set1_pd(1.0);
    const __m256d two      = _mm256_set1_pd(2.0);

    const __m256d signbit  = _mm256_castsi256_pd( _mm256_set_epi32(0x80000000, 0x00000000, 0x80000000, 0x00000000,
                                                                   0x80000000, 0x00000000, 0x80000000, 0x00000000) );

    __m256d xabs, x2, x4, t, t2, w, w2;
    __m256d PolyAP0, PolyAP1, PolyAQ0, PolyAQ1;
    __m256d PolyBP0, PolyBP1, PolyBQ0, PolyBQ1;
    __m256d PolyCP0, PolyCP1, PolyCQ0, PolyCQ1;
    __m256d res_erf, res_erfcB, res_erfcC, res_erfc, res;
    __m256d mask, expmx2;

    /* Calculate erf() */
    xabs    = gmx_mm256_abs_pd(x);
    x2      = _mm256_mul_pd(x, x);
    x4      = _mm256_mul_pd(x2, x2);

    PolyAP0 = _mm256_mul_pd(CAP4, x4);
    PolyAP1 = _mm256_mul_pd(CAP3, x4);
    PolyAP0 = _mm256_add_pd(PolyAP0, CAP2);
    PolyAP1 = _mm256_add_pd(PolyAP1, CAP1);
    PolyAP0 = _mm256_mul_pd(PolyAP0, x4);
    PolyAP1 = _mm256_mul_pd(PolyAP1, x2);
    PolyAP0 = _mm256_add_pd(PolyAP0, CAP0);
    PolyAP0 = _mm256_add_pd(PolyAP0, PolyAP1);

    PolyAQ1 = _mm256_mul_pd(CAQ5, x4);
    PolyAQ0 = _mm256_mul_pd(CAQ4, x4);
    PolyAQ1 = _mm256_add_pd(PolyAQ1, CAQ3);
    PolyAQ0 = _mm256_add_pd(PolyAQ0, CAQ2);
    PolyAQ1 = _mm256_mul_pd(PolyAQ1, x4);
    PolyAQ0 = _mm256_mul_pd(PolyAQ0, x4);
    PolyAQ1 = _mm256_add_pd(PolyAQ1, CAQ1);
    PolyAQ0 = _mm256_add_pd(PolyAQ0, one);
    PolyAQ1 = _mm256_mul_pd(PolyAQ1, x2);
    PolyAQ0 = _mm256_add_pd(PolyAQ0, PolyAQ1);

    res_erf = _mm256_mul_pd(PolyAP0, gmx_mm256_inv_pd(PolyAQ0));
    res_erf = _mm256_add_pd(CAoffset, res_erf);
    res_erf = _mm256_mul_pd(x, res_erf);

    /* Calculate erfc() in range [1,4.5] */
    t       = _mm256_sub_pd(xabs, one);
    t2      = _mm256_mul_pd(t, t);

    PolyBP0 = _mm256_mul_pd(CBP6, t2);
    PolyBP1 = _mm256_mul_pd(CBP5, t2);
    PolyBP0 = _mm256_add_pd(PolyBP0, CBP4);
    PolyBP1 = _mm256_add_pd(PolyBP1, CBP3);
    PolyBP0 = _mm256_mul_pd(PolyBP0, t2);
    PolyBP1 = _mm256_mul_pd(PolyBP1, t2);
    PolyBP0 = _mm256_add_pd(PolyBP0, CBP2);
    PolyBP1 = _mm256_add_pd(PolyBP1, CBP1);
    PolyBP0 = _mm256_mul_pd(PolyBP0, t2);
    PolyBP1 = _mm256_mul_pd(PolyBP1, t);
    PolyBP0 = _mm256_add_pd(PolyBP0, CBP0);
    PolyBP0 = _mm256_add_pd(PolyBP0, PolyBP1);

    PolyBQ1 = _mm256_mul_pd(CBQ7, t2);
    PolyBQ0 = _mm256_mul_pd(CBQ6, t2);
    PolyBQ1 = _mm256_add_pd(PolyBQ1, CBQ5);
    PolyBQ0 = _mm256_add_pd(PolyBQ0, CBQ4);
    PolyBQ1 = _mm256_mul_pd(PolyBQ1, t2);
    PolyBQ0 = _mm256_mul_pd(PolyBQ0, t2);
    PolyBQ1 = _mm256_add_pd(PolyBQ1, CBQ3);
    PolyBQ0 = _mm256_add_pd(PolyBQ0, CBQ2);
    PolyBQ1 = _mm256_mul_pd(PolyBQ1, t2);
    PolyBQ0 = _mm256_mul_pd(PolyBQ0, t2);
    PolyBQ1 = _mm256_add_pd(PolyBQ1, CBQ1);
    PolyBQ0 = _mm256_add_pd(PolyBQ0, one);
    PolyBQ1 = _mm256_mul_pd(PolyBQ1, t);
    PolyBQ0 = _mm256_add_pd(PolyBQ0, PolyBQ1);

    res_erfcB = _mm256_mul_pd(PolyBP0, gmx_mm256_inv_pd(PolyBQ0));

    res_erfcB = _mm256_mul_pd(res_erfcB, xabs);

    /* Calculate erfc() in range [4.5,inf] */
    w       = gmx_mm256_inv_pd(xabs);
    w2      = _mm256_mul_pd(w, w);

    PolyCP0 = _mm256_mul_pd(CCP6, w2);
    PolyCP1 = _mm256_mul_pd(CCP5, w2);
    PolyCP0 = _mm256_add_pd(PolyCP0, CCP4);
    PolyCP1 = _mm256_add_pd(PolyCP1, CCP3);
    PolyCP0 = _mm256_mul_pd(PolyCP0, w2);
    PolyCP1 = _mm256_mul_pd(PolyCP1, w2);
    PolyCP0 = _mm256_add_pd(PolyCP0, CCP2);
    PolyCP1 = _mm256_add_pd(PolyCP1, CCP1);
    PolyCP0 = _mm256_mul_pd(PolyCP0, w2);
    PolyCP1 = _mm256_mul_pd(PolyCP1, w);
    PolyCP0 = _mm256_add_pd(PolyCP0, CCP0);
    PolyCP0 = _mm256_add_pd(PolyCP0, PolyCP1);

    PolyCQ0 = _mm256_mul_pd(CCQ6, w2);
    PolyCQ1 = _mm256_mul_pd(CCQ5, w2);
    PolyCQ0 = _mm256_add_pd(PolyCQ0, CCQ4);
    PolyCQ1 = _mm256_add_pd(PolyCQ1, CCQ3);
    PolyCQ0 = _mm256_mul_pd(PolyCQ0, w2);
    PolyCQ1 = _mm256_mul_pd(PolyCQ1, w2);
    PolyCQ0 = _mm256_add_pd(PolyCQ0, CCQ2);
    PolyCQ1 = _mm256_add_pd(PolyCQ1, CCQ1);
    PolyCQ0 = _mm256_mul_pd(PolyCQ0, w2);
    PolyCQ1 = _mm256_mul_pd(PolyCQ1, w);
    PolyCQ0 = _mm256_add_pd(PolyCQ0, one);
    PolyCQ0 = _mm256_add_pd(PolyCQ0, PolyCQ1);

    expmx2  = gmx_mm256_exp_pd( _mm256_or_pd(signbit, x2) );

    res_erfcC = _mm256_mul_pd(PolyCP0, gmx_mm256_inv_pd(PolyCQ0));
    res_erfcC = _mm256_add_pd(res_erfcC, CCoffset);
    res_erfcC = _mm256_mul_pd(res_erfcC, w);

    mask     = _mm256_cmp_pd(xabs, _mm256_set1_pd(4.5), _CMP_GT_OQ);
    res_erfc = _mm256_blendv_pd(res_erfcB, res_erfcC, mask);

    res_erfc = _mm256_mul_pd(res_erfc, expmx2);

    /* erfc(x<0) = 2-erfc(|x|) */
    mask     = _mm256_cmp_pd(x, _mm256_setzero_pd(), _CMP_LT_OQ);
    res_erfc = _mm256_blendv_pd(res_erfc, _mm256_sub_pd(two, res_erfc), mask);

    /* Select erf() or erfc() */
    mask = _mm256_cmp_pd(xabs, one, _CMP_LT_OQ);
    res  = _mm256_blendv_pd(_mm256_sub_pd(one, res_erfc), res_erf, mask);

    return res;
}
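
/* Illustrative scalar reference (added; hypothetical helper, using C99 libm):
 * the blends at the end of gmx_mm256_erf_pd implement this selection - three
 * approximation ranges plus the reflection erfc(-x) = 2 - erfc(|x|).
 */
static gmx_inline double
gmx_scalar_erf_selection_sketch(double x)
{
    double xabs = fabs(x);
    double r_erfc;

    if (xabs < 1.0)
    {
        return erf(x);              /* range A: direct rational fit of erf */
    }
    r_erfc = erfc(xabs);            /* range B [1,4.5] or C (4.5,inf)      */
    if (x < 0.0)
    {
        r_erfc = 2.0 - r_erfc;      /* erfc(x<0) = 2 - erfc(|x|)           */
    }
    return 1.0 - r_erfc;            /* erf(x) = 1 - erfc(x)                */
}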
static gmx_inline __m128d
gmx_mm_erf_pd(__m128d x)
{
    /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
    const __m128d CAP4     = _mm_set1_pd(-0.431780540597889301512e-4);
    const __m128d CAP3     = _mm_set1_pd(-0.00578562306260059236059);
    const __m128d CAP2     = _mm_set1_pd(-0.028593586920219752446);
    const __m128d CAP1     = _mm_set1_pd(-0.315924962948621698209);
    const __m128d CAP0     = _mm_set1_pd(0.14952975608477029151);

    const __m128d CAQ5     = _mm_set1_pd(-0.374089300177174709737e-5);
    const __m128d CAQ4     = _mm_set1_pd(0.00015126584532155383535);
    const __m128d CAQ3     = _mm_set1_pd(0.00536692680669480725423);
    const __m128d CAQ2     = _mm_set1_pd(0.0668686825594046122636);
    const __m128d CAQ1     = _mm_set1_pd(0.402604990869284362773);

    const __m128d CAoffset = _mm_set1_pd(0.9788494110107421875);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
    const __m128d CBP6     = _mm_set1_pd(2.49650423685462752497647637088e-10);
    const __m128d CBP5     = _mm_set1_pd(0.00119770193298159629350136085658);
    const __m128d CBP4     = _mm_set1_pd(0.0164944422378370965881008942733);
    const __m128d CBP3     = _mm_set1_pd(0.0984581468691775932063932439252);
    const __m128d CBP2     = _mm_set1_pd(0.317364595806937763843589437418);
    const __m128d CBP1     = _mm_set1_pd(0.554167062641455850932670067075);
    const __m128d CBP0     = _mm_set1_pd(0.427583576155807163756925301060);
    const __m128d CBQ7     = _mm_set1_pd(0.00212288829699830145976198384930);
    const __m128d CBQ6     = _mm_set1_pd(0.0334810979522685300554606393425);
    const __m128d CBQ5     = _mm_set1_pd(0.2361713785181450957579508850717);
    const __m128d CBQ4     = _mm_set1_pd(0.955364736493055670530981883072);
    const __m128d CBQ3     = _mm_set1_pd(2.36815675631420037315349279199);
    const __m128d CBQ2     = _mm_set1_pd(3.55261649184083035537184223542);
    const __m128d CBQ1     = _mm_set1_pd(2.93501136050160872574376997993);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
    const __m128d CCP6     = _mm_set1_pd(-2.8175401114513378771);
    const __m128d CCP5     = _mm_set1_pd(-3.22729451764143718517);
    const __m128d CCP4     = _mm_set1_pd(-2.5518551727311523996);
    const __m128d CCP3     = _mm_set1_pd(-0.687717681153649930619);
    const __m128d CCP2     = _mm_set1_pd(-0.212652252872804219852);
    const __m128d CCP1     = _mm_set1_pd(0.0175389834052493308818);
    const __m128d CCP0     = _mm_set1_pd(0.00628057170626964891937);

    const __m128d CCQ6     = _mm_set1_pd(5.48409182238641741584);
    const __m128d CCQ5     = _mm_set1_pd(13.5064170191802889145);
    const __m128d CCQ4     = _mm_set1_pd(22.9367376522880577224);
    const __m128d CCQ3     = _mm_set1_pd(15.930646027911794143);
    const __m128d CCQ2     = _mm_set1_pd(11.0567237927800161565);
    const __m128d CCQ1     = _mm_set1_pd(2.79257750980575282228);

    const __m128d CCoffset = _mm_set1_pd(0.5579090118408203125);

    const __m128d one      = _mm_set1_pd(1.0);
    const __m128d two      = _mm_set1_pd(2.0);

    const __m128d signbit  = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000, 0x00000000, 0x80000000, 0x00000000) );

    __m128d xabs, x2, x4, t, t2, w, w2;
    __m128d PolyAP0, PolyAP1, PolyAQ0, PolyAQ1;
    __m128d PolyBP0, PolyBP1, PolyBQ0, PolyBQ1;
    __m128d PolyCP0, PolyCP1, PolyCQ0, PolyCQ1;
    __m128d res_erf, res_erfcB, res_erfcC, res_erfc, res;
    __m128d mask, expmx2;

    /* Calculate erf() */
    xabs    = gmx_mm_abs_pd(x);
    x2      = _mm_mul_pd(x, x);
    x4      = _mm_mul_pd(x2, x2);

    PolyAP0 = _mm_mul_pd(CAP4, x4);
    PolyAP1 = _mm_mul_pd(CAP3, x4);
    PolyAP0 = _mm_add_pd(PolyAP0, CAP2);
    PolyAP1 = _mm_add_pd(PolyAP1, CAP1);
    PolyAP0 = _mm_mul_pd(PolyAP0, x4);
    PolyAP1 = _mm_mul_pd(PolyAP1, x2);
    PolyAP0 = _mm_add_pd(PolyAP0, CAP0);
    PolyAP0 = _mm_add_pd(PolyAP0, PolyAP1);

    PolyAQ1 = _mm_mul_pd(CAQ5, x4);
    PolyAQ0 = _mm_mul_pd(CAQ4, x4);
    PolyAQ1 = _mm_add_pd(PolyAQ1, CAQ3);
    PolyAQ0 = _mm_add_pd(PolyAQ0, CAQ2);
    PolyAQ1 = _mm_mul_pd(PolyAQ1, x4);
    PolyAQ0 = _mm_mul_pd(PolyAQ0, x4);
    PolyAQ1 = _mm_add_pd(PolyAQ1, CAQ1);
    PolyAQ0 = _mm_add_pd(PolyAQ0, one);
    PolyAQ1 = _mm_mul_pd(PolyAQ1, x2);
    PolyAQ0 = _mm_add_pd(PolyAQ0, PolyAQ1);

    res_erf = _mm_mul_pd(PolyAP0, gmx_mm_inv_pd(PolyAQ0));
    res_erf = _mm_add_pd(CAoffset, res_erf);
    res_erf = _mm_mul_pd(x, res_erf);

    /* Calculate erfc() in range [1,4.5] */
    t       = _mm_sub_pd(xabs, one);
    t2      = _mm_mul_pd(t, t);

    PolyBP0 = _mm_mul_pd(CBP6, t2);
    PolyBP1 = _mm_mul_pd(CBP5, t2);
    PolyBP0 = _mm_add_pd(PolyBP0, CBP4);
    PolyBP1 = _mm_add_pd(PolyBP1, CBP3);
    PolyBP0 = _mm_mul_pd(PolyBP0, t2);
    PolyBP1 = _mm_mul_pd(PolyBP1, t2);
    PolyBP0 = _mm_add_pd(PolyBP0, CBP2);
    PolyBP1 = _mm_add_pd(PolyBP1, CBP1);
    PolyBP0 = _mm_mul_pd(PolyBP0, t2);
    PolyBP1 = _mm_mul_pd(PolyBP1, t);
    PolyBP0 = _mm_add_pd(PolyBP0, CBP0);
    PolyBP0 = _mm_add_pd(PolyBP0, PolyBP1);

    PolyBQ1 = _mm_mul_pd(CBQ7, t2);
    PolyBQ0 = _mm_mul_pd(CBQ6, t2);
    PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ5);
    PolyBQ0 = _mm_add_pd(PolyBQ0, CBQ4);
    PolyBQ1 = _mm_mul_pd(PolyBQ1, t2);
    PolyBQ0 = _mm_mul_pd(PolyBQ0, t2);
    PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ3);
    PolyBQ0 = _mm_add_pd(PolyBQ0, CBQ2);
    PolyBQ1 = _mm_mul_pd(PolyBQ1, t2);
    PolyBQ0 = _mm_mul_pd(PolyBQ0, t2);
    PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ1);
    PolyBQ0 = _mm_add_pd(PolyBQ0, one);
    PolyBQ1 = _mm_mul_pd(PolyBQ1, t);
    PolyBQ0 = _mm_add_pd(PolyBQ0, PolyBQ1);

    res_erfcB = _mm_mul_pd(PolyBP0, gmx_mm_inv_pd(PolyBQ0));

    res_erfcB = _mm_mul_pd(res_erfcB, xabs);

    /* Calculate erfc() in range [4.5,inf] */
    w       = gmx_mm_inv_pd(xabs);
    w2      = _mm_mul_pd(w, w);

    PolyCP0 = _mm_mul_pd(CCP6, w2);
    PolyCP1 = _mm_mul_pd(CCP5, w2);
    PolyCP0 = _mm_add_pd(PolyCP0, CCP4);
    PolyCP1 = _mm_add_pd(PolyCP1, CCP3);
    PolyCP0 = _mm_mul_pd(PolyCP0, w2);
    PolyCP1 = _mm_mul_pd(PolyCP1, w2);
    PolyCP0 = _mm_add_pd(PolyCP0, CCP2);
    PolyCP1 = _mm_add_pd(PolyCP1, CCP1);
    PolyCP0 = _mm_mul_pd(PolyCP0, w2);
    PolyCP1 = _mm_mul_pd(PolyCP1, w);
    PolyCP0 = _mm_add_pd(PolyCP0, CCP0);
    PolyCP0 = _mm_add_pd(PolyCP0, PolyCP1);

    PolyCQ0 = _mm_mul_pd(CCQ6, w2);
    PolyCQ1 = _mm_mul_pd(CCQ5, w2);
    PolyCQ0 = _mm_add_pd(PolyCQ0, CCQ4);
    PolyCQ1 = _mm_add_pd(PolyCQ1, CCQ3);
    PolyCQ0 = _mm_mul_pd(PolyCQ0, w2);
    PolyCQ1 = _mm_mul_pd(PolyCQ1, w2);
    PolyCQ0 = _mm_add_pd(PolyCQ0, CCQ2);
    PolyCQ1 = _mm_add_pd(PolyCQ1, CCQ1);
    PolyCQ0 = _mm_mul_pd(PolyCQ0, w2);
    PolyCQ1 = _mm_mul_pd(PolyCQ1, w);
    PolyCQ0 = _mm_add_pd(PolyCQ0, one);
    PolyCQ0 = _mm_add_pd(PolyCQ0, PolyCQ1);

    expmx2  = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );

    res_erfcC = _mm_mul_pd(PolyCP0, gmx_mm_inv_pd(PolyCQ0));
    res_erfcC = _mm_add_pd(res_erfcC, CCoffset);
    res_erfcC = _mm_mul_pd(res_erfcC, w);

    mask     = _mm_cmpgt_pd(xabs, _mm_set1_pd(4.5));
    res_erfc = _mm_blendv_pd(res_erfcB, res_erfcC, mask);

    res_erfc = _mm_mul_pd(res_erfc, expmx2);

    /* erfc(x<0) = 2-erfc(|x|) */
    mask     = _mm_cmplt_pd(x, _mm_setzero_pd());
    res_erfc = _mm_blendv_pd(res_erfc, _mm_sub_pd(two, res_erfc), mask);

    /* Select erf() or erfc() */
    mask = _mm_cmplt_pd(xabs, one);
    res  = _mm_blendv_pd(_mm_sub_pd(one, res_erfc), res_erf, mask);

    return res;
}
static gmx_inline __m256d
gmx_mm256_erfc_pd(__m256d x)
{
    /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
    const __m256d CAP4     = _mm256_set1_pd(-0.431780540597889301512e-4);
    const __m256d CAP3     = _mm256_set1_pd(-0.00578562306260059236059);
    const __m256d CAP2     = _mm256_set1_pd(-0.028593586920219752446);
    const __m256d CAP1     = _mm256_set1_pd(-0.315924962948621698209);
    const __m256d CAP0     = _mm256_set1_pd(0.14952975608477029151);

    const __m256d CAQ5     = _mm256_set1_pd(-0.374089300177174709737e-5);
    const __m256d CAQ4     = _mm256_set1_pd(0.00015126584532155383535);
    const __m256d CAQ3     = _mm256_set1_pd(0.00536692680669480725423);
    const __m256d CAQ2     = _mm256_set1_pd(0.0668686825594046122636);
    const __m256d CAQ1     = _mm256_set1_pd(0.402604990869284362773);

    const __m256d CAoffset = _mm256_set1_pd(0.9788494110107421875);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
    const __m256d CBP6     = _mm256_set1_pd(2.49650423685462752497647637088e-10);
    const __m256d CBP5     = _mm256_set1_pd(0.00119770193298159629350136085658);
    const __m256d CBP4     = _mm256_set1_pd(0.0164944422378370965881008942733);
    const __m256d CBP3     = _mm256_set1_pd(0.0984581468691775932063932439252);
    const __m256d CBP2     = _mm256_set1_pd(0.317364595806937763843589437418);
    const __m256d CBP1     = _mm256_set1_pd(0.554167062641455850932670067075);
    const __m256d CBP0     = _mm256_set1_pd(0.427583576155807163756925301060);
    const __m256d CBQ7     = _mm256_set1_pd(0.00212288829699830145976198384930);
    const __m256d CBQ6     = _mm256_set1_pd(0.0334810979522685300554606393425);
    const __m256d CBQ5     = _mm256_set1_pd(0.2361713785181450957579508850717);
    const __m256d CBQ4     = _mm256_set1_pd(0.955364736493055670530981883072);
    const __m256d CBQ3     = _mm256_set1_pd(2.36815675631420037315349279199);
    const __m256d CBQ2     = _mm256_set1_pd(3.55261649184083035537184223542);
    const __m256d CBQ1     = _mm256_set1_pd(2.93501136050160872574376997993);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
    const __m256d CCP6     = _mm256_set1_pd(-2.8175401114513378771);
    const __m256d CCP5     = _mm256_set1_pd(-3.22729451764143718517);
    const __m256d CCP4     = _mm256_set1_pd(-2.5518551727311523996);
    const __m256d CCP3     = _mm256_set1_pd(-0.687717681153649930619);
    const __m256d CCP2     = _mm256_set1_pd(-0.212652252872804219852);
    const __m256d CCP1     = _mm256_set1_pd(0.0175389834052493308818);
    const __m256d CCP0     = _mm256_set1_pd(0.00628057170626964891937);

    const __m256d CCQ6     = _mm256_set1_pd(5.48409182238641741584);
    const __m256d CCQ5     = _mm256_set1_pd(13.5064170191802889145);
    const __m256d CCQ4     = _mm256_set1_pd(22.9367376522880577224);
    const __m256d CCQ3     = _mm256_set1_pd(15.930646027911794143);
    const __m256d CCQ2     = _mm256_set1_pd(11.0567237927800161565);
    const __m256d CCQ1     = _mm256_set1_pd(2.79257750980575282228);

    const __m256d CCoffset = _mm256_set1_pd(0.5579090118408203125);

    const __m256d one      = _mm256_set1_pd(1.0);
    const __m256d two      = _mm256_set1_pd(2.0);

    const __m256d signbit  = _mm256_castsi256_pd( _mm256_set_epi32(0x80000000, 0x00000000, 0x80000000, 0x00000000,
                                                                   0x80000000, 0x00000000, 0x80000000, 0x00000000) );

    __m256d xabs, x2, x4, t, t2, w, w2;
    __m256d PolyAP0, PolyAP1, PolyAQ0, PolyAQ1;
    __m256d PolyBP0, PolyBP1, PolyBQ0, PolyBQ1;
    __m256d PolyCP0, PolyCP1, PolyCQ0, PolyCQ1;
    __m256d res_erf, res_erfcB, res_erfcC, res_erfc, res;
    __m256d mask, expmx2;

    /* Calculate erf() */
    xabs    = gmx_mm256_abs_pd(x);
    x2      = _mm256_mul_pd(x, x);
    x4      = _mm256_mul_pd(x2, x2);

    PolyAP0 = _mm256_mul_pd(CAP4, x4);
    PolyAP1 = _mm256_mul_pd(CAP3, x4);
    PolyAP0 = _mm256_add_pd(PolyAP0, CAP2);
    PolyAP1 = _mm256_add_pd(PolyAP1, CAP1);
    PolyAP0 = _mm256_mul_pd(PolyAP0, x4);
    PolyAP1 = _mm256_mul_pd(PolyAP1, x2);
    PolyAP0 = _mm256_add_pd(PolyAP0, CAP0);
    PolyAP0 = _mm256_add_pd(PolyAP0, PolyAP1);

    PolyAQ1 = _mm256_mul_pd(CAQ5, x4);
    PolyAQ0 = _mm256_mul_pd(CAQ4, x4);
    PolyAQ1 = _mm256_add_pd(PolyAQ1, CAQ3);
    PolyAQ0 = _mm256_add_pd(PolyAQ0, CAQ2);
    PolyAQ1 = _mm256_mul_pd(PolyAQ1, x4);
    PolyAQ0 = _mm256_mul_pd(PolyAQ0, x4);
    PolyAQ1 = _mm256_add_pd(PolyAQ1, CAQ1);
    PolyAQ0 = _mm256_add_pd(PolyAQ0, one);
    PolyAQ1 = _mm256_mul_pd(PolyAQ1, x2);
    PolyAQ0 = _mm256_add_pd(PolyAQ0, PolyAQ1);

    res_erf = _mm256_mul_pd(PolyAP0, gmx_mm256_inv_pd(PolyAQ0));
    res_erf = _mm256_add_pd(CAoffset, res_erf);
    res_erf = _mm256_mul_pd(x, res_erf);

    /* Calculate erfc() in range [1,4.5] */
    t       = _mm256_sub_pd(xabs, one);
    t2      = _mm256_mul_pd(t, t);

    PolyBP0 = _mm256_mul_pd(CBP6, t2);
    PolyBP1 = _mm256_mul_pd(CBP5, t2);
    PolyBP0 = _mm256_add_pd(PolyBP0, CBP4);
    PolyBP1 = _mm256_add_pd(PolyBP1, CBP3);
    PolyBP0 = _mm256_mul_pd(PolyBP0, t2);
    PolyBP1 = _mm256_mul_pd(PolyBP1, t2);
    PolyBP0 = _mm256_add_pd(PolyBP0, CBP2);
    PolyBP1 = _mm256_add_pd(PolyBP1, CBP1);
    PolyBP0 = _mm256_mul_pd(PolyBP0, t2);
    PolyBP1 = _mm256_mul_pd(PolyBP1, t);
    PolyBP0 = _mm256_add_pd(PolyBP0, CBP0);
    PolyBP0 = _mm256_add_pd(PolyBP0, PolyBP1);

    PolyBQ1 = _mm256_mul_pd(CBQ7, t2);
    PolyBQ0 = _mm256_mul_pd(CBQ6, t2);
    PolyBQ1 = _mm256_add_pd(PolyBQ1, CBQ5);
    PolyBQ0 = _mm256_add_pd(PolyBQ0, CBQ4);
    PolyBQ1 = _mm256_mul_pd(PolyBQ1, t2);
    PolyBQ0 = _mm256_mul_pd(PolyBQ0, t2);
    PolyBQ1 = _mm256_add_pd(PolyBQ1, CBQ3);
    PolyBQ0 = _mm256_add_pd(PolyBQ0, CBQ2);
    PolyBQ1 = _mm256_mul_pd(PolyBQ1, t2);
    PolyBQ0 = _mm256_mul_pd(PolyBQ0, t2);
    PolyBQ1 = _mm256_add_pd(PolyBQ1, CBQ1);
    PolyBQ0 = _mm256_add_pd(PolyBQ0, one);
    PolyBQ1 = _mm256_mul_pd(PolyBQ1, t);
    PolyBQ0 = _mm256_add_pd(PolyBQ0, PolyBQ1);

    res_erfcB = _mm256_mul_pd(PolyBP0, gmx_mm256_inv_pd(PolyBQ0));

    res_erfcB = _mm256_mul_pd(res_erfcB, xabs);

    /* Calculate erfc() in range [4.5,inf] */
    w       = gmx_mm256_inv_pd(xabs);
    w2      = _mm256_mul_pd(w, w);

    PolyCP0 = _mm256_mul_pd(CCP6, w2);
    PolyCP1 = _mm256_mul_pd(CCP5, w2);
    PolyCP0 = _mm256_add_pd(PolyCP0, CCP4);
    PolyCP1 = _mm256_add_pd(PolyCP1, CCP3);
    PolyCP0 = _mm256_mul_pd(PolyCP0, w2);
    PolyCP1 = _mm256_mul_pd(PolyCP1, w2);
    PolyCP0 = _mm256_add_pd(PolyCP0, CCP2);
    PolyCP1 = _mm256_add_pd(PolyCP1, CCP1);
    PolyCP0 = _mm256_mul_pd(PolyCP0, w2);
    PolyCP1 = _mm256_mul_pd(PolyCP1, w);
    PolyCP0 = _mm256_add_pd(PolyCP0, CCP0);
    PolyCP0 = _mm256_add_pd(PolyCP0, PolyCP1);

    PolyCQ0 = _mm256_mul_pd(CCQ6, w2);
    PolyCQ1 = _mm256_mul_pd(CCQ5, w2);
    PolyCQ0 = _mm256_add_pd(PolyCQ0, CCQ4);
    PolyCQ1 = _mm256_add_pd(PolyCQ1, CCQ3);
    PolyCQ0 = _mm256_mul_pd(PolyCQ0, w2);
    PolyCQ1 = _mm256_mul_pd(PolyCQ1, w2);
    PolyCQ0 = _mm256_add_pd(PolyCQ0, CCQ2);
    PolyCQ1 = _mm256_add_pd(PolyCQ1, CCQ1);
    PolyCQ0 = _mm256_mul_pd(PolyCQ0, w2);
    PolyCQ1 = _mm256_mul_pd(PolyCQ1, w);
    PolyCQ0 = _mm256_add_pd(PolyCQ0, one);
    PolyCQ0 = _mm256_add_pd(PolyCQ0, PolyCQ1);

    expmx2  = gmx_mm256_exp_pd( _mm256_or_pd(signbit, x2) );

    res_erfcC = _mm256_mul_pd(PolyCP0, gmx_mm256_inv_pd(PolyCQ0));
    res_erfcC = _mm256_add_pd(res_erfcC, CCoffset);
    res_erfcC = _mm256_mul_pd(res_erfcC, w);

    mask     = _mm256_cmp_pd(xabs, _mm256_set1_pd(4.5), _CMP_GT_OQ);
    res_erfc = _mm256_blendv_pd(res_erfcB, res_erfcC, mask);

    res_erfc = _mm256_mul_pd(res_erfc, expmx2);

    /* erfc(x<0) = 2-erfc(|x|) */
    mask     = _mm256_cmp_pd(x, _mm256_setzero_pd(), _CMP_LT_OQ);
    res_erfc = _mm256_blendv_pd(res_erfc, _mm256_sub_pd(two, res_erfc), mask);

    /* Select erf() or erfc() */
    mask = _mm256_cmp_pd(xabs, one, _CMP_LT_OQ);
    res  = _mm256_blendv_pd(res_erfc, _mm256_sub_pd(one, res_erf), mask);

    return res;
}
1336 gmx_mm_erfc_pd(__m128d x
)
1338 /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
1339 const __m128d CAP4
= _mm_set1_pd(-0.431780540597889301512e-4);
1340 const __m128d CAP3
= _mm_set1_pd(-0.00578562306260059236059);
1341 const __m128d CAP2
= _mm_set1_pd(-0.028593586920219752446);
1342 const __m128d CAP1
= _mm_set1_pd(-0.315924962948621698209);
1343 const __m128d CAP0
= _mm_set1_pd(0.14952975608477029151);
1345 const __m128d CAQ5
= _mm_set1_pd(-0.374089300177174709737e-5);
1346 const __m128d CAQ4
= _mm_set1_pd(0.00015126584532155383535);
1347 const __m128d CAQ3
= _mm_set1_pd(0.00536692680669480725423);
1348 const __m128d CAQ2
= _mm_set1_pd(0.0668686825594046122636);
1349 const __m128d CAQ1
= _mm_set1_pd(0.402604990869284362773);
1351 const __m128d CAoffset
= _mm_set1_pd(0.9788494110107421875);
1353 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
1354 const __m128d CBP6
= _mm_set1_pd(2.49650423685462752497647637088e-10);
1355 const __m128d CBP5
= _mm_set1_pd(0.00119770193298159629350136085658);
1356 const __m128d CBP4
= _mm_set1_pd(0.0164944422378370965881008942733);
1357 const __m128d CBP3
= _mm_set1_pd(0.0984581468691775932063932439252);
1358 const __m128d CBP2
= _mm_set1_pd(0.317364595806937763843589437418);
1359 const __m128d CBP1
= _mm_set1_pd(0.554167062641455850932670067075);
1360 const __m128d CBP0
= _mm_set1_pd(0.427583576155807163756925301060);
1361 const __m128d CBQ7
= _mm_set1_pd(0.00212288829699830145976198384930);
1362 const __m128d CBQ6
= _mm_set1_pd(0.0334810979522685300554606393425);
1363 const __m128d CBQ5
= _mm_set1_pd(0.2361713785181450957579508850717);
1364 const __m128d CBQ4
= _mm_set1_pd(0.955364736493055670530981883072);
1365 const __m128d CBQ3
= _mm_set1_pd(2.36815675631420037315349279199);
1366 const __m128d CBQ2
= _mm_set1_pd(3.55261649184083035537184223542);
1367 const __m128d CBQ1
= _mm_set1_pd(2.93501136050160872574376997993);
1370 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
1371 const __m128d CCP6
= _mm_set1_pd(-2.8175401114513378771);
1372 const __m128d CCP5
= _mm_set1_pd(-3.22729451764143718517);
1373 const __m128d CCP4
= _mm_set1_pd(-2.5518551727311523996);
1374 const __m128d CCP3
= _mm_set1_pd(-0.687717681153649930619);
1375 const __m128d CCP2
= _mm_set1_pd(-0.212652252872804219852);
1376 const __m128d CCP1
= _mm_set1_pd(0.0175389834052493308818);
1377 const __m128d CCP0
= _mm_set1_pd(0.00628057170626964891937);
1379 const __m128d CCQ6
= _mm_set1_pd(5.48409182238641741584);
1380 const __m128d CCQ5
= _mm_set1_pd(13.5064170191802889145);
1381 const __m128d CCQ4
= _mm_set1_pd(22.9367376522880577224);
1382 const __m128d CCQ3
= _mm_set1_pd(15.930646027911794143);
1383 const __m128d CCQ2
= _mm_set1_pd(11.0567237927800161565);
1384 const __m128d CCQ1
= _mm_set1_pd(2.79257750980575282228);
1386 const __m128d CCoffset
= _mm_set1_pd(0.5579090118408203125);
1388 const __m128d one
= _mm_set1_pd(1.0);
1389 const __m128d two
= _mm_set1_pd(2.0);
1391 const __m128d signbit
= gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000) );
1393 __m128d xabs
,x2
,x4
,t
,t2
,w
,w2
;
1394 __m128d PolyAP0
,PolyAP1
,PolyAQ0
,PolyAQ1
;
1395 __m128d PolyBP0
,PolyBP1
,PolyBQ0
,PolyBQ1
;
1396 __m128d PolyCP0
,PolyCP1
,PolyCQ0
,PolyCQ1
;
1397 __m128d res_erf
,res_erfcB
,res_erfcC
,res_erfc
,res
;
1398 __m128d mask
,expmx2
;
1400 /* Calculate erf() */
1401 xabs
= gmx_mm_abs_pd(x
);
1402 x2
= _mm_mul_pd(x
,x
);
1403 x4
= _mm_mul_pd(x2
,x2
);
1405 PolyAP0
= _mm_mul_pd(CAP4
,x4
);
1406 PolyAP1
= _mm_mul_pd(CAP3
,x4
);
1407 PolyAP0
= _mm_add_pd(PolyAP0
,CAP2
);
1408 PolyAP1
= _mm_add_pd(PolyAP1
,CAP1
);
1409 PolyAP0
= _mm_mul_pd(PolyAP0
,x4
);
1410 PolyAP1
= _mm_mul_pd(PolyAP1
,x2
);
1411 PolyAP0
= _mm_add_pd(PolyAP0
,CAP0
);
1412 PolyAP0
= _mm_add_pd(PolyAP0
,PolyAP1
);
1414 PolyAQ1
= _mm_mul_pd(CAQ5
,x4
);
1415 PolyAQ0
= _mm_mul_pd(CAQ4
,x4
);
1416 PolyAQ1
= _mm_add_pd(PolyAQ1
,CAQ3
);
1417 PolyAQ0
= _mm_add_pd(PolyAQ0
,CAQ2
);
1418 PolyAQ1
= _mm_mul_pd(PolyAQ1
,x4
);
1419 PolyAQ0
= _mm_mul_pd(PolyAQ0
,x4
);
1420 PolyAQ1
= _mm_add_pd(PolyAQ1
,CAQ1
);
1421 PolyAQ0
= _mm_add_pd(PolyAQ0
,one
);
1422 PolyAQ1
= _mm_mul_pd(PolyAQ1
,x2
);
1423 PolyAQ0
= _mm_add_pd(PolyAQ0
,PolyAQ1
);
1425 res_erf
= _mm_mul_pd(PolyAP0
,gmx_mm_inv_pd(PolyAQ0
));
1426 res_erf
= _mm_add_pd(CAoffset
,res_erf
);
1427 res_erf
= _mm_mul_pd(x
,res_erf
);
1429 /* Calculate erfc() in range [1,4.5] */
1430 t
= _mm_sub_pd(xabs
,one
);
1431 t2
= _mm_mul_pd(t
,t
);
1433 PolyBP0
= _mm_mul_pd(CBP6
,t2
);
1434 PolyBP1
= _mm_mul_pd(CBP5
,t2
);
1435 PolyBP0
= _mm_add_pd(PolyBP0
,CBP4
);
1436 PolyBP1
= _mm_add_pd(PolyBP1
,CBP3
);
1437 PolyBP0
= _mm_mul_pd(PolyBP0
,t2
);
1438 PolyBP1
= _mm_mul_pd(PolyBP1
,t2
);
1439 PolyBP0
= _mm_add_pd(PolyBP0
,CBP2
);
1440 PolyBP1
= _mm_add_pd(PolyBP1
,CBP1
);
1441 PolyBP0
= _mm_mul_pd(PolyBP0
,t2
);
1442 PolyBP1
= _mm_mul_pd(PolyBP1
,t
);
1443 PolyBP0
= _mm_add_pd(PolyBP0
,CBP0
);
1444 PolyBP0
= _mm_add_pd(PolyBP0
,PolyBP1
);
1446 PolyBQ1
= _mm_mul_pd(CBQ7
,t2
);
1447 PolyBQ0
= _mm_mul_pd(CBQ6
,t2
);
1448 PolyBQ1
= _mm_add_pd(PolyBQ1
,CBQ5
);
1449 PolyBQ0
= _mm_add_pd(PolyBQ0
,CBQ4
);
1450 PolyBQ1
= _mm_mul_pd(PolyBQ1
,t2
);
1451 PolyBQ0
= _mm_mul_pd(PolyBQ0
,t2
);
1452 PolyBQ1
= _mm_add_pd(PolyBQ1
,CBQ3
);
1453 PolyBQ0
= _mm_add_pd(PolyBQ0
,CBQ2
);
1454 PolyBQ1
= _mm_mul_pd(PolyBQ1
,t2
);
1455 PolyBQ0
= _mm_mul_pd(PolyBQ0
,t2
);
1456 PolyBQ1
= _mm_add_pd(PolyBQ1
,CBQ1
);
1457 PolyBQ0
= _mm_add_pd(PolyBQ0
,one
);
1458 PolyBQ1
= _mm_mul_pd(PolyBQ1
,t
);
1459 PolyBQ0
= _mm_add_pd(PolyBQ0
,PolyBQ1
);
1461 res_erfcB
= _mm_mul_pd(PolyBP0
,gmx_mm_inv_pd(PolyBQ0
));
1463 res_erfcB
= _mm_mul_pd(res_erfcB
,xabs
);
    /* Calculate erfc() in range [4.5,inf] */
    w        = gmx_mm_inv_pd(xabs);
    w2       = _mm_mul_pd(w,w);

    PolyCP0  = _mm_mul_pd(CCP6,w2);
    PolyCP1  = _mm_mul_pd(CCP5,w2);
    PolyCP0  = _mm_add_pd(PolyCP0,CCP4);
    PolyCP1  = _mm_add_pd(PolyCP1,CCP3);
    PolyCP0  = _mm_mul_pd(PolyCP0,w2);
    PolyCP1  = _mm_mul_pd(PolyCP1,w2);
    PolyCP0  = _mm_add_pd(PolyCP0,CCP2);
    PolyCP1  = _mm_add_pd(PolyCP1,CCP1);
    PolyCP0  = _mm_mul_pd(PolyCP0,w2);
    PolyCP1  = _mm_mul_pd(PolyCP1,w);
    PolyCP0  = _mm_add_pd(PolyCP0,CCP0);
    PolyCP0  = _mm_add_pd(PolyCP0,PolyCP1);

    PolyCQ0  = _mm_mul_pd(CCQ6,w2);
    PolyCQ1  = _mm_mul_pd(CCQ5,w2);
    PolyCQ0  = _mm_add_pd(PolyCQ0,CCQ4);
    PolyCQ1  = _mm_add_pd(PolyCQ1,CCQ3);
    PolyCQ0  = _mm_mul_pd(PolyCQ0,w2);
    PolyCQ1  = _mm_mul_pd(PolyCQ1,w2);
    PolyCQ0  = _mm_add_pd(PolyCQ0,CCQ2);
    PolyCQ1  = _mm_add_pd(PolyCQ1,CCQ1);
    PolyCQ0  = _mm_mul_pd(PolyCQ0,w2);
    PolyCQ1  = _mm_mul_pd(PolyCQ1,w);
    PolyCQ0  = _mm_add_pd(PolyCQ0,one);
    PolyCQ0  = _mm_add_pd(PolyCQ0,PolyCQ1);

    expmx2   = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );

    res_erfcC = _mm_mul_pd(PolyCP0,gmx_mm_inv_pd(PolyCQ0));
    res_erfcC = _mm_add_pd(res_erfcC,CCoffset);
    res_erfcC = _mm_mul_pd(res_erfcC,w);

    mask     = _mm_cmpgt_pd(xabs,_mm_set1_pd(4.5));
    res_erfc = _mm_blendv_pd(res_erfcB,res_erfcC,mask);

    res_erfc = _mm_mul_pd(res_erfc,expmx2);
    /* erfc(x<0) = 2-erfc(|x|) */
    mask     = _mm_cmplt_pd(x,_mm_setzero_pd());
    res_erfc = _mm_blendv_pd(res_erfc,_mm_sub_pd(two,res_erfc),mask);

    /* Select erf() or erfc() */
    mask     = _mm_cmplt_pd(xabs,one);
    res      = _mm_blendv_pd(res_erfc,_mm_sub_pd(one,res_erf),mask);

    return res;
}
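
/* A minimal self-test sketch for the erfc routine above (illustrative, and
 * assuming the enclosing routine is gmx_mm_erfc_pd() per this file's naming
 * convention; the GMX_MATH_SELFTEST guard is hypothetical). It compares one
 * lane from the erf branch (|x|<1) and one from the [1,4.5] branch against
 * libm erfc().
 */
#ifdef GMX_MATH_SELFTEST
#include <math.h>
#include <stdio.h>
static void
gmx_mm_erfc_pd_selftest(void)
{
    double in[2] = { 0.5 , 3.7 };
    double out[2];

    _mm_storeu_pd(out, gmx_mm_erfc_pd(_mm_loadu_pd(in)));

    printf("erfc(%g): simd=%.17g  libm=%.17g\n",in[0],out[0],erfc(in[0]));
    printf("erfc(%g): simd=%.17g  libm=%.17g\n",in[1],out[1],erfc(in[1]));
}
#endif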
/* Calculate the force correction due to PME analytically.
 *
 * This routine is meant to enable analytical evaluation of the
 * direct-space PME electrostatic force to avoid tables.
 *
 * The direct-space potential should be Erfc(beta*r)/r, but there
 * are some problems evaluating that:
 *
 * First, the error function is difficult (read: expensive) to
 * approximate accurately for intermediate to large arguments, and
 * this happens already in ranges of beta*r that occur in simulations.
 * Second, we now try to avoid calculating potentials in Gromacs but
 * use forces directly.
 *
 * We can simplify things slightly by noting that the PME part is really
 * a correction to the normal Coulomb force, since Erfc(z)=1-Erf(z), i.e.
 *
 *       V = 1/r - Erf(beta*r)/r
 *
 * The first term we already have from the inverse square root, so
 * we can leave it out of this routine.
 *
 * For PME tolerances of 1e-3 to 1e-8 and cutoffs of 0.5nm to 1.8nm,
 * the argument beta*r will be in the range 0.15 to ~4. Use your
 * favorite plotting program to see how well-behaved Erf(z)/z is
 * in this range!
 *
 * We approximate f(z)=erf(z)/z with a rational minimax polynomial.
 * However, it turns out it is more efficient to approximate f(z)/z and
 * then only use even powers. This is another minor optimization, since
 * we actually WANT f(z)/z, because it is going to be multiplied by
 * the vector between the two atoms to get the vectorial force. The
 * fastest flops are the ones we can avoid calculating!
 *
 * So, here's how it should be used:
 *
 * 1. Calculate r^2.
 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
 * 3. Evaluate this routine with z^2 as the argument.
 * 4. The return value is the expression:
 *
 *        2*exp(-z^2)     erf(z)
 *       ------------- - --------
 *       sqrt(Pi)*z^2       z^3
 *
 * 5. Multiply the entire expression by beta^3. This will get you
 *
 *       beta^3*2*exp(-z^2)     beta^3*erf(z)
 *       ------------------- - ---------------
 *          sqrt(Pi)*z^2             z^3
 *
 *    or, switching back to r (z=r*beta):
 *
 *       2*beta*exp(-r^2*beta^2)     erf(r*beta)
 *       ------------------------ - -------------
 *            sqrt(Pi)*r^2               r^3
 *
 *    With a bit of math exercise you should be able to confirm that
 *    this is exactly D[Erf[beta*r]/r,r] divided by r another time.
 *
 * 6. Add the result to 1/r^3, multiply by the product of the charges,
 *    and you have your force (divided by r). A final multiplication
 *    with the vector connecting the two particles and you have your
 *    vectorial force to add to the particles.
 */
static __m256d
gmx_mm256_pmecorrF_pd(__m256d z2)
{
    const __m256d  FN10     = _mm256_set1_pd(-8.0072854618360083154e-14);
    const __m256d  FN9      = _mm256_set1_pd(1.1859116242260148027e-11);
    const __m256d  FN8      = _mm256_set1_pd(-8.1490406329798423616e-10);
    const __m256d  FN7      = _mm256_set1_pd(3.4404793543907847655e-8);
    const __m256d  FN6      = _mm256_set1_pd(-9.9471420832602741006e-7);
    const __m256d  FN5      = _mm256_set1_pd(0.000020740315999115847456);
    const __m256d  FN4      = _mm256_set1_pd(-0.00031991745139313364005);
    const __m256d  FN3      = _mm256_set1_pd(0.0035074449373659008203);
    const __m256d  FN2      = _mm256_set1_pd(-0.031750380176100813405);
    const __m256d  FN1      = _mm256_set1_pd(0.13884101728898463426);
    const __m256d  FN0      = _mm256_set1_pd(-0.75225277815249618847);

    const __m256d  FD5      = _mm256_set1_pd(0.000016009278224355026701);
    const __m256d  FD4      = _mm256_set1_pd(0.00051055686934806966046);
    const __m256d  FD3      = _mm256_set1_pd(0.0081803507497974289008);
    const __m256d  FD2      = _mm256_set1_pd(0.077181146026670287235);
    const __m256d  FD1      = _mm256_set1_pd(0.41543303143712535988);
    const __m256d  FD0      = _mm256_set1_pd(1.0);

    __m256d z4;
    __m256d polyFN0,polyFN1,polyFD0,polyFD1;

    z4        = _mm256_mul_pd(z2,z2);

    polyFD1   = _mm256_mul_pd(FD5,z4);
    polyFD0   = _mm256_mul_pd(FD4,z4);
    polyFD1   = _mm256_add_pd(polyFD1,FD3);
    polyFD0   = _mm256_add_pd(polyFD0,FD2);
    polyFD1   = _mm256_mul_pd(polyFD1,z4);
    polyFD0   = _mm256_mul_pd(polyFD0,z4);
    polyFD1   = _mm256_add_pd(polyFD1,FD1);
    polyFD0   = _mm256_add_pd(polyFD0,FD0);
    polyFD1   = _mm256_mul_pd(polyFD1,z2);
    polyFD0   = _mm256_add_pd(polyFD0,polyFD1);

    polyFD0   = gmx_mm256_inv_pd(polyFD0);

    polyFN0   = _mm256_mul_pd(FN10,z4);
    polyFN1   = _mm256_mul_pd(FN9,z4);
    polyFN0   = _mm256_add_pd(polyFN0,FN8);
    polyFN1   = _mm256_add_pd(polyFN1,FN7);
    polyFN0   = _mm256_mul_pd(polyFN0,z4);
    polyFN1   = _mm256_mul_pd(polyFN1,z4);
    polyFN0   = _mm256_add_pd(polyFN0,FN6);
    polyFN1   = _mm256_add_pd(polyFN1,FN5);
    polyFN0   = _mm256_mul_pd(polyFN0,z4);
    polyFN1   = _mm256_mul_pd(polyFN1,z4);
    polyFN0   = _mm256_add_pd(polyFN0,FN4);
    polyFN1   = _mm256_add_pd(polyFN1,FN3);
    polyFN0   = _mm256_mul_pd(polyFN0,z4);
    polyFN1   = _mm256_mul_pd(polyFN1,z4);
    polyFN0   = _mm256_add_pd(polyFN0,FN2);
    polyFN1   = _mm256_add_pd(polyFN1,FN1);
    polyFN0   = _mm256_mul_pd(polyFN0,z4);
    polyFN1   = _mm256_mul_pd(polyFN1,z2);
    polyFN0   = _mm256_add_pd(polyFN0,FN0);
    polyFN0   = _mm256_add_pd(polyFN0,polyFN1);

    return _mm256_mul_pd(polyFN0,polyFD0);
}
static __m128d
gmx_mm_pmecorrF_pd(__m128d z2)
{
    const __m128d  FN10     = _mm_set1_pd(-8.0072854618360083154e-14);
    const __m128d  FN9      = _mm_set1_pd(1.1859116242260148027e-11);
    const __m128d  FN8      = _mm_set1_pd(-8.1490406329798423616e-10);
    const __m128d  FN7      = _mm_set1_pd(3.4404793543907847655e-8);
    const __m128d  FN6      = _mm_set1_pd(-9.9471420832602741006e-7);
    const __m128d  FN5      = _mm_set1_pd(0.000020740315999115847456);
    const __m128d  FN4      = _mm_set1_pd(-0.00031991745139313364005);
    const __m128d  FN3      = _mm_set1_pd(0.0035074449373659008203);
    const __m128d  FN2      = _mm_set1_pd(-0.031750380176100813405);
    const __m128d  FN1      = _mm_set1_pd(0.13884101728898463426);
    const __m128d  FN0      = _mm_set1_pd(-0.75225277815249618847);

    const __m128d  FD5      = _mm_set1_pd(0.000016009278224355026701);
    const __m128d  FD4      = _mm_set1_pd(0.00051055686934806966046);
    const __m128d  FD3      = _mm_set1_pd(0.0081803507497974289008);
    const __m128d  FD2      = _mm_set1_pd(0.077181146026670287235);
    const __m128d  FD1      = _mm_set1_pd(0.41543303143712535988);
    const __m128d  FD0      = _mm_set1_pd(1.0);

    __m128d z4;
    __m128d polyFN0,polyFN1,polyFD0,polyFD1;

    z4        = _mm_mul_pd(z2,z2);

    polyFD1   = _mm_mul_pd(FD5,z4);
    polyFD0   = _mm_mul_pd(FD4,z4);
    polyFD1   = _mm_add_pd(polyFD1,FD3);
    polyFD0   = _mm_add_pd(polyFD0,FD2);
    polyFD1   = _mm_mul_pd(polyFD1,z4);
    polyFD0   = _mm_mul_pd(polyFD0,z4);
    polyFD1   = _mm_add_pd(polyFD1,FD1);
    polyFD0   = _mm_add_pd(polyFD0,FD0);
    polyFD1   = _mm_mul_pd(polyFD1,z2);
    polyFD0   = _mm_add_pd(polyFD0,polyFD1);

    polyFD0   = gmx_mm_inv_pd(polyFD0);

    polyFN0   = _mm_mul_pd(FN10,z4);
    polyFN1   = _mm_mul_pd(FN9,z4);
    polyFN0   = _mm_add_pd(polyFN0,FN8);
    polyFN1   = _mm_add_pd(polyFN1,FN7);
    polyFN0   = _mm_mul_pd(polyFN0,z4);
    polyFN1   = _mm_mul_pd(polyFN1,z4);
    polyFN0   = _mm_add_pd(polyFN0,FN6);
    polyFN1   = _mm_add_pd(polyFN1,FN5);
    polyFN0   = _mm_mul_pd(polyFN0,z4);
    polyFN1   = _mm_mul_pd(polyFN1,z4);
    polyFN0   = _mm_add_pd(polyFN0,FN4);
    polyFN1   = _mm_add_pd(polyFN1,FN3);
    polyFN0   = _mm_mul_pd(polyFN0,z4);
    polyFN1   = _mm_mul_pd(polyFN1,z4);
    polyFN0   = _mm_add_pd(polyFN0,FN2);
    polyFN1   = _mm_add_pd(polyFN1,FN1);
    polyFN0   = _mm_mul_pd(polyFN0,z4);
    polyFN1   = _mm_mul_pd(polyFN1,z2);
    polyFN0   = _mm_add_pd(polyFN0,FN0);
    polyFN0   = _mm_add_pd(polyFN0,polyFN1);

    return _mm_mul_pd(polyFN0,polyFD0);
}
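
/* A minimal sketch of steps 1-6 above (illustrative: the helper name and the
 * scalar beta/qq parameters are assumptions, not part of this header's API);
 * gmx_mm256_invsqrt_pd() is the routine defined earlier in this file.
 */
static gmx_inline __m256d
gmx_mm256_pme_fscal_sketch(__m256d r2, double beta, double qq)
{
    __m256d rinv  = gmx_mm256_invsqrt_pd(r2);                     /* 1/r        */
    __m256d rinv3 = _mm256_mul_pd(_mm256_mul_pd(rinv,rinv),rinv); /* 1/r^3      */
    __m256d z2    = _mm256_mul_pd(r2,_mm256_set1_pd(beta*beta));  /* steps 1-2  */
    __m256d corr  = gmx_mm256_pmecorrF_pd(z2);                    /* steps 3-4  */

    corr = _mm256_mul_pd(corr,_mm256_set1_pd(beta*beta*beta));    /* step 5     */

    /* Step 6: fscal = qq*(1/r^3 + correction); multiplying by the vector
     * connecting the two particles then gives the vectorial force. */
    return _mm256_mul_pd(_mm256_set1_pd(qq),_mm256_add_pd(rinv3,corr));
}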
/* Calculate the potential correction due to PME analytically.
 *
 * See gmx_mm256_pmecorrF_pd() for details about the approximation.
 *
 * This routine calculates Erf(z)/z, although you should provide z^2
 * as the input argument.
 *
 * Here's how it should be used:
 *
 * 1. Calculate r^2.
 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
 * 3. Evaluate this routine with z^2 as the argument.
 * 4. The return value is the expression:
 *
 *        erf(z)
 *       --------
 *           z
 *
 * 5. Multiply the entire expression by beta and switch back to r (z=r*beta):
 *
 *       erf(r*beta)
 *       -----------
 *            r
 *
 * 6. Subtract the result from 1/r (since erfc(x)=1-erf(x)), multiply by the
 *    product of the charges, and you have your potential.
 */
static __m256d
gmx_mm256_pmecorrV_pd(__m256d z2)
{
    const __m256d  VN9      = _mm256_set1_pd(-9.3723776169321855475e-13);
    const __m256d  VN8      = _mm256_set1_pd(1.2280156762674215741e-10);
    const __m256d  VN7      = _mm256_set1_pd(-7.3562157912251309487e-9);
    const __m256d  VN6      = _mm256_set1_pd(2.6215886208032517509e-7);
    const __m256d  VN5      = _mm256_set1_pd(-4.9532491651265819499e-6);
    const __m256d  VN4      = _mm256_set1_pd(0.00025907400778966060389);
    const __m256d  VN3      = _mm256_set1_pd(0.0010585044856156469792);
    const __m256d  VN2      = _mm256_set1_pd(0.045247661136833092885);
    const __m256d  VN1      = _mm256_set1_pd(0.11643931522926034421);
    const __m256d  VN0      = _mm256_set1_pd(1.1283791671726767970);

    const __m256d  VD5      = _mm256_set1_pd(0.000021784709867336150342);
    const __m256d  VD4      = _mm256_set1_pd(0.00064293662010911388448);
    const __m256d  VD3      = _mm256_set1_pd(0.0096311444822588683504);
    const __m256d  VD2      = _mm256_set1_pd(0.085608012351550627051);
    const __m256d  VD1      = _mm256_set1_pd(0.43652499166614811084);
    const __m256d  VD0      = _mm256_set1_pd(1.0);

    __m256d z4;
    __m256d polyVN0,polyVN1,polyVD0,polyVD1;

    z4        = _mm256_mul_pd(z2,z2);

    polyVD1   = _mm256_mul_pd(VD5,z4);
    polyVD0   = _mm256_mul_pd(VD4,z4);
    polyVD1   = _mm256_add_pd(polyVD1,VD3);
    polyVD0   = _mm256_add_pd(polyVD0,VD2);
    polyVD1   = _mm256_mul_pd(polyVD1,z4);
    polyVD0   = _mm256_mul_pd(polyVD0,z4);
    polyVD1   = _mm256_add_pd(polyVD1,VD1);
    polyVD0   = _mm256_add_pd(polyVD0,VD0);
    polyVD1   = _mm256_mul_pd(polyVD1,z2);
    polyVD0   = _mm256_add_pd(polyVD0,polyVD1);

    polyVD0   = gmx_mm256_inv_pd(polyVD0);

    polyVN1   = _mm256_mul_pd(VN9,z4);
    polyVN0   = _mm256_mul_pd(VN8,z4);
    polyVN1   = _mm256_add_pd(polyVN1,VN7);
    polyVN0   = _mm256_add_pd(polyVN0,VN6);
    polyVN1   = _mm256_mul_pd(polyVN1,z4);
    polyVN0   = _mm256_mul_pd(polyVN0,z4);
    polyVN1   = _mm256_add_pd(polyVN1,VN5);
    polyVN0   = _mm256_add_pd(polyVN0,VN4);
    polyVN1   = _mm256_mul_pd(polyVN1,z4);
    polyVN0   = _mm256_mul_pd(polyVN0,z4);
    polyVN1   = _mm256_add_pd(polyVN1,VN3);
    polyVN0   = _mm256_add_pd(polyVN0,VN2);
    polyVN1   = _mm256_mul_pd(polyVN1,z4);
    polyVN0   = _mm256_mul_pd(polyVN0,z4);
    polyVN1   = _mm256_add_pd(polyVN1,VN1);
    polyVN0   = _mm256_add_pd(polyVN0,VN0);
    polyVN1   = _mm256_mul_pd(polyVN1,z2);
    polyVN0   = _mm256_add_pd(polyVN0,polyVN1);

    return _mm256_mul_pd(polyVN0,polyVD0);
}
static __m128d
gmx_mm_pmecorrV_pd(__m128d z2)
{
    const __m128d  VN9      = _mm_set1_pd(-9.3723776169321855475e-13);
    const __m128d  VN8      = _mm_set1_pd(1.2280156762674215741e-10);
    const __m128d  VN7      = _mm_set1_pd(-7.3562157912251309487e-9);
    const __m128d  VN6      = _mm_set1_pd(2.6215886208032517509e-7);
    const __m128d  VN5      = _mm_set1_pd(-4.9532491651265819499e-6);
    const __m128d  VN4      = _mm_set1_pd(0.00025907400778966060389);
    const __m128d  VN3      = _mm_set1_pd(0.0010585044856156469792);
    const __m128d  VN2      = _mm_set1_pd(0.045247661136833092885);
    const __m128d  VN1      = _mm_set1_pd(0.11643931522926034421);
    const __m128d  VN0      = _mm_set1_pd(1.1283791671726767970);

    const __m128d  VD5      = _mm_set1_pd(0.000021784709867336150342);
    const __m128d  VD4      = _mm_set1_pd(0.00064293662010911388448);
    const __m128d  VD3      = _mm_set1_pd(0.0096311444822588683504);
    const __m128d  VD2      = _mm_set1_pd(0.085608012351550627051);
    const __m128d  VD1      = _mm_set1_pd(0.43652499166614811084);
    const __m128d  VD0      = _mm_set1_pd(1.0);

    __m128d z4;
    __m128d polyVN0,polyVN1,polyVD0,polyVD1;

    z4        = _mm_mul_pd(z2,z2);

    polyVD1   = _mm_mul_pd(VD5,z4);
    polyVD0   = _mm_mul_pd(VD4,z4);
    polyVD1   = _mm_add_pd(polyVD1,VD3);
    polyVD0   = _mm_add_pd(polyVD0,VD2);
    polyVD1   = _mm_mul_pd(polyVD1,z4);
    polyVD0   = _mm_mul_pd(polyVD0,z4);
    polyVD1   = _mm_add_pd(polyVD1,VD1);
    polyVD0   = _mm_add_pd(polyVD0,VD0);
    polyVD1   = _mm_mul_pd(polyVD1,z2);
    polyVD0   = _mm_add_pd(polyVD0,polyVD1);

    polyVD0   = gmx_mm_inv_pd(polyVD0);

    polyVN1   = _mm_mul_pd(VN9,z4);
    polyVN0   = _mm_mul_pd(VN8,z4);
    polyVN1   = _mm_add_pd(polyVN1,VN7);
    polyVN0   = _mm_add_pd(polyVN0,VN6);
    polyVN1   = _mm_mul_pd(polyVN1,z4);
    polyVN0   = _mm_mul_pd(polyVN0,z4);
    polyVN1   = _mm_add_pd(polyVN1,VN5);
    polyVN0   = _mm_add_pd(polyVN0,VN4);
    polyVN1   = _mm_mul_pd(polyVN1,z4);
    polyVN0   = _mm_mul_pd(polyVN0,z4);
    polyVN1   = _mm_add_pd(polyVN1,VN3);
    polyVN0   = _mm_add_pd(polyVN0,VN2);
    polyVN1   = _mm_mul_pd(polyVN1,z4);
    polyVN0   = _mm_mul_pd(polyVN0,z4);
    polyVN1   = _mm_add_pd(polyVN1,VN1);
    polyVN0   = _mm_add_pd(polyVN0,VN0);
    polyVN1   = _mm_mul_pd(polyVN1,z2);
    polyVN0   = _mm_add_pd(polyVN0,polyVN1);

    return _mm_mul_pd(polyVN0,polyVD0);
}
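
/* A minimal sketch of the recipe above (illustrative: the helper name and the
 * scalar beta/qq parameters are assumptions). The result is qq*erfc(beta*r)/r.
 */
static gmx_inline __m256d
gmx_mm256_pme_vcoul_sketch(__m256d r2, double beta, double qq)
{
    __m256d rinv  = gmx_mm256_invsqrt_pd(r2);                     /* 1/r        */
    __m256d z2    = _mm256_mul_pd(r2,_mm256_set1_pd(beta*beta));  /* steps 1-2  */
    __m256d vcorr = _mm256_mul_pd(gmx_mm256_pmecorrV_pd(z2),      /* steps 3-4  */
                                  _mm256_set1_pd(beta));          /* step 5     */

    /* Step 6: V = qq*(1/r - erf(beta*r)/r) */
    return _mm256_mul_pd(_mm256_set1_pd(qq),_mm256_sub_pd(rinv,vcorr));
}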
static void
gmx_mm256_sincos_pd(__m256d x,
                    __m256d *sinval,
                    __m256d *cosval)
{
#ifdef _MSC_VER
    __declspec(align(16))
    const double sintable[34] =
    {
        1.00000000000000000e+00 , 0.00000000000000000e+00 ,
        9.95184726672196929e-01 , 9.80171403295606036e-02 ,
        9.80785280403230431e-01 , 1.95090322016128248e-01 ,
        9.56940335732208824e-01 , 2.90284677254462331e-01 ,
        9.23879532511286738e-01 , 3.82683432365089782e-01 ,
        8.81921264348355050e-01 , 4.71396736825997642e-01 ,
        8.31469612302545236e-01 , 5.55570233019602178e-01 ,
        7.73010453362736993e-01 , 6.34393284163645488e-01 ,
        7.07106781186547573e-01 , 7.07106781186547462e-01 ,
        6.34393284163645599e-01 , 7.73010453362736882e-01 ,
        5.55570233019602289e-01 , 8.31469612302545125e-01 ,
        4.71396736825997809e-01 , 8.81921264348354939e-01 ,
        3.82683432365089837e-01 , 9.23879532511286738e-01 ,
        2.90284677254462276e-01 , 9.56940335732208935e-01 ,
        1.95090322016128304e-01 , 9.80785280403230431e-01 ,
        9.80171403295607702e-02 , 9.95184726672196818e-01 ,
        0.0 , 1.00000000000000000e+00
    };
#else
    const __m128d sintable[17] =
    {
        _mm_set_pd(  0.0 , 1.0 ),
        _mm_set_pd( sin(  1.0 * (M_PI/2.0) / 16.0) , cos(  1.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  2.0 * (M_PI/2.0) / 16.0) , cos(  2.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  3.0 * (M_PI/2.0) / 16.0) , cos(  3.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  4.0 * (M_PI/2.0) / 16.0) , cos(  4.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  5.0 * (M_PI/2.0) / 16.0) , cos(  5.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  6.0 * (M_PI/2.0) / 16.0) , cos(  6.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  7.0 * (M_PI/2.0) / 16.0) , cos(  7.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  8.0 * (M_PI/2.0) / 16.0) , cos(  8.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  9.0 * (M_PI/2.0) / 16.0) , cos(  9.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 10.0 * (M_PI/2.0) / 16.0) , cos( 10.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 11.0 * (M_PI/2.0) / 16.0) , cos( 11.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 12.0 * (M_PI/2.0) / 16.0) , cos( 12.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 13.0 * (M_PI/2.0) / 16.0) , cos( 13.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 14.0 * (M_PI/2.0) / 16.0) , cos( 14.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 15.0 * (M_PI/2.0) / 16.0) , cos( 15.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd(  1.0 , 0.0 )
    };
#endif

    const __m256d signmask       = _mm256_castsi256_pd( _mm256_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,
                                                                         0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m128i signbit_epi32  = _mm_set1_epi32(0x80000000);

    const __m256d tabscale       = _mm256_set1_pd(32.0/M_PI);
    const __m256d invtabscale0   = _mm256_set1_pd(9.81747508049011230469e-02);
    const __m256d invtabscale1   = _mm256_set1_pd(1.96197799156550576057e-08);
    const __m128i ione           = _mm_set1_epi32(1);
    const __m128i i32            = _mm_set1_epi32(32);
    const __m128i i16            = _mm_set1_epi32(16);
    const __m128i tabmask        = _mm_set1_epi32(0x3F);
    const __m256d sinP7          = _mm256_set1_pd(-1.0/5040.0);
    const __m256d sinP5          = _mm256_set1_pd(1.0/120.0);
    const __m256d sinP3          = _mm256_set1_pd(-1.0/6.0);
    const __m256d sinP1          = _mm256_set1_pd(1.0);

    const __m256d cosP6          = _mm256_set1_pd(-1.0/720.0);
    const __m256d cosP4          = _mm256_set1_pd(1.0/24.0);
    const __m256d cosP2          = _mm256_set1_pd(-1.0/2.0);
    const __m256d cosP0          = _mm256_set1_pd(1.0);
    __m128i tabidx,corridx;
    __m256d xabs,z,z2,polySin,polyCos;
    __m256d scalex,xpoint;
    __m256d t1,t2,t3,t4;
    __m256d sinpoint,cospoint;
    __m256d xsign,ssign,csign;
    __m128i imask,sswapsign,cswapsign;

    xsign    = _mm256_andnot_pd(signmask,x);
    xabs     = _mm256_and_pd(x,signmask);

    scalex   = _mm256_mul_pd(tabscale,xabs);
    tabidx   = _mm256_cvtpd_epi32(scalex);

    xpoint   = _mm256_round_pd(scalex,_MM_FROUND_TO_NEAREST_INT);

    /* Extended precision arithmetics: 1/tabscale is represented as a leading
     * part plus a small correction so the reduced argument stays accurate. */
    z        = _mm256_sub_pd(xabs,_mm256_mul_pd(invtabscale0,xpoint));
    z        = _mm256_sub_pd(z,_mm256_mul_pd(invtabscale1,xpoint));

    /* Range reduction to 0..2*Pi */
    tabidx   = _mm_and_si128(tabidx,tabmask);

    /* tabidx is now in range [0,..,64] */
    imask     = _mm_cmpgt_epi32(tabidx,i32);
    sswapsign = imask;
    cswapsign = imask;
    corridx   = _mm_and_si128(imask,i32);
    tabidx    = _mm_sub_epi32(tabidx,corridx);

    /* tabidx is now in range [0..32] */
    imask     = _mm_cmpgt_epi32(tabidx,i16);
    cswapsign = _mm_xor_si128(cswapsign,imask);
    corridx   = _mm_sub_epi32(i32,tabidx);
    tabidx    = _mm_blendv_epi8(tabidx,corridx,imask);
    /* tabidx is now in range [0..16] */
    ssign     = _mm256_cvtepi32_pd( _mm_or_si128( sswapsign , ione ) );
    csign     = _mm256_cvtepi32_pd( _mm_or_si128( cswapsign , ione ) );
    /* First lookup into table */
#ifdef _MSC_VER
    t1   = _mm256_insertf128_pd(_mm256_castpd128_pd256(_mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,0))),
                                _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,2)), 0x1);
    t2   = _mm256_insertf128_pd(_mm256_castpd128_pd256(_mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,1))),
                                _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,3)), 0x1);
#else
    t1   = _mm256_insertf128_pd(_mm256_castpd128_pd256(sintable[_mm_extract_epi32(tabidx,0)]),
                                sintable[_mm_extract_epi32(tabidx,2)], 0x1);
    t2   = _mm256_insertf128_pd(_mm256_castpd128_pd256(sintable[_mm_extract_epi32(tabidx,1)]),
                                sintable[_mm_extract_epi32(tabidx,3)], 0x1);
#endif
    sinpoint  = _mm256_unpackhi_pd(t1,t2);
    cospoint  = _mm256_unpacklo_pd(t1,t2);

    sinpoint  = _mm256_mul_pd(sinpoint,ssign);
    cospoint  = _mm256_mul_pd(cospoint,csign);

    z2        = _mm256_mul_pd(z,z);

    polySin   = _mm256_mul_pd(sinP7,z2);
    polySin   = _mm256_add_pd(polySin,sinP5);
    polySin   = _mm256_mul_pd(polySin,z2);
    polySin   = _mm256_add_pd(polySin,sinP3);
    polySin   = _mm256_mul_pd(polySin,z2);
    polySin   = _mm256_add_pd(polySin,sinP1);
    polySin   = _mm256_mul_pd(polySin,z);

    polyCos   = _mm256_mul_pd(cosP6,z2);
    polyCos   = _mm256_add_pd(polyCos,cosP4);
    polyCos   = _mm256_mul_pd(polyCos,z2);
    polyCos   = _mm256_add_pd(polyCos,cosP2);
    polyCos   = _mm256_mul_pd(polyCos,z2);
    polyCos   = _mm256_add_pd(polyCos,cosP0);

    *sinval   = _mm256_xor_pd(_mm256_add_pd( _mm256_mul_pd(sinpoint,polyCos) , _mm256_mul_pd(cospoint,polySin) ),xsign);
    *cosval   = _mm256_sub_pd( _mm256_mul_pd(cospoint,polyCos) , _mm256_mul_pd(sinpoint,polySin) );
}
static void
gmx_mm_sincos_pd(__m128d x,
                 __m128d *sinval,
                 __m128d *cosval)
{
#ifdef _MSC_VER
    __declspec(align(16))
    const double sintable[34] =
    {
        1.00000000000000000e+00 , 0.00000000000000000e+00 ,
        9.95184726672196929e-01 , 9.80171403295606036e-02 ,
        9.80785280403230431e-01 , 1.95090322016128248e-01 ,
        9.56940335732208824e-01 , 2.90284677254462331e-01 ,
        9.23879532511286738e-01 , 3.82683432365089782e-01 ,
        8.81921264348355050e-01 , 4.71396736825997642e-01 ,
        8.31469612302545236e-01 , 5.55570233019602178e-01 ,
        7.73010453362736993e-01 , 6.34393284163645488e-01 ,
        7.07106781186547573e-01 , 7.07106781186547462e-01 ,
        6.34393284163645599e-01 , 7.73010453362736882e-01 ,
        5.55570233019602289e-01 , 8.31469612302545125e-01 ,
        4.71396736825997809e-01 , 8.81921264348354939e-01 ,
        3.82683432365089837e-01 , 9.23879532511286738e-01 ,
        2.90284677254462276e-01 , 9.56940335732208935e-01 ,
        1.95090322016128304e-01 , 9.80785280403230431e-01 ,
        9.80171403295607702e-02 , 9.95184726672196818e-01 ,
        0.0 , 1.00000000000000000e+00
    };
#else
    const __m128d sintable[17] =
    {
        _mm_set_pd(  0.0 , 1.0 ),
        _mm_set_pd( sin(  1.0 * (M_PI/2.0) / 16.0) , cos(  1.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  2.0 * (M_PI/2.0) / 16.0) , cos(  2.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  3.0 * (M_PI/2.0) / 16.0) , cos(  3.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  4.0 * (M_PI/2.0) / 16.0) , cos(  4.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  5.0 * (M_PI/2.0) / 16.0) , cos(  5.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  6.0 * (M_PI/2.0) / 16.0) , cos(  6.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  7.0 * (M_PI/2.0) / 16.0) , cos(  7.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  8.0 * (M_PI/2.0) / 16.0) , cos(  8.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  9.0 * (M_PI/2.0) / 16.0) , cos(  9.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 10.0 * (M_PI/2.0) / 16.0) , cos( 10.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 11.0 * (M_PI/2.0) / 16.0) , cos( 11.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 12.0 * (M_PI/2.0) / 16.0) , cos( 12.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 13.0 * (M_PI/2.0) / 16.0) , cos( 13.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 14.0 * (M_PI/2.0) / 16.0) , cos( 14.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 15.0 * (M_PI/2.0) / 16.0) , cos( 15.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd(  1.0 , 0.0 )
    };
#endif

    const __m128d signmask       = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m128i signbit_epi32  = _mm_set1_epi32(0x80000000);

    const __m128d tabscale       = _mm_set1_pd(32.0/M_PI);
    const __m128d invtabscale0   = _mm_set1_pd(9.81747508049011230469e-02);
    const __m128d invtabscale1   = _mm_set1_pd(1.96197799156550576057e-08);
    const __m128i ione           = _mm_set1_epi32(1);
    const __m128i i32            = _mm_set1_epi32(32);
    const __m128i i16            = _mm_set1_epi32(16);
    const __m128i tabmask        = _mm_set1_epi32(0x3F);
    const __m128d sinP7          = _mm_set1_pd(-1.0/5040.0);
    const __m128d sinP5          = _mm_set1_pd(1.0/120.0);
    const __m128d sinP3          = _mm_set1_pd(-1.0/6.0);
    const __m128d sinP1          = _mm_set1_pd(1.0);

    const __m128d cosP6          = _mm_set1_pd(-1.0/720.0);
    const __m128d cosP4          = _mm_set1_pd(1.0/24.0);
    const __m128d cosP2          = _mm_set1_pd(-1.0/2.0);
    const __m128d cosP0          = _mm_set1_pd(1.0);
    __m128i tabidx,corridx;
    __m128d xabs,z,z2,polySin,polyCos;
    __m128d scalex,xpoint;
    __m128d ypoint0,ypoint1;
    __m128d sinpoint,cospoint;
    __m128d xsign,ssign,csign;
    __m128i imask,sswapsign,cswapsign;

    xsign    = _mm_andnot_pd(signmask,x);
    xabs     = _mm_and_pd(x,signmask);

    scalex   = _mm_mul_pd(tabscale,xabs);
    tabidx   = _mm_cvtpd_epi32(scalex);

    xpoint   = _mm_round_pd(scalex,_MM_FROUND_TO_NEAREST_INT);

    /* Extended precision arithmetics: 1/tabscale is represented as a leading
     * part plus a small correction so the reduced argument stays accurate. */
    z        = _mm_sub_pd(xabs,_mm_mul_pd(invtabscale0,xpoint));
    z        = _mm_sub_pd(z,_mm_mul_pd(invtabscale1,xpoint));

    /* Range reduction to 0..2*Pi */
    tabidx   = _mm_and_si128(tabidx,tabmask);

    /* tabidx is now in range [0,..,64] */
    imask     = _mm_cmpgt_epi32(tabidx,i32);
    sswapsign = imask;
    cswapsign = imask;
    corridx   = _mm_and_si128(imask,i32);
    tabidx    = _mm_sub_epi32(tabidx,corridx);

    /* tabidx is now in range [0..32] */
    imask     = _mm_cmpgt_epi32(tabidx,i16);
    cswapsign = _mm_xor_si128(cswapsign,imask);
    corridx   = _mm_sub_epi32(i32,tabidx);
    tabidx    = _mm_blendv_epi8(tabidx,corridx,imask);
    /* tabidx is now in range [0..16] */
    ssign     = _mm_cvtepi32_pd( _mm_or_si128( sswapsign , ione ) );
    csign     = _mm_cvtepi32_pd( _mm_or_si128( cswapsign , ione ) );
    /* First lookup into table */
#ifdef _MSC_VER
    ypoint0  = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,0));
    ypoint1  = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,1));
#else
    ypoint0  = sintable[_mm_extract_epi32(tabidx,0)];
    ypoint1  = sintable[_mm_extract_epi32(tabidx,1)];
#endif
    sinpoint  = _mm_unpackhi_pd(ypoint0,ypoint1);
    cospoint  = _mm_unpacklo_pd(ypoint0,ypoint1);

    sinpoint  = _mm_mul_pd(sinpoint,ssign);
    cospoint  = _mm_mul_pd(cospoint,csign);

    z2        = _mm_mul_pd(z,z);

    polySin   = _mm_mul_pd(sinP7,z2);
    polySin   = _mm_add_pd(polySin,sinP5);
    polySin   = _mm_mul_pd(polySin,z2);
    polySin   = _mm_add_pd(polySin,sinP3);
    polySin   = _mm_mul_pd(polySin,z2);
    polySin   = _mm_add_pd(polySin,sinP1);
    polySin   = _mm_mul_pd(polySin,z);

    polyCos   = _mm_mul_pd(cosP6,z2);
    polyCos   = _mm_add_pd(polyCos,cosP4);
    polyCos   = _mm_mul_pd(polyCos,z2);
    polyCos   = _mm_add_pd(polyCos,cosP2);
    polyCos   = _mm_mul_pd(polyCos,z2);
    polyCos   = _mm_add_pd(polyCos,cosP0);

    *sinval   = _mm_xor_pd(_mm_add_pd( _mm_mul_pd(sinpoint,polyCos) , _mm_mul_pd(cospoint,polySin) ),xsign);
    *cosval   = _mm_sub_pd( _mm_mul_pd(cospoint,polyCos) , _mm_mul_pd(sinpoint,polySin) );
}
/*
 * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
 * will then call the sincos() routine and waste a factor 2 in performance!
 */
static __m256d
gmx_mm256_sin_pd(__m256d x)
{
    __m256d s,c;
    gmx_mm256_sincos_pd(x,&s,&c);
    return s;
}

static __m128d
gmx_mm_sin_pd(__m128d x)
{
    __m128d s,c;
    gmx_mm_sincos_pd(x,&s,&c);
    return s;
}

/*
 * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
 * will then call the sincos() routine and waste a factor 2 in performance!
 */
static __m256d
gmx_mm256_cos_pd(__m256d x)
{
    __m256d s,c;
    gmx_mm256_sincos_pd(x,&s,&c);
    return c;
}

static __m128d
gmx_mm_cos_pd(__m128d x)
{
    __m128d s,c;
    gmx_mm_sincos_pd(x,&s,&c);
    return c;
}
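
/* A minimal sketch of the note above (the helper name is illustrative, not
 * part of this header): when both values are needed, one sincos call does the
 * table lookup and range reduction once instead of twice.
 */
static gmx_inline void
gmx_mm256_sin_and_cos_sketch(__m256d theta, __m256d *s, __m256d *c)
{
    /* Preferred: both results from a single evaluation */
    gmx_mm256_sincos_pd(theta,s,c);

    /* Avoid: *s = gmx_mm256_sin_pd(theta); *c = gmx_mm256_cos_pd(theta);
     * which runs the full sincos machinery twice. */
}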
static __m256d
gmx_mm256_tan_pd(__m256d x)
{
    __m256d sinval,cosval;
    __m256d tanval;

    gmx_mm256_sincos_pd(x,&sinval,&cosval);

    tanval = _mm256_mul_pd(sinval,gmx_mm256_inv_pd(cosval));

    return tanval;
}

static __m128d
gmx_mm_tan_pd(__m128d x)
{
    __m128d sinval,cosval;
    __m128d tanval;

    gmx_mm_sincos_pd(x,&sinval,&cosval);

    tanval = _mm_mul_pd(sinval,gmx_mm_inv_pd(cosval));

    return tanval;
}
static __m256d
gmx_mm256_asin_pd(__m256d x)
{
    /* Same algorithm as cephes library */
    const __m256d signmask  = _mm256_castsi256_pd( _mm256_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,
                                                                    0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m256d limit1    = _mm256_set1_pd(0.625);
    const __m256d limit2    = _mm256_set1_pd(1e-8);
    const __m256d one       = _mm256_set1_pd(1.0);
    const __m256d halfpi    = _mm256_set1_pd(M_PI/2.0);
    const __m256d quarterpi = _mm256_set1_pd(M_PI/4.0);
    const __m256d morebits  = _mm256_set1_pd(6.123233995736765886130e-17);

    const __m256d P5        = _mm256_set1_pd(4.253011369004428248960e-3);
    const __m256d P4        = _mm256_set1_pd(-6.019598008014123785661e-1);
    const __m256d P3        = _mm256_set1_pd(5.444622390564711410273e0);
    const __m256d P2        = _mm256_set1_pd(-1.626247967210700244449e1);
    const __m256d P1        = _mm256_set1_pd(1.956261983317594739197e1);
    const __m256d P0        = _mm256_set1_pd(-8.198089802484824371615e0);

    const __m256d Q4        = _mm256_set1_pd(-1.474091372988853791896e1);
    const __m256d Q3        = _mm256_set1_pd(7.049610280856842141659e1);
    const __m256d Q2        = _mm256_set1_pd(-1.471791292232726029859e2);
    const __m256d Q1        = _mm256_set1_pd(1.395105614657485689735e2);
    const __m256d Q0        = _mm256_set1_pd(-4.918853881490881290097e1);

    const __m256d R4        = _mm256_set1_pd(2.967721961301243206100e-3);
    const __m256d R3        = _mm256_set1_pd(-5.634242780008963776856e-1);
    const __m256d R2        = _mm256_set1_pd(6.968710824104713396794e0);
    const __m256d R1        = _mm256_set1_pd(-2.556901049652824852289e1);
    const __m256d R0        = _mm256_set1_pd(2.853665548261061424989e1);

    const __m256d S3        = _mm256_set1_pd(-2.194779531642920639778e1);
    const __m256d S2        = _mm256_set1_pd(1.470656354026814941758e2);
    const __m256d S1        = _mm256_set1_pd(-3.838770957603691357202e2);
    const __m256d S0        = _mm256_set1_pd(3.424398657913078477438e2);

    __m256d sign;
    __m256d mask;
    __m256d xabs;
    __m256d zz,ww,z,q,w,y,zz2,ww2;
    __m256d PA,PB;
    __m256d QA,QB;
    __m256d RA,RB;
    __m256d SA,SB;
    __m256d nom,denom;

    sign  = _mm256_andnot_pd(signmask,x);
    xabs  = _mm256_and_pd(x,signmask);

    mask  = _mm256_cmp_pd(xabs,limit1,_CMP_GT_OQ);

    zz    = _mm256_sub_pd(one,xabs);
    ww    = _mm256_mul_pd(xabs,xabs);
    zz2   = _mm256_mul_pd(zz,zz);
    ww2   = _mm256_mul_pd(ww,ww);

    RA    = _mm256_mul_pd(R4,zz2);
    RB    = _mm256_mul_pd(R3,zz2);
    RA    = _mm256_add_pd(RA,R2);
    RB    = _mm256_add_pd(RB,R1);
    RA    = _mm256_mul_pd(RA,zz2);
    RB    = _mm256_mul_pd(RB,zz);
    RA    = _mm256_add_pd(RA,R0);
    RA    = _mm256_add_pd(RA,RB);

    SB    = _mm256_mul_pd(S3,zz2);
    SA    = _mm256_add_pd(zz2,S2);
    SB    = _mm256_add_pd(SB,S1);
    SA    = _mm256_mul_pd(SA,zz2);
    SB    = _mm256_mul_pd(SB,zz);
    SA    = _mm256_add_pd(SA,S0);
    SA    = _mm256_add_pd(SA,SB);

    PA    = _mm256_mul_pd(P5,ww2);
    PB    = _mm256_mul_pd(P4,ww2);
    PA    = _mm256_add_pd(PA,P3);
    PB    = _mm256_add_pd(PB,P2);
    PA    = _mm256_mul_pd(PA,ww2);
    PB    = _mm256_mul_pd(PB,ww2);
    PA    = _mm256_add_pd(PA,P1);
    PB    = _mm256_add_pd(PB,P0);
    PA    = _mm256_mul_pd(PA,ww);
    PA    = _mm256_add_pd(PA,PB);

    QB    = _mm256_mul_pd(Q4,ww2);
    QA    = _mm256_add_pd(ww2,Q3);
    QB    = _mm256_add_pd(QB,Q2);
    QA    = _mm256_mul_pd(QA,ww2);
    QB    = _mm256_mul_pd(QB,ww2);
    QA    = _mm256_add_pd(QA,Q1);
    QB    = _mm256_add_pd(QB,Q0);
    QA    = _mm256_mul_pd(QA,ww);
    QA    = _mm256_add_pd(QA,QB);

    RA    = _mm256_mul_pd(RA,zz);
    PA    = _mm256_mul_pd(PA,ww);

    nom   = _mm256_blendv_pd( PA,RA,mask );
    denom = _mm256_blendv_pd( QA,SA,mask );

    q     = _mm256_mul_pd( nom, gmx_mm256_inv_pd(denom) );

    zz    = _mm256_add_pd(zz,zz);
    zz    = gmx_mm256_sqrt_pd(zz);
    z     = _mm256_sub_pd(quarterpi,zz);
    zz    = _mm256_mul_pd(zz,q);
    zz    = _mm256_sub_pd(zz,morebits);
    z     = _mm256_sub_pd(z,zz);
    z     = _mm256_add_pd(z,quarterpi);

    w     = _mm256_mul_pd(xabs,q);
    w     = _mm256_add_pd(w,xabs);

    z     = _mm256_blendv_pd( w,z,mask );

    mask  = _mm256_cmp_pd(xabs,limit2,_CMP_GT_OQ);
    z     = _mm256_blendv_pd( xabs,z,mask );

    z     = _mm256_xor_pd(z,sign);

    return z;
}
static __m128d
gmx_mm_asin_pd(__m128d x)
{
    /* Same algorithm as cephes library */
    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m128d limit1    = _mm_set1_pd(0.625);
    const __m128d limit2    = _mm_set1_pd(1e-8);
    const __m128d one       = _mm_set1_pd(1.0);
    const __m128d halfpi    = _mm_set1_pd(M_PI/2.0);
    const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
    const __m128d morebits  = _mm_set1_pd(6.123233995736765886130e-17);

    const __m128d P5        = _mm_set1_pd(4.253011369004428248960e-3);
    const __m128d P4        = _mm_set1_pd(-6.019598008014123785661e-1);
    const __m128d P3        = _mm_set1_pd(5.444622390564711410273e0);
    const __m128d P2        = _mm_set1_pd(-1.626247967210700244449e1);
    const __m128d P1        = _mm_set1_pd(1.956261983317594739197e1);
    const __m128d P0        = _mm_set1_pd(-8.198089802484824371615e0);

    const __m128d Q4        = _mm_set1_pd(-1.474091372988853791896e1);
    const __m128d Q3        = _mm_set1_pd(7.049610280856842141659e1);
    const __m128d Q2        = _mm_set1_pd(-1.471791292232726029859e2);
    const __m128d Q1        = _mm_set1_pd(1.395105614657485689735e2);
    const __m128d Q0        = _mm_set1_pd(-4.918853881490881290097e1);

    const __m128d R4        = _mm_set1_pd(2.967721961301243206100e-3);
    const __m128d R3        = _mm_set1_pd(-5.634242780008963776856e-1);
    const __m128d R2        = _mm_set1_pd(6.968710824104713396794e0);
    const __m128d R1        = _mm_set1_pd(-2.556901049652824852289e1);
    const __m128d R0        = _mm_set1_pd(2.853665548261061424989e1);

    const __m128d S3        = _mm_set1_pd(-2.194779531642920639778e1);
    const __m128d S2        = _mm_set1_pd(1.470656354026814941758e2);
    const __m128d S1        = _mm_set1_pd(-3.838770957603691357202e2);
    const __m128d S0        = _mm_set1_pd(3.424398657913078477438e2);

    __m128d sign;
    __m128d mask;
    __m128d xabs;
    __m128d zz,ww,z,q,w,y,zz2,ww2;
    __m128d PA,PB;
    __m128d QA,QB;
    __m128d RA,RB;
    __m128d SA,SB;
    __m128d nom,denom;

    sign  = _mm_andnot_pd(signmask,x);
    xabs  = _mm_and_pd(x,signmask);

    mask  = _mm_cmpgt_pd(xabs,limit1);

    zz    = _mm_sub_pd(one,xabs);
    ww    = _mm_mul_pd(xabs,xabs);
    zz2   = _mm_mul_pd(zz,zz);
    ww2   = _mm_mul_pd(ww,ww);

    RA    = _mm_mul_pd(R4,zz2);
    RB    = _mm_mul_pd(R3,zz2);
    RA    = _mm_add_pd(RA,R2);
    RB    = _mm_add_pd(RB,R1);
    RA    = _mm_mul_pd(RA,zz2);
    RB    = _mm_mul_pd(RB,zz);
    RA    = _mm_add_pd(RA,R0);
    RA    = _mm_add_pd(RA,RB);

    SB    = _mm_mul_pd(S3,zz2);
    SA    = _mm_add_pd(zz2,S2);
    SB    = _mm_add_pd(SB,S1);
    SA    = _mm_mul_pd(SA,zz2);
    SB    = _mm_mul_pd(SB,zz);
    SA    = _mm_add_pd(SA,S0);
    SA    = _mm_add_pd(SA,SB);

    PA    = _mm_mul_pd(P5,ww2);
    PB    = _mm_mul_pd(P4,ww2);
    PA    = _mm_add_pd(PA,P3);
    PB    = _mm_add_pd(PB,P2);
    PA    = _mm_mul_pd(PA,ww2);
    PB    = _mm_mul_pd(PB,ww2);
    PA    = _mm_add_pd(PA,P1);
    PB    = _mm_add_pd(PB,P0);
    PA    = _mm_mul_pd(PA,ww);
    PA    = _mm_add_pd(PA,PB);

    QB    = _mm_mul_pd(Q4,ww2);
    QA    = _mm_add_pd(ww2,Q3);
    QB    = _mm_add_pd(QB,Q2);
    QA    = _mm_mul_pd(QA,ww2);
    QB    = _mm_mul_pd(QB,ww2);
    QA    = _mm_add_pd(QA,Q1);
    QB    = _mm_add_pd(QB,Q0);
    QA    = _mm_mul_pd(QA,ww);
    QA    = _mm_add_pd(QA,QB);

    RA    = _mm_mul_pd(RA,zz);
    PA    = _mm_mul_pd(PA,ww);

    nom   = _mm_blendv_pd( PA,RA,mask );
    denom = _mm_blendv_pd( QA,SA,mask );

    q     = _mm_mul_pd( nom, gmx_mm_inv_pd(denom) );

    zz    = _mm_add_pd(zz,zz);
    zz    = gmx_mm_sqrt_pd(zz);
    z     = _mm_sub_pd(quarterpi,zz);
    zz    = _mm_mul_pd(zz,q);
    zz    = _mm_sub_pd(zz,morebits);
    z     = _mm_sub_pd(z,zz);
    z     = _mm_add_pd(z,quarterpi);

    w     = _mm_mul_pd(xabs,q);
    w     = _mm_add_pd(w,xabs);

    z     = _mm_blendv_pd( w,z,mask );

    mask  = _mm_cmpgt_pd(xabs,limit2);
    z     = _mm_blendv_pd( xabs,z,mask );

    z     = _mm_xor_pd(z,sign);

    return z;
}
static __m256d
gmx_mm256_acos_pd(__m256d x)
{
    const __m256d signmask   = _mm256_castsi256_pd( _mm256_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,
                                                                     0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m256d one        = _mm256_set1_pd(1.0);
    const __m256d half       = _mm256_set1_pd(0.5);
    const __m256d pi         = _mm256_set1_pd(M_PI);
    const __m256d quarterpi0 = _mm256_set1_pd(7.85398163397448309616e-1);
    const __m256d quarterpi1 = _mm256_set1_pd(6.123233995736765886130e-17);

    __m256d mask1;
    __m256d z,z1,z2;

    mask1 = _mm256_cmp_pd(x,half,_CMP_GT_OQ);
    z1    = _mm256_mul_pd(half,_mm256_sub_pd(one,x));
    z1    = gmx_mm256_sqrt_pd(z1);
    z     = _mm256_blendv_pd( x,z1,mask1 );

    z     = gmx_mm256_asin_pd(z);

    z1    = _mm256_add_pd(z,z);

    z2    = _mm256_sub_pd(quarterpi0,z);
    z2    = _mm256_add_pd(z2,quarterpi1);
    z2    = _mm256_add_pd(z2,quarterpi0);

    z     = _mm256_blendv_pd(z2,z1,mask1);

    return z;
}
static __m128d
gmx_mm_acos_pd(__m128d x)
{
    const __m128d signmask   = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m128d one        = _mm_set1_pd(1.0);
    const __m128d half       = _mm_set1_pd(0.5);
    const __m128d pi         = _mm_set1_pd(M_PI);
    const __m128d quarterpi0 = _mm_set1_pd(7.85398163397448309616e-1);
    const __m128d quarterpi1 = _mm_set1_pd(6.123233995736765886130e-17);

    __m128d mask1;
    __m128d z,z1,z2;

    mask1 = _mm_cmpgt_pd(x,half);
    z1    = _mm_mul_pd(half,_mm_sub_pd(one,x));
    z1    = gmx_mm_sqrt_pd(z1);
    z     = _mm_blendv_pd( x,z1,mask1 );

    z     = gmx_mm_asin_pd(z);

    z1    = _mm_add_pd(z,z);

    z2    = _mm_sub_pd(quarterpi0,z);
    z2    = _mm_add_pd(z2,quarterpi1);
    z2    = _mm_add_pd(z2,quarterpi0);

    z     = _mm_blendv_pd(z2,z1,mask1);

    return z;
}
static __m256d
gmx_mm256_atan_pd(__m256d x)
{
    /* Same algorithm as cephes library */
    const __m256d signmask  = _mm256_castsi256_pd( _mm256_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,
                                                                    0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m256d limit1    = _mm256_set1_pd(0.66);
    const __m256d limit2    = _mm256_set1_pd(2.41421356237309504880);
    const __m256d quarterpi = _mm256_set1_pd(M_PI/4.0);
    const __m256d halfpi    = _mm256_set1_pd(M_PI/2.0);
    const __m256d mone      = _mm256_set1_pd(-1.0);
    const __m256d morebits1 = _mm256_set1_pd(0.5*6.123233995736765886130E-17);
    const __m256d morebits2 = _mm256_set1_pd(6.123233995736765886130E-17);

    const __m256d P4        = _mm256_set1_pd(-8.750608600031904122785E-1);
    const __m256d P3        = _mm256_set1_pd(-1.615753718733365076637E1);
    const __m256d P2        = _mm256_set1_pd(-7.500855792314704667340E1);
    const __m256d P1        = _mm256_set1_pd(-1.228866684490136173410E2);
    const __m256d P0        = _mm256_set1_pd(-6.485021904942025371773E1);

    const __m256d Q4        = _mm256_set1_pd(2.485846490142306297962E1);
    const __m256d Q3        = _mm256_set1_pd(1.650270098316988542046E2);
    const __m256d Q2        = _mm256_set1_pd(4.328810604912902668951E2);
    const __m256d Q1        = _mm256_set1_pd(4.853903996359136964868E2);
    const __m256d Q0        = _mm256_set1_pd(1.945506571482613964425E2);

    __m256d sign;
    __m256d mask1,mask2;
    __m256d y,z,z2;
    __m256d t1,t2;
    __m256d P_A,P_B,Q_A,Q_B;

    sign  = _mm256_andnot_pd(signmask,x);
    x     = _mm256_and_pd(x,signmask);

    mask1 = _mm256_cmp_pd(x,limit1,_CMP_GT_OQ);
    mask2 = _mm256_cmp_pd(x,limit2,_CMP_GT_OQ);

    t1    = _mm256_mul_pd(_mm256_add_pd(x,mone),gmx_mm256_inv_pd(_mm256_sub_pd(x,mone)));
    t2    = _mm256_mul_pd(mone,gmx_mm256_inv_pd(x));

    y     = _mm256_and_pd(mask1,quarterpi);
    y     = _mm256_or_pd( _mm256_and_pd(mask2,halfpi) , _mm256_andnot_pd(mask2,y) );

    x     = _mm256_or_pd( _mm256_and_pd(mask1,t1) , _mm256_andnot_pd(mask1,x) );
    x     = _mm256_or_pd( _mm256_and_pd(mask2,t2) , _mm256_andnot_pd(mask2,x) );

    z     = _mm256_mul_pd(x,x);
    z2    = _mm256_mul_pd(z,z);

    P_A   = _mm256_mul_pd(P4,z2);
    P_B   = _mm256_mul_pd(P3,z2);
    P_A   = _mm256_add_pd(P_A,P2);
    P_B   = _mm256_add_pd(P_B,P1);
    P_A   = _mm256_mul_pd(P_A,z2);
    P_B   = _mm256_mul_pd(P_B,z);
    P_A   = _mm256_add_pd(P_A,P0);
    P_A   = _mm256_add_pd(P_A,P_B);

    Q_B   = _mm256_mul_pd(Q4,z2);
    Q_A   = _mm256_add_pd(z2,Q3);
    Q_B   = _mm256_add_pd(Q_B,Q2);
    Q_A   = _mm256_mul_pd(Q_A,z2);
    Q_B   = _mm256_mul_pd(Q_B,z2);
    Q_A   = _mm256_add_pd(Q_A,Q1);
    Q_B   = _mm256_add_pd(Q_B,Q0);
    Q_A   = _mm256_mul_pd(Q_A,z);
    Q_A   = _mm256_add_pd(Q_A,Q_B);

    z     = _mm256_mul_pd(z,P_A);
    z     = _mm256_mul_pd(z,gmx_mm256_inv_pd(Q_A));
    z     = _mm256_mul_pd(z,x);
    z     = _mm256_add_pd(z,x);

    t1    = _mm256_and_pd(mask1,morebits1);
    t1    = _mm256_or_pd( _mm256_and_pd(mask2,morebits2) , _mm256_andnot_pd(mask2,t1) );

    z     = _mm256_add_pd(z,t1);
    y     = _mm256_add_pd(y,z);

    y     = _mm256_xor_pd(y,sign);

    return y;
}
static __m128d
gmx_mm_atan_pd(__m128d x)
{
    /* Same algorithm as cephes library */
    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m128d limit1    = _mm_set1_pd(0.66);
    const __m128d limit2    = _mm_set1_pd(2.41421356237309504880);
    const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
    const __m128d halfpi    = _mm_set1_pd(M_PI/2.0);
    const __m128d mone      = _mm_set1_pd(-1.0);
    const __m128d morebits1 = _mm_set1_pd(0.5*6.123233995736765886130E-17);
    const __m128d morebits2 = _mm_set1_pd(6.123233995736765886130E-17);

    const __m128d P4        = _mm_set1_pd(-8.750608600031904122785E-1);
    const __m128d P3        = _mm_set1_pd(-1.615753718733365076637E1);
    const __m128d P2        = _mm_set1_pd(-7.500855792314704667340E1);
    const __m128d P1        = _mm_set1_pd(-1.228866684490136173410E2);
    const __m128d P0        = _mm_set1_pd(-6.485021904942025371773E1);

    const __m128d Q4        = _mm_set1_pd(2.485846490142306297962E1);
    const __m128d Q3        = _mm_set1_pd(1.650270098316988542046E2);
    const __m128d Q2        = _mm_set1_pd(4.328810604912902668951E2);
    const __m128d Q1        = _mm_set1_pd(4.853903996359136964868E2);
    const __m128d Q0        = _mm_set1_pd(1.945506571482613964425E2);

    __m128d sign;
    __m128d mask1,mask2;
    __m128d y,z,z2;
    __m128d t1,t2;
    __m128d P_A,P_B,Q_A,Q_B;

    sign  = _mm_andnot_pd(signmask,x);
    x     = _mm_and_pd(x,signmask);

    mask1 = _mm_cmpgt_pd(x,limit1);
    mask2 = _mm_cmpgt_pd(x,limit2);

    t1    = _mm_mul_pd(_mm_add_pd(x,mone),gmx_mm_inv_pd(_mm_sub_pd(x,mone)));
    t2    = _mm_mul_pd(mone,gmx_mm_inv_pd(x));

    y     = _mm_and_pd(mask1,quarterpi);
    y     = _mm_or_pd( _mm_and_pd(mask2,halfpi) , _mm_andnot_pd(mask2,y) );

    x     = _mm_or_pd( _mm_and_pd(mask1,t1) , _mm_andnot_pd(mask1,x) );
    x     = _mm_or_pd( _mm_and_pd(mask2,t2) , _mm_andnot_pd(mask2,x) );

    z     = _mm_mul_pd(x,x);
    z2    = _mm_mul_pd(z,z);

    P_A   = _mm_mul_pd(P4,z2);
    P_B   = _mm_mul_pd(P3,z2);
    P_A   = _mm_add_pd(P_A,P2);
    P_B   = _mm_add_pd(P_B,P1);
    P_A   = _mm_mul_pd(P_A,z2);
    P_B   = _mm_mul_pd(P_B,z);
    P_A   = _mm_add_pd(P_A,P0);
    P_A   = _mm_add_pd(P_A,P_B);

    Q_B   = _mm_mul_pd(Q4,z2);
    Q_A   = _mm_add_pd(z2,Q3);
    Q_B   = _mm_add_pd(Q_B,Q2);
    Q_A   = _mm_mul_pd(Q_A,z2);
    Q_B   = _mm_mul_pd(Q_B,z2);
    Q_A   = _mm_add_pd(Q_A,Q1);
    Q_B   = _mm_add_pd(Q_B,Q0);
    Q_A   = _mm_mul_pd(Q_A,z);
    Q_A   = _mm_add_pd(Q_A,Q_B);

    z     = _mm_mul_pd(z,P_A);
    z     = _mm_mul_pd(z,gmx_mm_inv_pd(Q_A));
    z     = _mm_mul_pd(z,x);
    z     = _mm_add_pd(z,x);

    t1    = _mm_and_pd(mask1,morebits1);
    t1    = _mm_or_pd( _mm_and_pd(mask2,morebits2) , _mm_andnot_pd(mask2,t1) );

    z     = _mm_add_pd(z,t1);
    y     = _mm_add_pd(y,z);

    y     = _mm_xor_pd(y,sign);

    return y;
}
static __m256d
gmx_mm256_atan2_pd(__m256d y, __m256d x)
{
    const __m256d pi          = _mm256_set1_pd(M_PI);
    const __m256d minuspi     = _mm256_set1_pd(-M_PI);
    const __m256d halfpi      = _mm256_set1_pd(M_PI/2.0);
    const __m256d minushalfpi = _mm256_set1_pd(-M_PI/2.0);

    __m256d z,z1,z3,z4;
    __m256d w;
    __m256d maskx_lt,maskx_eq;
    __m256d masky_lt,masky_eq;
    __m256d mask1,mask2,mask3,mask4,maskall;

    maskx_lt  = _mm256_cmp_pd(x,_mm256_setzero_pd(),_CMP_LT_OQ);
    masky_lt  = _mm256_cmp_pd(y,_mm256_setzero_pd(),_CMP_LT_OQ);
    maskx_eq  = _mm256_cmp_pd(x,_mm256_setzero_pd(),_CMP_EQ_OQ);
    masky_eq  = _mm256_cmp_pd(y,_mm256_setzero_pd(),_CMP_EQ_OQ);

    z         = _mm256_mul_pd(y,gmx_mm256_inv_pd(x));
    z         = gmx_mm256_atan_pd(z);

    /* Special cases: mask1 (x=0,y<0), mask2 (x>=0,y=0), mask3 (x=0,y>0), mask4 (x<0,y=0) */
    mask1     = _mm256_and_pd(maskx_eq,masky_lt);
    mask2     = _mm256_andnot_pd(maskx_lt,masky_eq);
    mask3     = _mm256_andnot_pd( _mm256_or_pd(masky_lt,masky_eq) , maskx_eq);
    mask4     = _mm256_and_pd(masky_eq,maskx_lt);

    maskall   = _mm256_or_pd( _mm256_or_pd(mask1,mask2), _mm256_or_pd(mask3,mask4) );

    z         = _mm256_andnot_pd(maskall,z);
    z1        = _mm256_and_pd(mask1,minushalfpi);
    z3        = _mm256_and_pd(mask3,halfpi);
    z4        = _mm256_and_pd(mask4,pi);

    z         = _mm256_or_pd( _mm256_or_pd(z,z1), _mm256_or_pd(z3,z4) );

    /* Add +-Pi when x<0 to land in the correct half-plane */
    w         = _mm256_blendv_pd(pi,minuspi,masky_lt);
    w         = _mm256_and_pd(w,maskx_lt);

    w         = _mm256_andnot_pd(maskall,w);

    z         = _mm256_add_pd(z,w);

    return z;
}
static __m128d
gmx_mm_atan2_pd(__m128d y, __m128d x)
{
    const __m128d pi          = _mm_set1_pd(M_PI);
    const __m128d minuspi     = _mm_set1_pd(-M_PI);
    const __m128d halfpi      = _mm_set1_pd(M_PI/2.0);
    const __m128d minushalfpi = _mm_set1_pd(-M_PI/2.0);

    __m128d z,z1,z3,z4;
    __m128d w;
    __m128d maskx_lt,maskx_eq;
    __m128d masky_lt,masky_eq;
    __m128d mask1,mask2,mask3,mask4,maskall;

    maskx_lt  = _mm_cmplt_pd(x,_mm_setzero_pd());
    masky_lt  = _mm_cmplt_pd(y,_mm_setzero_pd());
    maskx_eq  = _mm_cmpeq_pd(x,_mm_setzero_pd());
    masky_eq  = _mm_cmpeq_pd(y,_mm_setzero_pd());

    z         = _mm_mul_pd(y,gmx_mm_inv_pd(x));
    z         = gmx_mm_atan_pd(z);

    /* Special cases: mask1 (x=0,y<0), mask2 (x>=0,y=0), mask3 (x=0,y>0), mask4 (x<0,y=0) */
    mask1     = _mm_and_pd(maskx_eq,masky_lt);
    mask2     = _mm_andnot_pd(maskx_lt,masky_eq);
    mask3     = _mm_andnot_pd( _mm_or_pd(masky_lt,masky_eq) , maskx_eq);
    mask4     = _mm_and_pd(masky_eq,maskx_lt);

    maskall   = _mm_or_pd( _mm_or_pd(mask1,mask2), _mm_or_pd(mask3,mask4) );

    z         = _mm_andnot_pd(maskall,z);
    z1        = _mm_and_pd(mask1,minushalfpi);
    z3        = _mm_and_pd(mask3,halfpi);
    z4        = _mm_and_pd(mask4,pi);

    z         = _mm_or_pd( _mm_or_pd(z,z1), _mm_or_pd(z3,z4) );

    /* Add +-Pi when x<0 to land in the correct half-plane */
    w         = _mm_blendv_pd(pi,minuspi,masky_lt);
    w         = _mm_and_pd(w,maskx_lt);

    w         = _mm_andnot_pd(maskall,w);

    z         = _mm_add_pd(z,w);

    return z;
}

#endif /* _gmx_math_x86_avx_256_double_h_ */