Make sure frexp() returns correct results for argument 0.0
[gromacs.git] / src / gromacs / simd / impl_x86_avx_512 / impl_x86_avx_512_simd_double.h
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015,2016,2017,2018 by the GROMACS development team.
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#ifndef GMX_SIMD_IMPL_X86_AVX_512_SIMD_DOUBLE_H
#define GMX_SIMD_IMPL_X86_AVX_512_SIMD_DOUBLE_H

#include "config.h"

#include <cassert>
#include <cstdint>

#include <immintrin.h>

#include "gromacs/math/utilities.h"
#include "gromacs/utility/basedefinitions.h"

#include "impl_x86_avx_512_general.h"
#include "impl_x86_avx_512_simd_float.h"

namespace gmx
{

class SimdDouble
{
public:
    SimdDouble() {}

    SimdDouble(double d) : simdInternal_(_mm512_set1_pd(d)) {}

    // Internal utility constructor to simplify return statements
    SimdDouble(__m512d simd) : simdInternal_(simd) {}

    __m512d simdInternal_;
};

class SimdDInt32
{
public:
    SimdDInt32() {}

    SimdDInt32(std::int32_t i) : simdInternal_(_mm256_set1_epi32(i)) {}

    // Internal utility constructor to simplify return statements
    SimdDInt32(__m256i simd) : simdInternal_(simd) {}

    __m256i simdInternal_;
};

class SimdDBool
{
public:
    SimdDBool() {}

    // Internal utility constructor to simplify return statements
    SimdDBool(__mmask8 simd) : simdInternal_(simd) {}

    __mmask8 simdInternal_;
};

class SimdDIBool
{
public:
    SimdDIBool() {}

    // Internal utility constructor to simplify return statements
    SimdDIBool(__mmask16 simd) : simdInternal_(simd) {}

    __mmask16 simdInternal_;
};

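// Layout note (added for readers of this implementation): SimdDouble packs
// eight doubles in one 512-bit zmm register, while the matching integer type
// SimdDInt32 packs eight 32-bit integers in a 256-bit ymm register. The two
// boolean types are AVX-512 opmask registers rather than vector registers;
// SimdDIBool is declared __mmask16 because the 256-bit integers are widened
// to 512 bits (16 elements) for mask operations further down in this file.
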
static inline SimdDouble gmx_simdcall simdLoad(const double* m, SimdDoubleTag = {})
{
    assert(std::size_t(m) % 64 == 0);
    return { _mm512_load_pd(m) };
}

static inline void gmx_simdcall store(double* m, SimdDouble a)
{
    assert(std::size_t(m) % 64 == 0);
    _mm512_store_pd(m, a.simdInternal_);
}

static inline SimdDouble gmx_simdcall simdLoadU(const double* m, SimdDoubleTag = {})
{
    return { _mm512_loadu_pd(m) };
}

static inline void gmx_simdcall storeU(double* m, SimdDouble a)
{
    _mm512_storeu_pd(m, a.simdInternal_);
}

static inline SimdDouble gmx_simdcall setZeroD()
{
    return { _mm512_setzero_pd() };
}

static inline SimdDInt32 gmx_simdcall simdLoad(const std::int32_t* m, SimdDInt32Tag)
{
    assert(std::size_t(m) % 32 == 0);
    return { _mm256_load_si256(reinterpret_cast<const __m256i*>(m)) };
}

static inline void gmx_simdcall store(std::int32_t* m, SimdDInt32 a)
{
    assert(std::size_t(m) % 32 == 0);
    _mm256_store_si256(reinterpret_cast<__m256i*>(m), a.simdInternal_);
}

static inline SimdDInt32 gmx_simdcall simdLoadU(const std::int32_t* m, SimdDInt32Tag)
{
    return { _mm256_loadu_si256(reinterpret_cast<const __m256i*>(m)) };
}

static inline void gmx_simdcall storeU(std::int32_t* m, SimdDInt32 a)
{
    _mm256_storeu_si256(reinterpret_cast<__m256i*>(m), a.simdInternal_);
}

static inline SimdDInt32 gmx_simdcall setZeroDI()
{
    return { _mm256_setzero_si256() };
}

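// Note: the aligned load/store variants above assert full-register alignment
// (64 bytes for zmm, 32 bytes for ymm). In GROMACS this is normally satisfied
// by its aligned allocation utilities; arbitrary pointers should go through
// the unaligned U variants instead.
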
static inline SimdDouble gmx_simdcall operator&(SimdDouble a, SimdDouble b)
{
    return { _mm512_castsi512_pd(_mm512_and_epi32(_mm512_castpd_si512(a.simdInternal_),
                                                  _mm512_castpd_si512(b.simdInternal_))) };
}

static inline SimdDouble gmx_simdcall andNot(SimdDouble a, SimdDouble b)
{
    return { _mm512_castsi512_pd(_mm512_andnot_epi32(_mm512_castpd_si512(a.simdInternal_),
                                                     _mm512_castpd_si512(b.simdInternal_))) };
}

static inline SimdDouble gmx_simdcall operator|(SimdDouble a, SimdDouble b)
{
    return { _mm512_castsi512_pd(_mm512_or_epi32(_mm512_castpd_si512(a.simdInternal_),
                                                 _mm512_castpd_si512(b.simdInternal_))) };
}

static inline SimdDouble gmx_simdcall operator^(SimdDouble a, SimdDouble b)
{
    return { _mm512_castsi512_pd(_mm512_xor_epi32(_mm512_castpd_si512(a.simdInternal_),
                                                  _mm512_castpd_si512(b.simdInternal_))) };
}

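// The casts to and from __m512i above are needed because plain AVX-512F only
// provides bitwise logic on integer vectors; the floating-point forms such as
// _mm512_and_pd require the AVX512DQ extension. The casts are pure
// reinterpretations and generate no instructions.
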
static inline SimdDouble gmx_simdcall operator+(SimdDouble a, SimdDouble b)
{
    return { _mm512_add_pd(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall operator-(SimdDouble a, SimdDouble b)
{
    return { _mm512_sub_pd(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall operator-(SimdDouble x)
{
    return { _mm512_castsi512_pd(_mm512_xor_epi32(_mm512_castpd_si512(x.simdInternal_),
                                                  _mm512_castpd_si512(_mm512_set1_pd(GMX_DOUBLE_NEGZERO)))) };
}

static inline SimdDouble gmx_simdcall operator*(SimdDouble a, SimdDouble b)
{
    return { _mm512_mul_pd(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall fma(SimdDouble a, SimdDouble b, SimdDouble c)
{
    return { _mm512_fmadd_pd(a.simdInternal_, b.simdInternal_, c.simdInternal_) };
}

static inline SimdDouble gmx_simdcall fms(SimdDouble a, SimdDouble b, SimdDouble c)
{
    return { _mm512_fmsub_pd(a.simdInternal_, b.simdInternal_, c.simdInternal_) };
}

static inline SimdDouble gmx_simdcall fnma(SimdDouble a, SimdDouble b, SimdDouble c)
{
    return { _mm512_fnmadd_pd(a.simdInternal_, b.simdInternal_, c.simdInternal_) };
}

static inline SimdDouble gmx_simdcall fnms(SimdDouble a, SimdDouble b, SimdDouble c)
{
    return { _mm512_fnmsub_pd(a.simdInternal_, b.simdInternal_, c.simdInternal_) };
}

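// Sign conventions of the fused operations above: fma(a,b,c) = a*b+c,
// fms(a,b,c) = a*b-c, fnma(a,b,c) = -a*b+c, and fnms(a,b,c) = -a*b-c,
// each computed with a single rounding.
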
// Override for AVX-512-KNL
#if GMX_SIMD_X86_AVX_512
static inline SimdDouble gmx_simdcall rsqrt(SimdDouble x)
{
    return { _mm512_rsqrt14_pd(x.simdInternal_) };
}

static inline SimdDouble gmx_simdcall rcp(SimdDouble x)
{
    return { _mm512_rcp14_pd(x.simdInternal_) };
}
#endif

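// The rsqrt14/rcp14 instructions guarantee a relative error of at most 2^-14;
// the higher-level GROMACS SIMD math layer refines these approximations with
// Newton-Raphson iterations where full double precision is required.
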
static inline SimdDouble gmx_simdcall maskAdd(SimdDouble a, SimdDouble b, SimdDBool m)
{
    return { _mm512_mask_add_pd(a.simdInternal_, m.simdInternal_, a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall maskzMul(SimdDouble a, SimdDouble b, SimdDBool m)
{
    return { _mm512_maskz_mul_pd(m.simdInternal_, a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall maskzFma(SimdDouble a, SimdDouble b, SimdDouble c, SimdDBool m)
{
    return { _mm512_maskz_fmadd_pd(m.simdInternal_, a.simdInternal_, b.simdInternal_, c.simdInternal_) };
}

// Override for AVX-512-KNL
#if GMX_SIMD_X86_AVX_512
static inline SimdDouble gmx_simdcall maskzRsqrt(SimdDouble x, SimdDBool m)
{
    return { _mm512_maskz_rsqrt14_pd(m.simdInternal_, x.simdInternal_) };
}

static inline SimdDouble gmx_simdcall maskzRcp(SimdDouble x, SimdDBool m)
{
    return { _mm512_maskz_rcp14_pd(m.simdInternal_, x.simdInternal_) };
}
#endif

static inline SimdDouble gmx_simdcall abs(SimdDouble x)
{
    return { _mm512_castsi512_pd(_mm512_andnot_epi32(_mm512_castpd_si512(_mm512_set1_pd(GMX_DOUBLE_NEGZERO)),
                                                     _mm512_castpd_si512(x.simdInternal_))) };
}

static inline SimdDouble gmx_simdcall max(SimdDouble a, SimdDouble b)
{
    return { _mm512_max_pd(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall min(SimdDouble a, SimdDouble b)
{
    return { _mm512_min_pd(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall round(SimdDouble x)
{
    return { _mm512_roundscale_pd(x.simdInternal_, 0) };
}

static inline SimdDouble gmx_simdcall trunc(SimdDouble x)
{
#if defined(__INTEL_COMPILER) || defined(__ECC)
    return { _mm512_trunc_pd(x.simdInternal_) };
#else
    return { _mm512_cvtepi32_pd(_mm512_cvttpd_epi32(x.simdInternal_)) };
#endif
}

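// The non-icc trunc() path converts to 32-bit integers with truncation and
// back to double. This is exact for values that fit in a 32-bit integer,
// which covers the ranges this layer is used for, but it would not be valid
// for arbitrary large magnitudes.
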
template<MathOptimization opt = MathOptimization::Safe>
static inline SimdDouble frexp(SimdDouble value, SimdDInt32* exponent)
{
    __m512d rExponent;
    __m256i iExponent;
    __m512d result;

    if (opt == MathOptimization::Safe)
    {
        // For the safe branch, we use masked operations so results are only assigned where the
        // input value was nonzero; otherwise the exponent is set to 0 and the fraction to the
        // input (+-0).
        __mmask8 valueIsNonZero =
                _mm512_cmp_pd_mask(_mm512_setzero_pd(), value.simdInternal_, _CMP_NEQ_OQ);
        rExponent = _mm512_maskz_getexp_pd(valueIsNonZero, value.simdInternal_);

        // AVX512F does not contain any function to use masking when adding 1 to a 256-bit register
        // (that comes with AVX512VL), so to work around this we create an integer -1 value, and
        // use masking in the _conversion_ instruction where it is supported. When we later add
        // 1 to all fields, the fields that were formerly -1 (corresponding to zero exponent)
        // will be assigned -1 + 1 = 0.
        iExponent = _mm512_mask_cvtpd_epi32(_mm256_set1_epi32(-1), valueIsNonZero, rExponent);
        iExponent = _mm256_add_epi32(iExponent, _mm256_set1_epi32(1));

        // Set result to value (+-0) when it is zero.
        result = _mm512_mask_getmant_pd(value.simdInternal_, valueIsNonZero, value.simdInternal_,
                                        _MM_MANT_NORM_p5_1, _MM_MANT_SIGN_src);
    }
    else
    {
        // For the fast branch, it's the user's responsibility to make sure never to call the
        // function with input values of +-0.0
        rExponent = _mm512_getexp_pd(value.simdInternal_);
        iExponent = _mm512_cvtpd_epi32(rExponent);
        iExponent = _mm256_add_epi32(iExponent, _mm256_set1_epi32(1));

        result = _mm512_getmant_pd(value.simdInternal_, _MM_MANT_NORM_p5_1, _MM_MANT_SIGN_src);
    }

    exponent->simdInternal_ = iExponent;
    return { result };
}

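// Semantics note: getexp() returns floor(log2(|x|)) and getmant() with
// _MM_MANT_NORM_p5_1 normalizes the mantissa into [0.5, 1.0), so the +1
// above yields the standard frexp() decomposition x = fraction * 2^exponent.
// A hypothetical usage sketch:
//     SimdDInt32 e;
//     SimdDouble f = frexp(SimdDouble(8.0), &e); // f = 0.5 in all lanes, e = 4
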
template<MathOptimization opt = MathOptimization::Safe>
static inline SimdDouble ldexp(SimdDouble value, SimdDInt32 exponent)
{
    const __m256i exponentBias = _mm256_set1_epi32(1023);
    __m256i       iExponent    = _mm256_add_epi32(exponent.simdInternal_, exponentBias);
    __m512i       iExponent512;

    if (opt == MathOptimization::Safe)
    {
        // Make sure the biased argument is not negative
        iExponent = _mm256_max_epi32(iExponent, _mm256_setzero_si256());
    }

    iExponent512 =
            _mm512_permutexvar_epi32(_mm512_set_epi32(7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0, 0),
                                     _mm512_castsi256_si512(iExponent));
    iExponent512 =
            _mm512_mask_slli_epi32(_mm512_setzero_epi32(), avx512Int2Mask(0xAAAA), iExponent512, 20);
    return _mm512_mul_pd(_mm512_castsi512_pd(iExponent512), value.simdInternal_);
}

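// How ldexp() builds 2^exponent: the permute duplicates each biased 32-bit
// exponent into both dwords of a 64-bit lane; the masked shift (mask 0xAAAA
// selects the odd, i.e. high, dwords) moves it left by 20 bits, placing it in
// the exponent field of an IEEE-754 double while zeroing all other bits.
// Reinterpreted as doubles, the lanes then hold exactly 2^e, and a single
// multiply completes ldexp. For example, ldexp(SimdDouble(0.5), SimdDInt32(4))
// yields 8.0 in all lanes.
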
static inline double gmx_simdcall reduce(SimdDouble a)
{
    __m512d x = a.simdInternal_;
    x         = _mm512_add_pd(x, _mm512_shuffle_f64x2(x, x, 0xEE));
    x         = _mm512_add_pd(x, _mm512_shuffle_f64x2(x, x, 0x11));
    x         = _mm512_add_pd(x, _mm512_permute_pd(x, 0x01));
    return *reinterpret_cast<double*>(&x);
}

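// The reduction folds the register in half three times: first the upper 256
// bits are added onto the lower 256, then the two remaining 128-bit lanes are
// added together, and finally the pair of doubles within a lane, leaving the
// total sum in element 0.
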
static inline SimdDBool gmx_simdcall operator==(SimdDouble a, SimdDouble b)
{
    return { _mm512_cmp_pd_mask(a.simdInternal_, b.simdInternal_, _CMP_EQ_OQ) };
}

static inline SimdDBool gmx_simdcall operator!=(SimdDouble a, SimdDouble b)
{
    return { _mm512_cmp_pd_mask(a.simdInternal_, b.simdInternal_, _CMP_NEQ_OQ) };
}

static inline SimdDBool gmx_simdcall operator<(SimdDouble a, SimdDouble b)
{
    return { _mm512_cmp_pd_mask(a.simdInternal_, b.simdInternal_, _CMP_LT_OQ) };
}

static inline SimdDBool gmx_simdcall operator<=(SimdDouble a, SimdDouble b)
{
    return { _mm512_cmp_pd_mask(a.simdInternal_, b.simdInternal_, _CMP_LE_OQ) };
}

static inline SimdDBool gmx_simdcall testBits(SimdDouble a)
{
    return { _mm512_test_epi64_mask(_mm512_castpd_si512(a.simdInternal_),
                                    _mm512_castpd_si512(a.simdInternal_)) };
}

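// testBits() returns true for each lane whose 64-bit pattern has any bit set.
// Note that this is a bitwise test, not a numeric comparison: negative zero
// (-0.0) tests true here even though it compares equal to 0.0.
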
static inline SimdDBool gmx_simdcall operator&&(SimdDBool a, SimdDBool b)
{
    return { static_cast<__mmask8>(_mm512_kand(a.simdInternal_, b.simdInternal_)) };
}

static inline SimdDBool gmx_simdcall operator||(SimdDBool a, SimdDBool b)
{
    return { static_cast<__mmask8>(_mm512_kor(a.simdInternal_, b.simdInternal_)) };
}

static inline bool gmx_simdcall anyTrue(SimdDBool a)
{
    return (avx512Mask2Int(a.simdInternal_) != 0);
}

static inline SimdDouble gmx_simdcall selectByMask(SimdDouble a, SimdDBool m)
{
    return { _mm512_mask_mov_pd(_mm512_setzero_pd(), m.simdInternal_, a.simdInternal_) };
}

static inline SimdDouble gmx_simdcall selectByNotMask(SimdDouble a, SimdDBool m)
{
    return { _mm512_mask_mov_pd(a.simdInternal_, m.simdInternal_, _mm512_setzero_pd()) };
}

static inline SimdDouble gmx_simdcall blend(SimdDouble a, SimdDouble b, SimdDBool sel)
{
    return { _mm512_mask_blend_pd(sel.simdInternal_, a.simdInternal_, b.simdInternal_) };
}

static inline SimdDouble gmx_simdcall copysign(SimdDouble a, SimdDouble b)
{
    return { _mm512_castsi512_pd(_mm512_ternarylogic_epi64(_mm512_castpd_si512(a.simdInternal_),
                                                           _mm512_castpd_si512(b.simdInternal_),
                                                           _mm512_set1_epi64(INT64_MIN), 0xD8)) };
}

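// The ternary-logic immediate 0xD8 encodes the bitwise select (c ? b : a):
// with c set to the sign-bit mask (INT64_MIN), each result lane takes its
// sign bit from b and all magnitude bits from a, which is exactly
// copysign(a, b) in a single instruction.
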
static inline SimdDInt32 gmx_simdcall operator&(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm256_and_si256(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall andNot(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm256_andnot_si256(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall operator|(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm256_or_si256(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall operator^(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm256_xor_si256(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall operator+(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm256_add_epi32(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall operator-(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm256_sub_epi32(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall operator*(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm256_mullo_epi32(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDIBool gmx_simdcall operator==(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm512_mask_cmp_epi32_mask(avx512Int2Mask(0xFF), _mm512_castsi256_si512(a.simdInternal_),
                                        _mm512_castsi256_si512(b.simdInternal_), _MM_CMPINT_EQ) };
}

static inline SimdDIBool gmx_simdcall testBits(SimdDInt32 a)
{
    return { _mm512_mask_test_epi32_mask(avx512Int2Mask(0xFF), _mm512_castsi256_si512(a.simdInternal_),
                                         _mm512_castsi256_si512(a.simdInternal_)) };
}

static inline SimdDIBool gmx_simdcall operator<(SimdDInt32 a, SimdDInt32 b)
{
    return { _mm512_mask_cmp_epi32_mask(avx512Int2Mask(0xFF), _mm512_castsi256_si512(a.simdInternal_),
                                        _mm512_castsi256_si512(b.simdInternal_), _MM_CMPINT_LT) };
}

static inline SimdDIBool gmx_simdcall operator&&(SimdDIBool a, SimdDIBool b)
{
    return { _mm512_kand(a.simdInternal_, b.simdInternal_) };
}

static inline SimdDIBool gmx_simdcall operator||(SimdDIBool a, SimdDIBool b)
{
    return { _mm512_kor(a.simdInternal_, b.simdInternal_) };
}

static inline bool gmx_simdcall anyTrue(SimdDIBool a)
{
    return (avx512Mask2Int(a.simdInternal_) & 0xFF) != 0;
}

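// Since plain AVX-512F has no mask operations on 256-bit registers (those
// require AVX512VL), the integer comparisons above widen their operands to
// 512 bits and restrict the comparison to the low eight elements with the
// 0xFF mask; anyTrue() masks with 0xFF again for the same reason.
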
static inline SimdDInt32 gmx_simdcall selectByMask(SimdDInt32 a, SimdDIBool m)
{
    return { _mm512_castsi512_si256(_mm512_mask_mov_epi32(
            _mm512_setzero_si512(), m.simdInternal_, _mm512_castsi256_si512(a.simdInternal_))) };
}

static inline SimdDInt32 gmx_simdcall selectByNotMask(SimdDInt32 a, SimdDIBool m)
{
    return { _mm512_castsi512_si256(_mm512_mask_mov_epi32(
            _mm512_castsi256_si512(a.simdInternal_), m.simdInternal_, _mm512_setzero_si512())) };
}

static inline SimdDInt32 gmx_simdcall blend(SimdDInt32 a, SimdDInt32 b, SimdDIBool sel)
{
    return { _mm512_castsi512_si256(
            _mm512_mask_blend_epi32(sel.simdInternal_, _mm512_castsi256_si512(a.simdInternal_),
                                    _mm512_castsi256_si512(b.simdInternal_))) };
}

static inline SimdDInt32 gmx_simdcall cvtR2I(SimdDouble a)
{
    return { _mm512_cvtpd_epi32(a.simdInternal_) };
}

static inline SimdDInt32 gmx_simdcall cvttR2I(SimdDouble a)
{
    return { _mm512_cvttpd_epi32(a.simdInternal_) };
}

static inline SimdDouble gmx_simdcall cvtI2R(SimdDInt32 a)
{
    return { _mm512_cvtepi32_pd(a.simdInternal_) };
}

static inline SimdDIBool gmx_simdcall cvtB2IB(SimdDBool a)
{
    return { a.simdInternal_ };
}

static inline SimdDBool gmx_simdcall cvtIB2B(SimdDIBool a)
{
    return { static_cast<__mmask8>(a.simdInternal_) };
}

static inline void gmx_simdcall cvtF2DD(SimdFloat f, SimdDouble* d0, SimdDouble* d1)
{
    d0->simdInternal_ = _mm512_cvtps_pd(_mm512_castps512_ps256(f.simdInternal_));
    d1->simdInternal_ = _mm512_cvtps_pd(
            _mm512_castps512_ps256(_mm512_shuffle_f32x4(f.simdInternal_, f.simdInternal_, 0xEE)));
}

static inline SimdFloat gmx_simdcall cvtDD2F(SimdDouble d0, SimdDouble d1)
{
    __m512 f0 = _mm512_castps256_ps512(_mm512_cvtpd_ps(d0.simdInternal_));
    __m512 f1 = _mm512_castps256_ps512(_mm512_cvtpd_ps(d1.simdInternal_));
    return { _mm512_shuffle_f32x4(f0, f1, 0x44) };
}

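// cvtF2DD() splits the 16 floats into two halves (the shuffle with 0xEE moves
// the upper 256 bits down) and widens each half to eight doubles; cvtDD2F()
// is the inverse, with the 0x44 shuffle packing the two narrowed 256-bit
// results into the low and high halves of one 512-bit register.
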
} // namespace gmx

#endif // GMX_SIMD_IMPL_X86_AVX_512_SIMD_DOUBLE_H