Make sure frexp() returns the correct result for argument 0.0
[gromacs.git] / src / gromacs / simd / impl_x86_avx_256 / impl_x86_avx_256_simd_float.h
blob 8bccb00094f4424d9909137a321d15bb53835ef8
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015,2016,2017,2018 by the GROMACS development team.
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef GMX_SIMD_IMPL_X86_AVX_256_SIMD_FLOAT_H
#define GMX_SIMD_IMPL_X86_AVX_256_SIMD_FLOAT_H

#include "config.h"

#include <cassert>
#include <cstddef>
#include <cstdint>

#include <immintrin.h>

#include "gromacs/math/utilities.h"

namespace gmx
{

class SimdFloat
{
public:
    MSVC_DIAGNOSTIC_IGNORE(26495) // simdInternal_ is not being initialized!
    SimdFloat() {}
    MSVC_DIAGNOSTIC_RESET
    SimdFloat(float f) : simdInternal_(_mm256_set1_ps(f)) {}

    // Internal utility constructor to simplify return statements
    SimdFloat(__m256 simd) : simdInternal_(simd) {}

    __m256 simdInternal_;
};
class SimdFInt32
{
public:
    MSVC_DIAGNOSTIC_IGNORE(26495) // simdInternal_ is not being initialized!
    SimdFInt32() {}
    MSVC_DIAGNOSTIC_RESET
    SimdFInt32(std::int32_t i) : simdInternal_(_mm256_set1_epi32(i)) {}

    // Internal utility constructor to simplify return statements
    SimdFInt32(__m256i simd) : simdInternal_(simd) {}

    __m256i simdInternal_;
};
class SimdFBool
{
public:
    MSVC_DIAGNOSTIC_IGNORE(26495) // simdInternal_ is not being initialized!
    SimdFBool() {}
    MSVC_DIAGNOSTIC_RESET
    SimdFBool(bool b) : simdInternal_(_mm256_castsi256_ps(_mm256_set1_epi32(b ? 0xFFFFFFFF : 0))) {}

    // Internal utility constructor to simplify return statements
    SimdFBool(__m256 simd) : simdInternal_(simd) {}

    __m256 simdInternal_;
};
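/* For illustration only: SimdFBool stores one full 32-bit mask per element (all bits
 * set for true, all bits cleared for false), which is exactly what _mm256_cmp_ps
 * produces. A minimal usage sketch with the operations defined further down in this
 * file (names a, b, x, y are hypothetical):
 *
 *     SimdFBool m = (a < b);          // per-element compare -> all-ones/all-zeros mask
 *     SimdFloat r = blend(x, y, m);   // take y where m is true, x elsewhere
 *
 * Because the mask is a plain bit pattern, masks combine with ordinary bitwise
 * and/or instructions.
 */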
static inline SimdFloat gmx_simdcall simdLoad(const float* m, SimdFloatTag /*unused*/ = {})
{
    assert(std::size_t(m) % 32 == 0);
    return { _mm256_load_ps(m) };
}

static inline void gmx_simdcall store(float* m, SimdFloat a)
{
    assert(std::size_t(m) % 32 == 0);
    _mm256_store_ps(m, a.simdInternal_);
}

static inline SimdFloat gmx_simdcall simdLoadU(const float* m, SimdFloatTag /*unused*/ = {})
{
    return { _mm256_loadu_ps(m) };
}

static inline void gmx_simdcall storeU(float* m, SimdFloat a)
{
    _mm256_storeu_ps(m, a.simdInternal_);
}

static inline SimdFloat gmx_simdcall setZeroF()
{
    return { _mm256_setzero_ps() };
}

static inline SimdFInt32 gmx_simdcall simdLoad(const std::int32_t* m, SimdFInt32Tag /*unused*/)
{
    assert(std::size_t(m) % 32 == 0);
    return { _mm256_load_si256(reinterpret_cast<const __m256i*>(m)) };
}

static inline void gmx_simdcall store(std::int32_t* m, SimdFInt32 a)
{
    assert(std::size_t(m) % 32 == 0);
    _mm256_store_si256(reinterpret_cast<__m256i*>(m), a.simdInternal_);
}

static inline SimdFInt32 gmx_simdcall simdLoadU(const std::int32_t* m, SimdFInt32Tag /*unused*/)
{
    return { _mm256_loadu_si256(reinterpret_cast<const __m256i*>(m)) };
}

static inline void gmx_simdcall storeU(std::int32_t* m, SimdFInt32 a)
{
    _mm256_storeu_si256(reinterpret_cast<__m256i*>(m), a.simdInternal_);
}

static inline SimdFInt32 gmx_simdcall setZeroFI()
{
    return { _mm256_setzero_si256() };
}

template<int index>
static inline std::int32_t gmx_simdcall extract(SimdFInt32 a)
{
    return _mm_extract_epi32(_mm256_extractf128_si256(a.simdInternal_, index >> 2), index & 0x3);
}
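/* Usage sketch (illustrative): with eight 32-bit integers per register, the compile-time
 * index above is split into a 128-bit lane (index >> 2) and a position within that lane
 * (index & 0x3), e.g.
 *
 *     std::int32_t v = extract<5>(a);   // element 5 = lane 1, position 1
 */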
static inline SimdFloat gmx_simdcall operator&(SimdFloat a, SimdFloat b)
{
    return { _mm256_and_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall andNot(SimdFloat a, SimdFloat b)
{
    return { _mm256_andnot_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall operator|(SimdFloat a, SimdFloat b)
{
    return { _mm256_or_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall operator^(SimdFloat a, SimdFloat b)
{
    return { _mm256_xor_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall operator+(SimdFloat a, SimdFloat b)
{
    return { _mm256_add_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall operator-(SimdFloat a, SimdFloat b)
{
    return { _mm256_sub_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall operator-(SimdFloat x)
{
    return { _mm256_xor_ps(x.simdInternal_, _mm256_set1_ps(GMX_FLOAT_NEGZERO)) };
}

static inline SimdFloat gmx_simdcall operator*(SimdFloat a, SimdFloat b)
{
    return { _mm256_mul_ps(a.simdInternal_, b.simdInternal_) };
}
// Override for AVX2 and higher
#if GMX_SIMD_X86_AVX_256
static inline SimdFloat gmx_simdcall fma(SimdFloat a, SimdFloat b, SimdFloat c)
{
    return { _mm256_add_ps(_mm256_mul_ps(a.simdInternal_, b.simdInternal_), c.simdInternal_) };
}

static inline SimdFloat gmx_simdcall fms(SimdFloat a, SimdFloat b, SimdFloat c)
{
    return { _mm256_sub_ps(_mm256_mul_ps(a.simdInternal_, b.simdInternal_), c.simdInternal_) };
}

static inline SimdFloat gmx_simdcall fnma(SimdFloat a, SimdFloat b, SimdFloat c)
{
    return { _mm256_sub_ps(c.simdInternal_, _mm256_mul_ps(a.simdInternal_, b.simdInternal_)) };
}

static inline SimdFloat gmx_simdcall fnms(SimdFloat a, SimdFloat b, SimdFloat c)
{
    return { _mm256_sub_ps(_mm256_setzero_ps(),
                           _mm256_add_ps(_mm256_mul_ps(a.simdInternal_, b.simdInternal_), c.simdInternal_)) };
}
#endif
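/* Note: plain AVX has no fused multiply-add, so the four functions above emulate it
 * with a separate multiply and add (two roundings instead of one). The AVX2
 * implementation header provides its own overrides, which is why the block is guarded
 * by GMX_SIMD_X86_AVX_256. As a scalar sketch of the semantics:
 *
 *     fma(a, b, c)  ~  a * b + c
 *     fms(a, b, c)  ~  a * b - c
 *     fnma(a, b, c) ~  c - a * b
 *     fnms(a, b, c) ~ -(a * b + c)
 */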
static inline SimdFloat gmx_simdcall rsqrt(SimdFloat x)
{
    return { _mm256_rsqrt_ps(x.simdInternal_) };
}

static inline SimdFloat gmx_simdcall rcp(SimdFloat x)
{
    return { _mm256_rcp_ps(x.simdInternal_) };
}

static inline SimdFloat gmx_simdcall maskAdd(SimdFloat a, SimdFloat b, SimdFBool m)
{
    return { _mm256_add_ps(a.simdInternal_, _mm256_and_ps(b.simdInternal_, m.simdInternal_)) };
}

static inline SimdFloat gmx_simdcall maskzMul(SimdFloat a, SimdFloat b, SimdFBool m)
{
    return { _mm256_and_ps(_mm256_mul_ps(a.simdInternal_, b.simdInternal_), m.simdInternal_) };
}

static inline SimdFloat maskzFma(SimdFloat a, SimdFloat b, SimdFloat c, SimdFBool m)
{
    return { _mm256_and_ps(_mm256_add_ps(_mm256_mul_ps(a.simdInternal_, b.simdInternal_), c.simdInternal_),
                           m.simdInternal_) };
}
static inline SimdFloat maskzRsqrt(SimdFloat x, SimdFBool m)
{
#ifndef NDEBUG
    x.simdInternal_ = _mm256_blendv_ps(_mm256_set1_ps(1.0F), x.simdInternal_, m.simdInternal_);
#endif
    return { _mm256_and_ps(_mm256_rsqrt_ps(x.simdInternal_), m.simdInternal_) };
}

static inline SimdFloat maskzRcp(SimdFloat x, SimdFBool m)
{
#ifndef NDEBUG
    x.simdInternal_ = _mm256_blendv_ps(_mm256_set1_ps(1.0F), x.simdInternal_, m.simdInternal_);
#endif
    return { _mm256_and_ps(_mm256_rcp_ps(x.simdInternal_), m.simdInternal_) };
}
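/* Note on the #ifndef NDEBUG blocks above: in debug builds, masked-out elements are
 * first replaced by 1.0 so the approximation instructions never see arguments (such as
 * 0.0) in lanes whose results are discarded anyway; the trailing and with the mask
 * zeroes those lanes either way.
 */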
static inline SimdFloat gmx_simdcall abs(SimdFloat x)
{
    return { _mm256_andnot_ps(_mm256_set1_ps(GMX_FLOAT_NEGZERO), x.simdInternal_) };
}

static inline SimdFloat gmx_simdcall max(SimdFloat a, SimdFloat b)
{
    return { _mm256_max_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall min(SimdFloat a, SimdFloat b)
{
    return { _mm256_min_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall round(SimdFloat x)
{
    return { _mm256_round_ps(x.simdInternal_, _MM_FROUND_NINT) };
}

static inline SimdFloat gmx_simdcall trunc(SimdFloat x)
{
    return { _mm256_round_ps(x.simdInternal_, _MM_FROUND_TRUNC) };
}
// Override for AVX2 and higher
#if GMX_SIMD_X86_AVX_256
template<MathOptimization opt = MathOptimization::Safe>
static inline SimdFloat gmx_simdcall frexp(SimdFloat value, SimdFInt32* exponent)
{
    const __m256  exponentMask = _mm256_castsi256_ps(_mm256_set1_epi32(0x7F800000));
    const __m256  mantissaMask = _mm256_castsi256_ps(_mm256_set1_epi32(0x807FFFFF));
    const __m256  half         = _mm256_set1_ps(0.5);
    const __m128i exponentBias = _mm_set1_epi32(126); // add 1 to make our definition identical to frexp()

    __m256i iExponent     = _mm256_castps_si256(_mm256_and_ps(value.simdInternal_, exponentMask));
    __m128i iExponentHigh = _mm256_extractf128_si256(iExponent, 0x1);
    __m128i iExponentLow  = _mm256_castsi256_si128(iExponent);
    iExponentLow          = _mm_srli_epi32(iExponentLow, 23);
    iExponentHigh         = _mm_srli_epi32(iExponentHigh, 23);
    iExponentLow          = _mm_sub_epi32(iExponentLow, exponentBias);
    iExponentHigh         = _mm_sub_epi32(iExponentHigh, exponentBias);
    iExponent             = _mm256_castsi128_si256(iExponentLow);
    iExponent             = _mm256_insertf128_si256(iExponent, iExponentHigh, 0x1);

    __m256 result = _mm256_or_ps(_mm256_and_ps(value.simdInternal_, mantissaMask), half);

    if (opt == MathOptimization::Safe)
    {
        __m256 valueIsZero = _mm256_cmp_ps(_mm256_setzero_ps(), value.simdInternal_, _CMP_EQ_OQ);
        // Set the 32-bit fields of "iExponent" to 0-bits if the corresponding input value was +-0.0
        // ... but we need to do the actual andnot operation as float, since AVX1 does not support integer ops.
        iExponent = _mm256_castps_si256(_mm256_andnot_ps(valueIsZero, _mm256_castsi256_ps(iExponent)));

        // Set result to +-0 if the corresponding input value was +-0
        result = _mm256_blendv_ps(result, value.simdInternal_, valueIsZero);
    }

    exponent->simdInternal_ = iExponent;

    return { result };
}
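/* Scalar sketch of what the vector code above computes (assuming IEEE-754 binary32,
 * with fromBits() as shorthand for reinterpreting an integer bit pattern as a float):
 *
 *     int   e = ((bits & 0x7F800000) >> 23) - 126;           // exponent field minus (bias - 1)
 *     float m = fromBits((bits & 0x807FFFFF) | 0x3F000000);  // keep sign+mantissa, exponent of 0.5
 *
 * so that value == m * 2^e with 0.5 <= |m| < 1, matching std::frexp. Subtracting 126
 * rather than the IEEE bias 127 reflects the mantissa being normalized into [0.5, 1)
 * instead of [1, 2). For value == +-0.0 this generic formula would yield m = +-0.5 and
 * e = -126, so the MathOptimization::Safe branch explicitly forces m = +-0.0 and e = 0,
 * the behaviour required of std::frexp.
 */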
template<MathOptimization opt = MathOptimization::Safe>
static inline SimdFloat gmx_simdcall ldexp(SimdFloat value, SimdFInt32 exponent)
{
    const __m128i exponentBias = _mm_set1_epi32(127);
    __m256i       iExponent;
    __m128i       iExponentLow, iExponentHigh;

    iExponentHigh = _mm256_extractf128_si256(exponent.simdInternal_, 0x1);
    iExponentLow  = _mm256_castsi256_si128(exponent.simdInternal_);

    iExponentLow  = _mm_add_epi32(iExponentLow, exponentBias);
    iExponentHigh = _mm_add_epi32(iExponentHigh, exponentBias);

    if (opt == MathOptimization::Safe)
    {
        // Make sure biased argument is not negative
        iExponentLow  = _mm_max_epi32(iExponentLow, _mm_setzero_si128());
        iExponentHigh = _mm_max_epi32(iExponentHigh, _mm_setzero_si128());
    }

    iExponentLow  = _mm_slli_epi32(iExponentLow, 23);
    iExponentHigh = _mm_slli_epi32(iExponentHigh, 23);
    iExponent     = _mm256_castsi128_si256(iExponentLow);
    iExponent     = _mm256_insertf128_si256(iExponent, iExponentHigh, 0x1);
    return { _mm256_mul_ps(value.simdInternal_, _mm256_castsi256_ps(iExponent)) };
}
#endif
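/* Scalar sketch of the ldexp() above: 2^exponent is built directly as an IEEE-754 bit
 * pattern by adding the bias 127 and shifting the result into the exponent field,
 * roughly (fromBits() again meaning bit reinterpretation):
 *
 *     float scale = fromBits((exponent + 127) << 23);
 *     return value * scale;
 *
 * With MathOptimization::Safe the biased exponent is clamped at zero first, so very
 * negative exponents produce a scale of 0.0 (and thus a zero result) rather than a
 * garbage bit pattern; exponents large enough to overflow the 8-bit field are not
 * treated specially here.
 */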
static inline float gmx_simdcall reduce(SimdFloat a)
{
    __m128 t0;
    t0 = _mm_add_ps(_mm256_castps256_ps128(a.simdInternal_), _mm256_extractf128_ps(a.simdInternal_, 0x1));
    t0 = _mm_add_ps(t0, _mm_permute_ps(t0, _MM_SHUFFLE(1, 0, 3, 2)));
    t0 = _mm_add_ss(t0, _mm_permute_ps(t0, _MM_SHUFFLE(0, 3, 2, 1)));
    return *reinterpret_cast<float*>(&t0);
}
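/* The reduction above folds the register in halves: the upper and lower 128-bit lanes
 * are added first, then the two 64-bit halves of that result, and finally the two
 * remaining floats, so the sum of all eight elements ends up in the lowest element,
 * which is returned.
 */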
static inline SimdFBool gmx_simdcall operator==(SimdFloat a, SimdFloat b)
{
    return { _mm256_cmp_ps(a.simdInternal_, b.simdInternal_, _CMP_EQ_OQ) };
}

static inline SimdFBool gmx_simdcall operator!=(SimdFloat a, SimdFloat b)
{
    return { _mm256_cmp_ps(a.simdInternal_, b.simdInternal_, _CMP_NEQ_OQ) };
}

static inline SimdFBool gmx_simdcall operator<(SimdFloat a, SimdFloat b)
{
    return { _mm256_cmp_ps(a.simdInternal_, b.simdInternal_, _CMP_LT_OQ) };
}

static inline SimdFBool gmx_simdcall operator<=(SimdFloat a, SimdFloat b)
{
    return { _mm256_cmp_ps(a.simdInternal_, b.simdInternal_, _CMP_LE_OQ) };
}
// Override for AVX2 and higher
#if GMX_SIMD_X86_AVX_256
static inline SimdFBool gmx_simdcall testBits(SimdFloat a)
{
    __m256 tst = _mm256_cvtepi32_ps(_mm256_castps_si256(a.simdInternal_));

    return { _mm256_cmp_ps(tst, _mm256_setzero_ps(), _CMP_NEQ_OQ) };
}
#endif
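/* Note: AVX1 has no 256-bit integer compare, so testBits() above reinterprets each
 * element as a 32-bit integer and converts it to float; every nonzero integer converts
 * to a nonzero float, so comparing the conversion result against 0.0 detects whether
 * any bit was set in that element. As the guard comment says, the AVX2 implementation
 * overrides this with its own version.
 */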
static inline SimdFBool gmx_simdcall operator&&(SimdFBool a, SimdFBool b)
{
    return { _mm256_and_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFBool gmx_simdcall operator||(SimdFBool a, SimdFBool b)
{
    return { _mm256_or_ps(a.simdInternal_, b.simdInternal_) };
}

static inline bool gmx_simdcall anyTrue(SimdFBool a)
{
    return _mm256_movemask_ps(a.simdInternal_) != 0;
}

static inline SimdFloat gmx_simdcall selectByMask(SimdFloat a, SimdFBool mask)
{
    return { _mm256_and_ps(a.simdInternal_, mask.simdInternal_) };
}

static inline SimdFloat gmx_simdcall selectByNotMask(SimdFloat a, SimdFBool mask)
{
    return { _mm256_andnot_ps(mask.simdInternal_, a.simdInternal_) };
}

static inline SimdFloat gmx_simdcall blend(SimdFloat a, SimdFloat b, SimdFBool sel)
{
    return { _mm256_blendv_ps(a.simdInternal_, b.simdInternal_, sel.simdInternal_) };
}

static inline SimdFInt32 gmx_simdcall cvtR2I(SimdFloat a)
{
    return { _mm256_cvtps_epi32(a.simdInternal_) };
}

static inline SimdFInt32 gmx_simdcall cvttR2I(SimdFloat a)
{
    return { _mm256_cvttps_epi32(a.simdInternal_) };
}

static inline SimdFloat gmx_simdcall cvtI2R(SimdFInt32 a)
{
    return { _mm256_cvtepi32_ps(a.simdInternal_) };
}
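/* For reference: cvtR2I() rounds according to the current rounding mode (round to
 * nearest even by default), cvttR2I() truncates toward zero, and cvtI2R() converts
 * back exactly for integers of magnitude up to 2^24.
 */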
} // namespace gmx

#endif // GMX_SIMD_IMPL_X86_AVX_256_SIMD_FLOAT_H