Make sure frexp() returns correct results for argument 0.0
[gromacs.git] / src / gromacs / simd / impl_x86_sse2 / impl_x86_sse2_simd_float.h
blob 7bc392c5e8eb9e7a577d44eb012a13e996051621
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015,2016,2017,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef GMX_SIMD_IMPL_X86_SSE2_SIMD_FLOAT_H
#define GMX_SIMD_IMPL_X86_SSE2_SIMD_FLOAT_H

#include "config.h"

#include <cassert>
#include <cstddef>
#include <cstdint>

#include <emmintrin.h>

#include "gromacs/math/utilities.h"

namespace gmx
{
class SimdFloat
{
public:
    SimdFloat() {}

    SimdFloat(float f) : simdInternal_(_mm_set1_ps(f)) {}

    // Internal utility constructor to simplify return statements
    SimdFloat(__m128 simd) : simdInternal_(simd) {}

    __m128 simdInternal_;
};

class SimdFInt32
{
public:
    SimdFInt32() {}

    SimdFInt32(std::int32_t i) : simdInternal_(_mm_set1_epi32(i)) {}

    // Internal utility constructor to simplify return statements
    SimdFInt32(__m128i simd) : simdInternal_(simd) {}

    __m128i simdInternal_;
};

class SimdFBool
{
public:
    SimdFBool() {}

    SimdFBool(bool b) : simdInternal_(_mm_castsi128_ps(_mm_set1_epi32(b ? 0xFFFFFFFF : 0))) {}

    // Internal utility constructor to simplify return statements
    SimdFBool(__m128 simd) : simdInternal_(simd) {}

    __m128 simdInternal_;
};

class SimdFIBool
{
public:
    SimdFIBool() {}

    SimdFIBool(bool b) : simdInternal_(_mm_set1_epi32(b ? 0xFFFFFFFF : 0)) {}

    // Internal utility constructor to simplify return statements
    SimdFIBool(__m128i simd) : simdInternal_(simd) {}

    __m128i simdInternal_;
};
static inline SimdFloat gmx_simdcall simdLoad(const float* m, SimdFloatTag = {})
{
    assert(std::size_t(m) % 16 == 0);
    return { _mm_load_ps(m) };
}

static inline void gmx_simdcall store(float* m, SimdFloat a)
{
    assert(std::size_t(m) % 16 == 0);
    _mm_store_ps(m, a.simdInternal_);
}

static inline SimdFloat gmx_simdcall simdLoadU(const float* m, SimdFloatTag = {})
{
    return { _mm_loadu_ps(m) };
}

static inline void gmx_simdcall storeU(float* m, SimdFloat a)
{
    _mm_storeu_ps(m, a.simdInternal_);
}

static inline SimdFloat gmx_simdcall setZeroF()
{
    return { _mm_setzero_ps() };
}

static inline SimdFInt32 gmx_simdcall simdLoad(const std::int32_t* m, SimdFInt32Tag)
{
    assert(std::size_t(m) % 16 == 0);
    return { _mm_load_si128(reinterpret_cast<const __m128i*>(m)) };
}

static inline void gmx_simdcall store(std::int32_t* m, SimdFInt32 a)
{
    assert(std::size_t(m) % 16 == 0);
    _mm_store_si128(reinterpret_cast<__m128i*>(m), a.simdInternal_);
}

static inline SimdFInt32 gmx_simdcall simdLoadU(const std::int32_t* m, SimdFInt32Tag)
{
    return { _mm_loadu_si128(reinterpret_cast<const __m128i*>(m)) };
}

static inline void gmx_simdcall storeU(std::int32_t* m, SimdFInt32 a)
{
    _mm_storeu_si128(reinterpret_cast<__m128i*>(m), a.simdInternal_);
}

static inline SimdFInt32 gmx_simdcall setZeroFI()
{
    return { _mm_setzero_si128() };
}
// Override for SSE4.1 and higher
#if GMX_SIMD_X86_SSE2
template<int index>
static inline std::int32_t gmx_simdcall extract(SimdFInt32 a)
{
    return _mm_cvtsi128_si32(_mm_srli_si128(a.simdInternal_, 4 * index));
}
#endif
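
// Usage sketch (illustrative comment only, not part of the implementation):
// extract() shifts the requested 32-bit lane down to position 0 and reads it
// out, so the lane index must be a compile-time constant:
//
//     SimdFInt32   v(7);                    // all four lanes hold 7
//     std::int32_t lane2 = extract<2>(v);   // lane2 == 7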
static inline SimdFloat gmx_simdcall operator&(SimdFloat a, SimdFloat b)
{
    return { _mm_and_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall andNot(SimdFloat a, SimdFloat b)
{
    return { _mm_andnot_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall operator|(SimdFloat a, SimdFloat b)
{
    return { _mm_or_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall operator^(SimdFloat a, SimdFloat b)
{
    return { _mm_xor_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall operator+(SimdFloat a, SimdFloat b)
{
    return { _mm_add_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall operator-(SimdFloat a, SimdFloat b)
{
    return { _mm_sub_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall operator-(SimdFloat x)
{
    return { _mm_xor_ps(x.simdInternal_, _mm_set1_ps(GMX_FLOAT_NEGZERO)) };
}

static inline SimdFloat gmx_simdcall operator*(SimdFloat a, SimdFloat b)
{
    return { _mm_mul_ps(a.simdInternal_, b.simdInternal_) };
}
// Override for AVX-128-FMA and higher
#if GMX_SIMD_X86_SSE2 || GMX_SIMD_X86_SSE4_1
static inline SimdFloat gmx_simdcall fma(SimdFloat a, SimdFloat b, SimdFloat c)
{
    return { _mm_add_ps(_mm_mul_ps(a.simdInternal_, b.simdInternal_), c.simdInternal_) };
}

static inline SimdFloat gmx_simdcall fms(SimdFloat a, SimdFloat b, SimdFloat c)
{
    return { _mm_sub_ps(_mm_mul_ps(a.simdInternal_, b.simdInternal_), c.simdInternal_) };
}

static inline SimdFloat gmx_simdcall fnma(SimdFloat a, SimdFloat b, SimdFloat c)
{
    return { _mm_sub_ps(c.simdInternal_, _mm_mul_ps(a.simdInternal_, b.simdInternal_)) };
}

static inline SimdFloat gmx_simdcall fnms(SimdFloat a, SimdFloat b, SimdFloat c)
{
    return { _mm_sub_ps(_mm_setzero_ps(),
                        _mm_add_ps(_mm_mul_ps(a.simdInternal_, b.simdInternal_), c.simdInternal_)) };
}
#endif
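
// Note (added commentary): SSE2 has no fused multiply-add instruction, so
// the four variants above are emulated with a separate multiply and
// add/subtract, with the usual intermediate rounding. Per element they
// compute:
//
//     fma(a, b, c)  =  a * b + c
//     fms(a, b, c)  =  a * b - c
//     fnma(a, b, c) = -a * b + c
//     fnms(a, b, c) = -a * b - c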
static inline SimdFloat gmx_simdcall rsqrt(SimdFloat x)
{
    return { _mm_rsqrt_ps(x.simdInternal_) };
}

static inline SimdFloat gmx_simdcall rcp(SimdFloat x)
{
    return { _mm_rcp_ps(x.simdInternal_) };
}

static inline SimdFloat gmx_simdcall maskAdd(SimdFloat a, SimdFloat b, SimdFBool m)
{
    return { _mm_add_ps(a.simdInternal_, _mm_and_ps(b.simdInternal_, m.simdInternal_)) };
}

static inline SimdFloat gmx_simdcall maskzMul(SimdFloat a, SimdFloat b, SimdFBool m)
{
    return { _mm_and_ps(_mm_mul_ps(a.simdInternal_, b.simdInternal_), m.simdInternal_) };
}

static inline SimdFloat gmx_simdcall maskzFma(SimdFloat a, SimdFloat b, SimdFloat c, SimdFBool m)
{
    return { _mm_and_ps(_mm_add_ps(_mm_mul_ps(a.simdInternal_, b.simdInternal_), c.simdInternal_),
                        m.simdInternal_) };
}
// Override for SSE4.1 and higher
#if GMX_SIMD_X86_SSE2
static inline SimdFloat gmx_simdcall maskzRsqrt(SimdFloat x, SimdFBool m)
{
#    ifndef NDEBUG
    x.simdInternal_ = _mm_or_ps(_mm_andnot_ps(m.simdInternal_, _mm_set1_ps(1.0F)),
                                _mm_and_ps(m.simdInternal_, x.simdInternal_));
#    endif
    return { _mm_and_ps(_mm_rsqrt_ps(x.simdInternal_), m.simdInternal_) };
}

static inline SimdFloat gmx_simdcall maskzRcp(SimdFloat x, SimdFBool m)
{
#    ifndef NDEBUG
    x.simdInternal_ = _mm_or_ps(_mm_andnot_ps(m.simdInternal_, _mm_set1_ps(1.0F)),
                                _mm_and_ps(m.simdInternal_, x.simdInternal_));
#    endif
    return { _mm_and_ps(_mm_rcp_ps(x.simdInternal_), m.simdInternal_) };
}
#endif
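
// Note (added commentary): in debug builds the masked-out elements are
// replaced by 1.0 before the approximation instruction runs, so tools that
// flag invalid arguments (e.g. rsqrt of zero in an unused lane) stay quiet.
// The result in those lanes is zeroed by the final AND either way.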
static inline SimdFloat gmx_simdcall abs(SimdFloat x)
{
    return { _mm_andnot_ps(_mm_set1_ps(GMX_FLOAT_NEGZERO), x.simdInternal_) };
}

static inline SimdFloat gmx_simdcall max(SimdFloat a, SimdFloat b)
{
    return { _mm_max_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFloat gmx_simdcall min(SimdFloat a, SimdFloat b)
{
    return { _mm_min_ps(a.simdInternal_, b.simdInternal_) };
}
// Override for SSE4.1 and higher
#if GMX_SIMD_X86_SSE2
static inline SimdFloat gmx_simdcall round(SimdFloat x)
{
    return { _mm_cvtepi32_ps(_mm_cvtps_epi32(x.simdInternal_)) };
}

static inline SimdFloat gmx_simdcall trunc(SimdFloat x)
{
    return { _mm_cvtepi32_ps(_mm_cvttps_epi32(x.simdInternal_)) };
}
#endif
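
// Note (added commentary): without SSE4.1's _mm_round_ps these take a round
// trip through the integer domain: _mm_cvtps_epi32 rounds under the current
// MXCSR mode (nearest-even by default), _mm_cvttps_epi32 truncates toward
// zero, and the result is converted back. This is only exact while values
// fit in a 32-bit integer.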
template<MathOptimization opt = MathOptimization::Safe>
static inline SimdFloat gmx_simdcall frexp(SimdFloat value, SimdFInt32* exponent)
{
    const __m128  exponentMask = _mm_castsi128_ps(_mm_set1_epi32(0x7F800000));
    const __m128  mantissaMask = _mm_castsi128_ps(_mm_set1_epi32(0x807FFFFF));
    const __m128i exponentBias = _mm_set1_epi32(126); // add 1 to make our definition identical to frexp()
    const __m128  half         = _mm_set1_ps(0.5F);

    __m128i iExponent = _mm_castps_si128(_mm_and_ps(value.simdInternal_, exponentMask));
    iExponent         = _mm_sub_epi32(_mm_srli_epi32(iExponent, 23), exponentBias);

    // Combine mantissa and exponent for result
    __m128 result = _mm_or_ps(_mm_and_ps(value.simdInternal_, mantissaMask), half);

    if (opt == MathOptimization::Safe)
    {
        __m128 valueIsZero = _mm_cmpeq_ps(_mm_setzero_ps(), value.simdInternal_);
        // If value was non-zero, use the exponent we calculated, otherwise set return-value exponent to zero.
        iExponent = _mm_andnot_si128(_mm_castps_si128(valueIsZero), iExponent);
        // Set the fraction result to zero for all elements where the input value was zero.
        result = _mm_or_ps(_mm_andnot_ps(valueIsZero, result),
                           _mm_and_ps(valueIsZero, value.simdInternal_));
    }

    exponent->simdInternal_ = iExponent;
    return { result };
}
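
// Usage sketch (illustrative comment only): mirroring std::frexp(), each
// non-zero element x is split as x = fraction * 2^exponent with |fraction|
// in [0.5, 1). With the Safe path above, a zero input now yields
// fraction == 0 and exponent == 0 instead of the garbage exponent a plain
// bit-field extraction would produce - the fix this change is about:
//
//     SimdFInt32 e;
//     SimdFloat  f = frexp(SimdFloat(8.0F), &e); // f = 0.5, e = 4
//     f = frexp(SimdFloat(0.0F), &e);            // f = 0.0, e = 0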
// Override for SSE4.1
#if GMX_SIMD_X86_SSE2
template<MathOptimization opt = MathOptimization::Safe>
static inline SimdFloat gmx_simdcall ldexp(SimdFloat value, SimdFInt32 exponent)
{
    const __m128i exponentBias = _mm_set1_epi32(127);
    __m128i       iExponent;

    iExponent = _mm_add_epi32(exponent.simdInternal_, exponentBias);

    if (opt == MathOptimization::Safe)
    {
        // Make sure biased argument is not negative
        iExponent = _mm_and_si128(iExponent, _mm_cmpgt_epi32(iExponent, _mm_setzero_si128()));
    }

    iExponent = _mm_slli_epi32(iExponent, 23);

    return { _mm_mul_ps(value.simdInternal_, _mm_castsi128_ps(iExponent)) };
}
#endif
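
// Note (added commentary): ldexp() builds 2^exponent directly in the float
// exponent bit-field (bits 23-30) and multiplies by it:
//
//     SimdFloat y = ldexp(SimdFloat(0.75F), SimdFInt32(2)); // y = 0.75 * 2^2 = 3.0
//
// The Safe variant clamps the biased exponent at zero, so a very negative
// requested exponent flushes the result to zero rather than constructing an
// invalid scale factor.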
// Override for AVX-128-FMA and higher
#if GMX_SIMD_X86_SSE2 || GMX_SIMD_X86_SSE4_1
static inline float gmx_simdcall reduce(SimdFloat a)
{
    // Shuffle has latency 1/throughput 1, followed by add with latency 3, t-put 1.
    // This is likely faster than using _mm_hadd_ps, which has latency 5, t-put 2.
    a.simdInternal_ = _mm_add_ps(
            a.simdInternal_, _mm_shuffle_ps(a.simdInternal_, a.simdInternal_, _MM_SHUFFLE(1, 0, 3, 2)));
    a.simdInternal_ = _mm_add_ss(
            a.simdInternal_, _mm_shuffle_ps(a.simdInternal_, a.simdInternal_, _MM_SHUFFLE(0, 3, 2, 1)));
    return *reinterpret_cast<float*>(&a);
}
#endif
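
// Note (added commentary): the two shuffle/add steps form a tree reduction.
// The first add folds the upper half onto the lower half, the second adds
// the remaining pair into lane 0, which is then read out as a scalar:
//
//     float s = reduce(SimdFloat(1.0F)); // s == 4.0F, the sum over all lanes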
static inline SimdFBool gmx_simdcall operator==(SimdFloat a, SimdFloat b)
{
    return { _mm_cmpeq_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFBool gmx_simdcall operator!=(SimdFloat a, SimdFloat b)
{
    return { _mm_cmpneq_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFBool gmx_simdcall operator<(SimdFloat a, SimdFloat b)
{
    return { _mm_cmplt_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFBool gmx_simdcall operator<=(SimdFloat a, SimdFloat b)
{
    return { _mm_cmple_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFBool gmx_simdcall testBits(SimdFloat a)
{
    __m128i ia  = _mm_castps_si128(a.simdInternal_);
    __m128i res = _mm_andnot_si128(_mm_cmpeq_epi32(ia, _mm_setzero_si128()), _mm_cmpeq_epi32(ia, ia));

    return { _mm_castsi128_ps(res) };
}

static inline SimdFBool gmx_simdcall operator&&(SimdFBool a, SimdFBool b)
{
    return { _mm_and_ps(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFBool gmx_simdcall operator||(SimdFBool a, SimdFBool b)
{
    return { _mm_or_ps(a.simdInternal_, b.simdInternal_) };
}

static inline bool gmx_simdcall anyTrue(SimdFBool a)
{
    return _mm_movemask_ps(a.simdInternal_) != 0;
}

static inline SimdFloat gmx_simdcall selectByMask(SimdFloat a, SimdFBool mask)
{
    return { _mm_and_ps(a.simdInternal_, mask.simdInternal_) };
}

static inline SimdFloat gmx_simdcall selectByNotMask(SimdFloat a, SimdFBool mask)
{
    return { _mm_andnot_ps(mask.simdInternal_, a.simdInternal_) };
}
// Override for SSE4.1 and higher
#if GMX_SIMD_X86_SSE2
static inline SimdFloat gmx_simdcall blend(SimdFloat a, SimdFloat b, SimdFBool sel)
{
    return { _mm_or_ps(_mm_andnot_ps(sel.simdInternal_, a.simdInternal_),
                       _mm_and_ps(sel.simdInternal_, b.simdInternal_)) };
}
#endif
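
// Usage sketch (illustrative comment only): blend() is a per-lane select,
// taking b where sel is true and a elsewhere, e.g. a branch-free clamp of
// negative lanes to zero:
//
//     SimdFloat clamped = blend(x, setZeroF(), x < setZeroF());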
static inline SimdFInt32 gmx_simdcall operator&(SimdFInt32 a, SimdFInt32 b)
{
    return { _mm_and_si128(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFInt32 gmx_simdcall andNot(SimdFInt32 a, SimdFInt32 b)
{
    return { _mm_andnot_si128(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFInt32 gmx_simdcall operator|(SimdFInt32 a, SimdFInt32 b)
{
    return { _mm_or_si128(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFInt32 gmx_simdcall operator^(SimdFInt32 a, SimdFInt32 b)
{
    return { _mm_xor_si128(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFInt32 gmx_simdcall operator+(SimdFInt32 a, SimdFInt32 b)
{
    return { _mm_add_epi32(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFInt32 gmx_simdcall operator-(SimdFInt32 a, SimdFInt32 b)
{
    return { _mm_sub_epi32(a.simdInternal_, b.simdInternal_) };
}
// Override for SSE4.1 and higher
#if GMX_SIMD_X86_SSE2
static inline SimdFInt32 gmx_simdcall operator*(SimdFInt32 a, SimdFInt32 b)
{
    __m128i a1 = _mm_srli_si128(a.simdInternal_, 4); // - a[3] a[2] a[1]
    __m128i b1 = _mm_srli_si128(b.simdInternal_, 4); // - b[3] b[2] b[1]
    __m128i c  = _mm_mul_epu32(a.simdInternal_, b.simdInternal_);
    __m128i c1 = _mm_mul_epu32(a1, b1);

    c  = _mm_shuffle_epi32(c, _MM_SHUFFLE(3, 1, 2, 0));  // - - a[2]*b[2] a[0]*b[0]
    c1 = _mm_shuffle_epi32(c1, _MM_SHUFFLE(3, 1, 2, 0)); // - - a[3]*b[3] a[1]*b[1]

    return { _mm_unpacklo_epi32(c, c1) };
}
#endif
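
// Note (added commentary): SSE2's only 32-bit multiply, _mm_mul_epu32,
// multiplies lanes 0 and 2 into 64-bit products. Shifting the inputs right
// by one lane exposes the odd lanes to a second multiply, and the shuffles
// plus unpack collect the four low 32-bit halves, which are the correct
// result of a 32x32 -> 32 bit multiply regardless of signedness.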
static inline SimdFIBool gmx_simdcall operator==(SimdFInt32 a, SimdFInt32 b)
{
    return { _mm_cmpeq_epi32(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFIBool gmx_simdcall testBits(SimdFInt32 a)
{
    __m128i x   = a.simdInternal_;
    __m128i res = _mm_andnot_si128(_mm_cmpeq_epi32(x, _mm_setzero_si128()), _mm_cmpeq_epi32(x, x));

    return { res };
}

static inline SimdFIBool gmx_simdcall operator<(SimdFInt32 a, SimdFInt32 b)
{
    return { _mm_cmplt_epi32(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFIBool gmx_simdcall operator&&(SimdFIBool a, SimdFIBool b)
{
    return { _mm_and_si128(a.simdInternal_, b.simdInternal_) };
}

static inline SimdFIBool gmx_simdcall operator||(SimdFIBool a, SimdFIBool b)
{
    return { _mm_or_si128(a.simdInternal_, b.simdInternal_) };
}

static inline bool gmx_simdcall anyTrue(SimdFIBool a)
{
    return _mm_movemask_epi8(a.simdInternal_) != 0;
}

static inline SimdFInt32 gmx_simdcall selectByMask(SimdFInt32 a, SimdFIBool mask)
{
    return { _mm_and_si128(a.simdInternal_, mask.simdInternal_) };
}

static inline SimdFInt32 gmx_simdcall selectByNotMask(SimdFInt32 a, SimdFIBool mask)
{
    return { _mm_andnot_si128(mask.simdInternal_, a.simdInternal_) };
}

// Override for SSE4.1 and higher
#if GMX_SIMD_X86_SSE2
static inline SimdFInt32 gmx_simdcall blend(SimdFInt32 a, SimdFInt32 b, SimdFIBool sel)
{
    return { _mm_or_si128(_mm_andnot_si128(sel.simdInternal_, a.simdInternal_),
                          _mm_and_si128(sel.simdInternal_, b.simdInternal_)) };
}
#endif
static inline SimdFInt32 gmx_simdcall cvtR2I(SimdFloat a)
{
    return { _mm_cvtps_epi32(a.simdInternal_) };
}

static inline SimdFInt32 gmx_simdcall cvttR2I(SimdFloat a)
{
    return { _mm_cvttps_epi32(a.simdInternal_) };
}

static inline SimdFloat gmx_simdcall cvtI2R(SimdFInt32 a)
{
    return { _mm_cvtepi32_ps(a.simdInternal_) };
}

static inline SimdFIBool gmx_simdcall cvtB2IB(SimdFBool a)
{
    return { _mm_castps_si128(a.simdInternal_) };
}

static inline SimdFBool gmx_simdcall cvtIB2B(SimdFIBool a)
{
    return { _mm_castsi128_ps(a.simdInternal_) };
}
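
// Usage sketch (illustrative comment only): cvtR2I rounds under the default
// nearest-even mode while cvttR2I truncates toward zero; the boolean
// conversions are pure bit casts, since float and integer masks share the
// same all-ones/all-zeros lane representation:
//
//     SimdFInt32 i = cvtR2I(SimdFloat(2.5F));  // 2 (ties round to even)
//     SimdFInt32 j = cvtR2I(SimdFloat(3.5F));  // 4 (ties round to even)
//     SimdFInt32 t = cvttR2I(SimdFloat(2.9F)); // 2 (truncation)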
} // namespace gmx

#endif // GMX_SIMD_IMPL_X86_SSE2_SIMD_FLOAT_H