/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#ifndef GMX_SIMD_IMPLEMENTATION_IBM_VMX_SIMD_FLOAT_H
#define GMX_SIMD_IMPLEMENTATION_IBM_VMX_SIMD_FLOAT_H

#include <math.h>

#include <altivec.h>

#include "impl_ibm_vmx_common.h"

/* Make sure we do not screw up C++ - undefine vector/bool, and rely on
 * __vector and __bool. */
#undef vector
#undef bool

/****************************************************
 *      SINGLE PRECISION SIMD IMPLEMENTATION        *
 ****************************************************/
#define gmx_simd_float_t           __vector float
#define gmx_simd_load_f(m)         vec_ld(0, (const __vector float *)(m))
#define gmx_simd_load1_f(m)        gmx_simd_load1_f_ibm_vmx(m)
#define gmx_simd_set1_f(x)         gmx_simd_set1_f_ibm_vmx(x)
#define gmx_simd_store_f(m, x)     vec_st(x, 0, (__vector float *)(m))
#undef  gmx_simd_loadu_f
#undef  gmx_simd_storeu_f
#define gmx_simd_setzero_f()       ((__vector float)vec_splat_u32(0))
#define gmx_simd_add_f(a, b)       vec_add(a, b)
#define gmx_simd_sub_f(a, b)       vec_sub(a, b)
#define gmx_simd_mul_f(a, b)       vec_mul(a, b)
#define gmx_simd_fmadd_f(a, b, c)  vec_madd(a, b, c)
#define gmx_simd_fmsub_f(a, b, c)  vec_sub(vec_mul(a, b), c)
/* IBM uses an alternative FMA definition, so -a*b+c=-(a*b-c) is "nmsub" */
#define gmx_simd_fnmadd_f(a, b, c) vec_nmsub(a, b, c)
/* IBM uses an alternative FMA definition, so -a*b-c=-(a*b+c) would be "nmadd",
 * but VMX has no such float instruction, so we negate vec_madd explicitly. */
#define gmx_simd_fnmsub_f(a, b, c) vec_sub(gmx_simd_setzero_f(), vec_madd(a, b, c))
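/* A quick reference, derived from the definitions above, for what the four
 * fused variants compute element-wise under IBM's sign convention:
 *
 *   gmx_simd_fmadd_f(a, b, c)  =  a*b + c   (vec_madd)
 *   gmx_simd_fmsub_f(a, b, c)  =  a*b - c   (multiply, then subtract)
 *   gmx_simd_fnmadd_f(a, b, c) = -a*b + c   (vec_nmsub, since -(a*b - c) = c - a*b)
 *   gmx_simd_fnmsub_f(a, b, c) = -a*b - c   (0 - (a*b + c))
 */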
#define gmx_simd_and_f(a, b)       vec_and(a, b)
#define gmx_simd_andnot_f(a, b)    vec_andc(b, a)
#define gmx_simd_or_f(a, b)        vec_or(a, b)
#define gmx_simd_xor_f(a, b)       vec_xor(a, b)
#define gmx_simd_rsqrt_f(a)        vec_rsqrte(a)
#define gmx_simd_rcp_f(a)          vec_re(a)
#define gmx_simd_fabs_f(a)         vec_abs(a)
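/* The sign-bit trick used below: vec_splat_u32(-1) produces all-ones elements,
 * and since VMX shift counts are taken modulo 32, shifting left by -1 shifts
 * by 31, leaving exactly 0x80000000 in each element. XOR-ing with this mask
 * flips the sign of every element.
 */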
#define gmx_simd_fneg_f(a)         vec_xor(a, (__vector float)vec_sl(vec_splat_u32(-1), vec_splat_u32(-1)))
#define gmx_simd_max_f(a, b)       vec_max(a, b)
#define gmx_simd_min_f(a, b)       vec_min(a, b)
#define gmx_simd_round_f(a)        vec_round(a)
#define gmx_simd_trunc_f(a)        vec_trunc(a)
#define gmx_simd_fraction_f(x)     vec_sub(x, vec_trunc(x))
#define gmx_simd_get_exponent_f(a) gmx_simd_get_exponent_f_ibm_vmx(a)
#define gmx_simd_get_mantissa_f(a) gmx_simd_get_mantissa_f_ibm_vmx(a)
#define gmx_simd_set_exponent_f(a) gmx_simd_set_exponent_f_ibm_vmx(a)
/* Integer datatype corresponding to float: gmx_simd_fint32_t */
#define gmx_simd_fint32_t          __vector int
#define gmx_simd_load_fi(m)        vec_ld(0, (const __vector int *)(m))
#define gmx_simd_set1_fi(i)        gmx_simd_set1_fi_ibm_vmx((int)(i))
#define gmx_simd_store_fi(m, x)    vec_st(x, 0, (__vector int *)(m))
#undef  gmx_simd_loadu_fi
#undef  gmx_simd_storeu_fi
#define gmx_simd_setzero_fi()      vec_splat_s32(0)
#define gmx_simd_cvt_f2i(a)        vec_cts(vec_round(a), 0)
#define gmx_simd_cvtt_f2i(a)       vec_cts(a, 0)
#define gmx_simd_cvt_i2f(a)        vec_ctf(a, 0)
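/* Note on the conversions above: the second argument of vec_cts/vec_ctf is a
 * power-of-two scale factor applied during the conversion, so the literal 0
 * (scale by 2^0 = 1) makes these plain float<->integer conversions.
 */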
#undef  gmx_simd_extract_fi
/* Integer logical ops on gmx_simd_fint32_t */
/* The shift constant magic requires an explanation:
 * VMX only allows literals up to 15 to be created directly with vec_splat_u32,
 * and we need to be able to shift by up to 31 bits. The code on the right-hand
 * side splits the constant into three parts with values in the range 0..15.
 * Since the argument has to be a constant (both our and VMX's requirement), these
 * constants will be evaluated at compile time, and if one or two parts evaluate
 * to zero they will be removed with -O2 or higher optimization (checked).
 */
#define gmx_simd_slli_fi(a, i)     vec_sl(a, vec_add(vec_add(vec_splat_u32( (((i&0xF)+(i/16))&0xF)+i/31 ), vec_splat_u32( (i/16)*15 )), vec_splat_u32( (i/31)*15 )))
#define gmx_simd_srli_fi(a, i)     vec_sr(a, vec_add(vec_add(vec_splat_u32( (((i&0xF)+(i/16))&0xF)+i/31 ), vec_splat_u32( (i/16)*15 )), vec_splat_u32( (i/31)*15 )))
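/* A worked example of the three-way split, for i = 23:
 *   part 1: (((23 & 0xF) + 23/16) & 0xF) + 23/31 = ((7 + 1) & 0xF) + 0 = 8
 *   part 2: (23/16)*15                           = 15
 *   part 3: (23/31)*15                           = 0
 * and 8 + 15 + 0 = 23. The edge case i = 31 also works: part 1 gives
 * ((15 + 1) & 0xF) + 1 = 1, parts 2 and 3 give 15 each, and 1 + 15 + 15 = 31.
 */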
#define gmx_simd_and_fi(a, b)      vec_and(a, b)
#define gmx_simd_andnot_fi(a, b)   vec_andc(b, a)
#define gmx_simd_or_fi(a, b)       vec_or(a, b)
#define gmx_simd_xor_fi(a, b)      vec_xor(a, b)
/* Integer arithmetic ops on gmx_simd_fint32_t */
#define gmx_simd_add_fi(a, b)      vec_add(a, b)
#define gmx_simd_sub_fi(a, b)      vec_sub(a, b)
#define gmx_simd_mul_fi(a, b)      vec_mule((__vector short)a, (__vector short)b)
/* Boolean & comparison operations on gmx_simd_float_t */
#define gmx_simd_fbool_t           __vector __bool int
#define gmx_simd_cmpeq_f(a, b)     vec_cmpeq(a, b)
#define gmx_simd_cmplt_f(a, b)     vec_cmplt(a, b)
#define gmx_simd_cmple_f(a, b)     vec_cmple(a, b)
#define gmx_simd_and_fb(a, b)      vec_and(a, b)
#define gmx_simd_or_fb(a, b)       vec_or(a, b)
#define gmx_simd_anytrue_fb(a)     vec_any_ne(a, (__vector __bool int)vec_splat_u32(0))
#define gmx_simd_blendzero_f(a, sel)    vec_and(a, (__vector float)sel)
#define gmx_simd_blendnotzero_f(a, sel) vec_andc(a, (__vector float)sel)
#define gmx_simd_blendv_f(a, b, sel)    vec_sel(a, b, sel)
#define gmx_simd_reduce_f(a)       gmx_simd_reduce_f_ibm_vmx(a)
/* Boolean & comparison operations on gmx_simd_fint32_t */
#define gmx_simd_fibool_t          __vector __bool int
#define gmx_simd_cmpeq_fi(a, b)    vec_cmpeq(a, b)
#define gmx_simd_cmplt_fi(a, b)    vec_cmplt(a, b)
#define gmx_simd_and_fib(a, b)     vec_and(a, b)
#define gmx_simd_or_fib(a, b)      vec_or(a, b)
#define gmx_simd_anytrue_fib(a)    vec_any_ne(a, (__vector __bool int)vec_splat_u32(0))
#define gmx_simd_blendzero_fi(a, sel)    vec_and(a, (__vector int)sel)
#define gmx_simd_blendnotzero_fi(a, sel) vec_andc(a, (__vector int)sel)
#define gmx_simd_blendv_fi(a, b, sel)    vec_sel(a, b, sel)
/* Conversions between different booleans */
#define gmx_simd_cvt_fb2fib(x)     (x)
#define gmx_simd_cvt_fib2fb(x)     (x)

/* Double precision is not available with VMX SIMD */

/****************************************************
 *      IMPLEMENTATION HELPER FUNCTIONS             *
 ****************************************************/
static gmx_inline gmx_simd_float_t
gmx_simd_set1_f_ibm_vmx(const float x)
{
    /* In the old days when PPC was all big endian we could
     * use the vec_lvsl() instruction to permute bytes based on
     * a load address. However, at least with gcc-4.8.2 the bytes
     * end up reversed on Power8 running little endian (Linux).
     * Since this is not a critical instruction we work around
     * it by first putting the data in an aligned position before
     * loading, so we can avoid vec_lvsl() entirely. We can
     * do this slightly faster on GCC with alignment attributes.
     */
    __vector float vx;
#ifdef __GNUC__
    float alignedx __attribute__((aligned(16)));
    alignedx = x;
    vx       = vec_lde(0, &alignedx);
#else
    /* The leading vector member forces 16-byte alignment of the float array */
    struct {
        __vector float vx; float x[4];
    } conv;

    conv.x[0] = x;
    vx        = vec_lde(0, conv.x);
#endif
    return vec_splat(vx, 0);
}

static gmx_inline gmx_simd_float_t
gmx_simd_load1_f_ibm_vmx(const float * m)
{
    return gmx_simd_set1_f_ibm_vmx(*m);
}

static gmx_inline gmx_simd_fint32_t
gmx_simd_set1_fi_ibm_vmx(const int x)
{
    /* See the comment in gmx_simd_set1_f_ibm_vmx() on why we
     * cannot use vec_lvsl().
     */
    __vector int vx;
#ifdef __GNUC__
    int alignedx __attribute__((aligned(16)));
    alignedx = x;
    vx       = vec_lde(0, &alignedx);
#else
    /* The leading vector member forces 16-byte alignment of the int array */
    struct {
        __vector int vx; int x[4];
    } conv;

    conv.x[0] = x;
    vx        = vec_lde(0, conv.x);
#endif
    return vec_splat(vx, 0);
}

static gmx_inline gmx_simd_float_t
gmx_simd_get_exponent_f_ibm_vmx(gmx_simd_float_t x)
{
    /* Generate the exponent mask 0x7F800000 without memory operations:
     * 15 + (15 << 4) = 255 = 0xFF, shifted left by 23 bits. Likewise,
     * i127 is the exponent bias, 127 = (1 << 7) - 1.
     */
    gmx_simd_float_t  expmask = (__vector float)gmx_simd_slli_fi(vec_add(vec_splat_s32(15), vec_sl(vec_splat_s32(15), vec_splat_u32(4))), 23);
    gmx_simd_fint32_t i127    = vec_sub(vec_sl(vec_splat_s32(1), vec_splat_u32(7)), vec_splat_s32(1));
    gmx_simd_fint32_t iexp;

    iexp = (__vector int)gmx_simd_and_f(x, expmask);
    iexp = vec_sub(gmx_simd_srli_fi(iexp, 23), i127);
    return vec_ctf(iexp, 0);
}
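
/* Example: for x = 8.0f (0x41000000) the masked exponent field is 0x82 = 130,
 * and subtracting the bias gives 130 - 127 = 3, so the result is 3.0f,
 * matching 8.0 = 1.0 * 2^3.
 */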

static gmx_inline gmx_simd_float_t
gmx_simd_get_mantissa_f_ibm_vmx(gmx_simd_float_t x)
{
    gmx_simd_float_t expmask = (__vector float)gmx_simd_slli_fi(vec_add(vec_splat_s32(15), vec_sl(vec_splat_s32(15), vec_splat_u32(4))), 23);

    /* Get the mantissa. By taking the absolute value (to get rid of the sign bit) we can
     * use the same mask as for gmx_simd_get_exponent_f() (but complement it). Since
     * these two routines are typically called together, this will save a few operations.
     */
    x = gmx_simd_andnot_f(expmask, vec_abs(x));
    /* Reset to a zero (but correctly biased) exponent by OR-ing in the bits of 1.0f */
    return gmx_simd_or_f(x, vec_ctf(vec_splat_s32(1), 0));
}
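
/* Example: for x = -8.0f the absolute value is 8.0f = 1.0 * 2^3; clearing the
 * exponent bits and OR-ing in those of 1.0f (0x3F800000) yields exactly 1.0f,
 * the mantissa scaled into [1, 2).
 */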

static gmx_inline gmx_simd_float_t
gmx_simd_set_exponent_f_ibm_vmx(gmx_simd_float_t x)
{
    gmx_simd_fint32_t iexp = gmx_simd_cvt_f2i(x);
    gmx_simd_fint32_t i127 = vec_sub(vec_sl(vec_splat_s32(1), vec_splat_u32(7)), vec_splat_s32(1));

    iexp = gmx_simd_slli_fi(vec_add(iexp, i127), 23);
    return (__vector float)iexp;
}
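
/* Example: for x = 3.0f the conversion gives iexp = 3; adding the bias gives
 * 130, and shifting left by 23 bits produces 0x41000000, i.e. 2^3 = 8.0f.
 */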

static gmx_inline float
gmx_simd_reduce_f_ibm_vmx(gmx_simd_float_t x)
{
    float res;

    x = vec_add(x, vec_sld(x, x, 8));
    x = vec_add(x, vec_sld(x, x, 4));
    vec_ste(x, 0, &res);
    return res;
}
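
/* Example: for x = {1, 2, 3, 4} the first rotate-and-add gives {4, 6, 4, 6},
 * the second gives {10, 10, 10, 10}, and vec_ste stores one (now identical)
 * element, so the returned sum is 10.
 */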

#endif /* GMX_SIMD_IMPLEMENTATION_IBM_VMX_SIMD_FLOAT_H */