1 /* GCC Quad-Precision Math Library
2 Copyright (C) 2010, 2011 Free Software Foundation, Inc.
3 Written by Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
5 This file is part of the libquadmath library.
6 Libquadmath is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Library General Public
8 License as published by the Free Software Foundation; either
9 version 2 of the License, or (at your option) any later version.
11 Libquadmath is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Library General Public License for more details.
16 You should have received a copy of the GNU Library General Public
17 License along with libquadmath; see the file COPYING.LIB. If
18 not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor,
19 Boston, MA 02110-1301, USA. */
21 #ifndef QUADMATH_IMP_H
22 #define QUADMATH_IMP_H
/* Under IEEE 754, an architecture may determine tininess of
   floating-point results either "before rounding" or "after
   rounding", but must do so in the same way for all operations
   returning binary results.  Define TININESS_AFTER_ROUNDING to 1 for
   "after rounding" architectures, 0 for "before rounding"
   architectures.  */

#define TININESS_AFTER_ROUNDING   1

/* In IEEE binary128 the signaling-NaN convention used here leaves the
   high-order mantissa bit clear for sNaNs (it is set for qNaNs).  */
#define HIGH_ORDER_BIT_IS_SET_FOR_SNAN 0

/* No workaround is needed on supported targets for overflowing
   __float128 -> long / long long conversions.  */
#define FIX_FLT128_LONG_CONVERT_OVERFLOW 0
#define FIX_FLT128_LLONG_CONVERT_OVERFLOW 0
50 /* Prototypes for internal functions. */
51 extern int32_t __quadmath_rem_pio2q (__float128
, __float128
*);
52 extern void __quadmath_kernel_sincosq (__float128
, __float128
, __float128
*,
54 extern __float128
__quadmath_kernel_sinq (__float128
, __float128
, int);
55 extern __float128
__quadmath_kernel_cosq (__float128
, __float128
);
56 extern __float128
__quadmath_kernel_tanq (__float128
, __float128
, int);
57 extern __float128
__quadmath_gamma_productq (__float128
, __float128
, int,
59 extern __float128
__quadmath_gammaq_r (__float128
, int *);
60 extern __float128
__quadmath_lgamma_negq (__float128
, int *);
61 extern __float128
__quadmath_lgamma_productq (__float128
, __float128
,
63 extern __float128
__quadmath_lgammaq_r (__float128
, int *);
64 extern __float128
__quadmath_x2y2m1q (__float128 x
, __float128 y
);
65 extern __complex128
__quadmath_kernel_casinhq (__complex128
, int);
/* Compute the exact product of X and Y: *HI receives the rounded
   product and *LO the rounding error, so that x*y == *hi + *lo.  */
static inline void
mul_splitq (__float128 *hi, __float128 *lo, __float128 x, __float128 y)
{
  /* Fast built-in fused multiply-add.  */
  *hi = x * y;
  *lo = fmaq (x, y, -*hi);
}
78 /* Frankly, if you have __float128, you have 64-bit integers, right? */
/* Main union type we use to manipulate the floating-point type.  */
typedef union
{
  __float128 value;

  struct
#ifdef __MINGW32__
  /* On mingw targets the ms-bitfields option is active by default.
     Therefore enforce gnu-bitfield style.  */
  __attribute__ ((gcc_struct))
#endif
  {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    unsigned negative:1;
    unsigned exponent:15;
    unsigned mantissa0:16;
    unsigned mantissa1:32;
    unsigned mantissa2:32;
    unsigned mantissa3:32;
#else
    unsigned mantissa3:32;
    unsigned mantissa2:32;
    unsigned mantissa1:32;
    unsigned mantissa0:16;
    unsigned exponent:15;
    unsigned negative:1;
#endif
  } ieee;

  /* The value split into two 64-bit halves.  */
  struct
  {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    uint64_t high;
    uint64_t low;
#else
    uint64_t low;
    uint64_t high;
#endif
  } words64;

  /* The value split into four 32-bit words.  */
  struct
  {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    uint32_t w0;
    uint32_t w1;
    uint32_t w2;
    uint32_t w3;
#else
    uint32_t w3;
    uint32_t w2;
    uint32_t w1;
    uint32_t w0;
#endif
  } words32;

  /* Variant exposing the quiet-NaN bit (top mantissa bit).  */
  struct
#ifdef __MINGW32__
  /* Make sure we are using gnu-style bitfield handling.  */
  __attribute__ ((gcc_struct))
#endif
  {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    unsigned negative:1;
    unsigned exponent:15;
    unsigned quiet_nan:1;
    unsigned mantissa0:15;
    unsigned mantissa1:32;
    unsigned mantissa2:32;
    unsigned mantissa3:32;
#else
    unsigned mantissa3:32;
    unsigned mantissa2:32;
    unsigned mantissa1:32;
    unsigned mantissa0:15;
    unsigned quiet_nan:1;
    unsigned exponent:15;
    unsigned negative:1;
#endif
  } nan;

} ieee854_float128;
/* Get two 64 bit ints from a long double.  */
#define GET_FLT128_WORDS64(ix0,ix1,d)  \
do {                                   \
  ieee854_float128 u;                  \
  u.value = (d);                       \
  (ix0) = u.words64.high;              \
  (ix1) = u.words64.low;               \
} while (0)
/* Set a long double from two 64 bit ints.  */
#define SET_FLT128_WORDS64(d,ix0,ix1)  \
do {                                   \
  ieee854_float128 u;                  \
  u.words64.high = (ix0);              \
  u.words64.low = (ix1);               \
  (d) = u.value;                       \
} while (0)
/* Get the more significant 64 bits of a long double mantissa.  */
#define GET_FLT128_MSW64(v,d)  \
do {                           \
  ieee854_float128 u;          \
  u.value = (d);               \
  (v) = u.words64.high;        \
} while (0)
/* Set the more significant 64 bits of a long double mantissa from an int.  */
#define SET_FLT128_MSW64(d,v)  \
do {                           \
  ieee854_float128 u;          \
  u.value = (d);               \
  u.words64.high = (v);        \
  (d) = u.value;               \
} while (0)
/* Get the least significant 64 bits of a long double mantissa.  */
#define GET_FLT128_LSW64(v,d)  \
do {                           \
  ieee854_float128 u;          \
  u.value = (d);               \
  (v) = u.words64.low;         \
} while (0)
/* Exponent bias of the IEEE binary128 format.  */
#define IEEE854_FLOAT128_BIAS 0x3fff

/* Classification constants for fpclassifyq; QUADFP_NAN was referenced
   below but its definition had gone missing.  */
#define QUADFP_NAN		0
#define QUADFP_INFINITE		1
#define QUADFP_ZERO		2
#define QUADFP_SUBNORMAL	3
#define QUADFP_NORMAL		4
#define fpclassifyq(x) \
  __builtin_fpclassify (QUADFP_NAN, QUADFP_INFINITE, QUADFP_NORMAL, \
			QUADFP_SUBNORMAL, QUADFP_ZERO, x)
#ifndef math_opt_barrier
/* Return X while preventing the compiler from optimizing across it.  */
# define math_opt_barrier(x) \
  ({ __typeof (x) __x = (x); __asm ("" : "+m" (__x)); __x; })
/* Force X to be evaluated (e.g. so a pending FP exception is raised).  */
# define math_force_eval(x) \
  ({ __typeof (x) __x = (x); __asm __volatile__ ("" : : "m" (__x)); })
#endif

/* math_narrow_eval reduces its floating-point argument to the range
   and precision of its semantic type.  (The original evaluation may
   still occur with excess range and precision, so the result may be
   affected by double rounding.)  */
#define math_narrow_eval(x) (x)
/* If X (which is not a NaN) is subnormal, force an underflow
   exception by evaluating X * X; the product is discarded but its
   evaluation is forced via math_force_eval.  */
#define math_check_force_underflow(x)				\
  do								\
    {								\
      __float128 force_underflow_tmp = (x);			\
      if (fabsq (force_underflow_tmp) < FLT128_MIN)		\
	{							\
	  __float128 force_underflow_tmp2			\
	    = force_underflow_tmp * force_underflow_tmp;	\
	  math_force_eval (force_underflow_tmp2);		\
	}							\
    }								\
  while (0)
/* Likewise, but X is also known to be nonnegative, so fabsq can be
   skipped.  */
#define math_check_force_underflow_nonneg(x)			\
  do								\
    {								\
      __float128 force_underflow_tmp = (x);			\
      if (force_underflow_tmp < FLT128_MIN)			\
	{							\
	  __float128 force_underflow_tmp2			\
	    = force_underflow_tmp * force_underflow_tmp;	\
	  math_force_eval (force_underflow_tmp2);		\
	}							\
    }								\
  while (0)
/* Likewise, for both real and imaginary parts of a complex
   number.  */
#define math_check_force_underflow_complex(x)				\
  do									\
    {									\
      __typeof (x) force_underflow_complex_tmp = (x);			\
      math_check_force_underflow (__real__ force_underflow_complex_tmp); \
      math_check_force_underflow (__imag__ force_underflow_complex_tmp); \
    }									\
  while (0)
/* Stub out the <fenv.h> interface when it is unavailable, and provide
   no-op fallbacks for the individual functions configure did not find.  */
#ifndef HAVE_FENV_H
# define feraiseexcept(arg) ((void) 0)
# define feholdexcept(arg) ((void) 0)
# define fesetround(arg) ((void) 0)
# define feupdateenv(arg) ((void) (arg))
# define fesetenv(arg) ((void) (arg))
# define fetestexcept(arg) 0
# define feclearexcept(arg) ((void) 0)
#else
# ifndef HAVE_FEHOLDEXCEPT
#  define feholdexcept(arg) ((void) 0)
# endif
# ifndef HAVE_FESETROUND
#  define fesetround(arg) ((void) 0)
# endif
# ifndef HAVE_FEUPDATEENV
#  define feupdateenv(arg) ((void) (arg))
# endif
# ifndef HAVE_FESETENV
#  define fesetenv(arg) ((void) (arg))
# endif
# ifndef HAVE_FETESTEXCEPT
#  define fetestexcept(arg) 0
# endif
#endif
/* Branch-prediction hints, matching the glibc spellings so shared
   source files compile unchanged.  */
#ifndef __glibc_likely
# define __glibc_likely(cond) __builtin_expect ((cond), 1)
#endif

#ifndef __glibc_unlikely
# define __glibc_unlikely(cond) __builtin_expect ((cond), 0)
#endif
#if defined HAVE_FENV_H && defined HAVE_FESETROUND && defined HAVE_FEUPDATEENV
/* Rounding-mode context: the saved FP environment plus a flag saying
   whether the rounding mode was actually changed (so restoring can be
   skipped when it was not).  */
struct rm_ctx
{
  fenv_t env;
  bool updated_status;
};

/* Set rounding mode RM for the rest of the enclosing scope, restoring
   the previous mode automatically on scope exit via the cleanup
   attribute.  */
# define SET_RESTORE_ROUNDF128(RM) \
  struct rm_ctx ctx __attribute__((cleanup (libc_feresetround_ctx))); \
  libc_feholdsetround_ctx (&ctx, (RM))

static inline __attribute__ ((always_inline)) void
libc_feholdsetround_ctx (struct rm_ctx *ctx, int round)
{
  ctx->updated_status = false;

  /* Update rounding mode only if different.  */
  if (__glibc_unlikely (round != fegetround ()))
    {
      ctx->updated_status = true;
      fegetenv (&ctx->env);
      fesetround (round);
    }
}

static inline __attribute__ ((always_inline)) void
libc_feresetround_ctx (struct rm_ctx *ctx)
{
  /* Restore the rounding mode if updated.  */
  if (__glibc_unlikely (ctx->updated_status))
    feupdateenv (&ctx->env);
}
#else
# define SET_RESTORE_ROUNDF128(RM) ((void) 0)
#endif