/* Constant folding for calls to built-in and internal functions.
   Copyright (C) 1988-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "realmpfr.h"
#include "tree.h"
#include "stor-layout.h"
#include "options.h"
#include "fold-const.h"
#include "fold-const-call.h"
#include "case-cfn-macros.h"
#include "tm.h" /* For C[LT]Z_DEFINED_AT_ZERO.  */
#include "builtins.h"
#include "gimple-expr.h"
#include "tree-vector-builder.h"
/* Functions that test for certain constant types, abstracting away the
   decision about whether to check for overflow.  */

static inline bool
integer_cst_p (tree t)
{
  return TREE_CODE (t) == INTEGER_CST && !TREE_OVERFLOW (t);
}

static inline bool
real_cst_p (tree t)
{
  return TREE_CODE (t) == REAL_CST && !TREE_OVERFLOW (t);
}

static inline bool
complex_cst_p (tree t)
{
  return TREE_CODE (t) == COMPLEX_CST;
}
/* Return true if ARG is a size_type_node constant.
   Store it in *SIZE_OUT if so.  */

static inline bool
size_t_cst_p (tree t, unsigned HOST_WIDE_INT *size_out)
{
  if (types_compatible_p (size_type_node, TREE_TYPE (t))
      && integer_cst_p (t)
      && tree_fits_uhwi_p (t))
    {
      *size_out = tree_to_uhwi (t);
      return true;
    }
  return false;
}
/* RES is the result of a comparison in which < 0 means "less", 0 means
   "equal" and > 0 means "more".  Canonicalize it to -1, 0 or 1 and
   return it in type TYPE.  */

tree
build_cmp_result (tree type, int res)
{
  return build_int_cst (type, res < 0 ? -1 : res > 0 ? 1 : 0);
}
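
/* Illustrative note, not part of the original sources: callers below reuse
   host library results through this helper, e.g. folding

     __builtin_strcmp ("ab", "ac")

   passes the host strcmp value to build_cmp_result, so a host that happens
   to return -42 still yields the canonical constant -1.  */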
/* M is the result of trying to constant-fold an expression (starting
   with clear MPFR flags) and INEXACT says whether the result in M is
   exact or inexact.  Return true if M can be used as a constant-folded
   result in format FORMAT, storing the value in *RESULT if so.  */

static bool
do_mpfr_ckconv (real_value *result, mpfr_srcptr m, bool inexact,
                const real_format *format)
{
  /* Proceed iff we get a normal number, i.e. not NaN or Inf and no
     overflow/underflow occurred.  If -frounding-math, proceed iff the
     result of calling FUNC was exact.  */
  if (!mpfr_number_p (m)
      || mpfr_overflow_p ()
      || mpfr_underflow_p ()
      || (flag_rounding_math && inexact))
    return false;

  REAL_VALUE_TYPE tmp;
  real_from_mpfr (&tmp, m, format, MPFR_RNDN);

  /* Proceed iff GCC's REAL_VALUE_TYPE can hold the MPFR values.
     If the REAL_VALUE_TYPE is zero but the mpfr_t is not, then we
     underflowed in the conversion.  */
  if (!real_isfinite (&tmp)
      || ((tmp.cl == rvc_zero) != (mpfr_zero_p (m) != 0)))
    return false;

  real_convert (result, format, &tmp);
  return real_identical (result, &tmp);
}
/* Try to evaluate:

      *RESULT = f (*ARG)

   in format FORMAT, given that FUNC is the MPFR implementation of f.
   Return true on success.  */

static bool
do_mpfr_arg1 (real_value *result,
              int (*func) (mpfr_ptr, mpfr_srcptr, mpfr_rnd_t),
              const real_value *arg, const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2 || !real_isfinite (arg))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;

  mpfr_t m;
  mpfr_init2 (m, prec);
  mpfr_from_real (m, arg, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m, m, rnd);
  bool ok = do_mpfr_ckconv (result, m, inexact, format);
  mpfr_clear (m);

  return ok;
}
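
/* Illustrative sketch, not in the original file: a single-argument folder
   such as the sqrt case in fold_const_call_ss below simply calls

     do_mpfr_arg1 (result, mpfr_sqrt, arg, format)

   and relies on do_mpfr_ckconv to decide whether the MPFR value survives
   conversion to FORMAT (and, under -frounding-math, whether it was exact).  */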
/* Try to evaluate:

      *RESULT_SIN = sin (*ARG);
      *RESULT_COS = cos (*ARG);

   for format FORMAT.  Return true on success.  */

static bool
do_mpfr_sincos (real_value *result_sin, real_value *result_cos,
                const real_value *arg, const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2 || !real_isfinite (arg))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;

  mpfr_t m, ms, mc;
  mpfr_inits2 (prec, m, ms, mc, NULL);
  mpfr_from_real (m, arg, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = mpfr_sin_cos (ms, mc, m, rnd);
  bool ok = (do_mpfr_ckconv (result_sin, ms, inexact, format)
             && do_mpfr_ckconv (result_cos, mc, inexact, format));
  mpfr_clears (m, ms, mc, NULL);

  return ok;
}
/* Try to evaluate:

      *RESULT = f (*ARG0, *ARG1)

   in format FORMAT, given that FUNC is the MPFR implementation of f.
   Return true on success.  */

static bool
do_mpfr_arg2 (real_value *result,
              int (*func) (mpfr_ptr, mpfr_srcptr, mpfr_srcptr, mpfr_rnd_t),
              const real_value *arg0, const real_value *arg1,
              const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2 || !real_isfinite (arg0) || !real_isfinite (arg1))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;

  mpfr_t m0, m1;
  mpfr_inits2 (prec, m0, m1, NULL);
  mpfr_from_real (m0, arg0, MPFR_RNDN);
  mpfr_from_real (m1, arg1, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m0, m0, m1, rnd);
  bool ok = do_mpfr_ckconv (result, m0, inexact, format);
  mpfr_clears (m0, m1, NULL);

  return ok;
}
/* Try to evaluate:

      *RESULT = f (ARG0, *ARG1)

   in format FORMAT, given that FUNC is the MPFR implementation of f.
   Return true on success.  */

static bool
do_mpfr_arg2 (real_value *result,
              int (*func) (mpfr_ptr, long, mpfr_srcptr, mpfr_rnd_t),
              const wide_int_ref &arg0, const real_value *arg1,
              const real_format *format)
{
  if (format->b != 2 || !real_isfinite (arg1))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;

  mpfr_t m;
  mpfr_init2 (m, prec);
  mpfr_from_real (m, arg1, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m, arg0.to_shwi (), m, rnd);
  bool ok = do_mpfr_ckconv (result, m, inexact, format);
  mpfr_clear (m);

  return ok;
}
/* Try to evaluate:

      *RESULT = f (*ARG0, *ARG1, *ARG2)

   in format FORMAT, given that FUNC is the MPFR implementation of f.
   Return true on success.  */

static bool
do_mpfr_arg3 (real_value *result,
              int (*func) (mpfr_ptr, mpfr_srcptr, mpfr_srcptr,
                           mpfr_srcptr, mpfr_rnd_t),
              const real_value *arg0, const real_value *arg1,
              const real_value *arg2, const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2
      || !real_isfinite (arg0)
      || !real_isfinite (arg1)
      || !real_isfinite (arg2))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;

  mpfr_t m0, m1, m2;
  mpfr_inits2 (prec, m0, m1, m2, NULL);
  mpfr_from_real (m0, arg0, MPFR_RNDN);
  mpfr_from_real (m1, arg1, MPFR_RNDN);
  mpfr_from_real (m2, arg2, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m0, m0, m1, m2, rnd);
  bool ok = do_mpfr_ckconv (result, m0, inexact, format);
  mpfr_clears (m0, m1, m2, NULL);

  return ok;
}
/* M is the result of trying to constant-fold an expression (starting
   with clear MPFR flags) and INEXACT says whether the result in M is
   exact or inexact.  Return true if M can be used as a constant-folded
   result in which the real and imaginary parts have format FORMAT.
   Store those parts in *RESULT_REAL and *RESULT_IMAG if so.  */

static bool
do_mpc_ckconv (real_value *result_real, real_value *result_imag,
               mpc_srcptr m, bool inexact, const real_format *format)
{
  /* Proceed iff we get a normal number, i.e. not NaN or Inf and no
     overflow/underflow occurred.  If -frounding-math, proceed iff the
     result of calling FUNC was exact.  */
  if (!mpfr_number_p (mpc_realref (m))
      || !mpfr_number_p (mpc_imagref (m))
      || mpfr_overflow_p ()
      || mpfr_underflow_p ()
      || (flag_rounding_math && inexact))
    return false;

  REAL_VALUE_TYPE tmp_real, tmp_imag;
  real_from_mpfr (&tmp_real, mpc_realref (m), format, MPFR_RNDN);
  real_from_mpfr (&tmp_imag, mpc_imagref (m), format, MPFR_RNDN);

  /* Proceed iff GCC's REAL_VALUE_TYPE can hold the MPFR values.
     If the REAL_VALUE_TYPE is zero but the mpfr_t is not, then we
     underflowed in the conversion.  */
  if (!real_isfinite (&tmp_real)
      || !real_isfinite (&tmp_imag)
      || (tmp_real.cl == rvc_zero) != (mpfr_zero_p (mpc_realref (m)) != 0)
      || (tmp_imag.cl == rvc_zero) != (mpfr_zero_p (mpc_imagref (m)) != 0))
    return false;

  real_convert (result_real, format, &tmp_real);
  real_convert (result_imag, format, &tmp_imag);

  return (real_identical (result_real, &tmp_real)
          && real_identical (result_imag, &tmp_imag));
}
/* Try to evaluate:

      RESULT = f (ARG)

   in format FORMAT, given that FUNC is the mpc implementation of f.
   Return true on success.  Both RESULT and ARG are represented as
   real and imaginary pairs.  */

static bool
do_mpc_arg1 (real_value *result_real, real_value *result_imag,
             int (*func) (mpc_ptr, mpc_srcptr, mpc_rnd_t),
             const real_value *arg_real, const real_value *arg_imag,
             const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2
      || !real_isfinite (arg_real)
      || !real_isfinite (arg_imag))
    return false;

  int prec = format->p;
  mpc_rnd_t crnd = format->round_towards_zero ? MPC_RNDZZ : MPC_RNDNN;

  mpc_t m;
  mpc_init2 (m, prec);
  mpfr_from_real (mpc_realref (m), arg_real, MPFR_RNDN);
  mpfr_from_real (mpc_imagref (m), arg_imag, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m, m, crnd);
  bool ok = do_mpc_ckconv (result_real, result_imag, m, inexact, format);
  mpc_clear (m);

  return ok;
}
/* Try to evaluate:

      RESULT = f (ARG0, ARG1)

   in format FORMAT, given that FUNC is the mpc implementation of f.
   Return true on success.  RESULT, ARG0 and ARG1 are represented as
   real and imaginary pairs.  */

static bool
do_mpc_arg2 (real_value *result_real, real_value *result_imag,
             int (*func)(mpc_ptr, mpc_srcptr, mpc_srcptr, mpc_rnd_t),
             const real_value *arg0_real, const real_value *arg0_imag,
             const real_value *arg1_real, const real_value *arg1_imag,
             const real_format *format)
{
  if (!real_isfinite (arg0_real)
      || !real_isfinite (arg0_imag)
      || !real_isfinite (arg1_real)
      || !real_isfinite (arg1_imag))
    return false;

  int prec = format->p;
  mpc_rnd_t crnd = format->round_towards_zero ? MPC_RNDZZ : MPC_RNDNN;

  mpc_t m0, m1;
  mpc_init2 (m0, prec);
  mpc_init2 (m1, prec);
  mpfr_from_real (mpc_realref (m0), arg0_real, MPFR_RNDN);
  mpfr_from_real (mpc_imagref (m0), arg0_imag, MPFR_RNDN);
  mpfr_from_real (mpc_realref (m1), arg1_real, MPFR_RNDN);
  mpfr_from_real (mpc_imagref (m1), arg1_imag, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m0, m0, m1, crnd);
  bool ok = do_mpc_ckconv (result_real, result_imag, m0, inexact, format);
  mpc_clear (m0);
  mpc_clear (m1);

  return ok;
}
/* Try to evaluate:

      *RESULT = logb (*ARG)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_logb (real_value *result, const real_value *arg,
                 const real_format *format)
{
  switch (arg->cl)
    {
    case rvc_nan:
      /* If arg is +-NaN, then return it.  */
      *result = *arg;
      return true;

    case rvc_inf:
      /* If arg is +-Inf, then return +Inf.  */
      *result = *arg;
      result->sign = 0;
      return true;

    case rvc_zero:
      /* Zero may set errno and/or raise an exception.  */
      return false;

    case rvc_normal:
      /* For normal numbers, proceed iff radix == 2.  In GCC,
         normalized significands are in the range [0.5, 1.0).  We
         want the exponent as if they were [1.0, 2.0) so get the
         exponent and subtract 1.  */
      if (format->b == 2)
        {
          real_from_integer (result, format, REAL_EXP (arg) - 1, SIGNED);
          return true;
        }
      return false;
    }
  gcc_unreachable ();
}
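
/* Worked example, illustration only: GCC represents 8.0 with a significand
   in [0.5, 1.0), i.e. 0.5 * 2^4, so REAL_EXP is 4 and the folded
   logb (8.0) above is REAL_EXP - 1 = 3, matching the [1.0, 2.0) convention
   that logb expects.  */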
/* Try to evaluate:

      *RESULT = significand (*ARG)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_significand (real_value *result, const real_value *arg,
                        const real_format *format)
{
  switch (arg->cl)
    {
    case rvc_zero:
    case rvc_inf:
    case rvc_nan:
      /* If arg is +-0, +-Inf or +-NaN, then return it.  */
      *result = *arg;
      return true;
    case rvc_normal:
      /* For normal numbers, proceed iff radix == 2.  */
      if (format->b == 2)
        {
          *result = *arg;
          /* In GCC, normalized significands are in the range [0.5, 1.0).
             We want them to be [1.0, 2.0) so set the exponent to 1.  */
          SET_REAL_EXP (result, 1);
          return true;
        }
      return false;
    }
  gcc_unreachable ();
}
/* Try to evaluate:

      *RESULT = f (*ARG)

   where FORMAT is the format of *ARG and PRECISION is the number of
   significant bits in the result.  Return true on success.  */

static bool
fold_const_conversion (wide_int *result,
                       void (*fn) (real_value *, format_helper,
                                   const real_value *),
                       const real_value *arg, unsigned int precision,
                       const real_format *format)
{
  if (!real_isfinite (arg))
    return false;

  real_value rounded;
  fn (&rounded, format, arg);

  bool fail = false;
  *result = real_to_integer (&rounded, &fail, precision);
  return !fail;
}
/* Try to evaluate:

      *RESULT = pow (*ARG0, *ARG1)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_pow (real_value *result, const real_value *arg0,
                const real_value *arg1, const real_format *format)
{
  if (do_mpfr_arg2 (result, mpfr_pow, arg0, arg1, format))
    return true;

  /* Check for an integer exponent.  */
  REAL_VALUE_TYPE cint1;
  HOST_WIDE_INT n1 = real_to_integer (arg1);
  real_from_integer (&cint1, VOIDmode, n1, SIGNED);
  /* Attempt to evaluate pow at compile-time, unless this should
     raise an exception.  */
  if (real_identical (arg1, &cint1)
      && (n1 > 0
          || (!flag_trapping_math && !flag_errno_math)
          || !real_equal (arg0, &dconst0)))
    {
      bool inexact = real_powi (result, format, arg0, n1);
      /* Avoid the folding if flag_signaling_nans is on.  */
      if (flag_unsafe_math_optimizations
          || (!inexact
              && !(flag_signaling_nans
                   && REAL_VALUE_ISSIGNALING_NAN (*arg0))))
        return true;
    }

  return false;
}
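
/* Illustrative note, not in the original: with default flags,
   pow (x, 3.0) can take the real_powi path above because 3.0 is an exact
   integer with n1 > 0, while pow (0.0, -2.0) is left for runtime unless
   -fno-trapping-math and -fno-math-errno are both given.  */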
/* Try to evaluate:

      *RESULT = nextafter (*ARG0, *ARG1)

   or

      *RESULT = nexttoward (*ARG0, *ARG1)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_nextafter (real_value *result, const real_value *arg0,
                      const real_value *arg1, const real_format *format)
{
  if (REAL_VALUE_ISSIGNALING_NAN (*arg0)
      || REAL_VALUE_ISSIGNALING_NAN (*arg1))
    return false;

  /* Don't handle composite modes, nor decimal, nor modes without
     inf or denorm at least for now.  */
  if (format->pnan < format->p
      || format->b == 10
      || !format->has_inf
      || !format->has_denorm)
    return false;

  if (real_nextafter (result, format, arg0, arg1)
      /* If raising underflow or overflow and setting errno to ERANGE,
         fail if we care about those side-effects.  */
      && (flag_trapping_math || flag_errno_math))
    return false;
  /* Similarly for nextafter (0, 1) raising underflow.  */
  else if (flag_trapping_math
           && arg0->cl == rvc_zero
           && result->cl != rvc_zero)
    return false;

  real_convert (result, format, result);
  return true;
}
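
/* Illustrative note, not in the original: nextafter (0.0, 1.0) produces the
   smallest subnormal, which real_nextafter reports as an underflow, so under
   the default -ftrapping-math the check above rejects the fold and the call
   is kept for runtime.  */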
/* Try to evaluate:

      *RESULT = ldexp (*ARG0, ARG1)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_builtin_load_exponent (real_value *result, const real_value *arg0,
                                  const wide_int_ref &arg1,
                                  const real_format *format)
{
  /* Bound the maximum adjustment to twice the range of the
     mode's valid exponents.  Use abs to ensure the range is
     positive as a sanity check.  */
  int max_exp_adj = 2 * labs (format->emax - format->emin);

  /* The requested adjustment must be inside this range.  This
     is a preliminary cap to avoid things like overflow, we
     may still fail to compute the result for other reasons.  */
  if (wi::les_p (arg1, -max_exp_adj) || wi::ges_p (arg1, max_exp_adj))
    return false;

  /* Don't perform operation if we honor signaling NaNs and
     operand is a signaling NaN.  */
  if (!flag_unsafe_math_optimizations
      && flag_signaling_nans
      && REAL_VALUE_ISSIGNALING_NAN (*arg0))
    return false;

  REAL_VALUE_TYPE initial_result;
  real_ldexp (&initial_result, arg0, arg1.to_shwi ());

  /* Ensure we didn't overflow.  */
  if (real_isinf (&initial_result))
    return false;

  /* Only proceed if the target mode can hold the
     resulting value.  */
  *result = real_value_truncate (format, initial_result);
  return real_equal (&initial_result, result);
}
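
/* Worked example, illustration only: ldexp (1.5, 4) gives 1.5 * 2^4 = 24.0,
   which survives real_value_truncate unchanged and therefore folds, whereas
   an adjustment such as ldexp (1.0, 100000) trips the max_exp_adj cap and is
   left alone.  */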
/* Fold a call to __builtin_nan or __builtin_nans with argument ARG and
   return type TYPE.  QUIET is true if a quiet rather than signalling
   NaN is required.  */

static tree
fold_const_builtin_nan (tree type, tree arg, bool quiet)
{
  REAL_VALUE_TYPE real;
  const char *str = c_getstr (arg);
  if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
    return build_real (type, real);
  return NULL_TREE;
}
/* Fold a call to IFN_REDUC_<CODE> (ARG), returning a value of type TYPE.  */

static tree
fold_const_reduction (tree type, tree arg, tree_code code)
{
  unsigned HOST_WIDE_INT nelts;
  if (TREE_CODE (arg) != VECTOR_CST
      || !VECTOR_CST_NELTS (arg).is_constant (&nelts))
    return NULL_TREE;

  tree res = VECTOR_CST_ELT (arg, 0);
  for (unsigned HOST_WIDE_INT i = 1; i < nelts; i++)
    {
      res = const_binop (code, type, res, VECTOR_CST_ELT (arg, i));
      if (res == NULL_TREE || !CONSTANT_CLASS_P (res))
        return NULL_TREE;
    }
  return res;
}
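
/* Illustrative note, not in the original: for a VECTOR_CST { 1, 2, 3, 4 },
   IFN_REDUC_PLUS folds through const_binop to ((1 + 2) + 3) + 4 = 10 and
   IFN_REDUC_MAX over the same vector folds to 4.  */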
/* Fold a call to IFN_VEC_CONVERT (ARG) returning TYPE.  */

static tree
fold_const_vec_convert (tree ret_type, tree arg)
{
  enum tree_code code = NOP_EXPR;
  tree arg_type = TREE_TYPE (arg);
  if (TREE_CODE (arg) != VECTOR_CST)
    return NULL_TREE;

  gcc_checking_assert (VECTOR_TYPE_P (ret_type) && VECTOR_TYPE_P (arg_type));

  if (INTEGRAL_TYPE_P (TREE_TYPE (ret_type))
      && SCALAR_FLOAT_TYPE_P (TREE_TYPE (arg_type)))
    code = FIX_TRUNC_EXPR;
  else if (INTEGRAL_TYPE_P (TREE_TYPE (arg_type))
           && SCALAR_FLOAT_TYPE_P (TREE_TYPE (ret_type)))
    code = FLOAT_EXPR;

  /* We can't handle steps directly when extending, since the
     values need to wrap at the original precision first.  */
  bool step_ok_p
    = (INTEGRAL_TYPE_P (TREE_TYPE (ret_type))
       && INTEGRAL_TYPE_P (TREE_TYPE (arg_type))
       && (TYPE_PRECISION (TREE_TYPE (ret_type))
           <= TYPE_PRECISION (TREE_TYPE (arg_type))));
  tree_vector_builder elts;
  if (!elts.new_unary_operation (ret_type, arg, step_ok_p))
    return NULL_TREE;

  unsigned int count = elts.encoded_nelts ();
  for (unsigned int i = 0; i < count; ++i)
    {
      tree elt = fold_unary (code, TREE_TYPE (ret_type),
                             VECTOR_CST_ELT (arg, i));
      if (elt == NULL_TREE || !CONSTANT_CLASS_P (elt))
        return NULL_TREE;
      elts.quick_push (elt);
    }

  return elts.build ();
}
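
/* Illustrative note, not in the original: a stepped uint8_t encoding such as
   { ..., 254, 255, 0, 1, ... } wraps modulo 256, so after widening to
   uint16_t the elements no longer form one arithmetic series; that is why
   step_ok_p above only allows stepped encodings for non-widening integer
   conversions.  */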
/* Try to evaluate:

      IFN_WHILE_ULT (ARG0, ARG1, (TYPE) { ... })

   Return the value on success and null on failure.  */

static tree
fold_while_ult (tree type, poly_uint64 arg0, poly_uint64 arg1)
{
  if (known_ge (arg0, arg1))
    return build_zero_cst (type);

  if (maybe_ge (arg0, arg1))
    return NULL_TREE;

  poly_uint64 diff = arg1 - arg0;
  poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
  if (known_ge (diff, nelts))
    return build_all_ones_cst (type);

  unsigned HOST_WIDE_INT const_diff;
  if (known_le (diff, nelts) && diff.is_constant (&const_diff))
    {
      tree minus_one = build_minus_one_cst (TREE_TYPE (type));
      tree zero = build_zero_cst (TREE_TYPE (type));
      return build_vector_a_then_b (type, const_diff, minus_one, zero);
    }
  return NULL_TREE;
}
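
/* Worked example, illustration only: for a 4-lane mask TYPE with ARG0 = 2
   and ARG1 = 5, diff is 3, so the fold produces { -1, -1, -1, 0 }: lane I
   is true exactly when 2 + I < 5.  */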
/* Try to evaluate:

      *RESULT = FN (*ARG)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_call_ss (real_value *result, combined_fn fn,
                    const real_value *arg, const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_SQRT:
      return (real_compare (GE_EXPR, arg, &dconst0)
              && do_mpfr_arg1 (result, mpfr_sqrt, arg, format));

    CASE_CFN_CBRT:
      return do_mpfr_arg1 (result, mpfr_cbrt, arg, format);

    CASE_CFN_ASIN:
      return (real_compare (GE_EXPR, arg, &dconstm1)
              && real_compare (LE_EXPR, arg, &dconst1)
              && do_mpfr_arg1 (result, mpfr_asin, arg, format));

    CASE_CFN_ACOS:
      return (real_compare (GE_EXPR, arg, &dconstm1)
              && real_compare (LE_EXPR, arg, &dconst1)
              && do_mpfr_arg1 (result, mpfr_acos, arg, format));

    CASE_CFN_ATAN:
      return do_mpfr_arg1 (result, mpfr_atan, arg, format);

    CASE_CFN_ASINH:
      return do_mpfr_arg1 (result, mpfr_asinh, arg, format);

    CASE_CFN_ACOSH:
      return (real_compare (GE_EXPR, arg, &dconst1)
              && do_mpfr_arg1 (result, mpfr_acosh, arg, format));

    CASE_CFN_ATANH:
      return (real_compare (GE_EXPR, arg, &dconstm1)
              && real_compare (LE_EXPR, arg, &dconst1)
              && do_mpfr_arg1 (result, mpfr_atanh, arg, format));

    CASE_CFN_SIN:
      return do_mpfr_arg1 (result, mpfr_sin, arg, format);

    CASE_CFN_COS:
      return do_mpfr_arg1 (result, mpfr_cos, arg, format);

    CASE_CFN_TAN:
      return do_mpfr_arg1 (result, mpfr_tan, arg, format);

    CASE_CFN_SINH:
      return do_mpfr_arg1 (result, mpfr_sinh, arg, format);

    CASE_CFN_COSH:
      return do_mpfr_arg1 (result, mpfr_cosh, arg, format);

    CASE_CFN_TANH:
      return do_mpfr_arg1 (result, mpfr_tanh, arg, format);

    CASE_CFN_ERF:
      return do_mpfr_arg1 (result, mpfr_erf, arg, format);

    CASE_CFN_ERFC:
      return do_mpfr_arg1 (result, mpfr_erfc, arg, format);

    CASE_CFN_TGAMMA:
      return do_mpfr_arg1 (result, mpfr_gamma, arg, format);

    CASE_CFN_EXP:
      return do_mpfr_arg1 (result, mpfr_exp, arg, format);

    CASE_CFN_EXP2:
      return do_mpfr_arg1 (result, mpfr_exp2, arg, format);

    CASE_CFN_EXP10:
      return do_mpfr_arg1 (result, mpfr_exp10, arg, format);

    CASE_CFN_EXPM1:
      return do_mpfr_arg1 (result, mpfr_expm1, arg, format);

    CASE_CFN_LOG:
      return (real_compare (GT_EXPR, arg, &dconst0)
              && do_mpfr_arg1 (result, mpfr_log, arg, format));

    CASE_CFN_LOG2:
      return (real_compare (GT_EXPR, arg, &dconst0)
              && do_mpfr_arg1 (result, mpfr_log2, arg, format));

    CASE_CFN_LOG10:
      return (real_compare (GT_EXPR, arg, &dconst0)
              && do_mpfr_arg1 (result, mpfr_log10, arg, format));

    CASE_CFN_LOG1P:
      return (real_compare (GT_EXPR, arg, &dconstm1)
              && do_mpfr_arg1 (result, mpfr_log1p, arg, format));

    CASE_CFN_J0:
      return do_mpfr_arg1 (result, mpfr_j0, arg, format);

    CASE_CFN_J1:
      return do_mpfr_arg1 (result, mpfr_j1, arg, format);

    CASE_CFN_Y0:
      return (real_compare (GT_EXPR, arg, &dconst0)
              && do_mpfr_arg1 (result, mpfr_y0, arg, format));

    CASE_CFN_Y1:
      return (real_compare (GT_EXPR, arg, &dconst0)
              && do_mpfr_arg1 (result, mpfr_y1, arg, format));

    CASE_CFN_FLOOR:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
        {
          real_floor (result, format, arg);
          return true;
        }
      return false;

    CASE_CFN_CEIL:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
        {
          real_ceil (result, format, arg);
          return true;
        }
      return false;

    CASE_CFN_TRUNC:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
        {
          real_trunc (result, format, arg);
          return true;
        }
      return false;

    CASE_CFN_ROUND:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
        {
          real_round (result, format, arg);
          return true;
        }
      return false;

    CASE_CFN_ROUNDEVEN:
    CASE_CFN_ROUNDEVEN_FN:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
        {
          real_roundeven (result, format, arg);
          return true;
        }
      return false;

    CASE_CFN_LOGB:
      return fold_const_logb (result, arg, format);

    CASE_CFN_SIGNIFICAND:
      return fold_const_significand (result, arg, format);

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = FN (*ARG)

   where FORMAT is the format of ARG and PRECISION is the number of
   significant bits in the result.  Return true on success.  */

static bool
fold_const_call_ss (wide_int *result, combined_fn fn,
                    const real_value *arg, unsigned int precision,
                    const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_SIGNBIT:
      if (real_isneg (arg))
        *result = wi::one (precision);
      else
        *result = wi::zero (precision);
      return true;

    CASE_CFN_ILOGB:
      /* For ilogb we don't know FP_ILOGB0, so only handle normal values.
         Proceed iff radix == 2.  In GCC, normalized significands are in
         the range [0.5, 1.0).  We want the exponent as if they were
         [1.0, 2.0) so get the exponent and subtract 1.  */
      if (arg->cl == rvc_normal && format->b == 2)
        {
          *result = wi::shwi (REAL_EXP (arg) - 1, precision);
          return true;
        }
      return false;

    CASE_CFN_ICEIL:
    CASE_CFN_LCEIL:
    CASE_CFN_LLCEIL:
      return fold_const_conversion (result, real_ceil, arg,
                                    precision, format);

    CASE_CFN_IFLOOR:
    CASE_CFN_LFLOOR:
    CASE_CFN_LLFLOOR:
      return fold_const_conversion (result, real_floor, arg,
                                    precision, format);

    CASE_CFN_IROUND:
    CASE_CFN_LROUND:
    CASE_CFN_LLROUND:
      return fold_const_conversion (result, real_round, arg,
                                    precision, format);

    CASE_CFN_IRINT:
    CASE_CFN_LRINT:
    CASE_CFN_LLRINT:
      /* Not yet folded to a constant.  */
      return false;

    case CFN_BUILT_IN_FINITED32:
    case CFN_BUILT_IN_FINITED64:
    case CFN_BUILT_IN_FINITED128:
    case CFN_BUILT_IN_ISFINITE:
      *result = wi::shwi (real_isfinite (arg) ? 1 : 0, precision);
      return true;

    case CFN_BUILT_IN_ISSIGNALING:
      *result = wi::shwi (real_issignaling_nan (arg) ? 1 : 0, precision);
      return true;

    case CFN_BUILT_IN_ISINFD32:
    case CFN_BUILT_IN_ISINFD64:
    case CFN_BUILT_IN_ISINFD128:
      if (real_isinf (arg))
        *result = wi::shwi (arg->sign ? -1 : 1, precision);
      else
        *result = wi::shwi (0, precision);
      return true;

    case CFN_BUILT_IN_ISNAND32:
    case CFN_BUILT_IN_ISNAND64:
    case CFN_BUILT_IN_ISNAND128:
      *result = wi::shwi (real_isnan (arg) ? 1 : 0, precision);
      return true;

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = FN (ARG)

   where ARG_TYPE is the type of ARG and PRECISION is the number of bits
   in the result.  Return true on success.  */

static bool
fold_const_call_ss (wide_int *result, combined_fn fn, const wide_int_ref &arg,
                    unsigned int precision, tree arg_type)
{
  switch (fn)
    {
    CASE_CFN_FFS:
      *result = wi::shwi (wi::ffs (arg), precision);
      return true;

    CASE_CFN_CLZ:
      {
        int tmp;
        if (wi::ne_p (arg, 0))
          tmp = wi::clz (arg);
        else if (!CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (arg_type),
                                             tmp))
          tmp = TYPE_PRECISION (arg_type);
        *result = wi::shwi (tmp, precision);
        return true;
      }

    CASE_CFN_CTZ:
      {
        int tmp;
        if (wi::ne_p (arg, 0))
          tmp = wi::ctz (arg);
        else if (!CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (arg_type),
                                             tmp))
          tmp = TYPE_PRECISION (arg_type);
        *result = wi::shwi (tmp, precision);
        return true;
      }

    CASE_CFN_CLRSB:
      *result = wi::shwi (wi::clrsb (arg), precision);
      return true;

    CASE_CFN_POPCOUNT:
      *result = wi::shwi (wi::popcount (arg), precision);
      return true;

    CASE_CFN_PARITY:
      *result = wi::shwi (wi::parity (arg), precision);
      return true;

    case CFN_BUILT_IN_BSWAP16:
    case CFN_BUILT_IN_BSWAP32:
    case CFN_BUILT_IN_BSWAP64:
    case CFN_BUILT_IN_BSWAP128:
      *result = wide_int::from (arg, precision, TYPE_SIGN (arg_type)).bswap ();
      return true;

    default:
      return false;
    }
}
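
/* Illustrative note, not in the original: __builtin_clz (0) is undefined at
   the source level, but IFN_CLZ can still reach this code with a zero
   argument; CLZ_DEFINED_VALUE_AT_ZERO then either stores the target's
   defined value into TMP or, failing that, the fold falls back to
   TYPE_PRECISION (arg_type).  */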
/* Try to evaluate:

      RESULT = FN (*ARG)

   where FORMAT is the format of ARG and of the real and imaginary parts
   of RESULT, passed as RESULT_REAL and RESULT_IMAG respectively.  Return
   true on success.  */

static bool
fold_const_call_cs (real_value *result_real, real_value *result_imag,
                    combined_fn fn, const real_value *arg,
                    const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_CEXPI:
      /* cexpi(x+yi) = cos(x)+sin(y)*i.  */
      return do_mpfr_sincos (result_imag, result_real, arg, format);

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = fn (ARG)

   where FORMAT is the format of RESULT and of the real and imaginary parts
   of ARG, passed as ARG_REAL and ARG_IMAG respectively.  Return true on
   success.  */

static bool
fold_const_call_sc (real_value *result, combined_fn fn,
                    const real_value *arg_real, const real_value *arg_imag,
                    const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_CABS:
      return do_mpfr_arg2 (result, mpfr_hypot, arg_real, arg_imag, format);

    default:
      return false;
    }
}
/* Try to evaluate:

      RESULT = fn (ARG)

   where FORMAT is the format of the real and imaginary parts of RESULT
   (RESULT_REAL and RESULT_IMAG) and of ARG (ARG_REAL and ARG_IMAG).
   Return true on success.  */

static bool
fold_const_call_cc (real_value *result_real, real_value *result_imag,
                    combined_fn fn, const real_value *arg_real,
                    const real_value *arg_imag, const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_CCOS:
      return do_mpc_arg1 (result_real, result_imag, mpc_cos,
                          arg_real, arg_imag, format);

    CASE_CFN_CCOSH:
      return do_mpc_arg1 (result_real, result_imag, mpc_cosh,
                          arg_real, arg_imag, format);

    CASE_CFN_CPROJ:
      if (real_isinf (arg_real) || real_isinf (arg_imag))
        {
          *result_real = dconstinf;
          *result_imag = dconst0;
          result_imag->sign = arg_imag->sign;
        }
      else
        {
          *result_real = *arg_real;
          *result_imag = *arg_imag;
        }
      return true;

    CASE_CFN_CSIN:
      return do_mpc_arg1 (result_real, result_imag, mpc_sin,
                          arg_real, arg_imag, format);

    CASE_CFN_CSINH:
      return do_mpc_arg1 (result_real, result_imag, mpc_sinh,
                          arg_real, arg_imag, format);

    CASE_CFN_CTAN:
      return do_mpc_arg1 (result_real, result_imag, mpc_tan,
                          arg_real, arg_imag, format);

    CASE_CFN_CTANH:
      return do_mpc_arg1 (result_real, result_imag, mpc_tanh,
                          arg_real, arg_imag, format);

    CASE_CFN_CLOG:
      return do_mpc_arg1 (result_real, result_imag, mpc_log,
                          arg_real, arg_imag, format);

    CASE_CFN_CSQRT:
      return do_mpc_arg1 (result_real, result_imag, mpc_sqrt,
                          arg_real, arg_imag, format);

    CASE_CFN_CASIN:
      return do_mpc_arg1 (result_real, result_imag, mpc_asin,
                          arg_real, arg_imag, format);

    CASE_CFN_CACOS:
      return do_mpc_arg1 (result_real, result_imag, mpc_acos,
                          arg_real, arg_imag, format);

    CASE_CFN_CATAN:
      return do_mpc_arg1 (result_real, result_imag, mpc_atan,
                          arg_real, arg_imag, format);

    CASE_CFN_CASINH:
      return do_mpc_arg1 (result_real, result_imag, mpc_asinh,
                          arg_real, arg_imag, format);

    CASE_CFN_CACOSH:
      return do_mpc_arg1 (result_real, result_imag, mpc_acosh,
                          arg_real, arg_imag, format);

    CASE_CFN_CATANH:
      return do_mpc_arg1 (result_real, result_imag, mpc_atanh,
                          arg_real, arg_imag, format);

    CASE_CFN_CEXP:
      return do_mpc_arg1 (result_real, result_imag, mpc_exp,
                          arg_real, arg_imag, format);

    default:
      return false;
    }
}
/* Subroutine of fold_const_call, with the same interface.  Handle cases
   where the arguments and result are numerical.  */

static tree
fold_const_call_1 (combined_fn fn, tree type, tree arg)
{
  machine_mode mode = TYPE_MODE (type);
  machine_mode arg_mode = TYPE_MODE (TREE_TYPE (arg));

  if (integer_cst_p (arg))
    {
      if (SCALAR_INT_MODE_P (mode))
        {
          wide_int result;
          if (fold_const_call_ss (&result, fn, wi::to_wide (arg),
                                  TYPE_PRECISION (type), TREE_TYPE (arg)))
            return wide_int_to_tree (type, result);
        }
      return NULL_TREE;
    }

  if (real_cst_p (arg))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg_mode));
      if (mode == arg_mode)
        {
          /* real -> real.  */
          REAL_VALUE_TYPE result;
          if (fold_const_call_ss (&result, fn, TREE_REAL_CST_PTR (arg),
                                  REAL_MODE_FORMAT (mode)))
            return build_real (type, result);
        }
      else if (COMPLEX_MODE_P (mode)
               && GET_MODE_INNER (mode) == arg_mode)
        {
          /* real -> complex real.  */
          REAL_VALUE_TYPE result_real, result_imag;
          if (fold_const_call_cs (&result_real, &result_imag, fn,
                                  TREE_REAL_CST_PTR (arg),
                                  REAL_MODE_FORMAT (arg_mode)))
            return build_complex (type,
                                  build_real (TREE_TYPE (type), result_real),
                                  build_real (TREE_TYPE (type), result_imag));
        }
      else if (INTEGRAL_TYPE_P (type))
        {
          /* real -> int.  */
          wide_int result;
          if (fold_const_call_ss (&result, fn,
                                  TREE_REAL_CST_PTR (arg),
                                  TYPE_PRECISION (type),
                                  REAL_MODE_FORMAT (arg_mode)))
            return wide_int_to_tree (type, result);
        }
      return NULL_TREE;
    }

  if (complex_cst_p (arg))
    {
      gcc_checking_assert (COMPLEX_MODE_P (arg_mode));
      machine_mode inner_mode = GET_MODE_INNER (arg_mode);
      tree argr = TREE_REALPART (arg);
      tree argi = TREE_IMAGPART (arg);
      if (mode == arg_mode
          && real_cst_p (argr)
          && real_cst_p (argi))
        {
          /* complex real -> complex real.  */
          REAL_VALUE_TYPE result_real, result_imag;
          if (fold_const_call_cc (&result_real, &result_imag, fn,
                                  TREE_REAL_CST_PTR (argr),
                                  TREE_REAL_CST_PTR (argi),
                                  REAL_MODE_FORMAT (inner_mode)))
            return build_complex (type,
                                  build_real (TREE_TYPE (type), result_real),
                                  build_real (TREE_TYPE (type), result_imag));
        }
      if (mode == inner_mode
          && real_cst_p (argr)
          && real_cst_p (argi))
        {
          /* complex real -> real.  */
          REAL_VALUE_TYPE result;
          if (fold_const_call_sc (&result, fn,
                                  TREE_REAL_CST_PTR (argr),
                                  TREE_REAL_CST_PTR (argi),
                                  REAL_MODE_FORMAT (inner_mode)))
            return build_real (type, result);
        }
      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Try to fold FN (ARG) to a constant.  Return the constant on success,
   otherwise return null.  TYPE is the type of the return value.  */

tree
fold_const_call (combined_fn fn, tree type, tree arg)
{
  switch (fn)
    {
    case CFN_BUILT_IN_STRLEN:
      if (const char *str = c_getstr (arg))
        return build_int_cst (type, strlen (str));
      return NULL_TREE;

    CASE_CFN_NAN:
    CASE_FLT_FN_FLOATN_NX (CFN_BUILT_IN_NAN):
    case CFN_BUILT_IN_NAND32:
    case CFN_BUILT_IN_NAND64:
    case CFN_BUILT_IN_NAND128:
      return fold_const_builtin_nan (type, arg, true);

    CASE_CFN_NANS:
    CASE_FLT_FN_FLOATN_NX (CFN_BUILT_IN_NANS):
    case CFN_BUILT_IN_NANSD32:
    case CFN_BUILT_IN_NANSD64:
    case CFN_BUILT_IN_NANSD128:
      return fold_const_builtin_nan (type, arg, false);

    case CFN_REDUC_PLUS:
      return fold_const_reduction (type, arg, PLUS_EXPR);

    case CFN_REDUC_MAX:
      return fold_const_reduction (type, arg, MAX_EXPR);

    case CFN_REDUC_MIN:
      return fold_const_reduction (type, arg, MIN_EXPR);

    case CFN_REDUC_AND:
      return fold_const_reduction (type, arg, BIT_AND_EXPR);

    case CFN_REDUC_IOR:
      return fold_const_reduction (type, arg, BIT_IOR_EXPR);

    case CFN_REDUC_XOR:
      return fold_const_reduction (type, arg, BIT_XOR_EXPR);

    case CFN_VEC_CONVERT:
      return fold_const_vec_convert (type, arg);

    default:
      return fold_const_call_1 (fn, type, arg);
    }
}
/* Fold a call to IFN_FOLD_LEFT_<CODE> (ARG0, ARG1), returning a value
   of type TYPE.  */

static tree
fold_const_fold_left (tree type, tree arg0, tree arg1, tree_code code)
{
  if (TREE_CODE (arg1) != VECTOR_CST)
    return NULL_TREE;

  unsigned HOST_WIDE_INT nelts;
  if (!VECTOR_CST_NELTS (arg1).is_constant (&nelts))
    return NULL_TREE;

  for (unsigned HOST_WIDE_INT i = 0; i < nelts; i++)
    {
      arg0 = const_binop (code, type, arg0, VECTOR_CST_ELT (arg1, i));
      if (arg0 == NULL_TREE || !CONSTANT_CLASS_P (arg0))
        return NULL_TREE;
    }
  return arg0;
}
/* Try to evaluate:

      *RESULT = FN (*ARG0, *ARG1)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_call_sss (real_value *result, combined_fn fn,
                     const real_value *arg0, const real_value *arg1,
                     const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_REMAINDER:
      return do_mpfr_arg2 (result, mpfr_remainder, arg0, arg1, format);

    CASE_CFN_ATAN2:
      return do_mpfr_arg2 (result, mpfr_atan2, arg0, arg1, format);

    CASE_CFN_FDIM:
      return do_mpfr_arg2 (result, mpfr_dim, arg0, arg1, format);

    CASE_CFN_FMOD:
      return do_mpfr_arg2 (result, mpfr_fmod, arg0, arg1, format);

    CASE_CFN_HYPOT:
      return do_mpfr_arg2 (result, mpfr_hypot, arg0, arg1, format);

    CASE_CFN_COPYSIGN:
    CASE_CFN_COPYSIGN_FN:
      *result = *arg0;
      real_copysign (result, arg1);
      return true;

    CASE_CFN_FMIN:
      return do_mpfr_arg2 (result, mpfr_min, arg0, arg1, format);

    CASE_CFN_FMAX:
      return do_mpfr_arg2 (result, mpfr_max, arg0, arg1, format);

    CASE_CFN_POW:
      return fold_const_pow (result, arg0, arg1, format);

    CASE_CFN_NEXTAFTER:
    CASE_CFN_NEXTTOWARD:
      return fold_const_nextafter (result, arg0, arg1, format);

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = FN (*ARG0, ARG1)

   where FORMAT is the format of *RESULT and *ARG0.  Return true on
   success.  */

static bool
fold_const_call_sss (real_value *result, combined_fn fn,
                     const real_value *arg0, const wide_int_ref &arg1,
                     const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_LDEXP:
      return fold_const_builtin_load_exponent (result, arg0, arg1, format);

    CASE_CFN_SCALBN:
    CASE_CFN_SCALBLN:
      return (format->b == 2
              && fold_const_builtin_load_exponent (result, arg0, arg1,
                                                   format));

    CASE_CFN_POWI:
      /* Avoid the folding if flag_signaling_nans is on and
         operand is a signaling NaN.  */
      if (!flag_unsafe_math_optimizations
          && flag_signaling_nans
          && REAL_VALUE_ISSIGNALING_NAN (*arg0))
        return false;

      real_powi (result, format, arg0, arg1.to_shwi ());
      return true;

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = FN (ARG0, *ARG1)

   where FORMAT is the format of *RESULT and *ARG1.  Return true on
   success.  */

static bool
fold_const_call_sss (real_value *result, combined_fn fn,
                     const wide_int_ref &arg0, const real_value *arg1,
                     const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_JN:
      return do_mpfr_arg2 (result, mpfr_jn, arg0, arg1, format);

    CASE_CFN_YN:
      return (real_compare (GT_EXPR, arg1, &dconst0)
              && do_mpfr_arg2 (result, mpfr_yn, arg0, arg1, format));

    default:
      return false;
    }
}
/* Try to evaluate:

      RESULT = fn (ARG0, ARG1)

   where FORMAT is the format of the real and imaginary parts of RESULT
   (RESULT_REAL and RESULT_IMAG), of ARG0 (ARG0_REAL and ARG0_IMAG)
   and of ARG1 (ARG1_REAL and ARG1_IMAG).  Return true on success.  */

static bool
fold_const_call_ccc (real_value *result_real, real_value *result_imag,
                     combined_fn fn, const real_value *arg0_real,
                     const real_value *arg0_imag, const real_value *arg1_real,
                     const real_value *arg1_imag, const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_CPOW:
      return do_mpc_arg2 (result_real, result_imag, mpc_pow,
                          arg0_real, arg0_imag, arg1_real, arg1_imag, format);

    default:
      return false;
    }
}
/* Subroutine of fold_const_call, with the same interface.  Handle cases
   where the arguments and result are numerical.  */

static tree
fold_const_call_1 (combined_fn fn, tree type, tree arg0, tree arg1)
{
  machine_mode mode = TYPE_MODE (type);
  machine_mode arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
  machine_mode arg1_mode = TYPE_MODE (TREE_TYPE (arg1));

  if (mode == arg0_mode
      && real_cst_p (arg0)
      && real_cst_p (arg1))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg0_mode));
      REAL_VALUE_TYPE result;
      if (arg0_mode == arg1_mode)
        {
          /* real, real -> real.  */
          if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
                                   TREE_REAL_CST_PTR (arg1),
                                   REAL_MODE_FORMAT (mode)))
            return build_real (type, result);
        }
      else if (arg1_mode == TYPE_MODE (long_double_type_node))
        switch (fn)
          {
          CASE_CFN_NEXTTOWARD:
            /* real, long double -> real.  */
            if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
                                     TREE_REAL_CST_PTR (arg1),
                                     REAL_MODE_FORMAT (mode)))
              return build_real (type, result);
            break;
          default:
            break;
          }
      return NULL_TREE;
    }

  if (real_cst_p (arg0)
      && integer_cst_p (arg1))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg0_mode));
      if (mode == arg0_mode)
        {
          /* real, int -> real.  */
          REAL_VALUE_TYPE result;
          if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
                                   wi::to_wide (arg1),
                                   REAL_MODE_FORMAT (mode)))
            return build_real (type, result);
        }
      return NULL_TREE;
    }

  if (integer_cst_p (arg0)
      && real_cst_p (arg1))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg1_mode));
      if (mode == arg1_mode)
        {
          /* int, real -> real.  */
          REAL_VALUE_TYPE result;
          if (fold_const_call_sss (&result, fn, wi::to_wide (arg0),
                                   TREE_REAL_CST_PTR (arg1),
                                   REAL_MODE_FORMAT (mode)))
            return build_real (type, result);
        }
      return NULL_TREE;
    }

  if (arg0_mode == arg1_mode
      && complex_cst_p (arg0)
      && complex_cst_p (arg1))
    {
      gcc_checking_assert (COMPLEX_MODE_P (arg0_mode));
      machine_mode inner_mode = GET_MODE_INNER (arg0_mode);
      tree arg0r = TREE_REALPART (arg0);
      tree arg0i = TREE_IMAGPART (arg0);
      tree arg1r = TREE_REALPART (arg1);
      tree arg1i = TREE_IMAGPART (arg1);
      if (mode == arg0_mode
          && real_cst_p (arg0r)
          && real_cst_p (arg0i)
          && real_cst_p (arg1r)
          && real_cst_p (arg1i))
        {
          /* complex real, complex real -> complex real.  */
          REAL_VALUE_TYPE result_real, result_imag;
          if (fold_const_call_ccc (&result_real, &result_imag, fn,
                                   TREE_REAL_CST_PTR (arg0r),
                                   TREE_REAL_CST_PTR (arg0i),
                                   TREE_REAL_CST_PTR (arg1r),
                                   TREE_REAL_CST_PTR (arg1i),
                                   REAL_MODE_FORMAT (inner_mode)))
            return build_complex (type,
                                  build_real (TREE_TYPE (type), result_real),
                                  build_real (TREE_TYPE (type), result_imag));
        }
      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Try to fold FN (ARG0, ARG1) to a constant.  Return the constant on success,
   otherwise return null.  TYPE is the type of the return value.  */

tree
fold_const_call (combined_fn fn, tree type, tree arg0, tree arg1)
{
  const char *p0, *p1;
  char c;
  switch (fn)
    {
    case CFN_BUILT_IN_STRSPN:
      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
        return build_int_cst (type, strspn (p0, p1));
      return NULL_TREE;

    case CFN_BUILT_IN_STRCSPN:
      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
        return build_int_cst (type, strcspn (p0, p1));
      return NULL_TREE;

    case CFN_BUILT_IN_STRCMP:
      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
        return build_cmp_result (type, strcmp (p0, p1));
      return NULL_TREE;

    case CFN_BUILT_IN_STRCASECMP:
      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
        {
          int r = strcmp (p0, p1);
          if (r == 0)
            return build_cmp_result (type, r);
        }
      return NULL_TREE;

    case CFN_BUILT_IN_INDEX:
    case CFN_BUILT_IN_STRCHR:
      if ((p0 = c_getstr (arg0)) && target_char_cst_p (arg1, &c))
        {
          const char *r = strchr (p0, c);
          if (r == NULL)
            return build_int_cst (type, 0);
          return fold_convert (type,
                               fold_build_pointer_plus_hwi (arg0, r - p0));
        }
      return NULL_TREE;

    case CFN_BUILT_IN_RINDEX:
    case CFN_BUILT_IN_STRRCHR:
      if ((p0 = c_getstr (arg0)) && target_char_cst_p (arg1, &c))
        {
          const char *r = strrchr (p0, c);
          if (r == NULL)
            return build_int_cst (type, 0);
          return fold_convert (type,
                               fold_build_pointer_plus_hwi (arg0, r - p0));
        }
      return NULL_TREE;

    case CFN_BUILT_IN_STRSTR:
      if ((p1 = c_getstr (arg1)))
        {
          if ((p0 = c_getstr (arg0)))
            {
              const char *r = strstr (p0, p1);
              if (r == NULL)
                return build_int_cst (type, 0);
              return fold_convert (type,
                                   fold_build_pointer_plus_hwi (arg0, r - p0));
            }
          if (*p1 == '\0')
            return fold_convert (type, arg0);
        }
      return NULL_TREE;

    case CFN_FOLD_LEFT_PLUS:
      return fold_const_fold_left (type, arg0, arg1, PLUS_EXPR);

    default:
      return fold_const_call_1 (fn, type, arg0, arg1);
    }
}
/* Try to evaluate:

      *RESULT = FN (*ARG0, *ARG1, *ARG2)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_call_ssss (real_value *result, combined_fn fn,
                      const real_value *arg0, const real_value *arg1,
                      const real_value *arg2, const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_FMA:
      return do_mpfr_arg3 (result, mpfr_fma, arg0, arg1, arg2, format);

    case CFN_FMS:
      {
        real_value new_arg2 = real_value_negate (arg2);
        return do_mpfr_arg3 (result, mpfr_fma, arg0, arg1, &new_arg2, format);
      }

    case CFN_FNMA:
      {
        real_value new_arg0 = real_value_negate (arg0);
        return do_mpfr_arg3 (result, mpfr_fma, &new_arg0, arg1, arg2, format);
      }

    case CFN_FNMS:
      {
        real_value new_arg0 = real_value_negate (arg0);
        real_value new_arg2 = real_value_negate (arg2);
        return do_mpfr_arg3 (result, mpfr_fma, &new_arg0, arg1,
                             &new_arg2, format);
      }

    default:
      return false;
    }
}
/* Subroutine of fold_const_call, with the same interface.  Handle cases
   where the arguments and result are numerical.  */

static tree
fold_const_call_1 (combined_fn fn, tree type, tree arg0, tree arg1, tree arg2)
{
  machine_mode mode = TYPE_MODE (type);
  machine_mode arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
  machine_mode arg1_mode = TYPE_MODE (TREE_TYPE (arg1));
  machine_mode arg2_mode = TYPE_MODE (TREE_TYPE (arg2));

  if (arg0_mode == arg1_mode
      && arg0_mode == arg2_mode
      && real_cst_p (arg0)
      && real_cst_p (arg1)
      && real_cst_p (arg2))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg0_mode));
      if (mode == arg0_mode)
        {
          /* real, real, real -> real.  */
          REAL_VALUE_TYPE result;
          if (fold_const_call_ssss (&result, fn, TREE_REAL_CST_PTR (arg0),
                                    TREE_REAL_CST_PTR (arg1),
                                    TREE_REAL_CST_PTR (arg2),
                                    REAL_MODE_FORMAT (mode)))
            return build_real (type, result);
        }
      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Try to fold FN (ARG0, ARG1, ARG2) to a constant.  Return the constant on
   success, otherwise return null.  TYPE is the type of the return value.  */

tree
fold_const_call (combined_fn fn, tree type, tree arg0, tree arg1, tree arg2)
{
  const char *p0, *p1;
  char c;
  unsigned HOST_WIDE_INT s0, s1, s2 = 0;
  switch (fn)
    {
    case CFN_BUILT_IN_STRNCMP:
      if (!size_t_cst_p (arg2, &s2))
        return NULL_TREE;
      if (s2 == 0
          && !TREE_SIDE_EFFECTS (arg0)
          && !TREE_SIDE_EFFECTS (arg1))
        return build_int_cst (type, 0);
      else if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
        return build_int_cst (type, strncmp (p0, p1, MIN (s2, SIZE_MAX)));
      return NULL_TREE;

    case CFN_BUILT_IN_STRNCASECMP:
      if (!size_t_cst_p (arg2, &s2))
        return NULL_TREE;
      if (s2 == 0
          && !TREE_SIDE_EFFECTS (arg0)
          && !TREE_SIDE_EFFECTS (arg1))
        return build_int_cst (type, 0);
      else if ((p0 = c_getstr (arg0))
               && (p1 = c_getstr (arg1))
               && strncmp (p0, p1, MIN (s2, SIZE_MAX)) == 0)
        return build_int_cst (type, 0);
      return NULL_TREE;

    case CFN_BUILT_IN_BCMP:
    case CFN_BUILT_IN_MEMCMP:
      if (!size_t_cst_p (arg2, &s2))
        return NULL_TREE;
      if (s2 == 0
          && !TREE_SIDE_EFFECTS (arg0)
          && !TREE_SIDE_EFFECTS (arg1))
        return build_int_cst (type, 0);
      if ((p0 = getbyterep (arg0, &s0))
          && (p1 = getbyterep (arg1, &s1))
          && s2 <= s0
          && s2 <= s1)
        return build_cmp_result (type, memcmp (p0, p1, s2));
      return NULL_TREE;

    case CFN_BUILT_IN_MEMCHR:
      if (!size_t_cst_p (arg2, &s2))
        return NULL_TREE;
      if (s2 == 0
          && !TREE_SIDE_EFFECTS (arg0)
          && !TREE_SIDE_EFFECTS (arg1))
        return build_int_cst (type, 0);
      if ((p0 = getbyterep (arg0, &s0))
          && s2 <= s0
          && target_char_cst_p (arg1, &c))
        {
          const char *r = (const char *) memchr (p0, c, s2);
          if (r == NULL)
            return build_int_cst (type, 0);
          return fold_convert (type,
                               fold_build_pointer_plus_hwi (arg0, r - p0));
        }
      return NULL_TREE;

    case CFN_WHILE_ULT:
      {
        poly_uint64 parg0, parg1;
        if (poly_int_tree_p (arg0, &parg0) && poly_int_tree_p (arg1, &parg1))
          return fold_while_ult (type, parg0, parg1);
        return NULL_TREE;
      }

    default:
      return fold_const_call_1 (fn, type, arg0, arg1, arg2);
    }
}