/* Constant folding for calls to built-in and internal functions.
   Copyright (C) 1988-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "realmpfr.h"
#include "stor-layout.h"
#include "options.h"
#include "fold-const.h"
#include "fold-const-call.h"
#include "case-cfn-macros.h"
#include "tm.h" /* For C[LT]Z_DEFINED_VALUE_AT_ZERO.  */
#include "builtins.h"
#include "gimple-expr.h"
#include "tree-vector-builder.h"
/* Functions that test for certain constant types, abstracting away the
   decision about whether to check for overflow.  */
static inline bool
integer_cst_p (tree t)
{
  return TREE_CODE (t) == INTEGER_CST && !TREE_OVERFLOW (t);
}
static inline bool
real_cst_p (tree t)
{
  return TREE_CODE (t) == REAL_CST && !TREE_OVERFLOW (t);
}
static inline bool
complex_cst_p (tree t)
{
  return TREE_CODE (t) == COMPLEX_CST;
}
/* Return true if ARG is a size_type_node constant.
   Store it in *SIZE_OUT if so.  */

static inline bool
size_t_cst_p (tree t, unsigned HOST_WIDE_INT *size_out)
{
  if (types_compatible_p (size_type_node, TREE_TYPE (t))
      && tree_fits_uhwi_p (t))
    {
      *size_out = tree_to_uhwi (t);
      return true;
    }
  return false;
}
/* RES is the result of a comparison in which < 0 means "less", 0 means
   "equal" and > 0 means "more".  Canonicalize it to -1, 0 or 1 and
   return it in type TYPE.  */

static tree
build_cmp_result (tree type, int res)
{
  return build_int_cst (type, res < 0 ? -1 : res > 0 ? 1 : 0);
}
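/* Illustrative sketch (not part of the original file): how a caller might
   use build_cmp_result to canonicalize a host library result.  The host
   strcmp below may return any negative or positive value; the folded
   constant is always -1, 0 or 1, so generated code never depends on the
   host's choice of magnitude.

     int raw = strcmp ("abc", "abd");                   // e.g. -1 or -100
     tree t = build_cmp_result (integer_type_node, raw);
     // t is the INTEGER_CST -1 in every case.  */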
/* M is the result of trying to constant-fold an expression (starting
   with clear MPFR flags) and INEXACT says whether the result in M is
   exact or inexact.  Return true if M can be used as a constant-folded
   result in format FORMAT, storing the value in *RESULT if so.  */

static bool
do_mpfr_ckconv (real_value *result, mpfr_srcptr m, bool inexact,
                const real_format *format)
{
  /* Proceed iff we get a normal number, i.e. not NaN or Inf and no
     overflow/underflow occurred.  If -frounding-math, proceed iff the
     result of calling FUNC was exact.  */
  if (!mpfr_number_p (m)
      || mpfr_overflow_p ()
      || mpfr_underflow_p ()
      || (flag_rounding_math && inexact))
    return false;

  REAL_VALUE_TYPE tmp;
  real_from_mpfr (&tmp, m, format, MPFR_RNDN);

  /* Proceed iff GCC's REAL_VALUE_TYPE can hold the MPFR values.
     If the REAL_VALUE_TYPE is zero but the mpfr_t is not, then we
     underflowed in the conversion.  */
  if (!real_isfinite (&tmp)
      || ((tmp.cl == rvc_zero) != (mpfr_zero_p (m) != 0)))
    return false;

  real_convert (result, format, &tmp);
  return real_identical (result, &tmp);
}
/* Try to evaluate:

      *RESULT = f (*ARG)

   in format FORMAT, given that FUNC is the MPFR implementation of f.
   Return true on success.  */

static bool
do_mpfr_arg1 (real_value *result,
              int (*func) (mpfr_ptr, mpfr_srcptr, mpfr_rnd_t),
              const real_value *arg, const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2 || !real_isfinite (arg))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;

  mpfr_t m;
  mpfr_init2 (m, prec);
  mpfr_from_real (m, arg, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m, m, rnd);
  bool ok = do_mpfr_ckconv (result, m, inexact, format);
  mpfr_clear (m);

  return ok;
}
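/* Illustrative sketch (not part of the original file): folding sin (1.0)
   would reach do_mpfr_arg1 roughly as follows, with the result buffer and
   format supplied by the caller (see fold_const_call_ss below); SFmode is
   just an example mode here.

     REAL_VALUE_TYPE res;
     bool ok = do_mpfr_arg1 (&res, mpfr_sin, &dconst1,
                             REAL_MODE_FORMAT (SFmode));
     // ok is true only if the MPFR value survives the round-trip checks
     // in do_mpfr_ckconv for that format.  */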
/* Try to evaluate:

      *RESULT_SIN = sin (*ARG);
      *RESULT_COS = cos (*ARG);

   for format FORMAT.  Return true on success.  */

static bool
do_mpfr_sincos (real_value *result_sin, real_value *result_cos,
                const real_value *arg, const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2 || !real_isfinite (arg))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;

  mpfr_t m, ms, mc;
  mpfr_inits2 (prec, m, ms, mc, NULL);
  mpfr_from_real (m, arg, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = mpfr_sin_cos (ms, mc, m, rnd);
  bool ok = (do_mpfr_ckconv (result_sin, ms, inexact, format)
             && do_mpfr_ckconv (result_cos, mc, inexact, format));
  mpfr_clears (m, ms, mc, NULL);

  return ok;
}
/* Try to evaluate:

      *RESULT = f (*ARG0, *ARG1)

   in format FORMAT, given that FUNC is the MPFR implementation of f.
   Return true on success.  */

static bool
do_mpfr_arg2 (real_value *result,
              int (*func) (mpfr_ptr, mpfr_srcptr, mpfr_srcptr, mpfr_rnd_t),
              const real_value *arg0, const real_value *arg1,
              const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2 || !real_isfinite (arg0) || !real_isfinite (arg1))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;

  mpfr_t m0, m1;
  mpfr_inits2 (prec, m0, m1, NULL);
  mpfr_from_real (m0, arg0, MPFR_RNDN);
  mpfr_from_real (m1, arg1, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m0, m0, m1, rnd);
  bool ok = do_mpfr_ckconv (result, m0, inexact, format);
  mpfr_clears (m0, m1, NULL);

  return ok;
}
/* Try to evaluate:

      *RESULT = f (ARG0, *ARG1)

   in format FORMAT, given that FUNC is the MPFR implementation of f.
   Return true on success.  */

static bool
do_mpfr_arg2 (real_value *result,
              int (*func) (mpfr_ptr, long, mpfr_srcptr, mpfr_rnd_t),
              const wide_int_ref &arg0, const real_value *arg1,
              const real_format *format)
{
  if (format->b != 2 || !real_isfinite (arg1))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;

  mpfr_t m;
  mpfr_init2 (m, prec);
  mpfr_from_real (m, arg1, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m, arg0.to_shwi (), m, rnd);
  bool ok = do_mpfr_ckconv (result, m, inexact, format);
  mpfr_clear (m);

  return ok;
}
/* Try to evaluate:

      *RESULT = f (*ARG0, *ARG1, *ARG2)

   in format FORMAT, given that FUNC is the MPFR implementation of f.
   Return true on success.  */

static bool
do_mpfr_arg3 (real_value *result,
              int (*func) (mpfr_ptr, mpfr_srcptr, mpfr_srcptr,
                           mpfr_srcptr, mpfr_rnd_t),
              const real_value *arg0, const real_value *arg1,
              const real_value *arg2, const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2
      || !real_isfinite (arg0)
      || !real_isfinite (arg1)
      || !real_isfinite (arg2))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;

  mpfr_t m0, m1, m2;
  mpfr_inits2 (prec, m0, m1, m2, NULL);
  mpfr_from_real (m0, arg0, MPFR_RNDN);
  mpfr_from_real (m1, arg1, MPFR_RNDN);
  mpfr_from_real (m2, arg2, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m0, m0, m1, m2, rnd);
  bool ok = do_mpfr_ckconv (result, m0, inexact, format);
  mpfr_clears (m0, m1, m2, NULL);

  return ok;
}
/* M is the result of trying to constant-fold an expression (starting
   with clear MPFR flags) and INEXACT says whether the result in M is
   exact or inexact.  Return true if M can be used as a constant-folded
   result in which the real and imaginary parts have format FORMAT.
   Store those parts in *RESULT_REAL and *RESULT_IMAG if so.  */

static bool
do_mpc_ckconv (real_value *result_real, real_value *result_imag,
               mpc_srcptr m, bool inexact, const real_format *format)
{
  /* Proceed iff we get a normal number, i.e. not NaN or Inf and no
     overflow/underflow occurred.  If -frounding-math, proceed iff the
     result of calling FUNC was exact.  */
  if (!mpfr_number_p (mpc_realref (m))
      || !mpfr_number_p (mpc_imagref (m))
      || mpfr_overflow_p ()
      || mpfr_underflow_p ()
      || (flag_rounding_math && inexact))
    return false;

  REAL_VALUE_TYPE tmp_real, tmp_imag;
  real_from_mpfr (&tmp_real, mpc_realref (m), format, MPFR_RNDN);
  real_from_mpfr (&tmp_imag, mpc_imagref (m), format, MPFR_RNDN);

  /* Proceed iff GCC's REAL_VALUE_TYPE can hold the MPFR values.
     If the REAL_VALUE_TYPE is zero but the mpfr_t is not, then we
     underflowed in the conversion.  */
  if (!real_isfinite (&tmp_real)
      || !real_isfinite (&tmp_imag)
      || (tmp_real.cl == rvc_zero) != (mpfr_zero_p (mpc_realref (m)) != 0)
      || (tmp_imag.cl == rvc_zero) != (mpfr_zero_p (mpc_imagref (m)) != 0))
    return false;

  real_convert (result_real, format, &tmp_real);
  real_convert (result_imag, format, &tmp_imag);
  return (real_identical (result_real, &tmp_real)
          && real_identical (result_imag, &tmp_imag));
}
/* Try to evaluate:

      RESULT = f (ARG)

   in format FORMAT, given that FUNC is the mpc implementation of f.
   Return true on success.  Both RESULT and ARG are represented as
   real and imaginary pairs.  */

static bool
do_mpc_arg1 (real_value *result_real, real_value *result_imag,
             int (*func) (mpc_ptr, mpc_srcptr, mpc_rnd_t),
             const real_value *arg_real, const real_value *arg_imag,
             const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2
      || !real_isfinite (arg_real)
      || !real_isfinite (arg_imag))
    return false;

  int prec = format->p;
  mpc_rnd_t crnd = format->round_towards_zero ? MPC_RNDZZ : MPC_RNDNN;
  mpc_t m;

  mpc_init2 (m, prec);
  mpfr_from_real (mpc_realref (m), arg_real, MPFR_RNDN);
  mpfr_from_real (mpc_imagref (m), arg_imag, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m, m, crnd);
  bool ok = do_mpc_ckconv (result_real, result_imag, m, inexact, format);
  mpc_clear (m);

  return ok;
}
/* Try to evaluate:

      RESULT = f (ARG0, ARG1)

   in format FORMAT, given that FUNC is the mpc implementation of f.
   Return true on success.  RESULT, ARG0 and ARG1 are represented as
   real and imaginary pairs.  */

static bool
do_mpc_arg2 (real_value *result_real, real_value *result_imag,
             int (*func)(mpc_ptr, mpc_srcptr, mpc_srcptr, mpc_rnd_t),
             const real_value *arg0_real, const real_value *arg0_imag,
             const real_value *arg1_real, const real_value *arg1_imag,
             const real_format *format)
{
  if (!real_isfinite (arg0_real)
      || !real_isfinite (arg0_imag)
      || !real_isfinite (arg1_real)
      || !real_isfinite (arg1_imag))
    return false;

  int prec = format->p;
  mpc_rnd_t crnd = format->round_towards_zero ? MPC_RNDZZ : MPC_RNDNN;
  mpc_t m0, m1;

  mpc_init2 (m0, prec);
  mpc_init2 (m1, prec);
  mpfr_from_real (mpc_realref (m0), arg0_real, MPFR_RNDN);
  mpfr_from_real (mpc_imagref (m0), arg0_imag, MPFR_RNDN);
  mpfr_from_real (mpc_realref (m1), arg1_real, MPFR_RNDN);
  mpfr_from_real (mpc_imagref (m1), arg1_imag, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m0, m0, m1, crnd);
  bool ok = do_mpc_ckconv (result_real, result_imag, m0, inexact, format);
  mpc_clear (m0);
  mpc_clear (m1);

  return ok;
}
/* Try to evaluate:

      *RESULT = logb (*ARG)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_logb (real_value *result, const real_value *arg,
                 const real_format *format)
{
  switch (arg->cl)
    {
    case rvc_nan:
      /* If arg is +-NaN, then return it.  */
      *result = *arg;
      return true;

    case rvc_inf:
      /* If arg is +-Inf, then return +Inf.  */
      *result = *arg;
      result->sign = 0;
      return true;

    case rvc_zero:
      /* Zero may set errno and/or raise an exception.  */
      return false;

    case rvc_normal:
      /* For normal numbers, proceed iff radix == 2.  In GCC,
         normalized significands are in the range [0.5, 1.0).  We
         want the exponent as if they were [1.0, 2.0) so get the
         exponent and subtract 1.  */
      if (format->b == 2)
        {
          real_from_integer (result, format, REAL_EXP (arg) - 1, SIGNED);
          return true;
        }
      return false;
    }
  gcc_unreachable ();
}
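/* Worked example (not part of the original file): in GCC's representation
   8.0 is stored as 0.5 * 2^4, so REAL_EXP is 4 and the folded logb (8.0)
   is REAL_EXP - 1 = 3, matching the radix-2 C library result.  */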
/* Try to evaluate:

      *RESULT = significand (*ARG)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_significand (real_value *result, const real_value *arg,
                        const real_format *format)
{
  switch (arg->cl)
    {
    case rvc_zero:
    case rvc_nan:
    case rvc_inf:
      /* If arg is +-0, +-Inf or +-NaN, then return it.  */
      *result = *arg;
      return true;

    case rvc_normal:
      /* For normal numbers, proceed iff radix == 2.  */
      if (format->b == 2)
        {
          *result = *arg;
          /* In GCC, normalized significands are in the range [0.5, 1.0).
             We want them to be [1.0, 2.0) so set the exponent to 1.  */
          SET_REAL_EXP (result, 1);
          return true;
        }
      return false;
    }
  gcc_unreachable ();
}
/* Try to evaluate:

      *RESULT = f (*ARG)

   where FORMAT is the format of *ARG and PRECISION is the number of
   significant bits in the result.  Return true on success.  */

static bool
fold_const_conversion (wide_int *result,
                       void (*fn) (real_value *, format_helper,
                                   const real_value *),
                       const real_value *arg, unsigned int precision,
                       const real_format *format)
{
  if (!real_isfinite (arg))
    return false;

  real_value rounded;
  fn (&rounded, format, arg);

  bool fail = false;
  *result = real_to_integer (&rounded, &fail, precision);
  return !fail;
}
/* Try to evaluate:

      *RESULT = pow (*ARG0, *ARG1)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_pow (real_value *result, const real_value *arg0,
                const real_value *arg1, const real_format *format)
{
  if (do_mpfr_arg2 (result, mpfr_pow, arg0, arg1, format))
    return true;

  /* Check for an integer exponent.  */
  REAL_VALUE_TYPE cint1;
  HOST_WIDE_INT n1 = real_to_integer (arg1);
  real_from_integer (&cint1, VOIDmode, n1, SIGNED);
  /* Attempt to evaluate pow at compile-time, unless this should
     raise an exception.  */
  if (real_identical (arg1, &cint1)
      && (n1 > 0
          || (!flag_trapping_math && !flag_errno_math)
          || !real_equal (arg0, &dconst0)))
    {
      bool inexact = real_powi (result, format, arg0, n1);
      /* Avoid the folding if flag_signaling_nans is on.  */
      if (flag_unsafe_math_optimizations
          || (!inexact
              && !(flag_signaling_nans
                   && REAL_VALUE_ISSIGNALING_NAN (*arg0))))
        return true;
    }

  return false;
}
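/* Illustrative note (not part of the original file): pow (2.0, 3.0) has an
   integer exponent, so even if the MPFR path declines, real_powi above can
   still fold it to exactly 8.0 (inexact is false).  pow (0.0, -1.0), by
   contrast, is only folded when both -fno-trapping-math and -fno-math-errno
   are in effect, since at run time it would raise an exception or set
   errno.  */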
/* Try to evaluate:

      *RESULT = nextafter (*ARG0, *ARG1)

   or

      *RESULT = nexttoward (*ARG0, *ARG1)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_nextafter (real_value *result, const real_value *arg0,
                      const real_value *arg1, const real_format *format)
{
  if (REAL_VALUE_ISSIGNALING_NAN (*arg0)
      || REAL_VALUE_ISSIGNALING_NAN (*arg1))
    return false;

  /* Don't handle composite modes, nor decimal, nor modes without
     inf or denorm at least for now.  */
  if (format->pnan < format->p
      || format->b == 10
      || !format->has_inf
      || !format->has_denorm)
    return false;

  if (real_nextafter (result, format, arg0, arg1)
      /* If raising underflow or overflow and setting errno to ERANGE,
         fail if we care about those side-effects.  */
      && (flag_trapping_math || flag_errno_math))
    return false;
  /* Similarly for nextafter (0, 1) raising underflow.  */
  else if (flag_trapping_math
           && arg0->cl == rvc_zero
           && result->cl != rvc_zero)
    return false;

  real_convert (result, format, result);
  return true;
}
/* Try to evaluate:

      *RESULT = ldexp (*ARG0, ARG1)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_builtin_load_exponent (real_value *result, const real_value *arg0,
                                  const wide_int_ref &arg1,
                                  const real_format *format)
{
  /* Bound the maximum adjustment to twice the range of the
     mode's valid exponents.  Use abs to ensure the range is
     positive as a sanity check.  */
  int max_exp_adj = 2 * labs (format->emax - format->emin);

  /* The requested adjustment must be inside this range.  This
     is a preliminary cap to avoid things like overflow, we
     may still fail to compute the result for other reasons.  */
  if (wi::les_p (arg1, -max_exp_adj) || wi::ges_p (arg1, max_exp_adj))
    return false;

  /* Don't perform operation if we honor signaling NaNs and
     operand is a signaling NaN.  */
  if (!flag_unsafe_math_optimizations
      && flag_signaling_nans
      && REAL_VALUE_ISSIGNALING_NAN (*arg0))
    return false;

  REAL_VALUE_TYPE initial_result;
  real_ldexp (&initial_result, arg0, arg1.to_shwi ());

  /* Ensure we didn't overflow.  */
  if (real_isinf (&initial_result))
    return false;

  /* Only proceed if the target mode can hold the
     resulting value.  */
  *result = real_value_truncate (format, initial_result);
  return real_equal (&initial_result, result);
}
/* Fold a call to __builtin_nan or __builtin_nans with argument ARG and
   return type TYPE.  QUIET is true if a quiet rather than signalling
   NaN is required.  */

static tree
fold_const_builtin_nan (tree type, tree arg, bool quiet)
{
  REAL_VALUE_TYPE real;
  const char *str = c_getstr (arg);
  if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
    return build_real (type, real);
  return NULL_TREE;
}
/* Fold a call to IFN_REDUC_<CODE> (ARG), returning a value of type TYPE.  */

static tree
fold_const_reduction (tree type, tree arg, tree_code code)
{
  unsigned HOST_WIDE_INT nelts;
  if (TREE_CODE (arg) != VECTOR_CST
      || !VECTOR_CST_NELTS (arg).is_constant (&nelts))
    return NULL_TREE;

  tree res = VECTOR_CST_ELT (arg, 0);
  for (unsigned HOST_WIDE_INT i = 1; i < nelts; i++)
    {
      res = const_binop (code, type, res, VECTOR_CST_ELT (arg, i));
      if (res == NULL_TREE || !CONSTANT_CLASS_P (res))
        return NULL_TREE;
    }
  return res;
}
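/* Illustrative sketch (not part of the original file): folding
   IFN_REDUC_PLUS on the constant vector { 1, 2, 3, 4 } walks the elements
   left to right:

     res = 1
     res = const_binop (PLUS_EXPR, type, res, 2)   -> 3
     res = const_binop (PLUS_EXPR, type, res, 3)   -> 6
     res = const_binop (PLUS_EXPR, type, res, 4)   -> 10

   and returns the INTEGER_CST 10; any non-constant intermediate result
   abandons the fold.  */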
/* Fold a call to IFN_VEC_CONVERT (ARG) returning TYPE.  */

static tree
fold_const_vec_convert (tree ret_type, tree arg)
{
  enum tree_code code = NOP_EXPR;
  tree arg_type = TREE_TYPE (arg);
  if (TREE_CODE (arg) != VECTOR_CST)
    return NULL_TREE;

  gcc_checking_assert (VECTOR_TYPE_P (ret_type) && VECTOR_TYPE_P (arg_type));

  if (INTEGRAL_TYPE_P (TREE_TYPE (ret_type))
      && SCALAR_FLOAT_TYPE_P (TREE_TYPE (arg_type)))
    code = FIX_TRUNC_EXPR;
  else if (INTEGRAL_TYPE_P (TREE_TYPE (arg_type))
           && SCALAR_FLOAT_TYPE_P (TREE_TYPE (ret_type)))
    code = FLOAT_EXPR;

  /* We can't handle steps directly when extending, since the
     values need to wrap at the original precision first.  */
  bool step_ok_p
    = (INTEGRAL_TYPE_P (TREE_TYPE (ret_type))
       && INTEGRAL_TYPE_P (TREE_TYPE (arg_type))
       && (TYPE_PRECISION (TREE_TYPE (ret_type))
           <= TYPE_PRECISION (TREE_TYPE (arg_type))));
  tree_vector_builder elts;
  if (!elts.new_unary_operation (ret_type, arg, step_ok_p))
    return NULL_TREE;

  unsigned int count = elts.encoded_nelts ();
  for (unsigned int i = 0; i < count; ++i)
    {
      tree elt = fold_unary (code, TREE_TYPE (ret_type),
                             VECTOR_CST_ELT (arg, i));
      if (elt == NULL_TREE || !CONSTANT_CLASS_P (elt))
        return NULL_TREE;
      elts.quick_push (elt);
    }

  return elts.build ();
}
/* Try to evaluate:

      IFN_WHILE_ULT (ARG0, ARG1, (TYPE) { ... })

   Return the value on success and null on failure.  */

static tree
fold_while_ult (tree type, poly_uint64 arg0, poly_uint64 arg1)
{
  if (known_ge (arg0, arg1))
    return build_zero_cst (type);

  if (maybe_ge (arg0, arg1))
    return NULL_TREE;

  poly_uint64 diff = arg1 - arg0;
  poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
  if (known_ge (diff, nelts))
    return build_all_ones_cst (type);

  unsigned HOST_WIDE_INT const_diff;
  if (known_le (diff, nelts) && diff.is_constant (&const_diff))
    {
      tree minus_one = build_minus_one_cst (TREE_TYPE (type));
      tree zero = build_zero_cst (TREE_TYPE (type));
      return build_vector_a_then_b (type, const_diff, minus_one, zero);
    }
  return NULL_TREE;
}
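/* Worked example (not part of the original file): for an 8-lane mask type,
   IFN_WHILE_ULT (3, 7, ...) gives diff = 4, so the folded constant is
   { -1, -1, -1, -1, 0, 0, 0, 0 }: the first DIFF lanes are active.  If
   ARG0 >= ARG1 the result is all zeros, and if DIFF covers every lane it
   is all ones.  */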
/* Try to evaluate:

      *RESULT = FN (*ARG)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_call_ss (real_value *result, combined_fn fn,
                    const real_value *arg, const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      return (real_compare (GE_EXPR, arg, &dconst0)
              && do_mpfr_arg1 (result, mpfr_sqrt, arg, format));

    CASE_CFN_CBRT:
      return do_mpfr_arg1 (result, mpfr_cbrt, arg, format);

    CASE_CFN_ASIN:
      return (real_compare (GE_EXPR, arg, &dconstm1)
              && real_compare (LE_EXPR, arg, &dconst1)
              && do_mpfr_arg1 (result, mpfr_asin, arg, format));

    CASE_CFN_ACOS:
      return (real_compare (GE_EXPR, arg, &dconstm1)
              && real_compare (LE_EXPR, arg, &dconst1)
              && do_mpfr_arg1 (result, mpfr_acos, arg, format));

    CASE_CFN_ATAN:
      return do_mpfr_arg1 (result, mpfr_atan, arg, format);

    CASE_CFN_ASINH:
      return do_mpfr_arg1 (result, mpfr_asinh, arg, format);

    CASE_CFN_ACOSH:
      return (real_compare (GE_EXPR, arg, &dconst1)
              && do_mpfr_arg1 (result, mpfr_acosh, arg, format));

    CASE_CFN_ATANH:
      return (real_compare (GE_EXPR, arg, &dconstm1)
              && real_compare (LE_EXPR, arg, &dconst1)
              && do_mpfr_arg1 (result, mpfr_atanh, arg, format));

    CASE_CFN_SIN:
      return do_mpfr_arg1 (result, mpfr_sin, arg, format);

    CASE_CFN_COS:
      return do_mpfr_arg1 (result, mpfr_cos, arg, format);

    CASE_CFN_TAN:
      return do_mpfr_arg1 (result, mpfr_tan, arg, format);

    CASE_CFN_SINH:
      return do_mpfr_arg1 (result, mpfr_sinh, arg, format);

    CASE_CFN_COSH:
      return do_mpfr_arg1 (result, mpfr_cosh, arg, format);

    CASE_CFN_TANH:
      return do_mpfr_arg1 (result, mpfr_tanh, arg, format);

    CASE_CFN_ERF:
      return do_mpfr_arg1 (result, mpfr_erf, arg, format);

    CASE_CFN_ERFC:
      return do_mpfr_arg1 (result, mpfr_erfc, arg, format);

    CASE_CFN_TGAMMA:
      return do_mpfr_arg1 (result, mpfr_gamma, arg, format);

    CASE_CFN_EXP:
      return do_mpfr_arg1 (result, mpfr_exp, arg, format);

    CASE_CFN_EXP2:
      return do_mpfr_arg1 (result, mpfr_exp2, arg, format);

    CASE_CFN_EXP10:
    CASE_CFN_POW10:
      return do_mpfr_arg1 (result, mpfr_exp10, arg, format);

    CASE_CFN_EXPM1:
      return do_mpfr_arg1 (result, mpfr_expm1, arg, format);

    CASE_CFN_LOG:
      return (real_compare (GT_EXPR, arg, &dconst0)
              && do_mpfr_arg1 (result, mpfr_log, arg, format));

    CASE_CFN_LOG2:
      return (real_compare (GT_EXPR, arg, &dconst0)
              && do_mpfr_arg1 (result, mpfr_log2, arg, format));

    CASE_CFN_LOG10:
      return (real_compare (GT_EXPR, arg, &dconst0)
              && do_mpfr_arg1 (result, mpfr_log10, arg, format));

    CASE_CFN_LOG1P:
      return (real_compare (GT_EXPR, arg, &dconstm1)
              && do_mpfr_arg1 (result, mpfr_log1p, arg, format));

    CASE_CFN_J0:
      return do_mpfr_arg1 (result, mpfr_j0, arg, format);

    CASE_CFN_J1:
      return do_mpfr_arg1 (result, mpfr_j1, arg, format);

    CASE_CFN_Y0:
      return (real_compare (GT_EXPR, arg, &dconst0)
              && do_mpfr_arg1 (result, mpfr_y0, arg, format));

    CASE_CFN_Y1:
      return (real_compare (GT_EXPR, arg, &dconst0)
              && do_mpfr_arg1 (result, mpfr_y1, arg, format));

    CASE_CFN_FLOOR:
    CASE_CFN_FLOOR_FN:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
        {
          real_floor (result, format, arg);
          return true;
        }
      return false;

    CASE_CFN_CEIL:
    CASE_CFN_CEIL_FN:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
        {
          real_ceil (result, format, arg);
          return true;
        }
      return false;

    CASE_CFN_TRUNC:
    CASE_CFN_TRUNC_FN:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
        {
          real_trunc (result, format, arg);
          return true;
        }
      return false;

    CASE_CFN_ROUND:
    CASE_CFN_ROUND_FN:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
        {
          real_round (result, format, arg);
          return true;
        }
      return false;

    CASE_CFN_ROUNDEVEN:
    CASE_CFN_ROUNDEVEN_FN:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
        {
          real_roundeven (result, format, arg);
          return true;
        }
      return false;

    CASE_CFN_LOGB:
      return fold_const_logb (result, arg, format);

    CASE_CFN_SIGNIFICAND:
      return fold_const_significand (result, arg, format);

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = FN (*ARG)

   where FORMAT is the format of ARG and PRECISION is the number of
   significant bits in the result.  Return true on success.  */

static bool
fold_const_call_ss (wide_int *result, combined_fn fn,
                    const real_value *arg, unsigned int precision,
                    const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_SIGNBIT:
      if (real_isneg (arg))
        *result = wi::one (precision);
      else
        *result = wi::zero (precision);
      return true;

    CASE_CFN_ILOGB:
      /* For ilogb we don't know FP_ILOGB0, so only handle normal values.
         Proceed iff radix == 2.  In GCC, normalized significands are in
         the range [0.5, 1.0).  We want the exponent as if they were
         [1.0, 2.0) so get the exponent and subtract 1.  */
      if (arg->cl == rvc_normal && format->b == 2)
        {
          *result = wi::shwi (REAL_EXP (arg) - 1, precision);
          return true;
        }
      return false;

    CASE_CFN_ICEIL:
    CASE_CFN_LCEIL:
    CASE_CFN_LLCEIL:
      return fold_const_conversion (result, real_ceil, arg,
                                    precision, format);

    CASE_CFN_IFLOOR:
    CASE_CFN_LFLOOR:
    CASE_CFN_LLFLOOR:
      return fold_const_conversion (result, real_floor, arg,
                                    precision, format);

    CASE_CFN_IROUND:
    CASE_CFN_LROUND:
    CASE_CFN_LLROUND:
      return fold_const_conversion (result, real_round, arg,
                                    precision, format);

    CASE_CFN_IRINT:
    CASE_CFN_LRINT:
    CASE_CFN_LLRINT:
      /* Not yet folded to a constant.  */
      return false;

    CASE_FLT_FN (CFN_BUILT_IN_FINITE):
    case CFN_BUILT_IN_FINITED32:
    case CFN_BUILT_IN_FINITED64:
    case CFN_BUILT_IN_FINITED128:
    case CFN_BUILT_IN_ISFINITE:
      *result = wi::shwi (real_isfinite (arg) ? 1 : 0, precision);
      return true;

    CASE_FLT_FN (CFN_BUILT_IN_ISINF):
    case CFN_BUILT_IN_ISINFD32:
    case CFN_BUILT_IN_ISINFD64:
    case CFN_BUILT_IN_ISINFD128:
      if (real_isinf (arg))
        *result = wi::shwi (arg->sign ? -1 : 1, precision);
      else
        *result = wi::shwi (0, precision);
      return true;

    CASE_FLT_FN (CFN_BUILT_IN_ISNAN):
    case CFN_BUILT_IN_ISNAND32:
    case CFN_BUILT_IN_ISNAND64:
    case CFN_BUILT_IN_ISNAND128:
      *result = wi::shwi (real_isnan (arg) ? 1 : 0, precision);
      return true;

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = FN (ARG)

   where ARG_TYPE is the type of ARG and PRECISION is the number of bits
   in the result.  Return true on success.  */

static bool
fold_const_call_ss (wide_int *result, combined_fn fn, const wide_int_ref &arg,
                    unsigned int precision, tree arg_type)
{
  switch (fn)
    {
    CASE_CFN_FFS:
      *result = wi::shwi (wi::ffs (arg), precision);
      return true;

    CASE_CFN_CLZ:
      {
        int tmp;
        if (wi::ne_p (arg, 0))
          tmp = wi::clz (arg);
        else if (!CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (arg_type),
                                             tmp))
          tmp = TYPE_PRECISION (arg_type);
        *result = wi::shwi (tmp, precision);
        return true;
      }

    CASE_CFN_CTZ:
      {
        int tmp;
        if (wi::ne_p (arg, 0))
          tmp = wi::ctz (arg);
        else if (!CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (arg_type),
                                             tmp))
          tmp = TYPE_PRECISION (arg_type);
        *result = wi::shwi (tmp, precision);
        return true;
      }

    CASE_CFN_CLRSB:
      *result = wi::shwi (wi::clrsb (arg), precision);
      return true;

    CASE_CFN_POPCOUNT:
      *result = wi::shwi (wi::popcount (arg), precision);
      return true;

    CASE_CFN_PARITY:
      *result = wi::shwi (wi::parity (arg), precision);
      return true;

    case CFN_BUILT_IN_BSWAP16:
    case CFN_BUILT_IN_BSWAP32:
    case CFN_BUILT_IN_BSWAP64:
    case CFN_BUILT_IN_BSWAP128:
      *result = wide_int::from (arg, precision, TYPE_SIGN (arg_type)).bswap ();
      return true;

    default:
      return false;
    }
}
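/* Illustrative note (not part of the original file): with a 32-bit argument
   type, __builtin_clz (16) folds to 27 and __builtin_ctz (16) to 4.  For a
   zero argument the C[LT]Z_DEFINED_VALUE_AT_ZERO macros above either supply
   the target-defined value or the code falls back to the type precision
   (32 here), so the fold still succeeds.  */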
/* Try to evaluate:

      RESULT = fn (ARG)

   where FORMAT is the format of ARG and of the real and imaginary parts
   of RESULT, passed as RESULT_REAL and RESULT_IMAG respectively.  Return
   true on success.  */

static bool
fold_const_call_cs (real_value *result_real, real_value *result_imag,
                    combined_fn fn, const real_value *arg,
                    const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_CEXPI:
      /* cexpi(x) = cos(x) + sin(x)*i.  */
      return do_mpfr_sincos (result_imag, result_real, arg, format);

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = fn (ARG)

   where FORMAT is the format of RESULT and of the real and imaginary parts
   of ARG, passed as ARG_REAL and ARG_IMAG respectively.  Return true on
   success.  */

static bool
fold_const_call_sc (real_value *result, combined_fn fn,
                    const real_value *arg_real, const real_value *arg_imag,
                    const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_CABS:
      return do_mpfr_arg2 (result, mpfr_hypot, arg_real, arg_imag, format);

    default:
      return false;
    }
}
/* Try to evaluate:

      RESULT = fn (ARG)

   where FORMAT is the format of the real and imaginary parts of RESULT
   (RESULT_REAL and RESULT_IMAG) and of ARG (ARG_REAL and ARG_IMAG).
   Return true on success.  */

static bool
fold_const_call_cc (real_value *result_real, real_value *result_imag,
                    combined_fn fn, const real_value *arg_real,
                    const real_value *arg_imag, const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_CCOS:
      return do_mpc_arg1 (result_real, result_imag, mpc_cos,
                          arg_real, arg_imag, format);

    CASE_CFN_CCOSH:
      return do_mpc_arg1 (result_real, result_imag, mpc_cosh,
                          arg_real, arg_imag, format);

    CASE_CFN_CPROJ:
      if (real_isinf (arg_real) || real_isinf (arg_imag))
        {
          real_inf (result_real);
          *result_imag = dconst0;
          result_imag->sign = arg_imag->sign;
        }
      else
        {
          *result_real = *arg_real;
          *result_imag = *arg_imag;
        }
      return true;

    CASE_CFN_CSIN:
      return do_mpc_arg1 (result_real, result_imag, mpc_sin,
                          arg_real, arg_imag, format);

    CASE_CFN_CSINH:
      return do_mpc_arg1 (result_real, result_imag, mpc_sinh,
                          arg_real, arg_imag, format);

    CASE_CFN_CTAN:
      return do_mpc_arg1 (result_real, result_imag, mpc_tan,
                          arg_real, arg_imag, format);

    CASE_CFN_CTANH:
      return do_mpc_arg1 (result_real, result_imag, mpc_tanh,
                          arg_real, arg_imag, format);

    CASE_CFN_CLOG:
      return do_mpc_arg1 (result_real, result_imag, mpc_log,
                          arg_real, arg_imag, format);

    CASE_CFN_CSQRT:
      return do_mpc_arg1 (result_real, result_imag, mpc_sqrt,
                          arg_real, arg_imag, format);

    CASE_CFN_CASIN:
      return do_mpc_arg1 (result_real, result_imag, mpc_asin,
                          arg_real, arg_imag, format);

    CASE_CFN_CACOS:
      return do_mpc_arg1 (result_real, result_imag, mpc_acos,
                          arg_real, arg_imag, format);

    CASE_CFN_CATAN:
      return do_mpc_arg1 (result_real, result_imag, mpc_atan,
                          arg_real, arg_imag, format);

    CASE_CFN_CASINH:
      return do_mpc_arg1 (result_real, result_imag, mpc_asinh,
                          arg_real, arg_imag, format);

    CASE_CFN_CACOSH:
      return do_mpc_arg1 (result_real, result_imag, mpc_acosh,
                          arg_real, arg_imag, format);

    CASE_CFN_CATANH:
      return do_mpc_arg1 (result_real, result_imag, mpc_atanh,
                          arg_real, arg_imag, format);

    CASE_CFN_CEXP:
      return do_mpc_arg1 (result_real, result_imag, mpc_exp,
                          arg_real, arg_imag, format);

    default:
      return false;
    }
}
/* Subroutine of fold_const_call, with the same interface.  Handle cases
   where the arguments and result are numerical.  */

static tree
fold_const_call_1 (combined_fn fn, tree type, tree arg)
{
  machine_mode mode = TYPE_MODE (type);
  machine_mode arg_mode = TYPE_MODE (TREE_TYPE (arg));

  if (integer_cst_p (arg))
    {
      if (SCALAR_INT_MODE_P (mode))
        {
          wide_int result;
          if (fold_const_call_ss (&result, fn, wi::to_wide (arg),
                                  TYPE_PRECISION (type), TREE_TYPE (arg)))
            return wide_int_to_tree (type, result);
        }
      return NULL_TREE;
    }

  if (real_cst_p (arg))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg_mode));
      if (mode == arg_mode)
        {
          /* real -> real.  */
          REAL_VALUE_TYPE result;
          if (fold_const_call_ss (&result, fn, TREE_REAL_CST_PTR (arg),
                                  REAL_MODE_FORMAT (mode)))
            return build_real (type, result);
        }
      else if (COMPLEX_MODE_P (mode)
               && GET_MODE_INNER (mode) == arg_mode)
        {
          /* real -> complex real.  */
          REAL_VALUE_TYPE result_real, result_imag;
          if (fold_const_call_cs (&result_real, &result_imag, fn,
                                  TREE_REAL_CST_PTR (arg),
                                  REAL_MODE_FORMAT (arg_mode)))
            return build_complex (type,
                                  build_real (TREE_TYPE (type), result_real),
                                  build_real (TREE_TYPE (type), result_imag));
        }
      else if (INTEGRAL_TYPE_P (type))
        {
          /* real -> int.  */
          wide_int result;
          if (fold_const_call_ss (&result, fn,
                                  TREE_REAL_CST_PTR (arg),
                                  TYPE_PRECISION (type),
                                  REAL_MODE_FORMAT (arg_mode)))
            return wide_int_to_tree (type, result);
        }
      return NULL_TREE;
    }

  if (complex_cst_p (arg))
    {
      gcc_checking_assert (COMPLEX_MODE_P (arg_mode));
      machine_mode inner_mode = GET_MODE_INNER (arg_mode);
      tree argr = TREE_REALPART (arg);
      tree argi = TREE_IMAGPART (arg);
      if (mode == arg_mode
          && real_cst_p (argr)
          && real_cst_p (argi))
        {
          /* complex real -> complex real.  */
          REAL_VALUE_TYPE result_real, result_imag;
          if (fold_const_call_cc (&result_real, &result_imag, fn,
                                  TREE_REAL_CST_PTR (argr),
                                  TREE_REAL_CST_PTR (argi),
                                  REAL_MODE_FORMAT (inner_mode)))
            return build_complex (type,
                                  build_real (TREE_TYPE (type), result_real),
                                  build_real (TREE_TYPE (type), result_imag));
        }
      if (mode == inner_mode
          && real_cst_p (argr)
          && real_cst_p (argi))
        {
          /* complex real -> real.  */
          REAL_VALUE_TYPE result;
          if (fold_const_call_sc (&result, fn,
                                  TREE_REAL_CST_PTR (argr),
                                  TREE_REAL_CST_PTR (argi),
                                  REAL_MODE_FORMAT (inner_mode)))
            return build_real (type, result);
        }
      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Try to fold FN (ARG) to a constant.  Return the constant on success,
   otherwise return null.  TYPE is the type of the return value.  */

tree
fold_const_call (combined_fn fn, tree type, tree arg)
{
  switch (fn)
    {
    case CFN_BUILT_IN_STRLEN:
      if (const char *str = c_getstr (arg))
        return build_int_cst (type, strlen (str));
      return NULL_TREE;

    CASE_CFN_NAN:
    CASE_FLT_FN_FLOATN_NX (CFN_BUILT_IN_NAN):
    case CFN_BUILT_IN_NAND32:
    case CFN_BUILT_IN_NAND64:
    case CFN_BUILT_IN_NAND128:
      return fold_const_builtin_nan (type, arg, true);

    CASE_CFN_NANS:
    CASE_FLT_FN_FLOATN_NX (CFN_BUILT_IN_NANS):
    case CFN_BUILT_IN_NANSD32:
    case CFN_BUILT_IN_NANSD64:
    case CFN_BUILT_IN_NANSD128:
      return fold_const_builtin_nan (type, arg, false);

    case CFN_REDUC_PLUS:
      return fold_const_reduction (type, arg, PLUS_EXPR);

    case CFN_REDUC_MAX:
      return fold_const_reduction (type, arg, MAX_EXPR);

    case CFN_REDUC_MIN:
      return fold_const_reduction (type, arg, MIN_EXPR);

    case CFN_REDUC_AND:
      return fold_const_reduction (type, arg, BIT_AND_EXPR);

    case CFN_REDUC_IOR:
      return fold_const_reduction (type, arg, BIT_IOR_EXPR);

    case CFN_REDUC_XOR:
      return fold_const_reduction (type, arg, BIT_XOR_EXPR);

    case CFN_VEC_CONVERT:
      return fold_const_vec_convert (type, arg);

    default:
      return fold_const_call_1 (fn, type, arg);
    }
}
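/* Illustrative sketch (not part of the original file): a caller such as
   gimple or generic folding passes the combined function code, the call's
   return type and the constant argument tree, e.g.

     tree arg = build_real (double_type_node, dconst2);
     tree folded = fold_const_call (CFN_SQRT, double_type_node, arg);
     // folded is either a REAL_CST holding sqrt (2.0) correctly rounded
     // for double, or NULL_TREE if the call cannot be folded.

   dconst2 and double_type_node are the usual GCC globals.  */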
/* Fold a call to IFN_FOLD_LEFT_<CODE> (ARG0, ARG1), returning a value
   of type TYPE.  */

static tree
fold_const_fold_left (tree type, tree arg0, tree arg1, tree_code code)
{
  if (TREE_CODE (arg1) != VECTOR_CST)
    return NULL_TREE;

  unsigned HOST_WIDE_INT nelts;
  if (!VECTOR_CST_NELTS (arg1).is_constant (&nelts))
    return NULL_TREE;

  for (unsigned HOST_WIDE_INT i = 0; i < nelts; i++)
    {
      arg0 = const_binop (code, type, arg0, VECTOR_CST_ELT (arg1, i));
      if (arg0 == NULL_TREE || !CONSTANT_CLASS_P (arg0))
        return NULL_TREE;
    }
  return arg0;
}
/* Try to evaluate:

      *RESULT = FN (*ARG0, *ARG1)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_call_sss (real_value *result, combined_fn fn,
                     const real_value *arg0, const real_value *arg1,
                     const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_DREM:
    CASE_CFN_REMAINDER:
      return do_mpfr_arg2 (result, mpfr_remainder, arg0, arg1, format);

    CASE_CFN_ATAN2:
      return do_mpfr_arg2 (result, mpfr_atan2, arg0, arg1, format);

    CASE_CFN_FDIM:
      return do_mpfr_arg2 (result, mpfr_dim, arg0, arg1, format);

    CASE_CFN_FMOD:
      return do_mpfr_arg2 (result, mpfr_fmod, arg0, arg1, format);

    CASE_CFN_HYPOT:
      return do_mpfr_arg2 (result, mpfr_hypot, arg0, arg1, format);

    CASE_CFN_COPYSIGN:
    CASE_CFN_COPYSIGN_FN:
      *result = *arg0;
      real_copysign (result, arg1);
      return true;

    CASE_CFN_FMIN:
    CASE_CFN_FMIN_FN:
      return do_mpfr_arg2 (result, mpfr_min, arg0, arg1, format);

    CASE_CFN_FMAX:
    CASE_CFN_FMAX_FN:
      return do_mpfr_arg2 (result, mpfr_max, arg0, arg1, format);

    CASE_CFN_POW:
      return fold_const_pow (result, arg0, arg1, format);

    CASE_CFN_NEXTAFTER:
    CASE_CFN_NEXTTOWARD:
      return fold_const_nextafter (result, arg0, arg1, format);

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = FN (*ARG0, ARG1)

   where FORMAT is the format of *RESULT and *ARG0.  Return true on
   success.  */

static bool
fold_const_call_sss (real_value *result, combined_fn fn,
                     const real_value *arg0, const wide_int_ref &arg1,
                     const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_LDEXP:
      return fold_const_builtin_load_exponent (result, arg0, arg1, format);

    CASE_CFN_SCALBN:
    CASE_CFN_SCALBLN:
      return (format->b == 2
              && fold_const_builtin_load_exponent (result, arg0, arg1,
                                                   format));

    CASE_CFN_POWI:
      /* Avoid the folding if flag_signaling_nans is on and
         operand is a signaling NaN.  */
      if (!flag_unsafe_math_optimizations
          && flag_signaling_nans
          && REAL_VALUE_ISSIGNALING_NAN (*arg0))
        return false;

      real_powi (result, format, arg0, arg1.to_shwi ());
      return true;

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = FN (ARG0, *ARG1)

   where FORMAT is the format of *RESULT and *ARG1.  Return true on
   success.  */

static bool
fold_const_call_sss (real_value *result, combined_fn fn,
                     const wide_int_ref &arg0, const real_value *arg1,
                     const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_JN:
      return do_mpfr_arg2 (result, mpfr_jn, arg0, arg1, format);

    CASE_CFN_YN:
      return (real_compare (GT_EXPR, arg1, &dconst0)
              && do_mpfr_arg2 (result, mpfr_yn, arg0, arg1, format));

    default:
      return false;
    }
}
/* Try to evaluate:

      RESULT = fn (ARG0, ARG1)

   where FORMAT is the format of the real and imaginary parts of RESULT
   (RESULT_REAL and RESULT_IMAG), of ARG0 (ARG0_REAL and ARG0_IMAG)
   and of ARG1 (ARG1_REAL and ARG1_IMAG).  Return true on success.  */

static bool
fold_const_call_ccc (real_value *result_real, real_value *result_imag,
                     combined_fn fn, const real_value *arg0_real,
                     const real_value *arg0_imag, const real_value *arg1_real,
                     const real_value *arg1_imag, const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_CPOW:
      return do_mpc_arg2 (result_real, result_imag, mpc_pow,
                          arg0_real, arg0_imag, arg1_real, arg1_imag, format);

    default:
      return false;
    }
}
/* Subroutine of fold_const_call, with the same interface.  Handle cases
   where the arguments and result are numerical.  */

static tree
fold_const_call_1 (combined_fn fn, tree type, tree arg0, tree arg1)
{
  machine_mode mode = TYPE_MODE (type);
  machine_mode arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
  machine_mode arg1_mode = TYPE_MODE (TREE_TYPE (arg1));

  if (mode == arg0_mode
      && real_cst_p (arg0)
      && real_cst_p (arg1))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg0_mode));
      REAL_VALUE_TYPE result;
      if (arg0_mode == arg1_mode)
        {
          /* real, real -> real.  */
          if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
                                   TREE_REAL_CST_PTR (arg1),
                                   REAL_MODE_FORMAT (mode)))
            return build_real (type, result);
        }
      else if (arg1_mode == TYPE_MODE (long_double_type_node))
        switch (fn)
          {
          CASE_CFN_NEXTTOWARD:
            /* real, long double -> real.  */
            if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
                                     TREE_REAL_CST_PTR (arg1),
                                     REAL_MODE_FORMAT (mode)))
              return build_real (type, result);
            break;
          default:
            break;
          }
      return NULL_TREE;
    }

  if (real_cst_p (arg0)
      && integer_cst_p (arg1))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg0_mode));
      if (mode == arg0_mode)
        {
          /* real, int -> real.  */
          REAL_VALUE_TYPE result;
          if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
                                   wi::to_wide (arg1),
                                   REAL_MODE_FORMAT (mode)))
            return build_real (type, result);
        }
      return NULL_TREE;
    }

  if (integer_cst_p (arg0)
      && real_cst_p (arg1))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg1_mode));
      if (mode == arg1_mode)
        {
          /* int, real -> real.  */
          REAL_VALUE_TYPE result;
          if (fold_const_call_sss (&result, fn, wi::to_wide (arg0),
                                   TREE_REAL_CST_PTR (arg1),
                                   REAL_MODE_FORMAT (mode)))
            return build_real (type, result);
        }
      return NULL_TREE;
    }

  if (arg0_mode == arg1_mode
      && complex_cst_p (arg0)
      && complex_cst_p (arg1))
    {
      gcc_checking_assert (COMPLEX_MODE_P (arg0_mode));
      machine_mode inner_mode = GET_MODE_INNER (arg0_mode);
      tree arg0r = TREE_REALPART (arg0);
      tree arg0i = TREE_IMAGPART (arg0);
      tree arg1r = TREE_REALPART (arg1);
      tree arg1i = TREE_IMAGPART (arg1);
      if (mode == arg0_mode
          && real_cst_p (arg0r)
          && real_cst_p (arg0i)
          && real_cst_p (arg1r)
          && real_cst_p (arg1i))
        {
          /* complex real, complex real -> complex real.  */
          REAL_VALUE_TYPE result_real, result_imag;
          if (fold_const_call_ccc (&result_real, &result_imag, fn,
                                   TREE_REAL_CST_PTR (arg0r),
                                   TREE_REAL_CST_PTR (arg0i),
                                   TREE_REAL_CST_PTR (arg1r),
                                   TREE_REAL_CST_PTR (arg1i),
                                   REAL_MODE_FORMAT (inner_mode)))
            return build_complex (type,
                                  build_real (TREE_TYPE (type), result_real),
                                  build_real (TREE_TYPE (type), result_imag));
        }
      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Try to fold FN (ARG0, ARG1) to a constant.  Return the constant on success,
   otherwise return null.  TYPE is the type of the return value.  */

tree
fold_const_call (combined_fn fn, tree type, tree arg0, tree arg1)
{
  const char *p0, *p1;
  char c;
  switch (fn)
    {
    case CFN_BUILT_IN_STRSPN:
      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
        return build_int_cst (type, strspn (p0, p1));
      return NULL_TREE;

    case CFN_BUILT_IN_STRCSPN:
      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
        return build_int_cst (type, strcspn (p0, p1));
      return NULL_TREE;

    case CFN_BUILT_IN_STRCMP:
      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
        return build_cmp_result (type, strcmp (p0, p1));
      return NULL_TREE;

    case CFN_BUILT_IN_STRCASECMP:
      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
        {
          int r = strcmp (p0, p1);
          if (r == 0)
            return build_cmp_result (type, r);
        }
      return NULL_TREE;

    case CFN_BUILT_IN_INDEX:
    case CFN_BUILT_IN_STRCHR:
      if ((p0 = c_getstr (arg0)) && target_char_cst_p (arg1, &c))
        {
          const char *r = strchr (p0, c);
          if (!r)
            return build_int_cst (type, 0);
          return fold_convert (type,
                               fold_build_pointer_plus_hwi (arg0, r - p0));
        }
      return NULL_TREE;

    case CFN_BUILT_IN_RINDEX:
    case CFN_BUILT_IN_STRRCHR:
      if ((p0 = c_getstr (arg0)) && target_char_cst_p (arg1, &c))
        {
          const char *r = strrchr (p0, c);
          if (!r)
            return build_int_cst (type, 0);
          return fold_convert (type,
                               fold_build_pointer_plus_hwi (arg0, r - p0));
        }
      return NULL_TREE;

    case CFN_BUILT_IN_STRSTR:
      if ((p1 = c_getstr (arg1)))
        {
          if ((p0 = c_getstr (arg0)))
            {
              const char *r = strstr (p0, p1);
              if (!r)
                return build_int_cst (type, 0);
              return fold_convert (type,
                                   fold_build_pointer_plus_hwi (arg0, r - p0));
            }
          if (*p1 == '\0')
            return fold_convert (type, arg0);
        }
      return NULL_TREE;

    case CFN_FOLD_LEFT_PLUS:
      return fold_const_fold_left (type, arg0, arg1, PLUS_EXPR);

    default:
      return fold_const_call_1 (fn, type, arg0, arg1);
    }
}
/* Try to evaluate:

      *RESULT = FN (*ARG0, *ARG1, *ARG2)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_call_ssss (real_value *result, combined_fn fn,
                      const real_value *arg0, const real_value *arg1,
                      const real_value *arg2, const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_FMA:
    CASE_CFN_FMA_FN:
      return do_mpfr_arg3 (result, mpfr_fma, arg0, arg1, arg2, format);

    case CFN_FMS:
      {
        real_value new_arg2 = real_value_negate (arg2);
        return do_mpfr_arg3 (result, mpfr_fma, arg0, arg1, &new_arg2, format);
      }

    case CFN_FNMA:
      {
        real_value new_arg0 = real_value_negate (arg0);
        return do_mpfr_arg3 (result, mpfr_fma, &new_arg0, arg1, arg2, format);
      }

    case CFN_FNMS:
      {
        real_value new_arg0 = real_value_negate (arg0);
        real_value new_arg2 = real_value_negate (arg2);
        return do_mpfr_arg3 (result, mpfr_fma, &new_arg0, arg1,
                             &new_arg2, format);
      }

    default:
      return false;
    }
}
/* Subroutine of fold_const_call, with the same interface.  Handle cases
   where the arguments and result are numerical.  */

static tree
fold_const_call_1 (combined_fn fn, tree type, tree arg0, tree arg1, tree arg2)
{
  machine_mode mode = TYPE_MODE (type);
  machine_mode arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
  machine_mode arg1_mode = TYPE_MODE (TREE_TYPE (arg1));
  machine_mode arg2_mode = TYPE_MODE (TREE_TYPE (arg2));

  if (arg0_mode == arg1_mode
      && arg0_mode == arg2_mode
      && real_cst_p (arg0)
      && real_cst_p (arg1)
      && real_cst_p (arg2))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg0_mode));
      if (mode == arg0_mode)
        {
          /* real, real, real -> real.  */
          REAL_VALUE_TYPE result;
          if (fold_const_call_ssss (&result, fn, TREE_REAL_CST_PTR (arg0),
                                    TREE_REAL_CST_PTR (arg1),
                                    TREE_REAL_CST_PTR (arg2),
                                    REAL_MODE_FORMAT (mode)))
            return build_real (type, result);
        }
      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Try to fold FN (ARG0, ARG1, ARG2) to a constant.  Return the constant on
   success, otherwise return null.  TYPE is the type of the return value.  */

tree
fold_const_call (combined_fn fn, tree type, tree arg0, tree arg1, tree arg2)
{
  const char *p0, *p1;
  char c;
  unsigned HOST_WIDE_INT s0, s1, s2 = 0;
  switch (fn)
    {
    case CFN_BUILT_IN_STRNCMP:
      if (!size_t_cst_p (arg2, &s2))
        return NULL_TREE;
      if (s2 == 0
          && !TREE_SIDE_EFFECTS (arg0)
          && !TREE_SIDE_EFFECTS (arg1))
        return build_int_cst (type, 0);
      else if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
        return build_int_cst (type, strncmp (p0, p1, MIN (s2, SIZE_MAX)));
      return NULL_TREE;

    case CFN_BUILT_IN_STRNCASECMP:
      if (!size_t_cst_p (arg2, &s2))
        return NULL_TREE;
      if (s2 == 0
          && !TREE_SIDE_EFFECTS (arg0)
          && !TREE_SIDE_EFFECTS (arg1))
        return build_int_cst (type, 0);
      else if ((p0 = c_getstr (arg0))
               && (p1 = c_getstr (arg1))
               && strncmp (p0, p1, MIN (s2, SIZE_MAX)) == 0)
        return build_int_cst (type, 0);
      return NULL_TREE;

    case CFN_BUILT_IN_BCMP:
    case CFN_BUILT_IN_MEMCMP:
      if (!size_t_cst_p (arg2, &s2))
        return NULL_TREE;
      if (s2 == 0
          && !TREE_SIDE_EFFECTS (arg0)
          && !TREE_SIDE_EFFECTS (arg1))
        return build_int_cst (type, 0);
      if ((p0 = getbyterep (arg0, &s0))
          && (p1 = getbyterep (arg1, &s1))
          && s2 <= s0
          && s2 <= s1)
        return build_cmp_result (type, memcmp (p0, p1, s2));
      return NULL_TREE;

    case CFN_BUILT_IN_MEMCHR:
      if (!size_t_cst_p (arg2, &s2))
        return NULL_TREE;
      if (s2 == 0
          && !TREE_SIDE_EFFECTS (arg0)
          && !TREE_SIDE_EFFECTS (arg1))
        return build_int_cst (type, 0);
      if ((p0 = getbyterep (arg0, &s0))
          && s2 <= s0
          && target_char_cst_p (arg1, &c))
        {
          const char *r = (const char *) memchr (p0, c, s2);
          if (!r)
            return build_int_cst (type, 0);
          return fold_convert (type,
                               fold_build_pointer_plus_hwi (arg0, r - p0));
        }
      return NULL_TREE;

    case CFN_WHILE_ULT:
      {
        poly_uint64 parg0, parg1;
        if (poly_int_tree_p (arg0, &parg0) && poly_int_tree_p (arg1, &parg1))
          return fold_while_ult (type, parg0, parg1);
        return NULL_TREE;
      }

    default:
      return fold_const_call_1 (fn, type, arg0, arg1, arg2);
    }
}