/* Constant folding for calls to built-in and internal functions.
   Copyright (C) 1988-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "realmpfr.h"
#include "tree.h"
#include "stor-layout.h"
#include "options.h"
#include "fold-const.h"
#include "fold-const-call.h"
#include "case-cfn-macros.h"
#include "tm.h"		/* For C[LT]Z_DEFINED_VALUE_AT_ZERO.  */
#include "builtins.h"
#include "gimple-expr.h"
#include "tree-vector-builder.h"
/* Functions that test for certain constant types, abstracting away the
   decision about whether to check for overflow.  */

static inline bool
integer_cst_p (tree t)
{
  return TREE_CODE (t) == INTEGER_CST && !TREE_OVERFLOW (t);
}

static inline bool
real_cst_p (tree t)
{
  return TREE_CODE (t) == REAL_CST && !TREE_OVERFLOW (t);
}

static inline bool
complex_cst_p (tree t)
{
  return TREE_CODE (t) == COMPLEX_CST;
}
/* Return true if ARG is a size_type_node constant.
   Store it in *SIZE_OUT if so.  */

static inline bool
size_t_cst_p (tree t, unsigned HOST_WIDE_INT *size_out)
{
  if (types_compatible_p (size_type_node, TREE_TYPE (t))
      && integer_cst_p (t)
      && tree_fits_uhwi_p (t))
    {
      *size_out = tree_to_uhwi (t);
      return true;
    }
  return false;
}
/* RES is the result of a comparison in which < 0 means "less", 0 means
   "equal" and > 0 means "more".  Canonicalize it to -1, 0 or 1 and
   return it in type TYPE.  */

static tree
build_cmp_result (tree type, int res)
{
  return build_int_cst (type, res < 0 ? -1 : res > 0 ? 1 : 0);
}
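
/* A minimal usage sketch (hypothetical caller, not part of this file):

     int raw = memcmp (p0, p1, n);                            .. host value, e.g. 37
     tree cst = build_cmp_result (integer_type_node, raw);    .. folds to 1

   so host-specific memcmp/strcmp magnitudes never leak into the IL.  */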
/* M is the result of trying to constant-fold an expression (starting
   with clear MPFR flags) and INEXACT says whether the result in M is
   exact or inexact.  Return true if M can be used as a constant-folded
   result in format FORMAT, storing the value in *RESULT if so.  */

static bool
do_mpfr_ckconv (real_value *result, mpfr_srcptr m, bool inexact,
		const real_format *format)
{
  /* Proceed iff we get a normal number, i.e. not NaN or Inf and no
     overflow/underflow occurred.  If -frounding-math, proceed iff the
     result of calling FUNC was exact.  */
  if (!mpfr_number_p (m)
      || mpfr_overflow_p ()
      || mpfr_underflow_p ()
      || (flag_rounding_math && inexact))
    return false;

  REAL_VALUE_TYPE tmp;
  real_from_mpfr (&tmp, m, format, MPFR_RNDN);

  /* Proceed iff GCC's REAL_VALUE_TYPE can hold the MPFR values.
     If the REAL_VALUE_TYPE is zero but the mpfr_t is not, then we
     underflowed in the conversion.  */
  if (!real_isfinite (&tmp)
      || ((tmp.cl == rvc_zero) != (mpfr_zero_p (m) != 0)))
    return false;

  real_convert (result, format, &tmp);
  return real_identical (result, &tmp);
}
/* Try to evaluate:

      *RESULT = f (*ARG)

   in format FORMAT, given that FUNC is the MPFR implementation of f.
   Return true on success.  */

static bool
do_mpfr_arg1 (real_value *result,
	      int (*func) (mpfr_ptr, mpfr_srcptr, mpfr_rnd_t),
	      const real_value *arg, const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2 || !real_isfinite (arg))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;
  mpfr_t m;

  mpfr_init2 (m, prec);
  mpfr_from_real (m, arg, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m, m, rnd);
  bool ok = do_mpfr_ckconv (result, m, inexact, format);
  mpfr_clear (m);

  return ok;
}
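
/* A minimal caller sketch (assumed values, mirroring fold_const_call_ss
   further down): folding sin (1.0) in the double format.

     REAL_VALUE_TYPE res;
     const real_format *fmt = REAL_MODE_FORMAT (DFmode);
     if (do_mpfr_arg1 (&res, mpfr_sin, &dconst1, fmt))
       ... use build_real (double_type_node, res) ...

   MPFR evaluates at FMT's precision and do_mpfr_ckconv rejects values
   that GCC's real_value cannot round-trip exactly.  */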
/* Try to evaluate:

      *RESULT_SIN = sin (*ARG);
      *RESULT_COS = cos (*ARG);

   for format FORMAT.  Return true on success.  */

static bool
do_mpfr_sincos (real_value *result_sin, real_value *result_cos,
		const real_value *arg, const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2 || !real_isfinite (arg))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;
  mpfr_t m, ms, mc;

  mpfr_inits2 (prec, m, ms, mc, NULL);
  mpfr_from_real (m, arg, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = mpfr_sin_cos (ms, mc, m, rnd);
  bool ok = (do_mpfr_ckconv (result_sin, ms, inexact, format)
	     && do_mpfr_ckconv (result_cos, mc, inexact, format));
  mpfr_clears (m, ms, mc, NULL);

  return ok;
}
/* Try to evaluate:

      *RESULT = f (*ARG0, *ARG1)

   in format FORMAT, given that FUNC is the MPFR implementation of f.
   Return true on success.  */

static bool
do_mpfr_arg2 (real_value *result,
	      int (*func) (mpfr_ptr, mpfr_srcptr, mpfr_srcptr, mpfr_rnd_t),
	      const real_value *arg0, const real_value *arg1,
	      const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2 || !real_isfinite (arg0) || !real_isfinite (arg1))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;
  mpfr_t m0, m1;

  mpfr_inits2 (prec, m0, m1, NULL);
  mpfr_from_real (m0, arg0, MPFR_RNDN);
  mpfr_from_real (m1, arg1, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m0, m0, m1, rnd);
  bool ok = do_mpfr_ckconv (result, m0, inexact, format);
  mpfr_clears (m0, m1, NULL);

  return ok;
}
/* Try to evaluate:

      *RESULT = f (ARG0, *ARG1)

   in format FORMAT, given that FUNC is the MPFR implementation of f.
   Return true on success.  */

static bool
do_mpfr_arg2 (real_value *result,
	      int (*func) (mpfr_ptr, long, mpfr_srcptr, mpfr_rnd_t),
	      const wide_int_ref &arg0, const real_value *arg1,
	      const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2 || !real_isfinite (arg1))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;
  mpfr_t m;

  mpfr_init2 (m, prec);
  mpfr_from_real (m, arg1, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m, arg0.to_shwi (), m, rnd);
  bool ok = do_mpfr_ckconv (result, m, inexact, format);
  mpfr_clear (m);

  return ok;
}
/* Try to evaluate:

      *RESULT = f (*ARG0, *ARG1, *ARG2)

   in format FORMAT, given that FUNC is the MPFR implementation of f.
   Return true on success.  */

static bool
do_mpfr_arg3 (real_value *result,
	      int (*func) (mpfr_ptr, mpfr_srcptr, mpfr_srcptr,
			   mpfr_srcptr, mpfr_rnd_t),
	      const real_value *arg0, const real_value *arg1,
	      const real_value *arg2, const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2
      || !real_isfinite (arg0)
      || !real_isfinite (arg1)
      || !real_isfinite (arg2))
    return false;

  int prec = format->p;
  mpfr_rnd_t rnd = format->round_towards_zero ? MPFR_RNDZ : MPFR_RNDN;
  mpfr_t m0, m1, m2;

  mpfr_inits2 (prec, m0, m1, m2, NULL);
  mpfr_from_real (m0, arg0, MPFR_RNDN);
  mpfr_from_real (m1, arg1, MPFR_RNDN);
  mpfr_from_real (m2, arg2, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m0, m0, m1, m2, rnd);
  bool ok = do_mpfr_ckconv (result, m0, inexact, format);
  mpfr_clears (m0, m1, m2, NULL);

  return ok;
}
/* M is the result of trying to constant-fold an expression (starting
   with clear MPFR flags) and INEXACT says whether the result in M is
   exact or inexact.  Return true if M can be used as a constant-folded
   result in which the real and imaginary parts have format FORMAT.
   Store those parts in *RESULT_REAL and *RESULT_IMAG if so.  */

static bool
do_mpc_ckconv (real_value *result_real, real_value *result_imag,
	       mpc_srcptr m, bool inexact, const real_format *format)
{
  /* Proceed iff we get a normal number, i.e. not NaN or Inf and no
     overflow/underflow occurred.  If -frounding-math, proceed iff the
     result of calling FUNC was exact.  */
  if (!mpfr_number_p (mpc_realref (m))
      || !mpfr_number_p (mpc_imagref (m))
      || mpfr_overflow_p ()
      || mpfr_underflow_p ()
      || (flag_rounding_math && inexact))
    return false;

  REAL_VALUE_TYPE tmp_real, tmp_imag;
  real_from_mpfr (&tmp_real, mpc_realref (m), format, MPFR_RNDN);
  real_from_mpfr (&tmp_imag, mpc_imagref (m), format, MPFR_RNDN);

  /* Proceed iff GCC's REAL_VALUE_TYPE can hold the MPFR values.
     If the REAL_VALUE_TYPE is zero but the mpfr_t is not, then we
     underflowed in the conversion.  */
  if (!real_isfinite (&tmp_real)
      || !real_isfinite (&tmp_imag)
      || (tmp_real.cl == rvc_zero) != (mpfr_zero_p (mpc_realref (m)) != 0)
      || (tmp_imag.cl == rvc_zero) != (mpfr_zero_p (mpc_imagref (m)) != 0))
    return false;

  real_convert (result_real, format, &tmp_real);
  real_convert (result_imag, format, &tmp_imag);
  return (real_identical (result_real, &tmp_real)
	  && real_identical (result_imag, &tmp_imag));
}
/* Try to evaluate:

      RESULT = f (ARG)

   in format FORMAT, given that FUNC is the mpc implementation of f.
   Return true on success.  Both RESULT and ARG are represented as
   real and imaginary pairs.  */

static bool
do_mpc_arg1 (real_value *result_real, real_value *result_imag,
	     int (*func) (mpc_ptr, mpc_srcptr, mpc_rnd_t),
	     const real_value *arg_real, const real_value *arg_imag,
	     const real_format *format)
{
  /* To proceed, MPFR must exactly represent the target floating point
     format, which only happens when the target base equals two.  */
  if (format->b != 2
      || !real_isfinite (arg_real)
      || !real_isfinite (arg_imag))
    return false;

  int prec = format->p;
  mpc_rnd_t crnd = format->round_towards_zero ? MPC_RNDZZ : MPC_RNDNN;
  mpc_t m;

  mpc_init2 (m, prec);
  mpfr_from_real (mpc_realref (m), arg_real, MPFR_RNDN);
  mpfr_from_real (mpc_imagref (m), arg_imag, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m, m, crnd);
  bool ok = do_mpc_ckconv (result_real, result_imag, m, inexact, format);
  mpc_clear (m);

  return ok;
}
/* Try to evaluate:

      RESULT = f (ARG0, ARG1)

   in format FORMAT, given that FUNC is the mpc implementation of f.
   Return true on success.  RESULT, ARG0 and ARG1 are represented as
   real and imaginary pairs.  */

static bool
do_mpc_arg2 (real_value *result_real, real_value *result_imag,
	     int (*func)(mpc_ptr, mpc_srcptr, mpc_srcptr, mpc_rnd_t),
	     const real_value *arg0_real, const real_value *arg0_imag,
	     const real_value *arg1_real, const real_value *arg1_imag,
	     const real_format *format)
{
  if (!real_isfinite (arg0_real)
      || !real_isfinite (arg0_imag)
      || !real_isfinite (arg1_real)
      || !real_isfinite (arg1_imag))
    return false;

  int prec = format->p;
  mpc_rnd_t crnd = format->round_towards_zero ? MPC_RNDZZ : MPC_RNDNN;
  mpc_t m0, m1;

  mpc_init2 (m0, prec);
  mpc_init2 (m1, prec);
  mpfr_from_real (mpc_realref (m0), arg0_real, MPFR_RNDN);
  mpfr_from_real (mpc_imagref (m0), arg0_imag, MPFR_RNDN);
  mpfr_from_real (mpc_realref (m1), arg1_real, MPFR_RNDN);
  mpfr_from_real (mpc_imagref (m1), arg1_imag, MPFR_RNDN);
  mpfr_clear_flags ();
  bool inexact = func (m0, m0, m1, crnd);
  bool ok = do_mpc_ckconv (result_real, result_imag, m0, inexact, format);
  mpc_clears (m0, m1, NULL);

  return ok;
}
/* Try to evaluate:

      *RESULT = logb (*ARG)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_logb (real_value *result, const real_value *arg,
		 const real_format *format)
{
  switch (arg->cl)
    {
    case rvc_nan:
      /* If arg is +-NaN, then return it.  */
      *result = *arg;
      return true;

    case rvc_inf:
      /* If arg is +-Inf, then return +Inf.  */
      *result = *arg;
      result->sign = 0;
      return true;

    case rvc_zero:
      /* Zero may set errno and/or raise an exception.  */
      return false;

    case rvc_normal:
      /* For normal numbers, proceed iff radix == 2.  In GCC,
	 normalized significands are in the range [0.5, 1.0).  We
	 want the exponent as if they were [1.0, 2.0) so get the
	 exponent and subtract 1.  */
      if (format->b == 2)
	{
	  real_from_integer (result, format, REAL_EXP (arg) - 1, SIGNED);
	  return true;
	}
      return false;
    }
  gcc_unreachable ();
}
/* Try to evaluate:

      *RESULT = significand (*ARG)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_significand (real_value *result, const real_value *arg,
			const real_format *format)
{
  switch (arg->cl)
    {
    case rvc_zero:
    case rvc_nan:
    case rvc_inf:
      /* If arg is +-0, +-Inf or +-NaN, then return it.  */
      *result = *arg;
      return true;

    case rvc_normal:
      /* For normal numbers, proceed iff radix == 2.  */
      if (format->b == 2)
	{
	  *result = *arg;
	  /* In GCC, normalized significands are in the range [0.5, 1.0).
	     We want them to be [1.0, 2.0) so set the exponent to 1.  */
	  SET_REAL_EXP (result, 1);
	  return true;
	}
      return false;
    }
  gcc_unreachable ();
}
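
/* Worked example for the two helpers above, assuming an IEEE binary
   format: GCC normalizes 12.0 as 0.75 * 2^4, so logb (12.0) folds to
   3.0 (REAL_EXP minus 1) and significand (12.0) folds to 1.5, matching
   12.0 == 1.5 * 2^3.  */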
/* Try to evaluate:

      *RESULT = f (*ARG)

   where FORMAT is the format of *ARG and PRECISION is the number of
   significant bits in the result.  Return true on success.  */

static bool
fold_const_conversion (wide_int *result,
		       void (*fn) (real_value *, format_helper,
				   const real_value *),
		       const real_value *arg, unsigned int precision,
		       const real_format *format)
{
  if (!real_isfinite (arg))
    return false;

  real_value rounded;
  fn (&rounded, format, arg);

  bool fail = false;
  *result = real_to_integer (&rounded, &fail, precision);
  return !fail;
}
/* Try to evaluate:

      *RESULT = pow (*ARG0, *ARG1)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_pow (real_value *result, const real_value *arg0,
		const real_value *arg1, const real_format *format)
{
  if (do_mpfr_arg2 (result, mpfr_pow, arg0, arg1, format))
    return true;

  /* Check for an integer exponent.  */
  REAL_VALUE_TYPE cint1;
  HOST_WIDE_INT n1 = real_to_integer (arg1);
  real_from_integer (&cint1, VOIDmode, n1, SIGNED);
  /* Attempt to evaluate pow at compile-time, unless this should
     raise an exception.  */
  if (real_identical (arg1, &cint1)
      && (n1 > 0
	  || (!flag_trapping_math && !flag_errno_math)
	  || !real_equal (arg0, &dconst0)))
    {
      bool inexact = real_powi (result, format, arg0, n1);
      /* Avoid the folding if flag_signaling_nans is on.  */
      if (flag_unsafe_math_optimizations
	  || (!inexact
	      && !(flag_signaling_nans
		   && REAL_VALUE_ISSIGNALING_NAN (*arg0))))
	return true;
    }

  return false;
}
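
/* Example of the integer-exponent fallback above (assumed input):
   pow (x, 3.0) satisfies real_identical (arg1, &cint1), so real_powi
   computes x*x*x even when the MPFR path fails, provided the
   0^negative errno case and signaling-NaN operands are ruled out.  */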
/* Try to evaluate:

      *RESULT = nextafter (*ARG0, *ARG1)

   or

      *RESULT = nexttoward (*ARG0, *ARG1)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_nextafter (real_value *result, const real_value *arg0,
		      const real_value *arg1, const real_format *format)
{
  if (REAL_VALUE_ISSIGNALING_NAN (*arg0)
      || REAL_VALUE_ISSIGNALING_NAN (*arg1))
    return false;

  /* Don't handle composite modes, nor decimal, nor modes without
     inf or denorm at least for now.  */
  if (format->pnan < format->p
      || format->b == 10
      || !format->has_inf
      || !format->has_denorm)
    return false;

  if (real_nextafter (result, format, arg0, arg1)
      /* If raising underflow or overflow and setting errno to ERANGE,
	 fail if we care about those side-effects.  */
      && (flag_trapping_math || flag_errno_math))
    return false;
  /* Similarly for nextafter (0, 1) raising underflow.  */
  else if (flag_trapping_math
	   && arg0->cl == rvc_zero
	   && result->cl != rvc_zero)
    return false;

  real_convert (result, format, result);
  return true;
}
/* Try to evaluate:

      *RESULT = ldexp (*ARG0, ARG1)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_builtin_load_exponent (real_value *result, const real_value *arg0,
				  const wide_int_ref &arg1,
				  const real_format *format)
{
  /* Bound the maximum adjustment to twice the range of the
     mode's valid exponents.  Use abs to ensure the range is
     positive as a sanity check.  */
  int max_exp_adj = 2 * labs (format->emax - format->emin);

  /* The requested adjustment must be inside this range.  This
     is a preliminary cap to avoid things like overflow, we
     may still fail to compute the result for other reasons.  */
  if (wi::les_p (arg1, -max_exp_adj) || wi::ges_p (arg1, max_exp_adj))
    return false;

  /* Don't perform operation if we honor signaling NaNs and
     operand is a signaling NaN.  */
  if (!flag_unsafe_math_optimizations
      && flag_signaling_nans
      && REAL_VALUE_ISSIGNALING_NAN (*arg0))
    return false;

  REAL_VALUE_TYPE initial_result;
  real_ldexp (&initial_result, arg0, arg1.to_shwi ());

  /* Ensure we didn't overflow.  */
  if (real_isinf (&initial_result))
    return false;

  /* Only proceed if the target mode can hold the
     resulting value.  */
  *result = real_value_truncate (format, initial_result);
  return real_equal (&initial_result, result);
}
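
/* Example (assumed input): ldexp (1.5, 4) folds to 24.0; a request such
   as ldexp (x, 100000) is rejected up front because the adjustment lies
   outside 2 * (emax - emin) for every supported format.  */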
/* Fold a call to __builtin_nan or __builtin_nans with argument ARG and
   return type TYPE.  QUIET is true if a quiet rather than signalling
   NaN is required.  */

static tree
fold_const_builtin_nan (tree type, tree arg, bool quiet)
{
  REAL_VALUE_TYPE real;
  const char *str = c_getstr (arg);
  if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
    return build_real (type, real);
  return NULL_TREE;
}
/* Fold a call to IFN_REDUC_<CODE> (ARG), returning a value of type TYPE.  */

static tree
fold_const_reduction (tree type, tree arg, tree_code code)
{
  unsigned HOST_WIDE_INT nelts;
  if (TREE_CODE (arg) != VECTOR_CST
      || !VECTOR_CST_NELTS (arg).is_constant (&nelts))
    return NULL_TREE;

  tree res = VECTOR_CST_ELT (arg, 0);
  for (unsigned HOST_WIDE_INT i = 1; i < nelts; i++)
    {
      res = const_binop (code, type, res, VECTOR_CST_ELT (arg, i));
      if (res == NULL_TREE || !CONSTANT_CLASS_P (res))
	return NULL_TREE;
    }
  return res;
}
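
/* Example (assumed input): IFN_REDUC_PLUS on the VECTOR_CST
   { 1, 2, 3, 4 } applies PLUS_EXPR step by step through const_binop and
   folds to 10; if any intermediate step fails to fold, the call is left
   alone.  */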
/* Fold a call to IFN_VEC_CONVERT (ARG) returning TYPE.  */

static tree
fold_const_vec_convert (tree ret_type, tree arg)
{
  enum tree_code code = NOP_EXPR;
  tree arg_type = TREE_TYPE (arg);
  if (TREE_CODE (arg) != VECTOR_CST)
    return NULL_TREE;

  gcc_checking_assert (VECTOR_TYPE_P (ret_type) && VECTOR_TYPE_P (arg_type));

  if (INTEGRAL_TYPE_P (TREE_TYPE (ret_type))
      && SCALAR_FLOAT_TYPE_P (TREE_TYPE (arg_type)))
    code = FIX_TRUNC_EXPR;
  else if (INTEGRAL_TYPE_P (TREE_TYPE (arg_type))
	   && SCALAR_FLOAT_TYPE_P (TREE_TYPE (ret_type)))
    code = FLOAT_EXPR;

  /* We can't handle steps directly when extending, since the
     values need to wrap at the original precision first.  */
  bool step_ok_p
    = (INTEGRAL_TYPE_P (TREE_TYPE (ret_type))
       && INTEGRAL_TYPE_P (TREE_TYPE (arg_type))
       && (TYPE_PRECISION (TREE_TYPE (ret_type))
	   <= TYPE_PRECISION (TREE_TYPE (arg_type))));
  tree_vector_builder elts;
  if (!elts.new_unary_operation (ret_type, arg, step_ok_p))
    return NULL_TREE;

  unsigned int count = elts.encoded_nelts ();
  for (unsigned int i = 0; i < count; ++i)
    {
      tree elt = fold_unary (code, TREE_TYPE (ret_type),
			     VECTOR_CST_ELT (arg, i));
      if (elt == NULL_TREE || !CONSTANT_CLASS_P (elt))
	return NULL_TREE;
      elts.quick_push (elt);
    }

  return elts.build ();
}
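
/* Example (assumed input): IFN_VEC_CONVERT from the float vector
   constant { 1.5f, -2.5f, 3.0f, 4.75f } to a 4 x int type picks
   FIX_TRUNC_EXPR per element and folds to { 1, -2, 3, 4 }.  */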
/* Fold a call to IFN_WHILE_ULT, which computes

     IFN_WHILE_ULT (ARG0, ARG1, (TYPE) { ... })

   Return the value on success and null on failure.  */

static tree
fold_while_ult (tree type, poly_uint64 arg0, poly_uint64 arg1)
{
  if (known_ge (arg0, arg1))
    return build_zero_cst (type);

  if (maybe_ge (arg0, arg1))
    return NULL_TREE;

  poly_uint64 diff = arg1 - arg0;
  poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
  if (known_ge (diff, nelts))
    return build_all_ones_cst (type);

  unsigned HOST_WIDE_INT const_diff;
  if (known_le (diff, nelts) && diff.is_constant (&const_diff))
    {
      tree minus_one = build_minus_one_cst (TREE_TYPE (type));
      tree zero = build_zero_cst (TREE_TYPE (type));
      return build_vector_a_then_b (type, const_diff, minus_one, zero);
    }
  return NULL_TREE;
}
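
/* Example with a 4-lane mask type (assumed values):
   WHILE_ULT (3, 10) has diff = 7 >= 4 lanes, so it folds to all-ones;
   WHILE_ULT (8, 10) has diff = 2 and folds to { -1, -1, 0, 0 };
   WHILE_ULT (10, 10) is known_ge and folds to all zeros.  */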
/* Try to evaluate:

      *RESULT = FN (*ARG)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_call_ss (real_value *result, combined_fn fn,
		    const real_value *arg, const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      return (real_compare (GE_EXPR, arg, &dconst0)
	      && do_mpfr_arg1 (result, mpfr_sqrt, arg, format));

    CASE_CFN_CBRT:
    CASE_CFN_CBRT_FN:
      return do_mpfr_arg1 (result, mpfr_cbrt, arg, format);

    CASE_CFN_ASIN:
    CASE_CFN_ASIN_FN:
      return (real_compare (GE_EXPR, arg, &dconstm1)
	      && real_compare (LE_EXPR, arg, &dconst1)
	      && do_mpfr_arg1 (result, mpfr_asin, arg, format));

    CASE_CFN_ACOS:
    CASE_CFN_ACOS_FN:
      return (real_compare (GE_EXPR, arg, &dconstm1)
	      && real_compare (LE_EXPR, arg, &dconst1)
	      && do_mpfr_arg1 (result, mpfr_acos, arg, format));

    CASE_CFN_ATAN:
    CASE_CFN_ATAN_FN:
      return do_mpfr_arg1 (result, mpfr_atan, arg, format);

    CASE_CFN_ASINH:
    CASE_CFN_ASINH_FN:
      return do_mpfr_arg1 (result, mpfr_asinh, arg, format);

    CASE_CFN_ACOSH:
    CASE_CFN_ACOSH_FN:
      return (real_compare (GE_EXPR, arg, &dconst1)
	      && do_mpfr_arg1 (result, mpfr_acosh, arg, format));

    CASE_CFN_ATANH:
    CASE_CFN_ATANH_FN:
      return (real_compare (GE_EXPR, arg, &dconstm1)
	      && real_compare (LE_EXPR, arg, &dconst1)
	      && do_mpfr_arg1 (result, mpfr_atanh, arg, format));

    CASE_CFN_SIN:
    CASE_CFN_SIN_FN:
      return do_mpfr_arg1 (result, mpfr_sin, arg, format);

    CASE_CFN_COS:
    CASE_CFN_COS_FN:
      return do_mpfr_arg1 (result, mpfr_cos, arg, format);

    CASE_CFN_TAN:
    CASE_CFN_TAN_FN:
      return do_mpfr_arg1 (result, mpfr_tan, arg, format);

    CASE_CFN_SINH:
    CASE_CFN_SINH_FN:
      return do_mpfr_arg1 (result, mpfr_sinh, arg, format);

    CASE_CFN_COSH:
    CASE_CFN_COSH_FN:
      return do_mpfr_arg1 (result, mpfr_cosh, arg, format);

    CASE_CFN_TANH:
    CASE_CFN_TANH_FN:
      return do_mpfr_arg1 (result, mpfr_tanh, arg, format);

    CASE_CFN_ERF:
    CASE_CFN_ERF_FN:
      return do_mpfr_arg1 (result, mpfr_erf, arg, format);

    CASE_CFN_ERFC:
    CASE_CFN_ERFC_FN:
      return do_mpfr_arg1 (result, mpfr_erfc, arg, format);

    CASE_CFN_TGAMMA:
    CASE_CFN_TGAMMA_FN:
      return do_mpfr_arg1 (result, mpfr_gamma, arg, format);

    CASE_CFN_EXP:
    CASE_CFN_EXP_FN:
      return do_mpfr_arg1 (result, mpfr_exp, arg, format);

    CASE_CFN_EXP2:
    CASE_CFN_EXP2_FN:
      return do_mpfr_arg1 (result, mpfr_exp2, arg, format);

    CASE_CFN_EXP10:
    CASE_CFN_POW10:
      return do_mpfr_arg1 (result, mpfr_exp10, arg, format);

    CASE_CFN_EXPM1:
    CASE_CFN_EXPM1_FN:
      return do_mpfr_arg1 (result, mpfr_expm1, arg, format);

    CASE_CFN_LOG:
    CASE_CFN_LOG_FN:
      return (real_compare (GT_EXPR, arg, &dconst0)
	      && do_mpfr_arg1 (result, mpfr_log, arg, format));

    CASE_CFN_LOG2:
    CASE_CFN_LOG2_FN:
      return (real_compare (GT_EXPR, arg, &dconst0)
	      && do_mpfr_arg1 (result, mpfr_log2, arg, format));

    CASE_CFN_LOG10:
    CASE_CFN_LOG10_FN:
      return (real_compare (GT_EXPR, arg, &dconst0)
	      && do_mpfr_arg1 (result, mpfr_log10, arg, format));

    CASE_CFN_LOG1P:
    CASE_CFN_LOG1P_FN:
      return (real_compare (GT_EXPR, arg, &dconstm1)
	      && do_mpfr_arg1 (result, mpfr_log1p, arg, format));

    CASE_CFN_J0:
      return do_mpfr_arg1 (result, mpfr_j0, arg, format);

    CASE_CFN_J1:
      return do_mpfr_arg1 (result, mpfr_j1, arg, format);

    CASE_CFN_Y0:
      return (real_compare (GT_EXPR, arg, &dconst0)
	      && do_mpfr_arg1 (result, mpfr_y0, arg, format));

    CASE_CFN_Y1:
      return (real_compare (GT_EXPR, arg, &dconst0)
	      && do_mpfr_arg1 (result, mpfr_y1, arg, format));

    CASE_CFN_FLOOR:
    CASE_CFN_FLOOR_FN:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
	{
	  real_floor (result, format, arg);
	  return true;
	}
      return false;

    CASE_CFN_CEIL:
    CASE_CFN_CEIL_FN:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
	{
	  real_ceil (result, format, arg);
	  return true;
	}
      return false;

    CASE_CFN_TRUNC:
    CASE_CFN_TRUNC_FN:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
	{
	  real_trunc (result, format, arg);
	  return true;
	}
      return false;

    CASE_CFN_ROUND:
    CASE_CFN_ROUND_FN:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
	{
	  real_round (result, format, arg);
	  return true;
	}
      return false;

    CASE_CFN_ROUNDEVEN:
    CASE_CFN_ROUNDEVEN_FN:
      if (!REAL_VALUE_ISSIGNALING_NAN (*arg))
	{
	  real_roundeven (result, format, arg);
	  return true;
	}
      return false;

    CASE_CFN_LOGB:
      return fold_const_logb (result, arg, format);

    CASE_CFN_SIGNIFICAND:
      return fold_const_significand (result, arg, format);

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = FN (*ARG)

   where FORMAT is the format of ARG and PRECISION is the number of
   significant bits in the result.  Return true on success.  */

static bool
fold_const_call_ss (wide_int *result, combined_fn fn,
		    const real_value *arg, unsigned int precision,
		    const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_SIGNBIT:
      if (real_isneg (arg))
	*result = wi::one (precision);
      else
	*result = wi::zero (precision);
      return true;

    CASE_CFN_ILOGB:
      /* For ilogb we don't know FP_ILOGB0, so only handle normal values.
	 Proceed iff radix == 2.  In GCC, normalized significands are in
	 the range [0.5, 1.0).  We want the exponent as if they were
	 [1.0, 2.0) so get the exponent and subtract 1.  */
      if (arg->cl == rvc_normal && format->b == 2)
	{
	  *result = wi::shwi (REAL_EXP (arg) - 1, precision);
	  return true;
	}
      return false;

    CASE_CFN_ICEIL:
    CASE_CFN_LCEIL:
    CASE_CFN_LLCEIL:
      return fold_const_conversion (result, real_ceil, arg,
				    precision, format);

    CASE_CFN_IFLOOR:
    CASE_CFN_LFLOOR:
    CASE_CFN_LLFLOOR:
      return fold_const_conversion (result, real_floor, arg,
				    precision, format);

    CASE_CFN_IROUND:
    CASE_CFN_LROUND:
    CASE_CFN_LLROUND:
      return fold_const_conversion (result, real_round, arg,
				    precision, format);

    CASE_CFN_IRINT:
    CASE_CFN_LRINT:
    CASE_CFN_LLRINT:
      /* Not yet folded to a constant.  */
      return false;

    case CFN_BUILT_IN_FINITE:
    case CFN_BUILT_IN_FINITEF:
    case CFN_BUILT_IN_FINITEL:
    case CFN_BUILT_IN_FINITED32:
    case CFN_BUILT_IN_FINITED64:
    case CFN_BUILT_IN_FINITED128:
    case CFN_BUILT_IN_ISFINITE:
      *result = wi::shwi (real_isfinite (arg) ? 1 : 0, precision);
      return true;

    case CFN_BUILT_IN_ISSIGNALING:
      *result = wi::shwi (real_issignaling_nan (arg) ? 1 : 0, precision);
      return true;

    case CFN_BUILT_IN_ISINF:
    case CFN_BUILT_IN_ISINFF:
    case CFN_BUILT_IN_ISINFL:
    case CFN_BUILT_IN_ISINFD32:
    case CFN_BUILT_IN_ISINFD64:
    case CFN_BUILT_IN_ISINFD128:
      if (real_isinf (arg))
	*result = wi::shwi (arg->sign ? -1 : 1, precision);
      else
	*result = wi::shwi (0, precision);
      return true;

    case CFN_BUILT_IN_ISNAN:
    case CFN_BUILT_IN_ISNANF:
    case CFN_BUILT_IN_ISNANL:
    case CFN_BUILT_IN_ISNAND32:
    case CFN_BUILT_IN_ISNAND64:
    case CFN_BUILT_IN_ISNAND128:
      *result = wi::shwi (real_isnan (arg) ? 1 : 0, precision);
      return true;

    default:
      return false;
    }
}
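
/* Examples (assumed inputs): __builtin_signbit (-2.5) folds to 1,
   __builtin_ilogb (12.0) to 3 via REAL_EXP, and __builtin_isinf (x)
   to -1, 0 or 1 depending on whether x is -Inf, finite/NaN or +Inf.  */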
/* Try to evaluate:

      *RESULT = FN (ARG)

   where ARG_TYPE is the type of ARG and PRECISION is the number of bits
   in the result.  Return true on success.  */

static bool
fold_const_call_ss (wide_int *result, combined_fn fn, const wide_int_ref &arg,
		    unsigned int precision, tree arg_type)
{
  switch (fn)
    {
    CASE_CFN_FFS:
      *result = wi::shwi (wi::ffs (arg), precision);
      return true;

    CASE_CFN_CLZ:
      {
	int tmp;
	if (wi::ne_p (arg, 0))
	  tmp = wi::clz (arg);
	else if (!CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (arg_type),
					     tmp))
	  tmp = TYPE_PRECISION (arg_type);
	*result = wi::shwi (tmp, precision);
	return true;
      }

    CASE_CFN_CTZ:
      {
	int tmp;
	if (wi::ne_p (arg, 0))
	  tmp = wi::ctz (arg);
	else if (!CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (arg_type),
					     tmp))
	  tmp = TYPE_PRECISION (arg_type);
	*result = wi::shwi (tmp, precision);
	return true;
      }

    CASE_CFN_CLRSB:
      *result = wi::shwi (wi::clrsb (arg), precision);
      return true;

    CASE_CFN_POPCOUNT:
      *result = wi::shwi (wi::popcount (arg), precision);
      return true;

    CASE_CFN_PARITY:
      *result = wi::shwi (wi::parity (arg), precision);
      return true;

    case CFN_BUILT_IN_BSWAP16:
    case CFN_BUILT_IN_BSWAP32:
    case CFN_BUILT_IN_BSWAP64:
    case CFN_BUILT_IN_BSWAP128:
      *result = wi::bswap (wide_int::from (arg, precision,
					   TYPE_SIGN (arg_type)));
      return true;

    default:
      return false;
    }
}
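
/* Examples (assumed inputs): __builtin_popcount (0xff) folds to 8 and
   __builtin_ffs (0) to 0; __builtin_clz (0) folds to whatever value the
   target supplies through CLZ_DEFINED_VALUE_AT_ZERO, or to the argument
   type's precision when no such value is defined.  */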
/* Try to evaluate:

      RESULT = fn (ARG)

   where FORMAT is the format of ARG and of the real and imaginary parts
   of RESULT, passed as RESULT_REAL and RESULT_IMAG respectively.  Return
   true on success.  */

static bool
fold_const_call_cs (real_value *result_real, real_value *result_imag,
		    combined_fn fn, const real_value *arg,
		    const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_CEXPI:
      /* cexpi(x+yi) = cos(x)+sin(y)*i.  */
      return do_mpfr_sincos (result_imag, result_real, arg, format);

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = fn (ARG)

   where FORMAT is the format of RESULT and of the real and imaginary parts
   of ARG, passed as ARG_REAL and ARG_IMAG respectively.  Return true on
   success.  */

static bool
fold_const_call_sc (real_value *result, combined_fn fn,
		    const real_value *arg_real, const real_value *arg_imag,
		    const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_CABS:
      return do_mpfr_arg2 (result, mpfr_hypot, arg_real, arg_imag, format);

    default:
      return false;
    }
}
/* Try to evaluate:

      RESULT = fn (ARG)

   where FORMAT is the format of the real and imaginary parts of RESULT
   (RESULT_REAL and RESULT_IMAG) and of ARG (ARG_REAL and ARG_IMAG).
   Return true on success.  */

static bool
fold_const_call_cc (real_value *result_real, real_value *result_imag,
		    combined_fn fn, const real_value *arg_real,
		    const real_value *arg_imag, const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_CCOS:
      return do_mpc_arg1 (result_real, result_imag, mpc_cos,
			  arg_real, arg_imag, format);

    CASE_CFN_CCOSH:
      return do_mpc_arg1 (result_real, result_imag, mpc_cosh,
			  arg_real, arg_imag, format);

    CASE_CFN_CPROJ:
      if (real_isinf (arg_real) || real_isinf (arg_imag))
	{
	  *result_real = dconstinf;
	  *result_imag = dconst0;
	  result_imag->sign = arg_imag->sign;
	}
      else
	{
	  *result_real = *arg_real;
	  *result_imag = *arg_imag;
	}
      return true;

    CASE_CFN_CSIN:
      return do_mpc_arg1 (result_real, result_imag, mpc_sin,
			  arg_real, arg_imag, format);

    CASE_CFN_CSINH:
      return do_mpc_arg1 (result_real, result_imag, mpc_sinh,
			  arg_real, arg_imag, format);

    CASE_CFN_CTAN:
      return do_mpc_arg1 (result_real, result_imag, mpc_tan,
			  arg_real, arg_imag, format);

    CASE_CFN_CTANH:
      return do_mpc_arg1 (result_real, result_imag, mpc_tanh,
			  arg_real, arg_imag, format);

    CASE_CFN_CLOG:
      return do_mpc_arg1 (result_real, result_imag, mpc_log,
			  arg_real, arg_imag, format);

    CASE_CFN_CSQRT:
      return do_mpc_arg1 (result_real, result_imag, mpc_sqrt,
			  arg_real, arg_imag, format);

    CASE_CFN_CASIN:
      return do_mpc_arg1 (result_real, result_imag, mpc_asin,
			  arg_real, arg_imag, format);

    CASE_CFN_CACOS:
      return do_mpc_arg1 (result_real, result_imag, mpc_acos,
			  arg_real, arg_imag, format);

    CASE_CFN_CATAN:
      return do_mpc_arg1 (result_real, result_imag, mpc_atan,
			  arg_real, arg_imag, format);

    CASE_CFN_CASINH:
      return do_mpc_arg1 (result_real, result_imag, mpc_asinh,
			  arg_real, arg_imag, format);

    CASE_CFN_CACOSH:
      return do_mpc_arg1 (result_real, result_imag, mpc_acosh,
			  arg_real, arg_imag, format);

    CASE_CFN_CATANH:
      return do_mpc_arg1 (result_real, result_imag, mpc_atanh,
			  arg_real, arg_imag, format);

    CASE_CFN_CEXP:
      return do_mpc_arg1 (result_real, result_imag, mpc_exp,
			  arg_real, arg_imag, format);

    default:
      return false;
    }
}
/* Subroutine of fold_const_call, with the same interface.  Handle cases
   where the arguments and result are numerical.  */

static tree
fold_const_call_1 (combined_fn fn, tree type, tree arg)
{
  machine_mode mode = TYPE_MODE (type);
  machine_mode arg_mode = TYPE_MODE (TREE_TYPE (arg));

  if (integer_cst_p (arg))
    {
      if (SCALAR_INT_MODE_P (mode))
	{
	  wide_int result;
	  if (fold_const_call_ss (&result, fn, wi::to_wide (arg),
				  TYPE_PRECISION (type), TREE_TYPE (arg)))
	    return wide_int_to_tree (type, result);
	}
      return NULL_TREE;
    }

  if (real_cst_p (arg))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg_mode));
      if (mode == arg_mode)
	{
	  /* real -> real.  */
	  REAL_VALUE_TYPE result;
	  if (fold_const_call_ss (&result, fn, TREE_REAL_CST_PTR (arg),
				  REAL_MODE_FORMAT (mode)))
	    return build_real (type, result);
	}
      else if (COMPLEX_MODE_P (mode)
	       && GET_MODE_INNER (mode) == arg_mode)
	{
	  /* real -> complex real.  */
	  REAL_VALUE_TYPE result_real, result_imag;
	  if (fold_const_call_cs (&result_real, &result_imag, fn,
				  TREE_REAL_CST_PTR (arg),
				  REAL_MODE_FORMAT (arg_mode)))
	    return build_complex (type,
				  build_real (TREE_TYPE (type), result_real),
				  build_real (TREE_TYPE (type), result_imag));
	}
      else if (INTEGRAL_TYPE_P (type))
	{
	  /* real -> int.  */
	  wide_int result;
	  if (fold_const_call_ss (&result, fn,
				  TREE_REAL_CST_PTR (arg),
				  TYPE_PRECISION (type),
				  REAL_MODE_FORMAT (arg_mode)))
	    return wide_int_to_tree (type, result);
	}
      return NULL_TREE;
    }

  if (complex_cst_p (arg))
    {
      gcc_checking_assert (COMPLEX_MODE_P (arg_mode));
      machine_mode inner_mode = GET_MODE_INNER (arg_mode);
      tree argr = TREE_REALPART (arg);
      tree argi = TREE_IMAGPART (arg);
      if (mode == arg_mode
	  && real_cst_p (argr)
	  && real_cst_p (argi))
	{
	  /* complex real -> complex real.  */
	  REAL_VALUE_TYPE result_real, result_imag;
	  if (fold_const_call_cc (&result_real, &result_imag, fn,
				  TREE_REAL_CST_PTR (argr),
				  TREE_REAL_CST_PTR (argi),
				  REAL_MODE_FORMAT (inner_mode)))
	    return build_complex (type,
				  build_real (TREE_TYPE (type), result_real),
				  build_real (TREE_TYPE (type), result_imag));
	}
      if (mode == inner_mode
	  && real_cst_p (argr)
	  && real_cst_p (argi))
	{
	  /* complex real -> real.  */
	  REAL_VALUE_TYPE result;
	  if (fold_const_call_sc (&result, fn,
				  TREE_REAL_CST_PTR (argr),
				  TREE_REAL_CST_PTR (argi),
				  REAL_MODE_FORMAT (inner_mode)))
	    return build_real (type, result);
	}
      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Try to fold FN (ARG) to a constant.  Return the constant on success,
   otherwise return null.  TYPE is the type of the return value.  */

tree
fold_const_call (combined_fn fn, tree type, tree arg)
{
  switch (fn)
    {
    case CFN_BUILT_IN_STRLEN:
      if (const char *str = c_getstr (arg))
	return build_int_cst (type, strlen (str));
      return NULL_TREE;

    CASE_CFN_NAN:
    CASE_FLT_FN_FLOATN_NX (CFN_BUILT_IN_NAN):
    case CFN_BUILT_IN_NAND32:
    case CFN_BUILT_IN_NAND64:
    case CFN_BUILT_IN_NAND128:
      return fold_const_builtin_nan (type, arg, true);

    CASE_CFN_NANS:
    CASE_FLT_FN_FLOATN_NX (CFN_BUILT_IN_NANS):
    case CFN_BUILT_IN_NANSF16B:
    case CFN_BUILT_IN_NANSD32:
    case CFN_BUILT_IN_NANSD64:
    case CFN_BUILT_IN_NANSD128:
      return fold_const_builtin_nan (type, arg, false);

    case CFN_REDUC_PLUS:
      return fold_const_reduction (type, arg, PLUS_EXPR);

    case CFN_REDUC_MAX:
      return fold_const_reduction (type, arg, MAX_EXPR);

    case CFN_REDUC_MIN:
      return fold_const_reduction (type, arg, MIN_EXPR);

    case CFN_REDUC_AND:
      return fold_const_reduction (type, arg, BIT_AND_EXPR);

    case CFN_REDUC_IOR:
      return fold_const_reduction (type, arg, BIT_IOR_EXPR);

    case CFN_REDUC_XOR:
      return fold_const_reduction (type, arg, BIT_XOR_EXPR);

    case CFN_VEC_CONVERT:
      return fold_const_vec_convert (type, arg);

    default:
      return fold_const_call_1 (fn, type, arg);
    }
}
/* Fold a call to IFN_FOLD_LEFT_<CODE> (ARG0, ARG1), returning a value
   of type TYPE.  */

static tree
fold_const_fold_left (tree type, tree arg0, tree arg1, tree_code code)
{
  if (TREE_CODE (arg1) != VECTOR_CST)
    return NULL_TREE;

  unsigned HOST_WIDE_INT nelts;
  if (!VECTOR_CST_NELTS (arg1).is_constant (&nelts))
    return NULL_TREE;

  for (unsigned HOST_WIDE_INT i = 0; i < nelts; i++)
    {
      arg0 = const_binop (code, type, arg0, VECTOR_CST_ELT (arg1, i));
      if (arg0 == NULL_TREE || !CONSTANT_CLASS_P (arg0))
	return NULL_TREE;
    }
  return arg0;
}
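
/* Example (assumed input): IFN_FOLD_LEFT_PLUS (5.0, { 1.0, 2.0, 3.0 })
   accumulates strictly left to right, ((5 + 1) + 2) + 3, and folds to
   11.0 when every intermediate result is itself a constant.  */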
/* Try to evaluate:

      *RESULT = FN (*ARG0, *ARG1)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_call_sss (real_value *result, combined_fn fn,
		     const real_value *arg0, const real_value *arg1,
		     const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_DREM:
    CASE_CFN_REMAINDER:
    CASE_CFN_REMAINDER_FN:
      return do_mpfr_arg2 (result, mpfr_remainder, arg0, arg1, format);

    CASE_CFN_ATAN2:
    CASE_CFN_ATAN2_FN:
      return do_mpfr_arg2 (result, mpfr_atan2, arg0, arg1, format);

    CASE_CFN_FDIM:
    CASE_CFN_FDIM_FN:
      return do_mpfr_arg2 (result, mpfr_dim, arg0, arg1, format);

    CASE_CFN_FMOD:
    CASE_CFN_FMOD_FN:
      return do_mpfr_arg2 (result, mpfr_fmod, arg0, arg1, format);

    CASE_CFN_HYPOT:
    CASE_CFN_HYPOT_FN:
      return do_mpfr_arg2 (result, mpfr_hypot, arg0, arg1, format);

    CASE_CFN_COPYSIGN:
    CASE_CFN_COPYSIGN_FN:
      *result = *arg0;
      real_copysign (result, arg1);
      return true;

    CASE_CFN_FMIN:
    CASE_CFN_FMIN_FN:
      return do_mpfr_arg2 (result, mpfr_min, arg0, arg1, format);

    CASE_CFN_FMAX:
    CASE_CFN_FMAX_FN:
      return do_mpfr_arg2 (result, mpfr_max, arg0, arg1, format);

    CASE_CFN_POW:
    CASE_CFN_POW_FN:
      return fold_const_pow (result, arg0, arg1, format);

    CASE_CFN_NEXTAFTER:
    CASE_CFN_NEXTAFTER_FN:
    case CFN_BUILT_IN_NEXTAFTERF16B:
    CASE_CFN_NEXTTOWARD:
      return fold_const_nextafter (result, arg0, arg1, format);

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = FN (*ARG0, ARG1)

   where FORMAT is the format of *RESULT and *ARG0.  Return true on
   success.  */

static bool
fold_const_call_sss (real_value *result, combined_fn fn,
		     const real_value *arg0, const wide_int_ref &arg1,
		     const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_LDEXP:
      return fold_const_builtin_load_exponent (result, arg0, arg1, format);

    CASE_CFN_SCALBN:
    CASE_CFN_SCALBLN:
    CASE_CFN_SCALBLN_FN:
      return (format->b == 2
	      && fold_const_builtin_load_exponent (result, arg0, arg1,
						   format));

    CASE_CFN_POWI:
      /* Avoid the folding if flag_signaling_nans is on and
	 operand is a signaling NaN.  */
      if (!flag_unsafe_math_optimizations
	  && flag_signaling_nans
	  && REAL_VALUE_ISSIGNALING_NAN (*arg0))
	return false;

      real_powi (result, format, arg0, arg1.to_shwi ());
      return true;

    default:
      return false;
    }
}
/* Try to evaluate:

      *RESULT = FN (ARG0, *ARG1)

   where FORMAT is the format of *RESULT and *ARG1.  Return true on
   success.  */

static bool
fold_const_call_sss (real_value *result, combined_fn fn,
		     const wide_int_ref &arg0, const real_value *arg1,
		     const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_JN:
      return do_mpfr_arg2 (result, mpfr_jn, arg0, arg1, format);

    CASE_CFN_YN:
      return (real_compare (GT_EXPR, arg1, &dconst0)
	      && do_mpfr_arg2 (result, mpfr_yn, arg0, arg1, format));

    default:
      return false;
    }
}
/* Try to evaluate:

      RESULT = fn (ARG0, ARG1)

   where FORMAT is the format of the real and imaginary parts of RESULT
   (RESULT_REAL and RESULT_IMAG), of ARG0 (ARG0_REAL and ARG0_IMAG)
   and of ARG1 (ARG1_REAL and ARG1_IMAG).  Return true on success.  */

static bool
fold_const_call_ccc (real_value *result_real, real_value *result_imag,
		     combined_fn fn, const real_value *arg0_real,
		     const real_value *arg0_imag, const real_value *arg1_real,
		     const real_value *arg1_imag, const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_CPOW:
      return do_mpc_arg2 (result_real, result_imag, mpc_pow,
			  arg0_real, arg0_imag, arg1_real, arg1_imag, format);

    default:
      return false;
    }
}
/* Subroutine of fold_const_call, with the same interface.  Handle cases
   where the arguments and result are numerical.  */

static tree
fold_const_call_1 (combined_fn fn, tree type, tree arg0, tree arg1)
{
  machine_mode mode = TYPE_MODE (type);
  machine_mode arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
  machine_mode arg1_mode = TYPE_MODE (TREE_TYPE (arg1));

  if (mode == arg0_mode
      && real_cst_p (arg0)
      && real_cst_p (arg1))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg0_mode));
      REAL_VALUE_TYPE result;
      if (arg0_mode == arg1_mode)
	{
	  /* real, real -> real.  */
	  if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
				   TREE_REAL_CST_PTR (arg1),
				   REAL_MODE_FORMAT (mode)))
	    return build_real (type, result);
	}
      else if (arg1_mode == TYPE_MODE (long_double_type_node))
	switch (fn)
	  {
	  CASE_CFN_NEXTTOWARD:
	    /* real, long double -> real.  */
	    if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
				     TREE_REAL_CST_PTR (arg1),
				     REAL_MODE_FORMAT (mode)))
	      return build_real (type, result);
	    break;
	  default:
	    break;
	  }
      return NULL_TREE;
    }

  if (real_cst_p (arg0)
      && integer_cst_p (arg1))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg0_mode));
      if (mode == arg0_mode)
	{
	  /* real, int -> real.  */
	  REAL_VALUE_TYPE result;
	  if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
				   wi::to_wide (arg1),
				   REAL_MODE_FORMAT (mode)))
	    return build_real (type, result);
	}
      return NULL_TREE;
    }

  if (integer_cst_p (arg0)
      && real_cst_p (arg1))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg1_mode));
      if (mode == arg1_mode)
	{
	  /* int, real -> real.  */
	  REAL_VALUE_TYPE result;
	  if (fold_const_call_sss (&result, fn, wi::to_wide (arg0),
				   TREE_REAL_CST_PTR (arg1),
				   REAL_MODE_FORMAT (mode)))
	    return build_real (type, result);
	}
      return NULL_TREE;
    }

  if (arg0_mode == arg1_mode
      && complex_cst_p (arg0)
      && complex_cst_p (arg1))
    {
      gcc_checking_assert (COMPLEX_MODE_P (arg0_mode));
      machine_mode inner_mode = GET_MODE_INNER (arg0_mode);
      tree arg0r = TREE_REALPART (arg0);
      tree arg0i = TREE_IMAGPART (arg0);
      tree arg1r = TREE_REALPART (arg1);
      tree arg1i = TREE_IMAGPART (arg1);
      if (mode == arg0_mode
	  && real_cst_p (arg0r)
	  && real_cst_p (arg0i)
	  && real_cst_p (arg1r)
	  && real_cst_p (arg1i))
	{
	  /* complex real, complex real -> complex real.  */
	  REAL_VALUE_TYPE result_real, result_imag;
	  if (fold_const_call_ccc (&result_real, &result_imag, fn,
				   TREE_REAL_CST_PTR (arg0r),
				   TREE_REAL_CST_PTR (arg0i),
				   TREE_REAL_CST_PTR (arg1r),
				   TREE_REAL_CST_PTR (arg1i),
				   REAL_MODE_FORMAT (inner_mode)))
	    return build_complex (type,
				  build_real (TREE_TYPE (type), result_real),
				  build_real (TREE_TYPE (type), result_imag));
	}
      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Try to fold FN (ARG0, ARG1) to a constant.  Return the constant on success,
   otherwise return null.  TYPE is the type of the return value.  */

tree
fold_const_call (combined_fn fn, tree type, tree arg0, tree arg1)
{
  const char *p0, *p1;
  char c;
  tree_code subcode;

  switch (fn)
    {
    case CFN_BUILT_IN_STRSPN:
      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
	return build_int_cst (type, strspn (p0, p1));
      return NULL_TREE;

    case CFN_BUILT_IN_STRCSPN:
      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
	return build_int_cst (type, strcspn (p0, p1));
      return NULL_TREE;

    case CFN_BUILT_IN_STRCMP:
      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
	return build_cmp_result (type, strcmp (p0, p1));
      return NULL_TREE;

    case CFN_BUILT_IN_STRCASECMP:
      if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
	{
	  int r = strcmp (p0, p1);
	  if (r == 0)
	    return build_cmp_result (type, r);
	}
      return NULL_TREE;

    case CFN_BUILT_IN_INDEX:
    case CFN_BUILT_IN_STRCHR:
      if ((p0 = c_getstr (arg0)) && target_char_cst_p (arg1, &c))
	{
	  const char *r = strchr (p0, c);
	  if (r == NULL)
	    return build_int_cst (type, 0);
	  return fold_convert (type,
			       fold_build_pointer_plus_hwi (arg0, r - p0));
	}
      return NULL_TREE;

    case CFN_BUILT_IN_RINDEX:
    case CFN_BUILT_IN_STRRCHR:
      if ((p0 = c_getstr (arg0)) && target_char_cst_p (arg1, &c))
	{
	  const char *r = strrchr (p0, c);
	  if (r == NULL)
	    return build_int_cst (type, 0);
	  return fold_convert (type,
			       fold_build_pointer_plus_hwi (arg0, r - p0));
	}
      return NULL_TREE;

    case CFN_BUILT_IN_STRSTR:
      if ((p1 = c_getstr (arg1)))
	{
	  if ((p0 = c_getstr (arg0)))
	    {
	      const char *r = strstr (p0, p1);
	      if (r == NULL)
		return build_int_cst (type, 0);
	      return fold_convert (type,
				   fold_build_pointer_plus_hwi (arg0, r - p0));
	    }
	  if (*p1 == '\0')
	    return fold_convert (type, arg0);
	}
      return NULL_TREE;

    case CFN_FOLD_LEFT_PLUS:
      return fold_const_fold_left (type, arg0, arg1, PLUS_EXPR);

    case CFN_UBSAN_CHECK_ADD:
    case CFN_ADD_OVERFLOW:
      subcode = PLUS_EXPR;
      goto arith_overflow;

    case CFN_UBSAN_CHECK_SUB:
    case CFN_SUB_OVERFLOW:
      subcode = MINUS_EXPR;
      goto arith_overflow;

    case CFN_UBSAN_CHECK_MUL:
    case CFN_MUL_OVERFLOW:
      subcode = MULT_EXPR;
      goto arith_overflow;

    arith_overflow:
      if (integer_cst_p (arg0) && integer_cst_p (arg1))
	{
	  tree itype
	    = TREE_CODE (type) == COMPLEX_TYPE ? TREE_TYPE (type) : type;
	  bool ovf = false;
	  tree r = int_const_binop (subcode, fold_convert (itype, arg0),
				    fold_convert (itype, arg1));
	  if (!r || TREE_CODE (r) != INTEGER_CST)
	    return NULL_TREE;
	  if (arith_overflowed_p (subcode, itype, arg0, arg1))
	    ovf = true;
	  if (TREE_OVERFLOW (r))
	    r = drop_tree_overflow (r);
	  if (itype == type)
	    {
	      if (ovf)
		return NULL_TREE;
	      return r;
	    }
	  else
	    return build_complex (type, r, build_int_cst (itype, ovf));
	}
      return NULL_TREE;

    default:
      return fold_const_call_1 (fn, type, arg0, arg1);
    }
}
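
/* Example (assumed inputs, 8-bit unsigned element type): ADD_OVERFLOW
   (200, 100) folds to the COMPLEX_CST { 44, 1 }, i.e. the wrapped sum
   plus an overflow flag, whereas UBSAN_CHECK_ADD on the same operands
   is left unfolded because the overflow must trap at run time.  */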
/* Try to evaluate:

      *RESULT = FN (*ARG0, *ARG1, *ARG2)

   in format FORMAT.  Return true on success.  */

static bool
fold_const_call_ssss (real_value *result, combined_fn fn,
		      const real_value *arg0, const real_value *arg1,
		      const real_value *arg2, const real_format *format)
{
  switch (fn)
    {
    CASE_CFN_FMA:
    CASE_CFN_FMA_FN:
      return do_mpfr_arg3 (result, mpfr_fma, arg0, arg1, arg2, format);

    case CFN_FMS:
      {
	real_value new_arg2 = real_value_negate (arg2);
	return do_mpfr_arg3 (result, mpfr_fma, arg0, arg1, &new_arg2, format);
      }

    case CFN_FNMA:
      {
	real_value new_arg0 = real_value_negate (arg0);
	return do_mpfr_arg3 (result, mpfr_fma, &new_arg0, arg1, arg2, format);
      }

    case CFN_FNMS:
      {
	real_value new_arg0 = real_value_negate (arg0);
	real_value new_arg2 = real_value_negate (arg2);
	return do_mpfr_arg3 (result, mpfr_fma, &new_arg0, arg1,
			     &new_arg2, format);
      }

    default:
      return false;
    }
}
/* Subroutine of fold_const_call, with the same interface.  Handle cases
   where the arguments and result are numerical.  */

static tree
fold_const_call_1 (combined_fn fn, tree type, tree arg0, tree arg1, tree arg2)
{
  machine_mode mode = TYPE_MODE (type);
  machine_mode arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
  machine_mode arg1_mode = TYPE_MODE (TREE_TYPE (arg1));
  machine_mode arg2_mode = TYPE_MODE (TREE_TYPE (arg2));

  if (arg0_mode == arg1_mode
      && arg0_mode == arg2_mode
      && real_cst_p (arg0)
      && real_cst_p (arg1)
      && real_cst_p (arg2))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg0_mode));
      if (mode == arg0_mode)
	{
	  /* real, real, real -> real.  */
	  REAL_VALUE_TYPE result;
	  if (fold_const_call_ssss (&result, fn, TREE_REAL_CST_PTR (arg0),
				    TREE_REAL_CST_PTR (arg1),
				    TREE_REAL_CST_PTR (arg2),
				    REAL_MODE_FORMAT (mode)))
	    return build_real (type, result);
	}
      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Try to fold FN (ARG0, ARG1, ARG2) to a constant.  Return the constant on
   success, otherwise return null.  TYPE is the type of the return value.  */

tree
fold_const_call (combined_fn fn, tree type, tree arg0, tree arg1, tree arg2)
{
  const char *p0, *p1;
  char c;
  unsigned HOST_WIDE_INT s0, s1, s2 = 0;

  switch (fn)
    {
    case CFN_BUILT_IN_STRNCMP:
      if (!size_t_cst_p (arg2, &s2))
	return NULL_TREE;
      if (s2 == 0
	  && !TREE_SIDE_EFFECTS (arg0)
	  && !TREE_SIDE_EFFECTS (arg1))
	return build_int_cst (type, 0);
      else if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
	return build_int_cst (type, strncmp (p0, p1, MIN (s2, SIZE_MAX)));
      return NULL_TREE;

    case CFN_BUILT_IN_STRNCASECMP:
      if (!size_t_cst_p (arg2, &s2))
	return NULL_TREE;
      if (s2 == 0
	  && !TREE_SIDE_EFFECTS (arg0)
	  && !TREE_SIDE_EFFECTS (arg1))
	return build_int_cst (type, 0);
      else if ((p0 = c_getstr (arg0))
	       && (p1 = c_getstr (arg1))
	       && strncmp (p0, p1, MIN (s2, SIZE_MAX)) == 0)
	return build_int_cst (type, 0);
      return NULL_TREE;

    case CFN_BUILT_IN_BCMP:
    case CFN_BUILT_IN_MEMCMP:
      if (!size_t_cst_p (arg2, &s2))
	return NULL_TREE;
      if (s2 == 0
	  && !TREE_SIDE_EFFECTS (arg0)
	  && !TREE_SIDE_EFFECTS (arg1))
	return build_int_cst (type, 0);
      if ((p0 = getbyterep (arg0, &s0))
	  && (p1 = getbyterep (arg1, &s1))
	  && s2 <= s0
	  && s2 <= s1)
	return build_cmp_result (type, memcmp (p0, p1, s2));
      return NULL_TREE;

    case CFN_BUILT_IN_MEMCHR:
      if (!size_t_cst_p (arg2, &s2))
	return NULL_TREE;
      if (s2 == 0
	  && !TREE_SIDE_EFFECTS (arg0)
	  && !TREE_SIDE_EFFECTS (arg1))
	return build_int_cst (type, 0);
      if ((p0 = getbyterep (arg0, &s0))
	  && s2 <= s0
	  && target_char_cst_p (arg1, &c))
	{
	  const char *r = (const char *) memchr (p0, c, s2);
	  if (r == NULL)
	    return build_int_cst (type, 0);
	  return fold_convert (type,
			       fold_build_pointer_plus_hwi (arg0, r - p0));
	}
      return NULL_TREE;

    case CFN_WHILE_ULT:
      {
	poly_uint64 parg0, parg1;
	if (poly_int_tree_p (arg0, &parg0) && poly_int_tree_p (arg1, &parg1))
	  return fold_while_ult (type, parg0, parg1);
	return NULL_TREE;
      }

    case CFN_UADDC:
    case CFN_USUBC:
      if (integer_cst_p (arg0) && integer_cst_p (arg1) && integer_cst_p (arg2))
	{
	  tree itype = TREE_TYPE (type);
	  bool ovf = false;
	  tree_code subcode = fn == CFN_UADDC ? PLUS_EXPR : MINUS_EXPR;
	  tree r = int_const_binop (subcode, fold_convert (itype, arg0),
				    fold_convert (itype, arg1));
	  if (!r)
	    return NULL_TREE;
	  if (arith_overflowed_p (subcode, itype, arg0, arg1))
	    ovf = true;
	  tree r2 = int_const_binop (subcode, r, fold_convert (itype, arg2));
	  if (!r2 || TREE_CODE (r2) != INTEGER_CST)
	    return NULL_TREE;
	  if (arith_overflowed_p (subcode, itype, r, arg2))
	    ovf = true;
	  if (TREE_OVERFLOW (r2))
	    r2 = drop_tree_overflow (r2);
	  return build_complex (type, r2, build_int_cst (itype, ovf));
	}
      return NULL_TREE;

    default:
      return fold_const_call_1 (fn, type, arg0, arg1, arg2);
    }
}
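
/* Examples (assumed inputs): __builtin_memchr ("abcdef", 'd', 6) folds
   to the string's address plus 3, and UADDC (0xffffffff, 0, 1) with a
   32-bit unsigned element type folds to the pair { 0, 1 }: the wrapped
   sum and the carry-out.  */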