1 /* Conditional Dead Call Elimination pass for the GNU compiler.
2 Copyright (C) 2008-2023 Free Software Foundation, Inc.
3 Contributed by Xinliang David Li <davidxl@google.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
28 #include "tree-pass.h"
30 #include "gimple-pretty-print.h"
31 #include "fold-const.h"
32 #include "stor-layout.h"
33 #include "gimple-iterator.h"
35 #include "tree-into-ssa.h"
37 #include "internal-fn.h"
41 /* This pass serves two closely-related purposes:
43 1. It conditionally executes calls that set errno if (a) the result of
44 the call is unused and (b) a simple range check on the arguments can
45 detect most cases where errno does not need to be set.
47 This is the "conditional dead-code elimination" that gave the pass
48 its original name, since the call is dead for most argument values.
49 The calls for which it helps are usually part of the C++ abstraction
50 penalty exposed after inlining.
52 2. It looks for calls to built-in functions that set errno and whose
53 result is used. It checks whether there is an associated internal
54 function that doesn't set errno and whether the target supports
55 that internal function. If so, the pass uses the internal function
56 to compute the result of the built-in function but still arranges
57 for errno to be set when necessary. There are two ways of setting
60 a. by protecting the original call with the same argument checks as (1)
62 b. by protecting the original call with a check that the result
63 of the internal function is not equal to itself (i.e. is NaN).
65 (b) requires that NaNs are the only erroneous results. It is not
66 appropriate for functions like log, which returns ERANGE for zero
67 arguments. (b) is also likely to perform worse than (a) because it
68 requires the result to be calculated first. The pass therefore uses
69 (a) when it can and uses (b) as a fallback.
71 For (b) the pass can replace the original call with a call to
72 IFN_SET_EDOM, if the target supports direct assignments to errno.
74 In both cases, arguments that require errno to be set should occur
75 rarely in practice. Checks of the errno result should also be rare,
76 but the compiler would need powerful interprocedural analysis to
77 prove that errno is not checked. It's much easier to add argument
78 checks or result checks instead.
82 log (x); // Mostly dead call
84 if (__builtin_islessequal (x, 0))
87 With this change, call to log (x) is effectively eliminated, as
88 in the majority of the cases, log won't be called with x out of
89 range. The branch is totally predictable, so the branch cost
96 if (__builtin_isless (x, 0))
100 In the vast majority of cases we should then never need to call sqrt.
102 Note that library functions are not supposed to clear errno to zero without
103 error. See IEEE Std 1003.1, section 2.3 Error Numbers, and section 7.5:3 of
106 The condition wrapping the builtin call is conservatively set to avoid too
107 aggressive (wrong) shrink wrapping. */
110 /* A structure for representing input domain of
111 a function argument in integer. If the lower
112 bound is -inf, has_lb is set to false. If the
113 upper bound is +inf, has_ub is false.
114 is_lb_inclusive and is_ub_inclusive are flags
115 to indicate if lb and ub value are inclusive
124 bool is_lb_inclusive
;
125 bool is_ub_inclusive
;
128 /* A helper function to construct and return an input
129 domain object. LB is the lower bound, HAS_LB is
130 a boolean flag indicating if the lower bound exists,
131 and LB_INCLUSIVE is a boolean flag indicating if the
132 lower bound is inclusive or not. UB, HAS_UB, and
133 UB_INCLUSIVE have the same meaning, but for upper
134 bound of the domain. */
137 get_domain (int lb
, bool has_lb
, bool lb_inclusive
,
138 int ub
, bool has_ub
, bool ub_inclusive
)
142 domain
.has_lb
= has_lb
;
143 domain
.is_lb_inclusive
= lb_inclusive
;
145 domain
.has_ub
= has_ub
;
146 domain
.is_ub_inclusive
= ub_inclusive
;
150 /* A helper function to check the target format for the
151 argument type. In this implementation, only IEEE formats
152 are supported. ARG is the call argument to be checked.
153 Returns true if the format is supported. To support other
154 target formats, function get_no_error_domain needs to be
155 enhanced to have range bounds properly computed. Since
156 the check is cheap (very small number of candidates
157 to be checked), the result is not cached for each float type. */
160 check_target_format (tree arg
)
164 const struct real_format
*rfmt
;
166 type
= TREE_TYPE (arg
);
167 mode
= TYPE_MODE (type
);
168 rfmt
= REAL_MODE_FORMAT (mode
);
170 && (rfmt
== &ieee_single_format
|| rfmt
== &mips_single_format
171 || rfmt
== &motorola_single_format
))
173 && (rfmt
== &ieee_double_format
|| rfmt
== &mips_double_format
174 || rfmt
== &motorola_double_format
))
175 /* For long double, we cannot really check XFmode
176 which is only defined on intel platforms.
177 Candidate pre-selection using builtin function
178 code guarantees that we are checking formats
179 for long double modes: double, quad, and extended. */
180 || (mode
!= SFmode
&& mode
!= DFmode
181 && (rfmt
== &ieee_quad_format
182 || rfmt
== &mips_quad_format
183 || rfmt
== &ieee_extended_motorola_format
184 || rfmt
== &ieee_extended_intel_96_format
185 || rfmt
== &ieee_extended_intel_128_format
186 || rfmt
== &ieee_extended_intel_96_round_53_format
)))
193 /* A helper function to help select calls to pow that are suitable for
194 conditional DCE transformation. It looks for pow calls that can be
195 guided with simple conditions. Such calls either have constant base
196 values or base values converted from integers. Returns true if
197 the pow call POW_CALL is a candidate. */
199 /* The maximum integer bit size for base argument of a pow call
200 that is suitable for shrink-wrapping transformation. */
201 #define MAX_BASE_INT_BIT_SIZE 32
204 check_pow (gcall
*pow_call
)
207 enum tree_code bc
, ec
;
209 if (gimple_call_num_args (pow_call
) != 2)
212 base
= gimple_call_arg (pow_call
, 0);
213 expn
= gimple_call_arg (pow_call
, 1);
215 if (!check_target_format (expn
))
218 bc
= TREE_CODE (base
);
219 ec
= TREE_CODE (expn
);
221 /* Folding candidates are not interesting.
222 Can actually assert that it is already folded. */
223 if (ec
== REAL_CST
&& bc
== REAL_CST
)
228 /* Only handle a fixed range of constant. */
230 REAL_VALUE_TYPE bcv
= TREE_REAL_CST (base
);
231 if (real_equal (&bcv
, &dconst1
))
233 if (real_less (&bcv
, &dconst1
))
235 real_from_integer (&mv
, TYPE_MODE (TREE_TYPE (base
)), 256, UNSIGNED
);
236 if (real_less (&mv
, &bcv
))
240 else if (bc
== SSA_NAME
)
242 tree base_val0
, type
;
246 /* Only handles cases where base value is converted
247 from integer values. */
248 base_def
= SSA_NAME_DEF_STMT (base
);
249 if (gimple_code (base_def
) != GIMPLE_ASSIGN
)
252 if (gimple_assign_rhs_code (base_def
) != FLOAT_EXPR
)
254 base_val0
= gimple_assign_rhs1 (base_def
);
256 type
= TREE_TYPE (base_val0
);
257 if (TREE_CODE (type
) != INTEGER_TYPE
)
259 bit_sz
= TYPE_PRECISION (type
);
260 /* If the type of the base is too wide,
261 the resulting shrink wrapping condition
262 will be too conservative. */
263 if (bit_sz
> MAX_BASE_INT_BIT_SIZE
)
272 /* A helper function to help select candidate function calls that are
273 suitable for conditional DCE. Candidate functions must have single
274 valid input domain in this implementation except for pow (see check_pow).
275 Returns true if the function call is a candidate. */
278 check_builtin_call (gcall
*bcall
)
282 arg
= gimple_call_arg (bcall
, 0);
283 return check_target_format (arg
);
286 /* Return true if built-in function call CALL calls a math function
287 and if we know how to test the range of its arguments to detect _most_
288 situations in which errno is not set. The test must err on the side
289 of treating non-erroneous values as potentially erroneous. */
292 can_test_argument_range (gcall
*call
)
294 switch (DECL_FUNCTION_CODE (gimple_call_fndecl (call
)))
296 /* Trig functions. */
297 CASE_FLT_FN (BUILT_IN_ACOS
):
298 CASE_FLT_FN_FLOATN_NX (BUILT_IN_ACOS
):
299 CASE_FLT_FN (BUILT_IN_ASIN
):
300 CASE_FLT_FN_FLOATN_NX (BUILT_IN_ASIN
):
301 /* Hyperbolic functions. */
302 CASE_FLT_FN (BUILT_IN_ACOSH
):
303 CASE_FLT_FN_FLOATN_NX (BUILT_IN_ACOSH
):
304 CASE_FLT_FN (BUILT_IN_ATANH
):
305 CASE_FLT_FN_FLOATN_NX (BUILT_IN_ATANH
):
306 CASE_FLT_FN (BUILT_IN_COSH
):
307 CASE_FLT_FN_FLOATN_NX (BUILT_IN_COSH
):
308 CASE_FLT_FN (BUILT_IN_SINH
):
309 CASE_FLT_FN_FLOATN_NX (BUILT_IN_SINH
):
311 CASE_FLT_FN (BUILT_IN_LOG
):
312 CASE_FLT_FN_FLOATN_NX (BUILT_IN_LOG
):
313 CASE_FLT_FN (BUILT_IN_LOG2
):
314 CASE_FLT_FN_FLOATN_NX (BUILT_IN_LOG2
):
315 CASE_FLT_FN (BUILT_IN_LOG10
):
316 CASE_FLT_FN_FLOATN_NX (BUILT_IN_LOG10
):
317 CASE_FLT_FN (BUILT_IN_LOG1P
):
318 CASE_FLT_FN_FLOATN_NX (BUILT_IN_LOG1P
):
320 CASE_FLT_FN (BUILT_IN_EXP
):
321 CASE_FLT_FN_FLOATN_NX (BUILT_IN_EXP
):
322 CASE_FLT_FN (BUILT_IN_EXP2
):
323 CASE_FLT_FN_FLOATN_NX (BUILT_IN_EXP2
):
324 CASE_FLT_FN (BUILT_IN_EXP10
):
325 CASE_FLT_FN (BUILT_IN_EXPM1
):
326 CASE_FLT_FN_FLOATN_NX (BUILT_IN_EXPM1
):
327 CASE_FLT_FN (BUILT_IN_POW10
):
329 CASE_FLT_FN (BUILT_IN_SQRT
):
330 CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT
):
331 return check_builtin_call (call
);
332 /* Special one: two argument pow. */
334 return check_pow (call
);
342 /* Return true if CALL can produce a domain error (EDOM) but can never
343 produce a pole, range overflow or range underflow error (all ERANGE).
344 This means that we can tell whether a function would have set errno
345 by testing whether the result is a NaN. */
348 edom_only_function (gcall
*call
)
350 switch (DECL_FUNCTION_CODE (gimple_call_fndecl (call
)))
352 CASE_FLT_FN (BUILT_IN_ACOS
):
353 CASE_FLT_FN_FLOATN_NX (BUILT_IN_ACOS
):
354 CASE_FLT_FN (BUILT_IN_ASIN
):
355 CASE_FLT_FN_FLOATN_NX (BUILT_IN_ASIN
):
356 CASE_FLT_FN (BUILT_IN_ATAN
):
357 CASE_FLT_FN_FLOATN_NX (BUILT_IN_ATAN
):
358 CASE_FLT_FN (BUILT_IN_COS
):
359 CASE_FLT_FN_FLOATN_NX (BUILT_IN_COS
):
360 CASE_FLT_FN (BUILT_IN_SIGNIFICAND
):
361 CASE_FLT_FN (BUILT_IN_SIN
):
362 CASE_FLT_FN_FLOATN_NX (BUILT_IN_SIN
):
363 CASE_FLT_FN (BUILT_IN_SQRT
):
364 CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT
):
365 CASE_FLT_FN (BUILT_IN_FMOD
):
366 CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMOD
):
367 CASE_FLT_FN (BUILT_IN_REMAINDER
):
368 CASE_FLT_FN_FLOATN_NX (BUILT_IN_REMAINDER
):
376 /* Return true if it is structurally possible to guard CALL. */
379 can_guard_call_p (gimple
*call
)
381 return (!stmt_ends_bb_p (call
)
382 || find_fallthru_edge (gimple_bb (call
)->succs
));
385 /* For a comparison code return the comparison code we should use if we don't
388 static enum tree_code
389 comparison_code_if_no_nans (tree_code code
)
419 /* A helper function to generate gimple statements for one bound
420 comparison, so that the built-in function is called whenever
421 TCODE <ARG, LBUB> is *false*. TEMP_NAME1/TEMP_NAME2 are names
422 of the temporaries, CONDS is a vector holding the produced GIMPLE
423 statements, and NCONDS points to the variable holding the number of
424 logical comparisons. CONDS is either empty or a list ended with a
428 gen_one_condition (tree arg
, int lbub
,
429 enum tree_code tcode
,
430 const char *temp_name1
,
431 const char *temp_name2
,
435 if (!HONOR_NANS (arg
))
436 tcode
= comparison_code_if_no_nans (tcode
);
438 tree lbub_real_cst
, lbub_cst
, float_type
;
439 tree temp
, tempn
, tempc
, tempcn
;
444 float_type
= TREE_TYPE (arg
);
445 lbub_cst
= build_int_cst (integer_type_node
, lbub
);
446 lbub_real_cst
= build_real_from_int_cst (float_type
, lbub_cst
);
448 temp
= create_tmp_var (float_type
, temp_name1
);
449 stmt1
= gimple_build_assign (temp
, arg
);
450 tempn
= make_ssa_name (temp
, stmt1
);
451 gimple_assign_set_lhs (stmt1
, tempn
);
453 tempc
= create_tmp_var (boolean_type_node
, temp_name2
);
454 stmt2
= gimple_build_assign (tempc
,
457 tempn
, lbub_real_cst
));
458 tempcn
= make_ssa_name (tempc
, stmt2
);
459 gimple_assign_set_lhs (stmt2
, tempcn
);
461 stmt3
= gimple_build_cond_from_tree (tempcn
, NULL_TREE
, NULL_TREE
);
462 conds
.quick_push (stmt1
);
463 conds
.quick_push (stmt2
);
464 conds
.quick_push (stmt3
);
468 /* A helper function to generate GIMPLE statements for
469 out of input domain check. ARG is the call argument
470 to be runtime checked, DOMAIN holds the valid domain
471 for the given function, CONDS points to the vector
472 holding the result GIMPLE statements. *NCONDS is
473 the number of logical comparisons. This function
474 produces no more than two logical comparisons, one
475 for lower bound check, one for upper bound check. */
478 gen_conditions_for_domain (tree arg
, inp_domain domain
,
483 gen_one_condition (arg
, domain
.lb
,
484 (domain
.is_lb_inclusive
485 ? UNGE_EXPR
: UNGT_EXPR
),
486 "DCE_COND_LB", "DCE_COND_LB_TEST",
491 /* Now push a separator. */
493 conds
.quick_push (NULL
);
495 gen_one_condition (arg
, domain
.ub
,
496 (domain
.is_ub_inclusive
497 ? UNLE_EXPR
: UNLT_EXPR
),
498 "DCE_COND_UB", "DCE_COND_UB_TEST",
504 /* A helper function to generate condition
505 code for the y argument in call pow (some_const, y).
506 See candidate selection in check_pow. Since the
507 candidates' base values have a limited range,
508 the guarded code generated for y are simple:
509 if (__builtin_isgreater (y, max_y))
511 Note max_y can be computed separately for each
512 const base, but in this implementation, we
513 choose to compute it using the max base
514 in the allowed range for the purpose of
515 simplicity. BASE is the constant base value,
516 EXPN is the expression for the exponent argument,
517 *CONDS is the vector to hold resulting statements,
518 and *NCONDS is the number of logical conditions. */
521 gen_conditions_for_pow_cst_base (tree base
, tree expn
,
525 inp_domain exp_domain
;
526 /* Validate the range of the base constant to make
527 sure it is consistent with check_pow. */
529 REAL_VALUE_TYPE bcv
= TREE_REAL_CST (base
);
530 gcc_assert (!real_equal (&bcv
, &dconst1
)
531 && !real_less (&bcv
, &dconst1
));
532 real_from_integer (&mv
, TYPE_MODE (TREE_TYPE (base
)), 256, UNSIGNED
);
533 gcc_assert (!real_less (&mv
, &bcv
));
535 exp_domain
= get_domain (0, false, false,
538 gen_conditions_for_domain (expn
, exp_domain
,
542 /* Generate error condition code for pow calls with
543 non constant base values. The candidates selected
544 have their base argument value converted from
545 integer (see check_pow) value (1, 2, 4 bytes), and
546 the max exp value is computed based on the size
547 of the integer type (i.e. max possible base value).
548 The resulting input domain for exp argument is thus
549 conservative (smaller than the max value allowed by
550 the runtime value of the base). BASE is the integer
551 base value, EXPN is the expression for the exponent
552 argument, *CONDS is the vector to hold resulting
553 statements, and *NCONDS is the number of logical
557 gen_conditions_for_pow_int_base (tree base
, tree expn
,
566 gimple
*stmt1
, *stmt2
;
568 inp_domain exp_domain
;
570 base_def
= SSA_NAME_DEF_STMT (base
);
571 base_val0
= gimple_assign_rhs1 (base_def
);
572 int_type
= TREE_TYPE (base_val0
);
573 bit_sz
= TYPE_PRECISION (int_type
);
574 gcc_assert (bit_sz
> 0
575 && bit_sz
<= MAX_BASE_INT_BIT_SIZE
);
577 /* Determine the max exp argument value according to
578 the size of the base integer. The max exp value
579 is conservatively estimated assuming IEEE754 double
583 else if (bit_sz
== 16)
587 gcc_assert (bit_sz
== MAX_BASE_INT_BIT_SIZE
);
591 /* For pow ((double)x, y), generate the following conditions:
594 if (__builtin_islessequal (temp1, 0))
598 if (__builtin_isgreater (temp2, max_exp_real_cst)) */
600 /* Generate condition in reverse order -- first
601 the condition for the exp argument. */
603 exp_domain
= get_domain (0, false, false,
604 max_exp
, true, true);
606 gen_conditions_for_domain (expn
, exp_domain
,
609 /* Now generate condition for the base argument.
610 Note it does not use the helper function
611 gen_conditions_for_domain because the base
614 /* Push a separator. */
615 conds
.quick_push (NULL
);
617 temp
= create_tmp_var (int_type
, "DCE_COND1");
618 cst0
= build_int_cst (int_type
, 0);
619 stmt1
= gimple_build_assign (temp
, base_val0
);
620 tempn
= make_ssa_name (temp
, stmt1
);
621 gimple_assign_set_lhs (stmt1
, tempn
);
622 stmt2
= gimple_build_cond (GT_EXPR
, tempn
, cst0
, NULL_TREE
, NULL_TREE
);
624 conds
.quick_push (stmt1
);
625 conds
.quick_push (stmt2
);
629 /* Method to generate conditional statements for guarding conditionally
630 dead calls to pow. One or more statements can be generated for
631 each logical condition. Statement groups of different conditions
632 are separated by a NULL tree and they are stored in the vec
633 conds. The number of logical conditions are stored in *nconds.
635 See C99 standard, 7.12.7.4:2, for description of pow (x, y).
636 The precise condition for domain errors are complex. In this
637 implementation, a simplified (but conservative) valid domain
638 for x and y are used: x is positive to avoid dom errors, while
639 y is smaller than a upper bound (depending on x) to avoid range
640 errors. Runtime code is generated to check x (if not constant)
641 and y against the valid domain. If it is out, jump to the call,
642 otherwise the call is bypassed. POW_CALL is the call statement,
643 *CONDS is a vector holding the resulting condition statements,
644 and *NCONDS is the number of logical conditions. */
647 gen_conditions_for_pow (gcall
*pow_call
, vec
<gimple
*> conds
,
653 gcc_checking_assert (check_pow (pow_call
));
657 base
= gimple_call_arg (pow_call
, 0);
658 expn
= gimple_call_arg (pow_call
, 1);
660 bc
= TREE_CODE (base
);
663 gen_conditions_for_pow_cst_base (base
, expn
, conds
, nconds
);
664 else if (bc
== SSA_NAME
)
665 gen_conditions_for_pow_int_base (base
, expn
, conds
, nconds
);
670 /* A helper routine to help computing the valid input domain
671 for a builtin function. See C99 7.12.7 for details. In this
672 implementation, we only handle single region domain. The
673 resulting region can be conservative (smaller) than the actual
674 one and rounded to integers. Some of the bounds are documented
675 in the standard, while other limit constants are computed
676 assuming IEEE floating point format (for SF and DF modes).
677 Since IEEE only sets minimum requirements for long double format,
678 different long double formats exist under different implementations
679 (e.g, 64 bit double precision (DF), 80 bit double-extended
680 precision (XF), and 128 bit quad precision (QF) ). For simplicity,
681 in this implementation, the computed bounds for long double assume
682 64 bit format (DF), and are therefore conservative. Another
683 assumption is that single precision float type is always SF mode,
684 and double type is DF mode. This function is quite
685 implementation specific, so it may not be suitable to be part of
686 builtins.cc. This needs to be revisited later to see if it can
687 be leveraged in x87 assembly expansion. */
690 get_no_error_domain (enum built_in_function fnc
)
694 /* Trig functions: return [-1, +1] */
695 CASE_FLT_FN (BUILT_IN_ACOS
):
696 CASE_FLT_FN_FLOATN_NX (BUILT_IN_ACOS
):
697 CASE_FLT_FN (BUILT_IN_ASIN
):
698 CASE_FLT_FN_FLOATN_NX (BUILT_IN_ASIN
):
699 return get_domain (-1, true, true,
701 /* Hyperbolic functions. */
702 CASE_FLT_FN (BUILT_IN_ACOSH
):
703 CASE_FLT_FN_FLOATN_NX (BUILT_IN_ACOSH
):
704 /* acosh: [1, +inf) */
705 return get_domain (1, true, true,
707 CASE_FLT_FN (BUILT_IN_ATANH
):
708 CASE_FLT_FN_FLOATN_NX (BUILT_IN_ATANH
):
709 /* atanh: (-1, +1) */
710 return get_domain (-1, true, false,
712 case BUILT_IN_COSHF16
:
713 case BUILT_IN_SINHF16
:
714 /* coshf16: (-11, +11) */
715 return get_domain (-11, true, false,
719 case BUILT_IN_COSHF32
:
720 case BUILT_IN_SINHF32
:
721 /* coshf: (-89, +89) */
722 return get_domain (-89, true, false,
728 case BUILT_IN_COSHF64
:
729 case BUILT_IN_SINHF64
:
730 /* cosh: (-710, +710) */
731 return get_domain (-710, true, false,
733 case BUILT_IN_COSHF128
:
734 case BUILT_IN_SINHF128
:
735 /* coshf128: (-11357, +11357) */
736 return get_domain (-11357, true, false,
738 /* Log functions: (0, +inf) */
739 CASE_FLT_FN (BUILT_IN_LOG
):
740 CASE_FLT_FN_FLOATN_NX (BUILT_IN_LOG
):
741 CASE_FLT_FN (BUILT_IN_LOG2
):
742 CASE_FLT_FN_FLOATN_NX (BUILT_IN_LOG2
):
743 CASE_FLT_FN (BUILT_IN_LOG10
):
744 CASE_FLT_FN_FLOATN_NX (BUILT_IN_LOG10
):
745 return get_domain (0, true, false,
747 CASE_FLT_FN (BUILT_IN_LOG1P
):
748 CASE_FLT_FN_FLOATN_NX (BUILT_IN_LOG1P
):
749 return get_domain (-1, true, false,
752 case BUILT_IN_EXPF16
:
753 case BUILT_IN_EXPM1F16
:
754 /* expf: (-inf, 11) */
755 return get_domain (-1, false, false,
758 case BUILT_IN_EXPM1F
:
759 case BUILT_IN_EXPF32
:
760 case BUILT_IN_EXPM1F32
:
761 /* expf: (-inf, 88) */
762 return get_domain (-1, false, false,
767 case BUILT_IN_EXPM1L
:
768 case BUILT_IN_EXPF64
:
769 case BUILT_IN_EXPM1F64
:
770 /* exp: (-inf, 709) */
771 return get_domain (-1, false, false,
773 case BUILT_IN_EXPF128
:
774 case BUILT_IN_EXPM1F128
:
775 /* expf128: (-inf, 11356) */
776 return get_domain (-1, false, false,
778 case BUILT_IN_EXP2F16
:
779 /* exp2f16: (-inf, 16) */
780 return get_domain (-1, false, false,
783 case BUILT_IN_EXP2F32
:
784 /* exp2f: (-inf, 128) */
785 return get_domain (-1, false, false,
789 case BUILT_IN_EXP2F64
:
790 /* exp2: (-inf, 1024) */
791 return get_domain (-1, false, false,
793 case BUILT_IN_EXP2F128
:
794 /* exp2f128: (-inf, 16384) */
795 return get_domain (-1, false, false,
797 case BUILT_IN_EXP10F
:
798 case BUILT_IN_POW10F
:
799 /* exp10f: (-inf, 38) */
800 return get_domain (-1, false, false,
804 case BUILT_IN_EXP10L
:
805 case BUILT_IN_POW10L
:
806 /* exp10: (-inf, 308) */
807 return get_domain (-1, false, false,
809 /* sqrt: [0, +inf) */
810 CASE_FLT_FN (BUILT_IN_SQRT
):
811 CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT
):
812 return get_domain (0, true, true,
821 /* The function to generate shrink wrap conditions for a partially
822 dead builtin call whose return value is not used anywhere,
823 but has to be kept live due to potential error condition.
824 BI_CALL is the builtin call, CONDS is the vector of statements
825 for condition code, NCODES is the pointer to the number of
826 logical conditions. Statements belonging to different logical
827 condition are separated by NULL tree in the vector. */
830 gen_shrink_wrap_conditions (gcall
*bi_call
, const vec
<gimple
*> &conds
,
831 unsigned int *nconds
)
835 enum built_in_function fnc
;
837 gcc_assert (nconds
&& conds
.exists ());
838 gcc_assert (conds
.length () == 0);
839 gcc_assert (is_gimple_call (bi_call
));
842 fn
= gimple_call_fndecl (call
);
843 gcc_assert (fn
&& fndecl_built_in_p (fn
));
844 fnc
= DECL_FUNCTION_CODE (fn
);
847 if (fnc
== BUILT_IN_POW
)
848 gen_conditions_for_pow (call
, conds
, nconds
);
852 inp_domain domain
= get_no_error_domain (fnc
);
854 arg
= gimple_call_arg (bi_call
, 0);
855 gen_conditions_for_domain (arg
, domain
, conds
, nconds
);
861 /* Shrink-wrap BI_CALL so that it is only called when one of the NCONDS
862 conditions in CONDS is false. Also move BI_NEWCALL to a new basic block
863 when it is non-null, it is called while all of the CONDS are true. */
866 shrink_wrap_one_built_in_call_with_conds (gcall
*bi_call
,
867 const vec
<gimple
*> &conds
,
869 gcall
*bi_newcall
= NULL
)
871 gimple_stmt_iterator bi_call_bsi
;
872 basic_block bi_call_bb
, bi_newcall_bb
, join_tgt_bb
, guard_bb
;
873 edge join_tgt_in_edge_from_call
, join_tgt_in_edge_fall_thru
;
874 edge bi_call_in_edge0
, guard_bb_in_edge
;
875 unsigned tn_cond_stmts
;
877 gimple
*cond_expr
= NULL
;
878 gimple
*cond_expr_start
;
880 /* The cfg we want to create looks like this:
881 [guard n-1] <- guard_bb (old block)
888 [call] | <- bi_call_bb }
889 \ [newcall] <-bi_newcall_bb}
891 [join] <- join_tgt_bb (old iff call must end bb)
892 possible EH edges (only if [join] is old)
894 When [join] is new, the immediate dominators for these blocks are:
896 1. [guard n-1]: unchanged
897 2. [call]: [guard n-1]
898 3. [newcall]: [guard 0]
899 4. [guard m]: [guard m+1] for 0 <= m <= n-2
900 5. [join]: [guard n-1]
902 We punt for the more complex case of [join] being old and
903 simply free the dominance info. We also punt on postdominators,
904 which aren't expected to be available at this point anyway. */
905 bi_call_bb
= gimple_bb (bi_call
);
907 /* Now find the join target bb -- split bi_call_bb if needed. */
908 if (stmt_ends_bb_p (bi_call
))
910 /* We checked that there was a fallthrough edge in
912 join_tgt_in_edge_from_call
= find_fallthru_edge (bi_call_bb
->succs
);
913 gcc_assert (join_tgt_in_edge_from_call
);
914 /* We don't want to handle PHIs. */
915 if (EDGE_COUNT (join_tgt_in_edge_from_call
->dest
->preds
) > 1)
916 join_tgt_bb
= split_edge (join_tgt_in_edge_from_call
);
919 join_tgt_bb
= join_tgt_in_edge_from_call
->dest
;
920 /* We may have degenerate PHIs in the destination. Propagate
922 for (gphi_iterator i
= gsi_start_phis (join_tgt_bb
); !gsi_end_p (i
);)
924 gphi
*phi
= i
.phi ();
925 replace_uses_by (gimple_phi_result (phi
),
926 gimple_phi_arg_def (phi
, 0));
927 remove_phi_node (&i
, true);
933 join_tgt_in_edge_from_call
= split_block (bi_call_bb
, bi_call
);
934 join_tgt_bb
= join_tgt_in_edge_from_call
->dest
;
937 bi_call_bsi
= gsi_for_stmt (bi_call
);
939 /* Now it is time to insert the first conditional expression
940 into bi_call_bb and split this bb so that bi_call is
942 tn_cond_stmts
= conds
.length ();
944 cond_expr_start
= conds
[0];
945 for (ci
= 0; ci
< tn_cond_stmts
; ci
++)
947 gimple
*c
= conds
[ci
];
948 gcc_assert (c
|| ci
!= 0);
951 gsi_insert_before (&bi_call_bsi
, c
, GSI_SAME_STMT
);
955 gcc_assert (cond_expr
&& gimple_code (cond_expr
) == GIMPLE_COND
);
957 typedef std::pair
<edge
, edge
> edge_pair
;
958 auto_vec
<edge_pair
, 8> edges
;
960 bi_call_in_edge0
= split_block (bi_call_bb
, cond_expr
);
961 bi_call_in_edge0
->flags
&= ~EDGE_FALLTHRU
;
962 bi_call_in_edge0
->flags
|= EDGE_FALSE_VALUE
;
963 guard_bb
= bi_call_bb
;
964 bi_call_bb
= bi_call_in_edge0
->dest
;
965 join_tgt_in_edge_fall_thru
= make_edge (guard_bb
, join_tgt_bb
,
968 edges
.reserve (nconds
);
969 edges
.quick_push (edge_pair (bi_call_in_edge0
, join_tgt_in_edge_fall_thru
));
971 /* Code generation for the rest of the conditions */
972 for (unsigned int i
= 1; i
< nconds
; ++i
)
975 edge bi_call_in_edge
;
976 gimple_stmt_iterator guard_bsi
= gsi_for_stmt (cond_expr_start
);
978 cond_expr_start
= conds
[ci0
];
979 for (; ci
< tn_cond_stmts
; ci
++)
981 gimple
*c
= conds
[ci
];
982 gcc_assert (c
|| ci
!= ci0
);
985 gsi_insert_before (&guard_bsi
, c
, GSI_SAME_STMT
);
989 gcc_assert (cond_expr
&& gimple_code (cond_expr
) == GIMPLE_COND
);
990 guard_bb_in_edge
= split_block (guard_bb
, cond_expr
);
991 guard_bb_in_edge
->flags
&= ~EDGE_FALLTHRU
;
992 guard_bb_in_edge
->flags
|= EDGE_TRUE_VALUE
;
994 bi_call_in_edge
= make_edge (guard_bb
, bi_call_bb
, EDGE_FALSE_VALUE
);
995 edges
.quick_push (edge_pair (bi_call_in_edge
, guard_bb_in_edge
));
998 /* Move BI_NEWCALL to new basic block when it is non-null. */
1001 /* Get bi_newcall_bb by split join_tgt_in_edge_fall_thru edge,
1002 and move BI_NEWCALL to bi_newcall_bb. */
1003 bi_newcall_bb
= split_edge (join_tgt_in_edge_fall_thru
);
1004 gimple_stmt_iterator to_gsi
= gsi_start_bb (bi_newcall_bb
);
1005 gimple_stmt_iterator from_gsi
= gsi_for_stmt (bi_newcall
);
1006 gsi_move_before (&from_gsi
, &to_gsi
);
1007 join_tgt_in_edge_fall_thru
= EDGE_SUCC (bi_newcall_bb
, 0);
1008 join_tgt_bb
= join_tgt_in_edge_fall_thru
->dest
;
1010 tree bi_newcall_lhs
= gimple_call_lhs (bi_newcall
);
1011 tree bi_call_lhs
= gimple_call_lhs (bi_call
);
1014 bi_call_lhs
= copy_ssa_name (bi_newcall_lhs
);
1015 gimple_call_set_lhs (bi_call
, bi_call_lhs
);
1016 SSA_NAME_DEF_STMT (bi_call_lhs
) = bi_call
;
1019 /* Create phi node for lhs of BI_CALL and BI_NEWCALL. */
1020 gphi
*new_phi
= create_phi_node (copy_ssa_name (bi_newcall_lhs
),
1022 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (new_phi
))
1023 = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (bi_newcall_lhs
);
1024 add_phi_arg (new_phi
, bi_call_lhs
, join_tgt_in_edge_from_call
,
1025 gimple_location (bi_call
));
1026 add_phi_arg (new_phi
, bi_newcall_lhs
, join_tgt_in_edge_fall_thru
,
1027 gimple_location (bi_newcall
));
1029 /* Replace all use of original return value with result of phi node. */
1030 use_operand_p use_p
;
1032 imm_use_iterator iterator
;
1033 FOR_EACH_IMM_USE_STMT (use_stmt
, iterator
, bi_newcall_lhs
)
1034 if (use_stmt
!= new_phi
)
1035 FOR_EACH_IMM_USE_ON_STMT (use_p
, iterator
)
1036 SET_USE (use_p
, PHI_RESULT (new_phi
));
1039 /* Now update the probability and profile information, processing the
1040 guards in order of execution.
1042 There are two approaches we could take here. On the one hand we
1043 could assign a probability of X to the call block and distribute
1044 that probability among its incoming edges. On the other hand we
1045 could assign a probability of X to each individual call edge.
1047 The choice only affects calls that have more than one condition.
1048 In those cases, the second approach would give the call block
1049 a greater probability than the first. However, the difference
1050 is only small, and our chosen X is a pure guess anyway.
1052 Here we take the second approach because it's slightly simpler
1053 and because it's easy to see that it doesn't lose profile counts. */
1054 bi_call_bb
->count
= profile_count::zero ();
1055 while (!edges
.is_empty ())
1057 edge_pair e
= edges
.pop ();
1058 edge call_edge
= e
.first
;
1059 edge nocall_edge
= e
.second
;
1060 basic_block src_bb
= call_edge
->src
;
1061 gcc_assert (src_bb
== nocall_edge
->src
);
1063 call_edge
->probability
= profile_probability::very_unlikely ();
1064 nocall_edge
->probability
= profile_probability::always ()
1065 - call_edge
->probability
;
1067 bi_call_bb
->count
+= call_edge
->count ();
1069 if (nocall_edge
->dest
!= join_tgt_bb
)
1070 nocall_edge
->dest
->count
= src_bb
->count
- bi_call_bb
->count
;
1073 if (dom_info_available_p (CDI_DOMINATORS
))
1075 /* The split_blocks leave [guard 0] as the immediate dominator
1076 of [call] and [call] as the immediate dominator of [join].
1078 set_immediate_dominator (CDI_DOMINATORS
, bi_call_bb
, guard_bb
);
1079 set_immediate_dominator (CDI_DOMINATORS
, join_tgt_bb
, guard_bb
);
1082 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1085 loc
= gimple_location (bi_call
);
1087 "%s:%d: note: function call is shrink-wrapped"
1088 " into error conditions.\n",
1089 LOCATION_FILE (loc
), LOCATION_LINE (loc
));
1093 /* Shrink-wrap BI_CALL so that it is only called when it might set errno
1094 (but is always called if it would set errno). */
1097 shrink_wrap_one_built_in_call (gcall
*bi_call
)
1099 unsigned nconds
= 0;
1100 auto_vec
<gimple
*, 12> conds
;
1101 gen_shrink_wrap_conditions (bi_call
, conds
, &nconds
);
1102 gcc_assert (nconds
!= 0);
1103 shrink_wrap_one_built_in_call_with_conds (bi_call
, conds
, nconds
);
1106 /* Return true if built-in function call CALL could be implemented using
1107 a combination of an internal function to compute the result and a
1108 separate call to set errno. */
1111 can_use_internal_fn (gcall
*call
)
1113 /* Only replace calls that set errno. */
1114 if (!gimple_vdef (call
))
1117 /* See whether there is an internal function for this built-in. */
1118 if (replacement_internal_fn (call
) == IFN_LAST
)
1121 /* See whether we can catch all cases where errno would be set,
1122 while still avoiding the call in most cases. */
1123 if (!can_test_argument_range (call
)
1124 && !edom_only_function (call
))
/* Implement built-in function call CALL using an internal function.
   CALL must have an lhs (a used result) and must satisfy
   can_use_internal_fn.  The result is computed by an internal function
   that does not set errno, while the original call (or a direct errno
   assignment) is kept under guard conditions so that errno is still set
   exactly when it would have been.  */

static void
use_internal_fn (gcall *call)
{
  /* We'll be inserting another call with the same arguments after the
     lhs has been set, so prevent any possible coalescing failure from
     having both values live at once.  See PR 71020.  */
  replace_abnormal_ssa_names (call);

  /* Collect the conditions under which the original call must still be
     executed because it might set errno.  */
  unsigned nconds = 0;
  auto_vec<gimple *, 12> conds;
  bool is_arg_conds = false;
  if (can_test_argument_range (call))
    {
      /* A simple range check on the arguments catches the errno cases.  */
      gen_shrink_wrap_conditions (call, conds, &nconds);
      is_arg_conds = true;
      gcc_assert (nconds != 0);
    }
  else
    /* Otherwise EDOM must be the only errno value the call can set;
       the guard condition is built later from the computed result.  */
    gcc_assert (edom_only_function (call));

  internal_fn ifn = replacement_internal_fn (call);
  gcc_assert (ifn != IFN_LAST);

  /* Construct the new call, with the same arguments as the original one.  */
  auto_vec<tree, 16> args;
  unsigned int nargs = gimple_call_num_args (call);
  for (unsigned int i = 0; i < nargs; ++i)
    args.safe_push (gimple_call_arg (call, i));
  gcall *new_call = gimple_build_call_internal_vec (ifn, args);
  gimple_set_location (new_call, gimple_location (call));
  gimple_call_set_nothrow (new_call, gimple_call_nothrow_p (call));

  /* Transfer the LHS to the new call; the original call keeps only its
     errno side effect.  Update the SSA def statement by hand since we
     are rewiring an existing name rather than creating a new one.  */
  tree lhs = gimple_call_lhs (call);
  gimple_call_set_lhs (new_call, lhs);
  gimple_call_set_lhs (call, NULL_TREE);
  SSA_NAME_DEF_STMT (lhs) = new_call;

  /* Insert the new call before the original so that LHS is available
     when the guard condition below reads it.  */
  gimple_stmt_iterator gsi = gsi_for_stmt (call);
  gsi_insert_before (&gsi, new_call, GSI_SAME_STMT);

  if (!is_arg_conds)
    {
      /* Skip the call if LHS == LHS.  If we reach here, EDOM is the only
	 valid errno value and it is used iff the result is NaN.  */
      conds.quick_push (gimple_build_cond (EQ_EXPR, lhs, lhs,
					   NULL_TREE, NULL_TREE));
      nconds++;

      /* Try replacing the original call with a direct assignment to
	 errno, via an internal function.  */
      if (set_edom_supported_p () && !stmt_ends_bb_p (call))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (call);
	  gcall *new_call = gimple_build_call_internal (IFN_SET_EDOM, 0);
	  gimple_move_vops (new_call, call);
	  gimple_set_location (new_call, gimple_location (call));
	  gsi_replace (&gsi, new_call, false);
	  call = new_call;
	}
    }
  /* Guard whatever now represents the errno-setting statement.  When the
     guards are argument checks, pass NEW_CALL so its count can be used
     when splitting blocks; otherwise the result-based check applies.  */
  shrink_wrap_one_built_in_call_with_conds (call, conds, nconds,
					    is_arg_conds ? new_call : NULL);
}
1198 /* The top level function for conditional dead code shrink
1199 wrapping transformation. */
1202 shrink_wrap_conditional_dead_built_in_calls (const vec
<gcall
*> &calls
)
1206 unsigned n
= calls
.length ();
1209 gcall
*bi_call
= calls
[i
];
1210 if (gimple_call_lhs (bi_call
))
1211 use_internal_fn (bi_call
);
1213 shrink_wrap_one_built_in_call (bi_call
);
/* Static descriptor for the call CDCE pass, consumed by the pass
   manager when pass_call_cdce below is instantiated.  Requires a CFG
   and SSA form; provides and destroys nothing.  */
const pass_data pass_data_call_cdce =
{
  GIMPLE_PASS, /* type */
  "cdce", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CALL_CDCE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
/* Pass object for conditional dead call elimination; execute () is
   defined out of line below.  */
class pass_call_cdce : public gimple_opt_pass
{
public:
  pass_call_cdce (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_call_cdce, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override
    {
      /* The limit constants used in the implementation
	 assume IEEE floating point format.  Other formats
	 can be supported in the future if needed.  */
      return flag_tree_builtin_call_dce != 0;
    }

  unsigned int execute (function *) final override;

}; // class pass_call_cdce
/* Main entry point: scan FUN for shrink-wrappable built-in calls,
   transform them, and report the required SSA update to the pass
   manager.  */

unsigned int
pass_call_cdce::execute (function *fun)
{
  basic_block bb;
  gimple_stmt_iterator i;
  /* Candidates are collected first and transformed afterwards, since
     the transformation splits blocks and would invalidate the walk.  */
  auto_vec<gcall *> cond_dead_built_in_calls;
  FOR_EACH_BB_FN (bb, fun)
    {
      /* Skip blocks that are being optimized for size, since our
	 transformation always increases code size.  */
      if (optimize_bb_for_size_p (bb))
	continue;

      /* Collect dead call candidates.  */
      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
        {
	  gcall *stmt = dyn_cast <gcall *> (gsi_stmt (i));
	  /* A candidate is a normal built-in call that we can guard and
	     whose errno cases we can detect: via an argument range test
	     when the result is unused, or via can_use_internal_fn when
	     the result is used.  */
	  if (stmt
	      && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
	      && (gimple_call_lhs (stmt)
		  ? can_use_internal_fn (stmt)
		  : can_test_argument_range (stmt))
	      && can_guard_call_p (stmt))
            {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "Found conditional dead call: ");
		  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
		  fprintf (dump_file, "\n");
		}
	      /* Allocate the vector lazily: most functions have no
		 candidates at all.  */
	      if (!cond_dead_built_in_calls.exists ())
		cond_dead_built_in_calls.create (64);
	      cond_dead_built_in_calls.safe_push (stmt);
            }
	}
    }

  /* Nothing to transform.  */
  if (!cond_dead_built_in_calls.exists ())
    return 0;

  shrink_wrap_conditional_dead_built_in_calls (cond_dead_built_in_calls);
  free_dominance_info (CDI_POST_DOMINATORS);
  /* As we introduced new control-flow we need to insert PHI-nodes
     for the call-clobbers of the remaining call.  */
  mark_virtual_operands_for_renaming (fun);
  return TODO_update_ssa;
}
/* Factory used by the pass manager: create a new instance of the call
   CDCE pass for context CTXT.  */

gimple_opt_pass *
make_pass_call_cdce (gcc::context *ctxt)
{
  return new pass_call_cdce (ctxt);
}