/* Conditional Dead Call Elimination pass for the GNU compiler.
   Copyright (C) 2008-2017 Free Software Foundation, Inc.
   Contributed by Xinliang David Li <davidxl@google.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "builtins.h"
#include "internal-fn.h"
#include "tree-dfa.h"

/* This pass serves two closely-related purposes:

   1. It conditionally executes calls that set errno if (a) the result of
      the call is unused and (b) a simple range check on the arguments can
      detect most cases where errno does not need to be set.

      This is the "conditional dead-code elimination" that gave the pass
      its original name, since the call is dead for most argument values.
      The calls for which it helps are usually part of the C++ abstraction
      penalty exposed after inlining.

   2. It looks for calls to built-in functions that set errno and whose
      result is used.  It checks whether there is an associated internal
      function that doesn't set errno and whether the target supports
      that internal function.  If so, the pass uses the internal function
      to compute the result of the built-in function but still arranges
      for errno to be set when necessary.  There are two ways of setting
      errno:

      a. by protecting the original call with the same argument checks as (1)

      b. by protecting the original call with a check that the result
         of the internal function is not equal to itself (i.e. is NaN).

      (b) requires that NaNs are the only erroneous results.  It is not
      appropriate for functions like log, which returns ERANGE for zero
      arguments.  (b) is also likely to perform worse than (a) because it
      requires the result to be calculated first.  The pass therefore uses
      (a) when it can and uses (b) as a fallback.

      For (b) the pass can replace the original call with a call to
      IFN_SET_EDOM, if the target supports direct assignments to errno.

   In both cases, arguments that require errno to be set should occur
   rarely in practice.  Checks of the errno result should also be rare,
   but the compiler would need powerful interprocedural analysis to
   prove that errno is not checked.  It's much easier to add argument
   checks or result checks instead.

   An example of (1) is:

      log (x);   // Mostly dead call
   ==>
      if (__builtin_islessequal (x, 0))
        log (x);

   With this change, the call to log (x) is effectively eliminated, since
   in the majority of cases log won't be called with x out of range.  The
   branch is totally predictable, so the branch cost is low.

   An example of (2) is:

      y = sqrt (x);
   ==>
      y = IFN_SQRT (x);
      if (__builtin_isless (x, 0))
        sqrt (x);

   In the vast majority of cases we should then never need to call sqrt.

   Note that library functions are not supposed to clear errno to zero
   without error.  See IEEE Std 1003.1, section 2.3 Error Numbers, and
   section 7.5:3 of ISO/IEC 9899 (C99).

   The condition wrapping the builtin call is conservatively set to avoid
   overly aggressive (wrong) shrink wrapping.  */

/* A structure for representing the input domain of a function argument
   in integers.  If the lower bound is -inf, HAS_LB is set to false.
   If the upper bound is +inf, HAS_UB is false.  IS_LB_INCLUSIVE and
   IS_UB_INCLUSIVE indicate whether the lb and ub values are inclusive
   respectively.  */

struct inp_domain
{
  int lb;
  int ub;
  bool has_lb;
  bool has_ub;
  bool is_lb_inclusive;
  bool is_ub_inclusive;
};

/* A helper function to construct and return an input
   domain object.  LB is the lower bound, HAS_LB is
   a boolean flag indicating if the lower bound exists,
   and LB_INCLUSIVE is a boolean flag indicating if the
   lower bound is inclusive or not.  UB, HAS_UB, and
   UB_INCLUSIVE have the same meaning, but for the upper
   bound of the domain.  */

static inp_domain
get_domain (int lb, bool has_lb, bool lb_inclusive,
            int ub, bool has_ub, bool ub_inclusive)
{
  inp_domain domain;
  domain.lb = lb;
  domain.has_lb = has_lb;
  domain.is_lb_inclusive = lb_inclusive;
  domain.ub = ub;
  domain.has_ub = has_ub;
  domain.is_ub_inclusive = ub_inclusive;
  return domain;
}

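/* For example, get_domain (-1, true, true, 1, true, true) describes the
   closed interval [-1, 1] used below for acos and asin, while
   get_domain (0, true, false, 0, false, false) describes the open
   interval (0, +inf) used for the log functions; see get_no_error_domain
   later in this file for the full set of domains.  */
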
/* A helper function to check the target format for the
   argument type.  In this implementation, only IEEE formats
   are supported.  ARG is the call argument to be checked.
   Returns true if the format is supported.  To support other
   target formats, function get_no_error_domain needs to be
   enhanced to have range bounds properly computed.  Since
   the check is cheap (very small number of candidates
   to be checked), the result is not cached for each float type.  */

static bool
check_target_format (tree arg)
{
  tree type;
  machine_mode mode;
  const struct real_format *rfmt;

  type = TREE_TYPE (arg);
  mode = TYPE_MODE (type);
  rfmt = REAL_MODE_FORMAT (mode);
  if ((mode == SFmode
       && (rfmt == &ieee_single_format || rfmt == &mips_single_format
           || rfmt == &motorola_single_format))
      || (mode == DFmode
          && (rfmt == &ieee_double_format || rfmt == &mips_double_format
              || rfmt == &motorola_double_format))
      /* For long double, we cannot really check XFmode,
         which is only defined on Intel platforms.
         Candidate pre-selection using builtin function
         code guarantees that we are checking formats
         for long double modes: double, quad, and extended.  */
      || (mode != SFmode && mode != DFmode
          && (rfmt == &ieee_quad_format
              || rfmt == &mips_quad_format
              || rfmt == &ieee_extended_motorola_format
              || rfmt == &ieee_extended_intel_96_format
              || rfmt == &ieee_extended_intel_128_format
              || rfmt == &ieee_extended_intel_96_round_53_format)))
    return true;

  return false;
}

/* A helper function to help select calls to pow that are suitable for
   conditional DCE transformation.  It looks for pow calls that can be
   guarded with simple conditions.  Such calls either have constant base
   values or base values converted from integers.  Returns true if
   the pow call POW_CALL is a candidate.  */

/* The maximum integer bit size for the base argument of a pow call
   that is suitable for the shrink-wrapping transformation.  */
#define MAX_BASE_INT_BIT_SIZE 32

static bool
check_pow (gcall *pow_call)
{
  tree base, expn;
  enum tree_code bc, ec;

  if (gimple_call_num_args (pow_call) != 2)
    return false;

  base = gimple_call_arg (pow_call, 0);
  expn = gimple_call_arg (pow_call, 1);

  if (!check_target_format (expn))
    return false;

  bc = TREE_CODE (base);
  ec = TREE_CODE (expn);

  /* Folding candidates are not interesting.
     Can actually assert that it is already folded.  */
  if (ec == REAL_CST && bc == REAL_CST)
    return false;

  if (bc == REAL_CST)
    {
      /* Only handle a fixed range of constants.  */
      REAL_VALUE_TYPE mv;
      REAL_VALUE_TYPE bcv = TREE_REAL_CST (base);
      if (real_equal (&bcv, &dconst1))
        return false;
      if (real_less (&bcv, &dconst1))
        return false;
      real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, UNSIGNED);
      if (real_less (&mv, &bcv))
        return false;
      return true;
    }
  else if (bc == SSA_NAME)
    {
      tree base_val0, type;
      gimple *base_def;
      int bit_sz;

      /* Only handle cases where the base value is converted
         from integer values.  */
      base_def = SSA_NAME_DEF_STMT (base);
      if (gimple_code (base_def) != GIMPLE_ASSIGN)
        return false;

      if (gimple_assign_rhs_code (base_def) != FLOAT_EXPR)
        return false;
      base_val0 = gimple_assign_rhs1 (base_def);

      type = TREE_TYPE (base_val0);
      if (TREE_CODE (type) != INTEGER_TYPE)
        return false;
      bit_sz = TYPE_PRECISION (type);
      /* If the type of the base is too wide,
         the resulting shrink-wrapping condition
         will be too conservative.  */
      if (bit_sz > MAX_BASE_INT_BIT_SIZE)
        return false;

      return true;
    }
  else
    return false;
}

/* A helper function to help select candidate function calls that are
   suitable for conditional DCE.  Candidate functions must have a single
   valid input domain in this implementation, except for pow (see
   check_pow).  Returns true if the function call is a candidate.  */

static bool
check_builtin_call (gcall *bcall)
{
  tree arg;

  arg = gimple_call_arg (bcall, 0);
  return check_target_format (arg);
}

/* Return true if built-in function call CALL calls a math function
   and if we know how to test the range of its arguments to detect _most_
   situations in which errno is not set.  The test must err on the side
   of treating non-erroneous values as potentially erroneous.  */

static bool
can_test_argument_range (gcall *call)
{
  switch (DECL_FUNCTION_CODE (gimple_call_fndecl (call)))
    {
    /* Trig functions.  */
    CASE_FLT_FN (BUILT_IN_ACOS):
    CASE_FLT_FN (BUILT_IN_ASIN):
    /* Hyperbolic functions.  */
    CASE_FLT_FN (BUILT_IN_ACOSH):
    CASE_FLT_FN (BUILT_IN_ATANH):
    CASE_FLT_FN (BUILT_IN_COSH):
    CASE_FLT_FN (BUILT_IN_SINH):
    /* Log functions.  */
    CASE_FLT_FN (BUILT_IN_LOG):
    CASE_FLT_FN (BUILT_IN_LOG2):
    CASE_FLT_FN (BUILT_IN_LOG10):
    CASE_FLT_FN (BUILT_IN_LOG1P):
    /* Exp functions.  */
    CASE_FLT_FN (BUILT_IN_EXP):
    CASE_FLT_FN (BUILT_IN_EXP2):
    CASE_FLT_FN (BUILT_IN_EXP10):
    CASE_FLT_FN (BUILT_IN_EXPM1):
    CASE_FLT_FN (BUILT_IN_POW10):
    /* Sqrt.  */
    CASE_FLT_FN (BUILT_IN_SQRT):
      return check_builtin_call (call);
    /* Special one: two argument pow.  */
    case BUILT_IN_POW:
      return check_pow (call);
    default:
      break;
    }

  return false;
}

/* Return true if CALL can produce a domain error (EDOM) but can never
   produce a pole, range overflow or range underflow error (all ERANGE).
   This means that we can tell whether a function would have set errno
   by testing whether the result is a NaN.  */

static bool
edom_only_function (gcall *call)
{
  switch (DECL_FUNCTION_CODE (gimple_call_fndecl (call)))
    {
    CASE_FLT_FN (BUILT_IN_ACOS):
    CASE_FLT_FN (BUILT_IN_ASIN):
    CASE_FLT_FN (BUILT_IN_ATAN):
    CASE_FLT_FN (BUILT_IN_COS):
    CASE_FLT_FN (BUILT_IN_SIGNIFICAND):
    CASE_FLT_FN (BUILT_IN_SIN):
    CASE_FLT_FN (BUILT_IN_SQRT):
    CASE_FLT_FN (BUILT_IN_FMOD):
    CASE_FLT_FN (BUILT_IN_REMAINDER):
      return true;

    default:
      return false;
    }
}

/* Return true if it is structurally possible to guard CALL.  */

static bool
can_guard_call_p (gimple *call)
{
  return (!stmt_ends_bb_p (call)
          || find_fallthru_edge (gimple_bb (call)->succs));
}

/* A helper function to generate gimple statements for one bound
   comparison, so that the built-in function is called whenever
   TCODE <ARG, LBUB> is *false*.  TEMP_NAME1/TEMP_NAME2 are names
   of the temporaries, CONDS is a vector holding the produced GIMPLE
   statements, and NCONDS points to the variable holding the number of
   logical comparisons.  CONDS is either empty or a list ended with a
   null tree.  */

static void
gen_one_condition (tree arg, int lbub,
                   enum tree_code tcode,
                   const char *temp_name1,
                   const char *temp_name2,
                   vec<gimple *> conds,
                   unsigned *nconds)
{
  tree lbub_real_cst, lbub_cst, float_type;
  tree temp, tempn, tempc, tempcn;
  gassign *stmt1;
  gassign *stmt2;
  gcond *stmt3;

  float_type = TREE_TYPE (arg);
  lbub_cst = build_int_cst (integer_type_node, lbub);
  lbub_real_cst = build_real_from_int_cst (float_type, lbub_cst);

  temp = create_tmp_var (float_type, temp_name1);
  stmt1 = gimple_build_assign (temp, arg);
  tempn = make_ssa_name (temp, stmt1);
  gimple_assign_set_lhs (stmt1, tempn);

  tempc = create_tmp_var (boolean_type_node, temp_name2);
  stmt2 = gimple_build_assign (tempc,
                               fold_build2 (tcode,
                                            boolean_type_node,
                                            tempn, lbub_real_cst));
  tempcn = make_ssa_name (tempc, stmt2);
  gimple_assign_set_lhs (stmt2, tempcn);

  stmt3 = gimple_build_cond_from_tree (tempcn, NULL_TREE, NULL_TREE);
  conds.quick_push (stmt1);
  conds.quick_push (stmt2);
  conds.quick_push (stmt3);
  (*nconds)++;
}

/* A helper function to generate GIMPLE statements for an
   out-of-input-domain check.  ARG is the call argument to be
   runtime checked, DOMAIN holds the valid domain for the given
   function, CONDS points to the vector holding the resulting GIMPLE
   statements.  *NCONDS is the number of logical comparisons.  This
   function produces no more than two logical comparisons, one for
   the lower bound check and one for the upper bound check.  */

static void
gen_conditions_for_domain (tree arg, inp_domain domain,
                           vec<gimple *> conds,
                           unsigned *nconds)
{
  if (domain.has_lb)
    gen_one_condition (arg, domain.lb,
                       (domain.is_lb_inclusive
                        ? UNGE_EXPR : UNGT_EXPR),
                       "DCE_COND_LB", "DCE_COND_LB_TEST",
                       conds, nconds);

  if (domain.has_ub)
    {
      /* Now push a separator.  */
      if (domain.has_lb)
        conds.quick_push (NULL);

      gen_one_condition (arg, domain.ub,
                         (domain.is_ub_inclusive
                          ? UNLE_EXPR : UNLT_EXPR),
                         "DCE_COND_UB", "DCE_COND_UB_TEST",
                         conds, nconds);
    }
}

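/* As a rough sketch of what the two helpers above produce: for log, whose
   no-error domain is (0, +inf), gen_conditions_for_domain emits a single
   condition group of the form

       DCE_COND_LB = x;
       DCE_COND_LB_TEST = DCE_COND_LB u> 0.0;
       if (DCE_COND_LB_TEST) ...

   and shrink_wrap_one_built_in_call_with_conds below wires the false edge
   of that condition to the block containing the original log call.  The
   unordered comparison codes (UNGT_EXPR etc.) make a NaN argument take the
   "skip the call" path, consistent with NaN arguments not setting errno
   for these functions.  */
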
/* A helper function to generate condition
   code for the y argument in a call pow (some_const, y).
   See candidate selection in check_pow.  Since the
   candidates' base values have a limited range,
   the guarded code generated for y is simple:
     if (__builtin_isgreater (y, max_y))
       pow (const, y);
   Note max_y can be computed separately for each
   const base, but in this implementation, we
   choose to compute it using the max base
   in the allowed range for the purpose of
   simplicity.  BASE is the constant base value,
   EXPN is the expression for the exponent argument,
   *CONDS is the vector to hold the resulting statements,
   and *NCONDS is the number of logical conditions.  */

static void
gen_conditions_for_pow_cst_base (tree base, tree expn,
                                 vec<gimple *> conds,
                                 unsigned *nconds)
{
  inp_domain exp_domain;
  /* Validate the range of the base constant to make
     sure it is consistent with check_pow.  */
  REAL_VALUE_TYPE mv;
  REAL_VALUE_TYPE bcv = TREE_REAL_CST (base);
  gcc_assert (!real_equal (&bcv, &dconst1)
              && !real_less (&bcv, &dconst1));
  real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, UNSIGNED);
  gcc_assert (!real_less (&mv, &bcv));

  exp_domain = get_domain (0, false, false,
                           127, true, false);

  gen_conditions_for_domain (expn, exp_domain,
                             conds, nconds);
}

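/* A note on the bound of 127 used above: check_pow only accepts constant
   bases in (1, 256], and with IEEE double precision 256**128 = 2**1024
   already exceeds DBL_MAX (roughly 1.8e308), while 256**127 = 2**1016 does
   not.  Keeping the exponent below 127 is therefore conservative for every
   accepted base.  */
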
/* Generate error condition code for pow calls with
   non-constant base values.  The candidates selected
   have their base argument value converted from
   an integer value (1, 2, or 4 bytes; see check_pow), and
   the max exp value is computed based on the size
   of the integer type (i.e. the max possible base value).
   The resulting input domain for the exp argument is thus
   conservative (smaller than the max value allowed by
   the runtime value of the base).  BASE is the integer
   base value, EXPN is the expression for the exponent
   argument, *CONDS is the vector to hold the resulting
   statements, and *NCONDS is the number of logical
   conditions.  */

static void
gen_conditions_for_pow_int_base (tree base, tree expn,
                                 vec<gimple *> conds,
                                 unsigned *nconds)
{
  gimple *base_def;
  tree base_val0;
  tree int_type;
  tree temp, tempn;
  tree cst0;
  gimple *stmt1, *stmt2;
  int bit_sz, max_exp;
  inp_domain exp_domain;

  base_def = SSA_NAME_DEF_STMT (base);
  base_val0 = gimple_assign_rhs1 (base_def);
  int_type = TREE_TYPE (base_val0);
  bit_sz = TYPE_PRECISION (int_type);
  gcc_assert (bit_sz > 0
              && bit_sz <= MAX_BASE_INT_BIT_SIZE);

  /* Determine the max exp argument value according to
     the size of the base integer.  The max exp value
     is conservatively estimated assuming IEEE754 double
     precision format.  */
  if (bit_sz == 8)
    max_exp = 128;
  else if (bit_sz == 16)
    max_exp = 64;
  else
    {
      gcc_assert (bit_sz == MAX_BASE_INT_BIT_SIZE);
      max_exp = 32;
    }

  /* For pow ((double)x, y), generate the following conditions:
     cond 1:
        temp1 = x;
        if (__builtin_islessequal (temp1, 0))

     cond 2:
        temp2 = y;
        if (__builtin_isgreater (temp2, max_exp_real_cst))  */

  /* Generate the conditions in reverse order -- first
     the condition for the exp argument.  */

  exp_domain = get_domain (0, false, false,
                           max_exp, true, true);

  gen_conditions_for_domain (expn, exp_domain,
                             conds, nconds);

  /* Now generate the condition for the base argument.
     Note it does not use the helper function
     gen_conditions_for_domain because the base
     type is integer.  */

  /* Push a separator.  */
  conds.quick_push (NULL);

  temp = create_tmp_var (int_type, "DCE_COND1");
  cst0 = build_int_cst (int_type, 0);
  stmt1 = gimple_build_assign (temp, base_val0);
  tempn = make_ssa_name (temp, stmt1);
  gimple_assign_set_lhs (stmt1, tempn);
  stmt2 = gimple_build_cond (GT_EXPR, tempn, cst0, NULL_TREE, NULL_TREE);

  conds.quick_push (stmt1);
  conds.quick_push (stmt2);
  (*nconds)++;
}

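/* A note on the max_exp table above: a base of BIT_SZ bits is less than
   2**BIT_SZ, so requiring BIT_SZ * max_exp <= 1024 keeps base**exp within
   the finite range of an IEEE double, whose overflow threshold is 2**1024.
   The three cases 8 * 128, 16 * 64 and 32 * 32 all equal 1024, which is
   why the exponent bound halves each time the base width doubles.  */
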
/* Method to generate conditional statements for guarding conditionally
   dead calls to pow.  One or more statements can be generated for
   each logical condition.  Statement groups of different conditions
   are separated by a NULL tree and they are stored in the vec
   conds.  The number of logical conditions is stored in *nconds.

   See C99 standard, 7.12.7.4:2, for the description of pow (x, y).
   The precise conditions for domain errors are complex.  In this
   implementation, a simplified (but conservative) valid domain
   for x and y is used: x is positive to avoid domain errors, while
   y is smaller than an upper bound (depending on x) to avoid range
   errors.  Runtime code is generated to check x (if not constant)
   and y against the valid domain.  If the arguments are outside it,
   control jumps to the call; otherwise the call is bypassed.
   POW_CALL is the call statement, *CONDS is a vector holding the
   resulting condition statements, and *NCONDS is the number of
   logical conditions.  */

static void
gen_conditions_for_pow (gcall *pow_call, vec<gimple *> conds,
                        unsigned *nconds)
{
  tree base, expn;
  enum tree_code bc;

  gcc_checking_assert (check_pow (pow_call));

  *nconds = 0;

  base = gimple_call_arg (pow_call, 0);
  expn = gimple_call_arg (pow_call, 1);

  bc = TREE_CODE (base);

  if (bc == REAL_CST)
    gen_conditions_for_pow_cst_base (base, expn, conds, nconds);
  else if (bc == SSA_NAME)
    gen_conditions_for_pow_int_base (base, expn, conds, nconds);
  else
    gcc_unreachable ();
}

/* A helper routine to help compute the valid input domain
   for a builtin function.  See C99 7.12.7 for details.  In this
   implementation, we only handle single-region domains.  The
   resulting region can be conservative (smaller) than the actual
   one and rounded to integers.  Some of the bounds are documented
   in the standard, while other limit constants are computed
   assuming IEEE floating point format (for SF and DF modes).
   Since IEEE only sets minimum requirements for long double format,
   different long double formats exist under different implementations
   (e.g., 64-bit double precision (DF), 80-bit double-extended
   precision (XF), and 128-bit quad precision (QF)).  For simplicity,
   in this implementation, the computed bounds for long double assume
   the 64-bit format (DF), and are therefore conservative.  Another
   assumption is that the single precision float type is always SF mode,
   and the double type is DF mode.  This function is quite
   implementation specific, so it may not be suitable to be part of
   builtins.c.  This needs to be revisited later to see if it can
   be leveraged in x87 assembly expansion.  */

static inp_domain
get_no_error_domain (enum built_in_function fnc)
{
  switch (fnc)
    {
    /* Trig functions: return [-1, +1]  */
    CASE_FLT_FN (BUILT_IN_ACOS):
    CASE_FLT_FN (BUILT_IN_ASIN):
      return get_domain (-1, true, true,
                         1, true, true);
    /* Hyperbolic functions.  */
    CASE_FLT_FN (BUILT_IN_ACOSH):
      /* acosh: [1, +inf)  */
      return get_domain (1, true, true,
                         1, false, false);
    CASE_FLT_FN (BUILT_IN_ATANH):
      /* atanh: (-1, +1)  */
      return get_domain (-1, true, false,
                         1, true, false);
    case BUILT_IN_COSHF:
    case BUILT_IN_SINHF:
      /* coshf: (-89, +89)  */
      return get_domain (-89, true, false,
                         89, true, false);
    case BUILT_IN_COSH:
    case BUILT_IN_SINH:
    case BUILT_IN_COSHL:
    case BUILT_IN_SINHL:
      /* cosh: (-710, +710)  */
      return get_domain (-710, true, false,
                         710, true, false);
    /* Log functions: (0, +inf)  */
    CASE_FLT_FN (BUILT_IN_LOG):
    CASE_FLT_FN (BUILT_IN_LOG2):
    CASE_FLT_FN (BUILT_IN_LOG10):
      return get_domain (0, true, false,
                         0, false, false);
    CASE_FLT_FN (BUILT_IN_LOG1P):
      return get_domain (-1, true, false,
                         0, false, false);
    /* Exp functions.  */
    case BUILT_IN_EXPF:
    case BUILT_IN_EXPM1F:
      /* expf: (-inf, 88)  */
      return get_domain (-1, false, false,
                         88, true, false);
    case BUILT_IN_EXP:
    case BUILT_IN_EXPM1:
    case BUILT_IN_EXPL:
    case BUILT_IN_EXPM1L:
      /* exp: (-inf, 709)  */
      return get_domain (-1, false, false,
                         709, true, false);
    case BUILT_IN_EXP2F:
      /* exp2f: (-inf, 128)  */
      return get_domain (-1, false, false,
                         128, true, false);
    case BUILT_IN_EXP2:
    case BUILT_IN_EXP2L:
      /* exp2: (-inf, 1024)  */
      return get_domain (-1, false, false,
                         1024, true, false);
    case BUILT_IN_EXP10F:
    case BUILT_IN_POW10F:
      /* exp10f: (-inf, 38)  */
      return get_domain (-1, false, false,
                         38, true, false);
    case BUILT_IN_EXP10:
    case BUILT_IN_POW10:
    case BUILT_IN_EXP10L:
    case BUILT_IN_POW10L:
      /* exp10: (-inf, 308)  */
      return get_domain (-1, false, false,
                         308, true, false);
    /* sqrt: [0, +inf)  */
    CASE_FLT_FN (BUILT_IN_SQRT):
      return get_domain (0, true, true,
                         0, false, false);
    default:
      gcc_unreachable ();
    }

  gcc_unreachable ();
}

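/* Where the less obvious limits above come from (all rounded towards the
   safe side for IEEE formats): exp overflows double a little above
   ln(DBL_MAX) ~= 709.78 and float above ln(FLT_MAX) ~= 88.72; exp2
   overflows double at 1024 and float at 128; exp10/pow10 overflow double
   a little above log10(DBL_MAX) ~= 308.25 and float above roughly 38.5;
   and since cosh(x) and sinh(x) grow like exp(|x|)/2, their bounds of 710
   and 89 sit one unit beyond the corresponding exp bounds.  */
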
/* The function to generate shrink wrap conditions for a partially
   dead builtin call whose return value is not used anywhere,
   but which has to be kept live due to a potential error condition.
   BI_CALL is the builtin call, CONDS is the vector of statements
   for condition code, NCONDS is the pointer to the number of
   logical conditions.  Statements belonging to different logical
   conditions are separated by a NULL tree in the vector.  */

static void
gen_shrink_wrap_conditions (gcall *bi_call, vec<gimple *> conds,
                            unsigned int *nconds)
{
  gcall *call;
  tree fn;
  enum built_in_function fnc;

  gcc_assert (nconds && conds.exists ());
  gcc_assert (conds.length () == 0);
  gcc_assert (is_gimple_call (bi_call));

  call = bi_call;
  fn = gimple_call_fndecl (call);
  gcc_assert (fn && DECL_BUILT_IN (fn));
  fnc = DECL_FUNCTION_CODE (fn);
  *nconds = 0;

  if (fnc == BUILT_IN_POW)
    gen_conditions_for_pow (call, conds, nconds);
  else
    {
      tree arg;
      inp_domain domain = get_no_error_domain (fnc);
      *nconds = 0;
      arg = gimple_call_arg (bi_call, 0);
      gen_conditions_for_domain (arg, domain, conds, nconds);
    }

  return;
}

/* Shrink-wrap BI_CALL so that it is only called when one of the NCONDS
   conditions in CONDS is false.  */

static void
shrink_wrap_one_built_in_call_with_conds (gcall *bi_call, vec <gimple *> conds,
                                          unsigned int nconds)
{
  gimple_stmt_iterator bi_call_bsi;
  basic_block bi_call_bb, join_tgt_bb, guard_bb;
  edge join_tgt_in_edge_from_call, join_tgt_in_edge_fall_thru;
  edge bi_call_in_edge0, guard_bb_in_edge;
  unsigned tn_cond_stmts;
  unsigned ci;
  gimple *cond_expr = NULL;
  gimple *cond_expr_start;

  /* The cfg we want to create looks like this:

           [guard n-1]                       <- guard_bb (old block)
             |    \
             | [guard n-2]                   }
             |    /  \                       }
             |   /   ...                     }  new blocks
             |  /  [guard 0]                 }
             | /    /    |                   }
           [ call ]      |  <- bi_call_bb    }
             |  \        |
             |   \       |
             |   [ join ]  <- join_tgt_bb (old iff call must end bb)
             |       |
         possible EH edges (only if [join] is old)

     When [join] is new, the immediate dominators for these blocks are:

     1. [guard n-1]: unchanged
     2. [call]: [guard n-1]
     3. [guard m]: [guard m+1] for 0 <= m <= n-2
     4. [join]: [guard n-1]

     We punt for the more complex case of [join] being old and
     simply free the dominance info.  We also punt on postdominators,
     which aren't expected to be available at this point anyway.  */
  bi_call_bb = gimple_bb (bi_call);

  /* Now find the join target bb -- split bi_call_bb if needed.  */
  if (stmt_ends_bb_p (bi_call))
    {
      /* We checked that there was a fallthrough edge in
         can_guard_call_p.  */
      join_tgt_in_edge_from_call = find_fallthru_edge (bi_call_bb->succs);
      gcc_assert (join_tgt_in_edge_from_call);
      /* We don't want to handle PHIs.  */
      if (EDGE_COUNT (join_tgt_in_edge_from_call->dest->preds) > 1)
        join_tgt_bb = split_edge (join_tgt_in_edge_from_call);
      else
        {
          join_tgt_bb = join_tgt_in_edge_from_call->dest;
          /* We may have degenerate PHIs in the destination.  Propagate
             those out.  */
          for (gphi_iterator i = gsi_start_phis (join_tgt_bb); !gsi_end_p (i);)
            {
              gphi *phi = i.phi ();
              replace_uses_by (gimple_phi_result (phi),
                               gimple_phi_arg_def (phi, 0));
              remove_phi_node (&i, true);
            }
        }
    }
  else
    {
      join_tgt_in_edge_from_call = split_block (bi_call_bb, bi_call);
      join_tgt_bb = join_tgt_in_edge_from_call->dest;
    }

  bi_call_bsi = gsi_for_stmt (bi_call);

  /* Now it is time to insert the first conditional expression
     into bi_call_bb and split this bb so that bi_call is
     shrink-wrapped.  */
  tn_cond_stmts = conds.length ();
  cond_expr = NULL;
  cond_expr_start = conds[0];
  for (ci = 0; ci < tn_cond_stmts; ci++)
    {
      gimple *c = conds[ci];
      gcc_assert (c || ci != 0);
      if (!c)
        break;
      gsi_insert_before (&bi_call_bsi, c, GSI_SAME_STMT);
      cond_expr = c;
    }
  ci++;
  gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);

  typedef std::pair<edge, edge> edge_pair;
  auto_vec<edge_pair, 8> edges;

  bi_call_in_edge0 = split_block (bi_call_bb, cond_expr);
  bi_call_in_edge0->flags &= ~EDGE_FALLTHRU;
  bi_call_in_edge0->flags |= EDGE_FALSE_VALUE;
  guard_bb = bi_call_bb;
  bi_call_bb = bi_call_in_edge0->dest;
  join_tgt_in_edge_fall_thru = make_edge (guard_bb, join_tgt_bb,
                                          EDGE_TRUE_VALUE);

  edges.reserve (nconds);
  edges.quick_push (edge_pair (bi_call_in_edge0, join_tgt_in_edge_fall_thru));

  /* Code generation for the rest of the conditions.  */
  for (unsigned int i = 1; i < nconds; ++i)
    {
      unsigned ci0;
      edge bi_call_in_edge;
      gimple_stmt_iterator guard_bsi = gsi_for_stmt (cond_expr_start);
      ci0 = ci;
      cond_expr_start = conds[ci0];
      for (; ci < tn_cond_stmts; ci++)
        {
          gimple *c = conds[ci];
          gcc_assert (c || ci != ci0);
          if (!c)
            break;
          gsi_insert_before (&guard_bsi, c, GSI_SAME_STMT);
          cond_expr = c;
        }
      ci++;
      gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);
      guard_bb_in_edge = split_block (guard_bb, cond_expr);
      guard_bb_in_edge->flags &= ~EDGE_FALLTHRU;
      guard_bb_in_edge->flags |= EDGE_TRUE_VALUE;

      bi_call_in_edge = make_edge (guard_bb, bi_call_bb, EDGE_FALSE_VALUE);
      edges.quick_push (edge_pair (bi_call_in_edge, guard_bb_in_edge));
    }

  /* Now update the probability and profile information, processing the
     guards in order of execution.

     There are two approaches we could take here.  On the one hand we
     could assign a probability of X to the call block and distribute
     that probability among its incoming edges.  On the other hand we
     could assign a probability of X to each individual call edge.

     The choice only affects calls that have more than one condition.
     In those cases, the second approach would give the call block
     a greater probability than the first.  However, the difference
     is only small, and our chosen X is a pure guess anyway.

     Here we take the second approach because it's slightly simpler
     and because it's easy to see that it doesn't lose profile counts.  */
  bi_call_bb->count = profile_count::zero ();
  bi_call_bb->frequency = 0;
  while (!edges.is_empty ())
    {
      edge_pair e = edges.pop ();
      edge call_edge = e.first;
      edge nocall_edge = e.second;
      basic_block src_bb = call_edge->src;
      gcc_assert (src_bb == nocall_edge->src);

      call_edge->probability = profile_probability::very_unlikely ();
      call_edge->count
        = src_bb->count.apply_probability (call_edge->probability);
      nocall_edge->probability = profile_probability::always ()
                                 - call_edge->probability;
      nocall_edge->count = src_bb->count - call_edge->count;

      unsigned int call_frequency
        = call_edge->probability.apply (src_bb->frequency);

      bi_call_bb->count += call_edge->count;
      bi_call_bb->frequency += call_frequency;

      if (nocall_edge->dest != join_tgt_bb)
        {
          nocall_edge->dest->count = nocall_edge->count;
          nocall_edge->dest->frequency = src_bb->frequency - call_frequency;
        }
    }

  if (dom_info_available_p (CDI_DOMINATORS))
    {
      /* The split_blocks leave [guard 0] as the immediate dominator
         of [call] and [call] as the immediate dominator of [join].
         Fix them up.  */
      set_immediate_dominator (CDI_DOMINATORS, bi_call_bb, guard_bb);
      set_immediate_dominator (CDI_DOMINATORS, join_tgt_bb, guard_bb);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      location_t loc;
      loc = gimple_location (bi_call);
      fprintf (dump_file,
               "%s:%d: note: function call is shrink-wrapped"
               " into error conditions.\n",
               LOCATION_FILE (loc), LOCATION_LINE (loc));
    }
}

/* Shrink-wrap BI_CALL so that it is only called when it might set errno
   (but is always called if it would set errno).  */

static void
shrink_wrap_one_built_in_call (gcall *bi_call)
{
  unsigned nconds = 0;
  auto_vec<gimple *, 12> conds;
  gen_shrink_wrap_conditions (bi_call, conds, &nconds);
  gcc_assert (nconds != 0);
  shrink_wrap_one_built_in_call_with_conds (bi_call, conds, nconds);
}

/* Return true if built-in function call CALL could be implemented using
   a combination of an internal function to compute the result and a
   separate call to set errno.  */

static bool
can_use_internal_fn (gcall *call)
{
  /* Only replace calls that set errno.  */
  if (!gimple_vdef (call))
    return false;

  /* See whether there is an internal function for this built-in.  */
  if (replacement_internal_fn (call) == IFN_LAST)
    return false;

  /* See whether we can catch all cases where errno would be set,
     while still avoiding the call in most cases.  */
  if (!can_test_argument_range (call)
      && !edom_only_function (call))
    return false;

  return true;
}

/* Implement built-in function call CALL using an internal function.  */

static void
use_internal_fn (gcall *call)
{
  /* We'll be inserting another call with the same arguments after the
     lhs has been set, so prevent any possible coalescing failure from
     having both values live at once.  See PR 71020.  */
  replace_abnormal_ssa_names (call);

  unsigned nconds = 0;
  auto_vec<gimple *, 12> conds;
  if (can_test_argument_range (call))
    {
      gen_shrink_wrap_conditions (call, conds, &nconds);
      gcc_assert (nconds != 0);
    }
  else
    gcc_assert (edom_only_function (call));

  internal_fn ifn = replacement_internal_fn (call);
  gcc_assert (ifn != IFN_LAST);

  /* Construct the new call, with the same arguments as the original one.  */
  auto_vec <tree, 16> args;
  unsigned int nargs = gimple_call_num_args (call);
  for (unsigned int i = 0; i < nargs; ++i)
    args.safe_push (gimple_call_arg (call, i));
  gcall *new_call = gimple_build_call_internal_vec (ifn, args);
  gimple_set_location (new_call, gimple_location (call));
  gimple_call_set_nothrow (new_call, gimple_call_nothrow_p (call));

  /* Transfer the LHS to the new call.  */
  tree lhs = gimple_call_lhs (call);
  gimple_call_set_lhs (new_call, lhs);
  gimple_call_set_lhs (call, NULL_TREE);
  SSA_NAME_DEF_STMT (lhs) = new_call;

  /* Insert the new call.  */
  gimple_stmt_iterator gsi = gsi_for_stmt (call);
  gsi_insert_before (&gsi, new_call, GSI_SAME_STMT);

  if (nconds == 0)
    {
      /* Skip the call if LHS == LHS.  If we reach here, EDOM is the only
         valid errno value and it is used iff the result is NaN.  */
      conds.quick_push (gimple_build_cond (EQ_EXPR, lhs, lhs,
                                           NULL_TREE, NULL_TREE));
      nconds++;

      /* Try replacing the original call with a direct assignment to
         errno, via an internal function.  */
      if (set_edom_supported_p () && !stmt_ends_bb_p (call))
        {
          gimple_stmt_iterator gsi = gsi_for_stmt (call);
          gcall *new_call = gimple_build_call_internal (IFN_SET_EDOM, 0);
          gimple_set_vuse (new_call, gimple_vuse (call));
          gimple_set_vdef (new_call, gimple_vdef (call));
          SSA_NAME_DEF_STMT (gimple_vdef (new_call)) = new_call;
          gimple_set_location (new_call, gimple_location (call));
          gsi_replace (&gsi, new_call, false);
          call = new_call;
        }
    }

  shrink_wrap_one_built_in_call_with_conds (call, conds, nconds);
}

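/* As a rough sketch of what the function above produces for a call whose
   result is used: for an edom_only function with no testable argument
   range, such as fmod, the call

       z = fmod (x, y);

   becomes approximately

       z = IFN_FMOD (x, y);
       if (z == z)
         <skip the library call>;
       else
         fmod (x, y);

   where the else arm may instead be IFN_SET_EDOM () when the target
   supports direct assignments to errno.  Calls with a testable argument
   range, such as sqrt, keep the argument check generated by
   gen_shrink_wrap_conditions rather than the z == z self-comparison.  */
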
/* The top level function for the conditional dead code shrink
   wrapping transformation.  */

static void
shrink_wrap_conditional_dead_built_in_calls (vec<gcall *> calls)
{
  unsigned i = 0;

  unsigned n = calls.length ();
  for (; i < n; i++)
    {
      gcall *bi_call = calls[i];
      if (gimple_call_lhs (bi_call))
        use_internal_fn (bi_call);
      else
        shrink_wrap_one_built_in_call (bi_call);
    }
}

namespace {

const pass_data pass_data_call_cdce =
{
  GIMPLE_PASS, /* type */
  "cdce", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CALL_CDCE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_call_cdce : public gimple_opt_pass
{
public:
  pass_call_cdce (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_call_cdce, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* The limit constants used in the implementation
         assume IEEE floating point format.  Other formats
         can be supported in the future if needed.  */
      return flag_tree_builtin_call_dce != 0;
    }

  virtual unsigned int execute (function *);

}; // class pass_call_cdce

unsigned int
pass_call_cdce::execute (function *fun)
{
  basic_block bb;
  gimple_stmt_iterator i;
  auto_vec<gcall *> cond_dead_built_in_calls;
  FOR_EACH_BB_FN (bb, fun)
    {
      /* Skip blocks that are being optimized for size, since our
         transformation always increases code size.  */
      if (optimize_bb_for_size_p (bb))
        continue;

      /* Collect dead call candidates.  */
      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
        {
          gcall *stmt = dyn_cast <gcall *> (gsi_stmt (i));
          if (stmt
              && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
              && (gimple_call_lhs (stmt)
                  ? can_use_internal_fn (stmt)
                  : can_test_argument_range (stmt))
              && can_guard_call_p (stmt))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "Found conditional dead call: ");
                  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
                  fprintf (dump_file, "\n");
                }
              if (!cond_dead_built_in_calls.exists ())
                cond_dead_built_in_calls.create (64);
              cond_dead_built_in_calls.safe_push (stmt);
            }
        }
    }

  if (!cond_dead_built_in_calls.exists ())
    return 0;

  shrink_wrap_conditional_dead_built_in_calls (cond_dead_built_in_calls);
  free_dominance_info (CDI_POST_DOMINATORS);
  /* As we introduced new control-flow we need to insert PHI-nodes
     for the call-clobbers of the remaining call.  */
  mark_virtual_operands_for_renaming (fun);
  return TODO_update_ssa;
}

} // anon namespace

gimple_opt_pass *
make_pass_call_cdce (gcc::context *ctxt)
{
  return new pass_call_cdce (ctxt);
}