1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2023 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
24 #include "basic-block.h"
28 #include "dominance.h"
33 #include "tree-pass.h"
35 #include "gimple-pretty-print.h"
36 #include "fold-const.h"
38 #include "gimple-iterator.h"
40 #include "tree-ssa-loop-manip.h"
41 #include "tree-ssa-loop-niter.h"
42 #include "tree-into-ssa.h"
44 #include "tree-scalar-evolution.h"
45 #include "tree-ssa-propagate.h"
47 #include "vr-values.h"
48 #include "gimple-array-bounds.h"
49 #include "gimple-range.h"
50 #include "gimple-range-path.h"
51 #include "value-pointer-equiv.h"
52 #include "gimple-fold.h"
54 #include "tree-ssa-dce.h"
56 // This class is utilized by VRP and ranger to remove __builtin_unreachable
57 // calls, and reflect any resulting global ranges.
59 // maybe_register() is called on condition statements, and if that
60 // matches the pattern of one branch being a builtin_unreachable, either check
61 // for early removal or register the resulting executable edge in a list.
63 // During early/non-final processing, we check to see if ALL exports from the
64 // block can be safely updated with a new global value. If they can, then
65 // we rewrite the condition and update those values immediately. Otherwise
66 // the unreachable condition is left in the IL until the final pass.
68 // During final processing, after all blocks have been registered,
69 // remove_and_update_globals() will
70 // - check all exports from registered blocks
71 // - ensure the cache entry of each export is set with the appropriate range
72 // - rewrite the conditions to take the executable edge
73 // - perform DCE on any feeding instructions to those rewritten conditions
75 // Then the immediate use chain of each export is walked, and a new
76 // global range created by unioning the ranges at all remaining use locations.
78 class remove_unreachable
{
80 remove_unreachable (gimple_ranger
&r
, bool all
) : m_ranger (r
), final_p (all
)
81 { m_list
.create (30); }
82 ~remove_unreachable () { m_list
.release (); }
83 void handle_early (gimple
*s
, edge e
);
84 void maybe_register (gimple
*s
);
85 bool remove_and_update_globals ();
86 vec
<std::pair
<int, int> > m_list
;
87 gimple_ranger
&m_ranger
;
91 // Check if block BB has a __builtin_unreachable () call on one arm, and
92 // register the executable edge if so.
95 remove_unreachable::maybe_register (gimple
*s
)
97 gcc_checking_assert (gimple_code (s
) == GIMPLE_COND
);
98 basic_block bb
= gimple_bb (s
);
100 edge e0
= EDGE_SUCC (bb
, 0);
101 basic_block bb0
= e0
->dest
;
102 bool un0
= EDGE_COUNT (bb0
->succs
) == 0
103 && gimple_seq_unreachable_p (bb_seq (bb0
));
104 edge e1
= EDGE_SUCC (bb
, 1);
105 basic_block bb1
= e1
->dest
;
106 bool un1
= EDGE_COUNT (bb1
->succs
) == 0
107 && gimple_seq_unreachable_p (bb_seq (bb1
));
109 // If the 2 blocks are not different, ignore.
113 // Constant expressions are ignored.
114 if (TREE_CODE (gimple_cond_lhs (s
)) != SSA_NAME
115 && TREE_CODE (gimple_cond_rhs (s
)) != SSA_NAME
)
118 edge e
= un0
? e1
: e0
;
123 m_list
.safe_push (std::make_pair (e
->src
->index
, e
->dest
->index
));
126 // Return true if all uses of NAME are dominated by block BB. 1 use
127 // is allowed in block BB; this is the one we hope to remove.
131 // goto <bb 3>; [0.00%]
132 // Any additional use of _1 or _2 in this block invalidates early replacement.
135 fully_replaceable (tree name
, basic_block bb
)
138 imm_use_iterator iter
;
139 bool saw_in_bb
= false;
141 // If a name loads from memory, we may lose information used in
142 // commoning opportunities later. See tree-ssa/ssa-pre-34.c.
143 gimple
*def_stmt
= SSA_NAME_DEF_STMT (name
);
144 if (gimple_vuse (def_stmt
))
147 FOR_EACH_IMM_USE_FAST (use_p
, iter
, name
)
149 gimple
*use_stmt
= USE_STMT (use_p
);
150 // Ignore debug stmts and the branch.
151 if (is_gimple_debug (use_stmt
))
153 basic_block use_bb
= gimple_bb (use_stmt
);
154 // Only one use in the block allowed to avoid complicated cases.
162 else if (!dominated_by_p (CDI_DOMINATORS
, use_bb
, bb
))
168 // This routine is called to check builtin_unreachable calls during any
169 // time before final removal. The only way we can be sure it does not
170 // provide any additional information is to expect that we can update the
171 // global values of all exports from a block. This means the branch
172 // to the unreachable call must dominate all uses of those ssa-names, with
173 // the exception that there can be a single use in the block containing
174 // the branch. If the name used in the branch is defined in the block, it may
175 // contain the name of something else that will be an export. And likewise
176 // that may also use another name that is an export etc.
178 // As long as there is only a single use, we can be sure that there are no other
179 // side effects (like being passed to a call, or stored to a global, etc.)
180 // This means we will miss cases where there are 2 or more uses that have
181 // no intervening statements that may have side effects, but it catches most
182 // of the cases we care about, and prevents expensive in-depth analysis.
184 // Ranger will still reflect the proper ranges at other places in these missed
185 // cases, we simply will not remove/set globals early.
188 remove_unreachable::handle_early (gimple
*s
, edge e
)
190 bool lhs_p
= TREE_CODE (gimple_cond_lhs (s
)) == SSA_NAME
;
191 bool rhs_p
= TREE_CODE (gimple_cond_rhs (s
)) == SSA_NAME
;
192 // Do not remove __builtin_unreachable if it confers a relation, or
193 // that relation may be lost in subsequent passes.
196 // Do not remove addresses early. ie if (x == &y)
197 if (lhs_p
&& TREE_CODE (gimple_cond_rhs (s
)) == ADDR_EXPR
)
200 gcc_checking_assert (gimple_outgoing_range_stmt_p (e
->src
) == s
);
201 gcc_checking_assert (!final_p
);
203 // Check if every export use is dominated by this branch.
205 FOR_EACH_GORI_EXPORT_NAME (m_ranger
.gori (), e
->src
, name
)
207 if (!fully_replaceable (name
, e
->src
))
211 // Set the global value for each.
212 FOR_EACH_GORI_EXPORT_NAME (m_ranger
.gori (), e
->src
, name
)
214 Value_Range
r (TREE_TYPE (name
));
215 m_ranger
.range_on_entry (r
, e
->dest
, name
);
216 // Nothing at this late stage we can do if the write fails.
217 if (!set_range_info (name
, r
))
221 fprintf (dump_file
, "Global Exported (via early unreachable): ");
222 print_generic_expr (dump_file
, name
, TDF_SLIM
);
223 fprintf (dump_file
, " = ");
224 gimple_range_global (r
, name
);
226 fputc ('\n', dump_file
);
230 tree ssa
= lhs_p
? gimple_cond_lhs (s
) : gimple_cond_rhs (s
);
232 // Rewrite the condition.
233 if (e
->flags
& EDGE_TRUE_VALUE
)
234 gimple_cond_make_true (as_a
<gcond
*> (s
));
236 gimple_cond_make_false (as_a
<gcond
*> (s
));
239 // If the name on S is defined in this block, see if there is DCE work to do.
240 if (gimple_bb (SSA_NAME_DEF_STMT (ssa
)) == e
->src
)
243 bitmap_set_bit (dce
, SSA_NAME_VERSION (ssa
));
244 simple_dce_from_worklist (dce
);
249 // Process the edges in the list, changing the conditions and removing any
250 // dead code feeding those conditions. Calculate the range of any
251 // names that may have been exported from those blocks, and determine if
252 // there are any updates to their global ranges.
253 // Return true if any builtin_unreachables/globals eliminated/updated.
256 remove_unreachable::remove_and_update_globals ()
258 if (m_list
.length () == 0)
261 // Ensure the cache in SCEV has been cleared before processing
262 // globals to be removed.
269 auto_bitmap all_exports
;
270 for (i
= 0; i
< m_list
.length (); i
++)
273 basic_block src
= BASIC_BLOCK_FOR_FN (cfun
, eb
.first
);
274 basic_block dest
= BASIC_BLOCK_FOR_FN (cfun
, eb
.second
);
277 edge e
= find_edge (src
, dest
);
278 gimple
*s
= gimple_outgoing_range_stmt_p (e
->src
);
279 gcc_checking_assert (gimple_code (s
) == GIMPLE_COND
);
281 bool dominate_exit_p
= true;
282 FOR_EACH_GORI_EXPORT_NAME (m_ranger
.gori (), e
->src
, name
)
284 // Ensure the cache is set for NAME in the succ block.
285 Value_Range
r(TREE_TYPE (name
));
286 Value_Range
ex(TREE_TYPE (name
));
287 m_ranger
.range_on_entry (r
, e
->dest
, name
);
288 m_ranger
.range_on_entry (ex
, EXIT_BLOCK_PTR_FOR_FN (cfun
), name
);
289 // If the range produced by this __builtin_unreachable expression
290 // is not fully reflected in the range at exit, then it does not
291 // dominate the exit of the function.
292 if (ex
.intersect (r
))
293 dominate_exit_p
= false;
296 // If the exit is dominated, add to the export list. Otherwise if this
297 // isn't the final VRP pass, leave the call in the IL.
299 bitmap_ior_into (all_exports
, m_ranger
.gori ().exports (e
->src
));
304 // Rewrite the condition.
305 if (e
->flags
& EDGE_TRUE_VALUE
)
306 gimple_cond_make_true (as_a
<gcond
*> (s
));
308 gimple_cond_make_false (as_a
<gcond
*> (s
));
312 if (bitmap_empty_p (all_exports
))
314 // Invoke DCE on all exported names to eliminate dead feeding defs.
316 bitmap_copy (dce
, all_exports
);
317 // Don't attempt to DCE parameters.
318 EXECUTE_IF_SET_IN_BITMAP (all_exports
, 0, i
, bi
)
319 if (!ssa_name (i
) || SSA_NAME_IS_DEFAULT_DEF (ssa_name (i
)))
320 bitmap_clear_bit (dce
, i
);
321 simple_dce_from_worklist (dce
);
323 // Loop over all uses of each name and find maximal range. This is the
326 imm_use_iterator iter
;
327 EXECUTE_IF_SET_IN_BITMAP (all_exports
, 0, i
, bi
)
330 if (!name
|| SSA_NAME_IN_FREE_LIST (name
))
332 Value_Range
r (TREE_TYPE (name
));
333 Value_Range
exp_range (TREE_TYPE (name
));
335 FOR_EACH_IMM_USE_FAST (use_p
, iter
, name
)
337 gimple
*use_stmt
= USE_STMT (use_p
);
338 if (is_gimple_debug (use_stmt
))
340 if (!m_ranger
.range_of_expr (exp_range
, name
, use_stmt
))
341 exp_range
.set_varying (TREE_TYPE (name
));
342 r
.union_ (exp_range
);
346 // Include the on-exit range to ensure non-dominated unreachables
347 // don't incorrectly impact the global range.
348 m_ranger
.range_on_entry (exp_range
, EXIT_BLOCK_PTR_FOR_FN (cfun
), name
);
349 r
.union_ (exp_range
);
350 if (r
.varying_p () || r
.undefined_p ())
352 if (!set_range_info (name
, r
))
357 fprintf (dump_file
, "Global Exported (via unreachable): ");
358 print_generic_expr (dump_file
, name
, TDF_SLIM
);
359 fprintf (dump_file
, " = ");
360 gimple_range_global (r
, name
);
362 fputc ('\n', dump_file
);
368 /* VR_TYPE describes a range with minimum value *MIN and maximum
369 value *MAX. Restrict the range to the set of values that have
370 no bits set outside NONZERO_BITS. Update *MIN and *MAX and
371 return the new range type.
373 SGN gives the sign of the values described by the range. */
375 enum value_range_kind
376 intersect_range_with_nonzero_bits (enum value_range_kind vr_type
,
377 wide_int
*min
, wide_int
*max
,
378 const wide_int
&nonzero_bits
,
381 if (vr_type
== VR_ANTI_RANGE
)
383 /* The VR_ANTI_RANGE is equivalent to the union of the ranges
384 A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS
385 to create an inclusive upper bound for A and an inclusive lower
387 wide_int a_max
= wi::round_down_for_mask (*min
- 1, nonzero_bits
);
388 wide_int b_min
= wi::round_up_for_mask (*max
+ 1, nonzero_bits
);
390 /* If the calculation of A_MAX wrapped, A is effectively empty
391 and A_MAX is the highest value that satisfies NONZERO_BITS.
392 Likewise if the calculation of B_MIN wrapped, B is effectively
393 empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */
394 bool a_empty
= wi::ge_p (a_max
, *min
, sgn
);
395 bool b_empty
= wi::le_p (b_min
, *max
, sgn
);
397 /* If both A and B are empty, there are no valid values. */
398 if (a_empty
&& b_empty
)
401 /* If exactly one of A or B is empty, return a VR_RANGE for the
403 if (a_empty
|| b_empty
)
407 gcc_checking_assert (wi::le_p (*min
, *max
, sgn
));
411 /* Update the VR_ANTI_RANGE bounds. */
414 gcc_checking_assert (wi::le_p (*min
, *max
, sgn
));
416 /* Now check whether the excluded range includes any values that
417 satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */
418 if (wi::round_up_for_mask (*min
, nonzero_bits
) == b_min
)
420 unsigned int precision
= min
->get_precision ();
421 *min
= wi::min_value (precision
, sgn
);
422 *max
= wi::max_value (precision
, sgn
);
426 if (vr_type
== VR_RANGE
|| vr_type
== VR_VARYING
)
428 *max
= wi::round_down_for_mask (*max
, nonzero_bits
);
430 /* Check that the range contains at least one valid value. */
431 if (wi::gt_p (*min
, *max
, sgn
))
434 *min
= wi::round_up_for_mask (*min
, nonzero_bits
);
435 gcc_checking_assert (wi::le_p (*min
, *max
, sgn
));
440 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
441 otherwise. We only handle additive operations and set NEG to true if the
442 symbol is negated and INV to the invariant part, if any. */
445 get_single_symbol (tree t
, bool *neg
, tree
*inv
)
453 if (TREE_CODE (t
) == PLUS_EXPR
454 || TREE_CODE (t
) == POINTER_PLUS_EXPR
455 || TREE_CODE (t
) == MINUS_EXPR
)
457 if (is_gimple_min_invariant (TREE_OPERAND (t
, 0)))
459 neg_
= (TREE_CODE (t
) == MINUS_EXPR
);
460 inv_
= TREE_OPERAND (t
, 0);
461 t
= TREE_OPERAND (t
, 1);
463 else if (is_gimple_min_invariant (TREE_OPERAND (t
, 1)))
466 inv_
= TREE_OPERAND (t
, 1);
467 t
= TREE_OPERAND (t
, 0);
478 if (TREE_CODE (t
) == NEGATE_EXPR
)
480 t
= TREE_OPERAND (t
, 0);
484 if (TREE_CODE (t
) != SSA_NAME
)
487 if (inv_
&& TREE_OVERFLOW_P (inv_
))
488 inv_
= drop_tree_overflow (inv_
);
495 /* Compare two values VAL1 and VAL2. Return
497 -2 if VAL1 and VAL2 cannot be compared at compile-time,
500 +1 if VAL1 > VAL2, and
503 This is similar to tree_int_cst_compare but supports pointer values
504 and values that cannot be compared at compile time.
506 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
507 true if the return value is only valid if we assume that signed
508 overflow is undefined. */
511 compare_values_warnv (tree val1
, tree val2
, bool *strict_overflow_p
)
516 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
518 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1
))
519 == POINTER_TYPE_P (TREE_TYPE (val2
)));
521 /* Convert the two values into the same type. This is needed because
522 sizetype causes sign extension even for unsigned types. */
523 if (!useless_type_conversion_p (TREE_TYPE (val1
), TREE_TYPE (val2
)))
524 val2
= fold_convert (TREE_TYPE (val1
), val2
);
526 const bool overflow_undefined
527 = INTEGRAL_TYPE_P (TREE_TYPE (val1
))
528 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1
));
531 tree sym1
= get_single_symbol (val1
, &neg1
, &inv1
);
532 tree sym2
= get_single_symbol (val2
, &neg2
, &inv2
);
534 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
535 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
538 /* Both values must use the same name with the same sign. */
539 if (sym1
!= sym2
|| neg1
!= neg2
)
542 /* [-]NAME + CST == [-]NAME + CST. */
546 /* If overflow is defined we cannot simplify more. */
547 if (!overflow_undefined
)
550 if (strict_overflow_p
!= NULL
551 /* Symbolic range building sets the no-warning bit to declare
552 that overflow doesn't happen. */
553 && (!inv1
|| !warning_suppressed_p (val1
, OPT_Woverflow
))
554 && (!inv2
|| !warning_suppressed_p (val2
, OPT_Woverflow
)))
555 *strict_overflow_p
= true;
558 inv1
= build_int_cst (TREE_TYPE (val1
), 0);
560 inv2
= build_int_cst (TREE_TYPE (val2
), 0);
562 return wi::cmp (wi::to_wide (inv1
), wi::to_wide (inv2
),
563 TYPE_SIGN (TREE_TYPE (val1
)));
566 const bool cst1
= is_gimple_min_invariant (val1
);
567 const bool cst2
= is_gimple_min_invariant (val2
);
569 /* If one is of the form '[-]NAME + CST' and the other is constant, then
570 it might be possible to say something depending on the constants. */
571 if ((sym1
&& inv1
&& cst2
) || (sym2
&& inv2
&& cst1
))
573 if (!overflow_undefined
)
576 if (strict_overflow_p
!= NULL
577 /* Symbolic range building sets the no-warning bit to declare
578 that overflow doesn't happen. */
579 && (!sym1
|| !warning_suppressed_p (val1
, OPT_Woverflow
))
580 && (!sym2
|| !warning_suppressed_p (val2
, OPT_Woverflow
)))
581 *strict_overflow_p
= true;
583 const signop sgn
= TYPE_SIGN (TREE_TYPE (val1
));
584 tree cst
= cst1
? val1
: val2
;
585 tree inv
= cst1
? inv2
: inv1
;
587 /* Compute the difference between the constants. If it overflows or
588 underflows, this means that we can trivially compare the NAME with
589 it and, consequently, the two values with each other. */
590 wide_int diff
= wi::to_wide (cst
) - wi::to_wide (inv
);
591 if (wi::cmp (0, wi::to_wide (inv
), sgn
)
592 != wi::cmp (diff
, wi::to_wide (cst
), sgn
))
594 const int res
= wi::cmp (wi::to_wide (cst
), wi::to_wide (inv
), sgn
);
595 return cst1
? res
: -res
;
601 /* We cannot say anything more for non-constants. */
605 if (!POINTER_TYPE_P (TREE_TYPE (val1
)))
607 /* We cannot compare overflowed values. */
608 if (TREE_OVERFLOW (val1
) || TREE_OVERFLOW (val2
))
611 if (TREE_CODE (val1
) == INTEGER_CST
612 && TREE_CODE (val2
) == INTEGER_CST
)
613 return tree_int_cst_compare (val1
, val2
);
615 if (poly_int_tree_p (val1
) && poly_int_tree_p (val2
))
617 if (known_eq (wi::to_poly_widest (val1
),
618 wi::to_poly_widest (val2
)))
620 if (known_lt (wi::to_poly_widest (val1
),
621 wi::to_poly_widest (val2
)))
623 if (known_gt (wi::to_poly_widest (val1
),
624 wi::to_poly_widest (val2
)))
632 if (TREE_CODE (val1
) == INTEGER_CST
&& TREE_CODE (val2
) == INTEGER_CST
)
634 /* We cannot compare overflowed values. */
635 if (TREE_OVERFLOW (val1
) || TREE_OVERFLOW (val2
))
638 return tree_int_cst_compare (val1
, val2
);
641 /* First see if VAL1 and VAL2 are not the same. */
642 if (operand_equal_p (val1
, val2
, 0))
645 fold_defer_overflow_warnings ();
647 /* If VAL1 is a lower address than VAL2, return -1. */
648 tree t
= fold_binary_to_constant (LT_EXPR
, boolean_type_node
, val1
, val2
);
649 if (t
&& integer_onep (t
))
651 fold_undefer_and_ignore_overflow_warnings ();
655 /* If VAL1 is a higher address than VAL2, return +1. */
656 t
= fold_binary_to_constant (LT_EXPR
, boolean_type_node
, val2
, val1
);
657 if (t
&& integer_onep (t
))
659 fold_undefer_and_ignore_overflow_warnings ();
663 /* If VAL1 is different than VAL2, return +2. */
664 t
= fold_binary_to_constant (NE_EXPR
, boolean_type_node
, val1
, val2
);
665 fold_undefer_and_ignore_overflow_warnings ();
666 if (t
&& integer_onep (t
))
673 /* Compare values like compare_values_warnv. */
676 compare_values (tree val1
, tree val2
)
679 return compare_values_warnv (val1
, val2
, &sop
);
682 /* Helper for overflow_comparison_p
684 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
685 OP1's defining statement to see if it ultimately has the form
686 OP0 CODE (OP0 PLUS INTEGER_CST)
688 If so, return TRUE indicating this is an overflow test and store into
689 *NEW_CST an updated constant that can be used in a narrowed range test.
691 REVERSED indicates if the comparison was originally:
695 This affects how we build the updated constant. */
698 overflow_comparison_p_1 (enum tree_code code
, tree op0
, tree op1
,
699 bool reversed
, tree
*new_cst
)
701 /* See if this is a relational operation between two SSA_NAMES with
702 unsigned, overflow wrapping values. If so, check it more deeply. */
703 if ((code
== LT_EXPR
|| code
== LE_EXPR
704 || code
== GE_EXPR
|| code
== GT_EXPR
)
705 && TREE_CODE (op0
) == SSA_NAME
706 && TREE_CODE (op1
) == SSA_NAME
707 && INTEGRAL_TYPE_P (TREE_TYPE (op0
))
708 && TYPE_UNSIGNED (TREE_TYPE (op0
))
709 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0
)))
711 gimple
*op1_def
= SSA_NAME_DEF_STMT (op1
);
713 /* Now look at the defining statement of OP1 to see if it adds
714 or subtracts a nonzero constant from another operand. */
716 && is_gimple_assign (op1_def
)
717 && gimple_assign_rhs_code (op1_def
) == PLUS_EXPR
718 && TREE_CODE (gimple_assign_rhs2 (op1_def
)) == INTEGER_CST
719 && !integer_zerop (gimple_assign_rhs2 (op1_def
)))
721 tree target
= gimple_assign_rhs1 (op1_def
);
723 /* If we did not find our target SSA_NAME, then this is not
728 tree type
= TREE_TYPE (op0
);
729 wide_int max
= wi::max_value (TYPE_PRECISION (type
), UNSIGNED
);
730 tree inc
= gimple_assign_rhs2 (op1_def
);
732 *new_cst
= wide_int_to_tree (type
, max
+ wi::to_wide (inc
));
734 *new_cst
= wide_int_to_tree (type
, max
- wi::to_wide (inc
));
741 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
742 OP1's defining statement to see if it ultimately has the form
743 OP0 CODE (OP0 PLUS INTEGER_CST)
745 If so, return TRUE indicating this is an overflow test and store into
746 *NEW_CST an updated constant that can be used in a narrowed range test.
748 These statements are left as-is in the IL to facilitate discovery of
749 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
750 the alternate range representation is often useful within VRP. */
753 overflow_comparison_p (tree_code code
, tree name
, tree val
, tree
*new_cst
)
755 if (overflow_comparison_p_1 (code
, name
, val
, false, new_cst
))
757 return overflow_comparison_p_1 (swap_tree_comparison (code
), val
, name
,
761 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
762 that includes the value VAL. The search is restricted to the range
763 [START_IDX, n - 1] where n is the size of VEC.
765 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
768 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
769 it is placed in IDX and false is returned.
771 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
775 find_case_label_index (gswitch
*stmt
, size_t start_idx
, tree val
, size_t *idx
)
777 size_t n
= gimple_switch_num_labels (stmt
);
780 /* Find case label for minimum of the value range or the next one.
781 At each iteration we are searching in [low, high - 1]. */
783 for (low
= start_idx
, high
= n
; high
!= low
; )
787 /* Note that i != high, so we never ask for n. */
788 size_t i
= (high
+ low
) / 2;
789 t
= gimple_switch_label (stmt
, i
);
791 /* Cache the result of comparing CASE_LOW and val. */
792 cmp
= tree_int_cst_compare (CASE_LOW (t
), val
);
796 /* Ranges cannot be empty. */
805 if (CASE_HIGH (t
) != NULL
806 && tree_int_cst_compare (CASE_HIGH (t
), val
) >= 0)
818 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
819 for values between MIN and MAX. The first index is placed in MIN_IDX. The
820 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
821 then MAX_IDX < MIN_IDX.
822 Returns true if the default label is not needed. */
825 find_case_label_range (gswitch
*stmt
, tree min
, tree max
, size_t *min_idx
,
829 bool min_take_default
= !find_case_label_index (stmt
, 1, min
, &i
);
830 bool max_take_default
= !find_case_label_index (stmt
, i
, max
, &j
);
836 /* Only the default case label reached.
837 Return an empty range. */
844 bool take_default
= min_take_default
|| max_take_default
;
848 if (max_take_default
)
851 /* If the case label range is continuous, we do not need
852 the default case label. Verify that. */
853 high
= CASE_LOW (gimple_switch_label (stmt
, i
));
854 if (CASE_HIGH (gimple_switch_label (stmt
, i
)))
855 high
= CASE_HIGH (gimple_switch_label (stmt
, i
));
856 for (k
= i
+ 1; k
<= j
; ++k
)
858 low
= CASE_LOW (gimple_switch_label (stmt
, k
));
859 if (!integer_onep (int_const_binop (MINUS_EXPR
, low
, high
)))
865 if (CASE_HIGH (gimple_switch_label (stmt
, k
)))
866 high
= CASE_HIGH (gimple_switch_label (stmt
, k
));
871 return !take_default
;
875 /* Given a SWITCH_STMT, return the case label that encompasses the
876 known possible values for the switch operand. RANGE_OF_OP is a
877 range for the known values of the switch operand. */
880 find_case_label_range (gswitch
*switch_stmt
, const irange
*range_of_op
)
882 if (range_of_op
->undefined_p ()
883 || range_of_op
->varying_p ())
887 tree op
= gimple_switch_index (switch_stmt
);
888 tree type
= TREE_TYPE (op
);
889 tree tmin
= wide_int_to_tree (type
, range_of_op
->lower_bound ());
890 tree tmax
= wide_int_to_tree (type
, range_of_op
->upper_bound ());
891 find_case_label_range (switch_stmt
, tmin
, tmax
, &i
, &j
);
894 /* Look for exactly one label that encompasses the range of
896 tree label
= gimple_switch_label (switch_stmt
, i
);
898 = CASE_HIGH (label
) ? CASE_HIGH (label
) : CASE_LOW (label
);
899 wide_int wlow
= wi::to_wide (CASE_LOW (label
));
900 wide_int whigh
= wi::to_wide (case_high
);
901 int_range_max
label_range (TREE_TYPE (case_high
), wlow
, whigh
);
902 if (!types_compatible_p (label_range
.type (), range_of_op
->type ()))
903 range_cast (label_range
, range_of_op
->type ());
904 label_range
.intersect (*range_of_op
);
905 if (label_range
== *range_of_op
)
910 /* If there are no labels at all, take the default. */
911 return gimple_switch_label (switch_stmt
, 0);
915 /* Otherwise, there are various labels that can encompass
916 the range of operand. In which case, see if the range of
917 the operand is entirely *outside* the bounds of all the
918 (non-default) case labels. If so, take the default. */
919 unsigned n
= gimple_switch_num_labels (switch_stmt
);
920 tree min_label
= gimple_switch_label (switch_stmt
, 1);
921 tree max_label
= gimple_switch_label (switch_stmt
, n
- 1);
922 tree case_high
= CASE_HIGH (max_label
);
924 case_high
= CASE_LOW (max_label
);
925 int_range_max
label_range (TREE_TYPE (CASE_LOW (min_label
)),
926 wi::to_wide (CASE_LOW (min_label
)),
927 wi::to_wide (case_high
));
928 if (!types_compatible_p (label_range
.type (), range_of_op
->type ()))
929 range_cast (label_range
, range_of_op
->type ());
930 label_range
.intersect (*range_of_op
);
931 if (label_range
.undefined_p ())
932 return gimple_switch_label (switch_stmt
, 0);
943 // This is a ranger based folder which continues to use the dominator
944 // walk to access the substitute and fold machinery. Ranges are calculated
947 class rvrp_folder
: public substitute_and_fold_engine
951 rvrp_folder (gimple_ranger
*r
, bool all
)
952 : substitute_and_fold_engine (),
953 m_unreachable (*r
, all
),
954 m_simplifier (r
, r
->non_executable_edge_flag
)
957 m_pta
= new pointer_equiv_analyzer (m_ranger
);
958 m_last_bb_stmt
= NULL
;
966 tree
value_of_expr (tree name
, gimple
*s
= NULL
) override
968 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
969 if (TREE_CODE (name
) == SSA_NAME
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name
))
971 tree ret
= m_ranger
->value_of_expr (name
, s
);
972 if (!ret
&& supported_pointer_equiv_p (name
))
973 ret
= m_pta
->get_equiv (name
);
977 tree
value_on_edge (edge e
, tree name
) override
979 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
980 if (TREE_CODE (name
) == SSA_NAME
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name
))
982 tree ret
= m_ranger
->value_on_edge (e
, name
);
983 if (!ret
&& supported_pointer_equiv_p (name
))
984 ret
= m_pta
->get_equiv (name
);
988 tree
value_of_stmt (gimple
*s
, tree name
= NULL
) override
990 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
991 if (TREE_CODE (name
) == SSA_NAME
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name
))
993 return m_ranger
->value_of_stmt (s
, name
);
996 void pre_fold_bb (basic_block bb
) override
999 for (gphi_iterator gsi
= gsi_start_phis (bb
); !gsi_end_p (gsi
);
1001 m_ranger
->register_inferred_ranges (gsi
.phi ());
1002 m_last_bb_stmt
= last_nondebug_stmt (bb
);
1005 void post_fold_bb (basic_block bb
) override
1010 void pre_fold_stmt (gimple
*stmt
) override
1012 m_pta
->visit_stmt (stmt
);
1013 // If this is the last stmt and there are inferred ranges, reparse the
1014 // block for transitive inferred ranges that occur earlier in the block.
1015 if (stmt
== m_last_bb_stmt
)
1017 m_ranger
->register_transitive_inferred_ranges (gimple_bb (stmt
));
1018 // Also check for builtin_unreachable calls.
1019 if (cfun
->after_inlining
&& gimple_code (stmt
) == GIMPLE_COND
)
1020 m_unreachable
.maybe_register (stmt
);
1024 bool fold_stmt (gimple_stmt_iterator
*gsi
) override
1026 bool ret
= m_simplifier
.simplify (gsi
);
1028 ret
= m_ranger
->fold_stmt (gsi
, follow_single_use_edges
);
1029 m_ranger
->register_inferred_ranges (gsi_stmt (*gsi
));
1033 remove_unreachable m_unreachable
;
1035 DISABLE_COPY_AND_ASSIGN (rvrp_folder
);
1036 gimple_ranger
*m_ranger
;
1037 simplify_using_ranges m_simplifier
;
1038 pointer_equiv_analyzer
*m_pta
;
1039 gimple
*m_last_bb_stmt
;
1042 /* Main entry point for a VRP pass using just ranger. This can be called
1043 from anywhere to perform a VRP pass, including from EVRP. */
1046 execute_ranger_vrp (struct function
*fun
, bool warn_array_bounds_p
,
1049 loop_optimizer_init (LOOPS_NORMAL
| LOOPS_HAVE_RECORDED_EXITS
);
1050 rewrite_into_loop_closed_ssa (NULL
, TODO_update_ssa
);
1052 calculate_dominance_info (CDI_DOMINATORS
);
1054 set_all_edges_as_executable (fun
);
1055 gimple_ranger
*ranger
= enable_ranger (fun
, false);
1056 rvrp_folder
folder (ranger
, final_p
);
1057 phi_analysis_initialize (ranger
->const_query ());
1058 folder
.substitute_and_fold ();
1059 // Remove tagged builtin-unreachable and maybe update globals.
1060 folder
.m_unreachable
.remove_and_update_globals ();
1061 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1062 ranger
->dump (dump_file
);
1064 if ((warn_array_bounds
|| warn_strict_flex_arrays
) && warn_array_bounds_p
)
1066 // Set all edges as executable, except those ranger says aren't.
1067 int non_exec_flag
= ranger
->non_executable_edge_flag
;
1069 FOR_ALL_BB_FN (bb
, fun
)
1073 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
1074 if (e
->flags
& non_exec_flag
)
1075 e
->flags
&= ~EDGE_EXECUTABLE
;
1077 e
->flags
|= EDGE_EXECUTABLE
;
1080 array_bounds_checker
array_checker (fun
, ranger
);
1081 array_checker
.check ();
1084 phi_analysis_finalize ();
1085 disable_ranger (fun
);
1087 loop_optimizer_finalize ();
// Implement a Fast VRP folder.  Not quite as effective but faster.
//
// This folder drives substitute_and_fold with a dominator-based ranger
// (dom_ranger) instead of the full on-demand ranger: ranges are computed in
// a single dominator-order walk (pre_fold_bb/post_fold_bb), which is cheaper
// but less precise.

class fvrp_folder : public substitute_and_fold_engine
{
public:
  // DR supplies all range queries; it is also handed to the statement
  // simplifier.  The folder does not own DR.
  fvrp_folder (dom_ranger *dr) : substitute_and_fold_engine (),
				 m_simplifier (dr)
  { m_dom_ranger = dr; }

  // Return a constant NAME folds to at statement S, or NULL_TREE.
  tree value_of_expr (tree name, gimple *s = NULL) override
  {
    // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
    if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
      return NULL;
    return m_dom_ranger->value_of_expr (name, s);
  }

  // Return a constant NAME folds to on edge E, or NULL_TREE.
  tree value_on_edge (edge e, tree name) override
  {
    // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
    if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
      return NULL;
    return m_dom_ranger->value_on_edge (e, name);
  }

  // Return a constant the result of S (or NAME) folds to, or NULL_TREE.
  tree value_of_stmt (gimple *s, tree name = NULL) override
  {
    // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
    if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
      return NULL;
    return m_dom_ranger->value_of_stmt (s, name);
  }

  // Called before folding each block: let the dom_ranger set up incoming
  // state for BB, then prime the cache by evaluating every PHI result.
  void pre_fold_bb (basic_block bb) override
  {
    m_dom_ranger->pre_bb (bb);
    // Now process the PHIs in advance.
    gphi_iterator psi = gsi_start_phis (bb);
    for ( ; !gsi_end_p (psi); gsi_next (&psi))
      {
	// Only PHI results which are supported SSA names get a range.
	tree name = gimple_range_ssa_p (PHI_RESULT (psi.phi ()));
	if (name)
	  {
	    Value_Range vr (TREE_TYPE (name));
	    m_dom_ranger->range_of_stmt (vr, psi.phi (), name);
	  }
      }
  }

  // Called after folding each block; release/finalize BB state.
  void post_fold_bb (basic_block bb) override
  {
    m_dom_ranger->post_bb (bb);
  }

  // Called before folding each statement.
  void pre_fold_stmt (gimple *s) override
  {
    // Ensure range_of_stmt has been called.
    tree type = gimple_range_type (s);
    if (type)
      {
	Value_Range vr (type);
	m_dom_ranger->range_of_stmt (vr, s);
      }
  }

  // Try to simplify the statement at GSI using known ranges; fall back to
  // generic statement folding.  Returns true if anything changed.
  bool fold_stmt (gimple_stmt_iterator *gsi) override
  {
    bool ret = m_simplifier.simplify (gsi);
    if (!ret)
      ret = ::fold_stmt (gsi, follow_single_use_edges);
    return ret;
  }

private:
  DISABLE_COPY_AND_ASSIGN (fvrp_folder);
  simplify_using_ranges m_simplifier;	// Range-based statement simplifier.
  dom_ranger *m_dom_ranger;		// Range source (not owned).
};
// Main entry point for a FAST VRP pass using a dom ranger.
//
// Installs a dom_ranger as FUN's range query, runs the fvrp_folder over the
// whole function via substitute_and_fold, then removes the query again.

unsigned int
execute_fast_vrp (struct function *fun)
{
  // dom_ranger walks in dominator order, so dominators must be available.
  calculate_dominance_info (CDI_DOMINATORS);
  dom_ranger dr;
  fvrp_folder folder (&dr);

  // No other range query may be active on FUN while this one is installed.
  gcc_checking_assert (!fun->x_range_query);
  fun->x_range_query = &dr;

  folder.substitute_and_fold ();

  // Detach the stack-local query before it goes out of scope.
  fun->x_range_query = NULL;
  return 0;
}
// Pass descriptor for the main (ranger-based) VRP pass.
const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};
// Pass descriptor for the early VRP pass (runs before inlining).
const pass_data pass_data_early_vrp =
{
  GIMPLE_PASS, /* type */
  "evrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EARLY_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ), /* todo_flags_finish */
};
// Pass descriptor for the fast (dom_ranger-based) VRP pass.
const pass_data pass_data_fast_vrp =
{
  GIMPLE_PASS, /* type */
  "fvrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_FAST_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ), /* todo_flags_finish */
};
// Pass wrapper shared by the vrp, early-vrp and fast-vrp pass variants.
// The bound pass_data reference selects which engine execute() dispatches to.

class pass_vrp : public gimple_opt_pass
{
public:
  // CTXT is the compiler context, DATA_ one of the pass_data descriptors
  // above, WARN_P whether array-bounds warnings should be issued.
  pass_vrp (gcc::context *ctxt, const pass_data &data_, bool warn_p)
    : gimple_opt_pass (data_, ctxt), data (data_),
      warn_array_bounds_p (warn_p), final_p (false)
  { }

  /* opt_pass methods: */
  // Clones never warn; only the first instance does (see make_pass_vrp).
  opt_pass * clone () final override
    { return new pass_vrp (m_ctxt, data, false); }

  // Parameter 0 marks this as the final VRP invocation in the pipeline.
  void set_pass_param (unsigned int n, bool param) final override
    {
      gcc_assert (n == 0);
      final_p = param;
    }

  bool gate (function *) final override { return flag_tree_vrp != 0; }

  unsigned int execute (function *fun) final override
    {
      // Check for fast vrp.
      if (&data == &pass_data_fast_vrp)
	return execute_fast_vrp (fun);

      return execute_ranger_vrp (fun, warn_array_bounds_p, final_p);
    }

private:
  const pass_data &data;	// Identifies which VRP variant this is.
  bool warn_array_bounds_p;	// Issue -Warray-bounds style warnings.
  bool final_p;			// True for the last VRP pass of the pipeline.
}; // class pass_vrp
// Pass descriptor for processing __builtin_assume functions.
const pass_data pass_data_assumptions =
{
  GIMPLE_PASS, /* type */
  "assumptions", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_ASSUMPTIONS, /* tv_id */
  PROP_ssa, /* properties_required */
  PROP_assumptions_done, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_end */
};
// Pass which analyzes an "assume" function body: for each parameter, work
// out the range it must have for the function to return true, record that
// as the parameter's global range, and then discard the function body.

class pass_assumptions : public gimple_opt_pass
{
public:
  pass_assumptions (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_assumptions, ctxt)
  {}

  /* opt_pass methods: */
  // Only run on functions marked as assume functions.
  bool gate (function *fun) final override { return fun->assume_function; }

  unsigned int execute (function *) final override
    {
      assume_query query;
      if (dump_file)
	fprintf (dump_file, "Assumptions :\n--------------\n");

      // Walk every parameter's default definition and compute the range it
      // must have on a TRUE return.
      for (tree arg = DECL_ARGUMENTS (cfun->decl); arg; arg = DECL_CHAIN (arg))
	{
	  tree name = ssa_default_def (cfun, arg);
	  if (!name || !gimple_range_ssa_p (name))
	    continue;
	  tree type = TREE_TYPE (name);
	  if (!Value_Range::supports_type_p (type))
	    continue;
	  Value_Range assume_range (type);
	  if (query.assume_range_p (assume_range, name))
	    {
	      // Set the global range of NAME to anything calculated.
	      set_range_info (name, assume_range);
	      if (dump_file)
		{
		  print_generic_expr (dump_file, name, TDF_SLIM);
		  fprintf (dump_file, " -> ");
		  assume_range.dump (dump_file);
		  fputc ('\n', dump_file);
		}
	    }
	}
      if (dump_file)
	{
	  fputc ('\n', dump_file);
	  gimple_dump_cfg (dump_file, dump_flags & ~TDF_DETAILS);
	  if (dump_flags & TDF_DETAILS)
	    query.dump (dump_file);
	}
      // The body has served its purpose; only the recorded ranges survive.
      return TODO_discard_function;
    }

}; // class pass_assumptions
// Factory for the main VRP pass.  Only this instance warns (warn_p = true);
// clones created later in the pipeline do not (see pass_vrp::clone).
gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt, pass_data_vrp, true);
}
// Factory for the early VRP pass (no warnings issued).
gimple_opt_pass *
make_pass_early_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt, pass_data_early_vrp, false);
}
// Factory for the fast VRP pass (no warnings issued).
gimple_opt_pass *
make_pass_fast_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt, pass_data_fast_vrp, false);
}
// Factory for the assumptions-processing pass.
gimple_opt_pass *
make_pass_assumptions (gcc::context *ctx)
{
  return new pass_assumptions (ctx);
}