/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2023 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "basic-block.h"
#include "dominance.h"
#include "tree-pass.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-into-ssa.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "vr-values.h"
#include "gimple-array-bounds.h"
#include "gimple-range.h"
#include "gimple-range-path.h"
#include "value-pointer-equiv.h"
#include "gimple-fold.h"
#include "tree-ssa-dce.h"
// This class is used by VRP and ranger to remove __builtin_unreachable
// calls, and reflect any resulting global ranges.
//
// maybe_register_block () is called on basic blocks, and if a block
// matches the pattern of one branch leading to a __builtin_unreachable
// call, the resulting executable edge is registered in a list.
//
// After all blocks have been processed, remove_and_update_globals () will
// - check all exports from registered blocks
// - ensure the cache entry of each export is set with the appropriate range
// - rewrite the conditions to take the executable edge
// - perform DCE on any feeding instructions to those rewritten conditions
//
// Then the immediate use chain of each export is walked, and a new global
// range is created by unioning the ranges at all remaining use locations.
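//
// A hypothetical example of the kind of block this class registers (SSA
// names and block numbers are invented for illustration only):
//
//     <bb 2> :
//     if (i_1(D) > 100)
//       goto <bb 3>;        // branch leading to __builtin_unreachable ()
//     else
//       goto <bb 4>;        // executable edge recorded in m_list
//
//     <bb 3> :
//     __builtin_unreachable ();
//
// After processing, the condition in bb 2 is rewritten to always take the
// edge to bb 4, and the global range of i_1 can be narrowed to reflect
// i_1 <= 100 at every remaining use.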
class remove_unreachable {
public:
  remove_unreachable (gimple_ranger &r) : m_ranger (r) { m_list.create (30); }
  ~remove_unreachable () { m_list.release (); }
  void maybe_register_block (basic_block bb);
  bool remove_and_update_globals (bool final_p);
  vec<std::pair<int, int> > m_list;
  gimple_ranger &m_ranger;
};
// Check if block BB has a __builtin_unreachable () call on one arm, and
// register the executable edge if so.

void
remove_unreachable::maybe_register_block (basic_block bb)
{
  gimple *s = gimple_outgoing_range_stmt_p (bb);
  if (!s || gimple_code (s) != GIMPLE_COND)
    return;

  edge e0 = EDGE_SUCC (bb, 0);
  basic_block bb0 = e0->dest;
  bool un0 = EDGE_COUNT (bb0->succs) == 0
	     && gimple_seq_unreachable_p (bb_seq (bb0));
  edge e1 = EDGE_SUCC (bb, 1);
  basic_block bb1 = e1->dest;
  bool un1 = EDGE_COUNT (bb1->succs) == 0
	     && gimple_seq_unreachable_p (bb_seq (bb1));

  // If the 2 blocks are not different, ignore.
  if (un0 == un1)
    return;

  if (un0)
    m_list.safe_push (std::make_pair (e1->src->index, e1->dest->index));
  else
    m_list.safe_push (std::make_pair (e0->src->index, e0->dest->index));
}
// Process the edges in the list, changing the conditions and removing any
// dead code feeding those conditions.  Calculate the range of any
// names that may have been exported from those blocks, and determine if
// there are any updates to their global ranges.
// FINAL_P indicates all builtin_unreachable calls should be removed.
// Return true if any builtin_unreachables/globals were eliminated/updated.

bool
remove_unreachable::remove_and_update_globals (bool final_p)
{
  if (m_list.length () == 0)
    return false;

  // Ensure the cache in SCEV has been cleared before processing
  // globals to be removed.
  scev_reset ();

  bool change = false;
  tree name;
  unsigned i;
  bitmap_iterator bi;
  auto_bitmap all_exports;
  for (i = 0; i < m_list.length (); i++)
    {
      auto eb = m_list[i];
      basic_block src = BASIC_BLOCK_FOR_FN (cfun, eb.first);
      basic_block dest = BASIC_BLOCK_FOR_FN (cfun, eb.second);
      if (!src || !dest)
	continue;
      edge e = find_edge (src, dest);
      gimple *s = gimple_outgoing_range_stmt_p (e->src);
      gcc_checking_assert (gimple_code (s) == GIMPLE_COND);
      bool lhs_p = TREE_CODE (gimple_cond_lhs (s)) == SSA_NAME;
      bool rhs_p = TREE_CODE (gimple_cond_rhs (s)) == SSA_NAME;
      // Do not remove __builtin_unreachable if it confers a relation, or
      // that relation will be lost in subsequent passes.  Unless it's the
      // final pass.
      if (!final_p && lhs_p && rhs_p)
	continue;
      // If this is already a constant condition, don't look either.
      if (!lhs_p && !rhs_p)
	continue;
      // Do not remove addresses early, i.e. "if (x == &y)".
      if (!final_p && lhs_p && TREE_CODE (gimple_cond_rhs (s)) == ADDR_EXPR)
	continue;

      bool dominate_exit_p = true;
      FOR_EACH_GORI_EXPORT_NAME (m_ranger.gori (), e->src, name)
	{
	  // Ensure the cache is set for NAME in the succ block.
	  Value_Range r (TREE_TYPE (name));
	  Value_Range ex (TREE_TYPE (name));
	  m_ranger.range_on_entry (r, e->dest, name);
	  m_ranger.range_on_entry (ex, EXIT_BLOCK_PTR_FOR_FN (cfun), name);
	  // If the range produced by this __builtin_unreachable expression
	  // is not fully reflected in the range at exit, then it does not
	  // dominate the exit of the function.
	  if (ex.intersect (r))
	    dominate_exit_p = false;
	}

      // If the exit is dominated, add to the export list.  Otherwise if this
      // isn't the final VRP pass, leave the call in the IL.
      if (dominate_exit_p)
	bitmap_ior_into (all_exports, m_ranger.gori ().exports (e->src));
      else if (!final_p)
	continue;

      change = true;
      // Rewrite the condition.
      if (e->flags & EDGE_TRUE_VALUE)
	gimple_cond_make_true (as_a<gcond *> (s));
      else
	gimple_cond_make_false (as_a<gcond *> (s));
      update_stmt (s);
    }

  if (bitmap_empty_p (all_exports))
    return false;
  // Invoke DCE on all exported names to eliminate dead feeding defs.
  auto_bitmap dce;
  bitmap_copy (dce, all_exports);
  // Don't attempt to DCE parameters.
  EXECUTE_IF_SET_IN_BITMAP (all_exports, 0, i, bi)
    if (!ssa_name (i) || SSA_NAME_IS_DEFAULT_DEF (ssa_name (i)))
      bitmap_clear_bit (dce, i);
  simple_dce_from_worklist (dce);

  // Loop over all uses of each name and find maximal range.  This is the
  // new global range for the name.
  use_operand_p use_p;
  imm_use_iterator iter;
  EXECUTE_IF_SET_IN_BITMAP (all_exports, 0, i, bi)
    {
      name = ssa_name (i);
      if (!name || SSA_NAME_IN_FREE_LIST (name))
	continue;
      Value_Range r (TREE_TYPE (name));
      Value_Range exp_range (TREE_TYPE (name));
      r.set_undefined ();
      FOR_EACH_IMM_USE_FAST (use_p, iter, name)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;
	  if (!m_ranger.range_of_expr (exp_range, name, use_stmt))
	    exp_range.set_varying (TREE_TYPE (name));
	  r.union_ (exp_range);
	}
      // Include the on-exit range to ensure non-dominated unreachables
      // don't incorrectly impact the global range.
      m_ranger.range_on_entry (exp_range, EXIT_BLOCK_PTR_FOR_FN (cfun), name);
      r.union_ (exp_range);
      if (r.varying_p () || r.undefined_p ())
	continue;
      if (!set_range_info (name, r))
	continue;
      change = true;
      if (dump_file)
	{
	  fprintf (dump_file, "Global Exported (via unreachable): ");
	  print_generic_expr (dump_file, name, TDF_SLIM);
	  fprintf (dump_file, " = ");
	  gimple_range_global (r, name);
	  r.dump (dump_file);
	  fputc ('\n', dump_file);
	}
    }
  return change;
}
/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */
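/* A small worked example of the intent (values invented for illustration):
   with SGN == UNSIGNED, *MIN == 1, *MAX == 14 and NONZERO_BITS == 0xc, the
   values in [1, 14] with no bits set outside 0xc are {4, 8, 12}, so the
   bounds become *MIN == 4 and *MAX == 12, and for a VR_RANGE input the
   range kind is returned unchanged.  */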
enum value_range_kind
intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
				   wide_int *min, wide_int *max,
				   const wide_int &nonzero_bits,
				   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
	 A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
	 to create an inclusive upper bound for A and an inclusive lower
	 bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
	 and A_MAX is the highest value that satisfies NONZERO_BITS.
	 Likewise if the calculation of B_MIN wrapped, B is effectively
	 empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
	return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
	 other one.  */
      if (a_empty || b_empty)
	{
	  *min = b_min;
	  *max = a_max;
	  gcc_checking_assert (wi::le_p (*min, *max, sgn));
	  return VR_RANGE;
	}

      /* Update the VR_ANTI_RANGE bounds.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
	 satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
	{
	  unsigned int precision = min->get_precision ();
	  *min = wi::min_value (precision, sgn);
	  *max = wi::max_value (precision, sgn);
	  vr_type = VR_RANGE;
	}
    }
  if (vr_type == VR_RANGE || vr_type == VR_VARYING)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
	return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}
/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */
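/* Purely illustrative examples of the decomposition (names invented):
   for T == x_1 + 16 this returns x_1 with *NEG == false and *INV == 16;
   for T == 16 - x_1 it returns x_1 with *NEG == true and *INV == 16;
   and for T == -x_1 it returns x_1 with *NEG == true and *INV == NULL_TREE.  */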
static tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
	{
	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
	  inv_ = TREE_OPERAND (t, 0);
	  t = TREE_OPERAND (t, 1);
	}
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	{
	  neg_ = false;
	  inv_ = TREE_OPERAND (t, 1);
	  t = TREE_OPERAND (t, 0);
	}
      else
	return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}
/* Compare two values VAL1 and VAL2.  Return

	-2 if VAL1 and VAL2 cannot be compared at compile-time,
	-1 if VAL1 < VAL2,
	 0 if VAL1 == VAL2,
	+1 if VAL1 > VAL2, and
	+2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */
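/* An illustrative use (names and constants invented): comparing n_1 + 8
   with n_1 + 24, where n_1 has a signed type with undefined overflow,
   reduces to comparing the invariants 8 and 24 and yields -1; because that
   conclusion relies on overflow being undefined, *STRICT_OVERFLOW_P is set
   to true.  */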
static int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
    val2 = fold_convert (TREE_TYPE (val1), val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  bool neg1, neg2;
  tree inv1, inv2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
	return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if ((inv1 && inv2 && operand_equal_p (inv1, inv2, 0))
	  || (!inv1 && !inv2))
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets the no-warning bit to declare
	     that overflow doesn't happen.  */
	  && (!inv1 || !warning_suppressed_p (val1, OPT_Woverflow))
	  && (!inv2 || !warning_suppressed_p (val2, OPT_Woverflow)))
	*strict_overflow_p = true;

      if (!inv1)
	inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
	inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
		      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets the no-warning bit to declare
	     that overflow doesn't happen.  */
	  && (!sym1 || !warning_suppressed_p (val1, OPT_Woverflow))
	  && (!sym2 || !warning_suppressed_p (val2, OPT_Woverflow)))
	*strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
	 underflows, this means that we can trivially compare the NAME with
	 it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
	  != wi::cmp (diff, wi::to_wide (cst), sgn))
	{
	  const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
	  return cst1 ? res : -res;
	}

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	return -2;

      if (TREE_CODE (val1) == INTEGER_CST
	  && TREE_CODE (val2) == INTEGER_CST)
	return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
	{
	  if (known_eq (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 0;
	  if (known_lt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return -1;
	  if (known_gt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 1;
	}

      return -2;
    }
  else
    {
      if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
	{
	  /* We cannot compare overflowed values.  */
	  if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	    return -2;

	  return tree_int_cst_compare (val1, val2);
	}

      /* First see if VAL1 and VAL2 are not the same.  */
      if (operand_equal_p (val1, val2, 0))
	return 0;

      fold_defer_overflow_warnings ();

      /* If VAL1 is a lower address than VAL2, return -1.  */
      tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
      if (t && integer_onep (t))
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  return -1;
	}

      /* If VAL1 is a higher address than VAL2, return +1.  */
      t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
      if (t && integer_onep (t))
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  return 1;
	}

      /* If VAL1 is different than VAL2, return +2.  */
      t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
      fold_undefer_and_ignore_overflow_warnings ();
      if (t && integer_onep (t))
	return 2;

      return -2;
    }
}
/* Compare values like compare_values_warnv.  */

static int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}
/* Helper for overflow_comparison_p

   OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
   OP1's defining statement to see if it ultimately has the form
   OP0 CODE (OP0 PLUS INTEGER_CST)

   If so, return TRUE indicating this is an overflow test and store into
   *NEW_CST an updated constant that can be used in a narrowed range test.

   REVERSED indicates if the comparison was originally:

   OP1 CODE' OP0.

   This affects how we build the updated constant.  */
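/* A hypothetical instance of the shape described above (names invented):

     _2 = x_1 + 4294967295;   // unsigned 32-bit, i.e. x_1 - 1 with wrapping
     if (x_1 > _2)            // false only when the addition wrapped (x_1 == 0)

   This has the OP0 CODE (OP0 PLUS INTEGER_CST) form, so the helper returns
   true and stores a constant in *NEW_CST that lets the condition be treated
   as a simple range test on x_1.  */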
static bool
overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
			 bool reversed, tree *new_cst)
{
  /* See if this is a relational operation between two SSA_NAMES with
     unsigned, overflow wrapping values.  If so, check it more deeply.  */
  if ((code == LT_EXPR || code == LE_EXPR
       || code == GE_EXPR || code == GT_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
    {
      gimple *op1_def = SSA_NAME_DEF_STMT (op1);

      /* Now look at the defining statement of OP1 to see if it adds
	 or subtracts a nonzero constant from another operand.  */
      if (op1_def
	  && is_gimple_assign (op1_def)
	  && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
	  && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
	  && !integer_zerop (gimple_assign_rhs2 (op1_def)))
	{
	  tree target = gimple_assign_rhs1 (op1_def);

	  /* If we did not find our target SSA_NAME, then this is not
	     an overflow test.  */
	  if (target != op0)
	    return false;

	  tree type = TREE_TYPE (op0);
	  wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
	  tree inc = gimple_assign_rhs2 (op1_def);
	  if (reversed)
	    *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
	  else
	    *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
	  return true;
	}
    }
  return false;
}
/* OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
   OP1's defining statement to see if it ultimately has the form
   OP0 CODE (OP0 PLUS INTEGER_CST)

   If so, return TRUE indicating this is an overflow test and store into
   *NEW_CST an updated constant that can be used in a narrowed range test.

   These statements are left as-is in the IL to facilitate discovery of
   {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline.  But
   the alternate range representation is often useful within VRP.  */
static bool
overflow_comparison_p (tree_code code, tree name, tree val, tree *new_cst)
{
  if (overflow_comparison_p_1 (code, name, val, false, new_cst))
    return true;
  return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
				  true, new_cst);
}
/* Handle
     _4 = x_3 & 31;
     if (_4 != 0)
       goto <bb 6>;
     else
       goto <bb 7>;
     <bb 6>:
     __builtin_unreachable ();
     <bb 7>:

   If x_3 has no other immediate uses (checked by caller), var is the
   x_3 var, we can clear low 5 bits from the non-zero bitmask.  */
static void
maybe_set_nonzero_bits (edge e, tree var)
{
  basic_block cond_bb = e->src;
  gcond *cond = safe_dyn_cast <gcond *> (*gsi_last_bb (cond_bb));
  tree cst;

  if (cond == NULL
      || gimple_cond_code (cond) != ((e->flags & EDGE_TRUE_VALUE)
				     ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (cond)))
    return;

  gimple *stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
      || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
    return;
  if (gimple_assign_rhs1 (stmt) != var)
    {
      gimple *stmt2;

      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
	return;
      stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      if (!gimple_assign_cast_p (stmt2)
	  || gimple_assign_rhs1 (stmt2) != var
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
	      != TYPE_PRECISION (TREE_TYPE (var))))
	return;
    }
  cst = gimple_assign_rhs2 (stmt);
  if (POINTER_TYPE_P (TREE_TYPE (var)))
    {
      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (var);
      if (pi && pi->misalign)
	return;
      wide_int w = wi::bit_not (wi::to_wide (cst));
      unsigned int bits = wi::ctz (w);
      if (bits == 0 || bits >= HOST_BITS_PER_INT)
	return;
      unsigned int align = 1U << bits;
      if (pi == NULL || pi->align < align)
	set_ptr_info_alignment (get_ptr_info (var), align, 0);
      return;
    }
  set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
					  wi::to_wide (cst)));
}
/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
   that includes the value VAL.  The search is restricted to the range
   [START_IDX, n - 1] where n is the size of VEC.

   If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
   returned.

   If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
   it is placed in IDX and false is returned.

   If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
   returned.  */
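/* An illustrative example (case values invented): for a switch with labels
   case 1:, case 5 ... 7: and case 10:, searching for VAL == 6 stores the
   index of the "5 ... 7" label in *IDX and returns true, searching for
   VAL == 8 stores the index of the "10" label and returns false, and
   searching for VAL == 11 stores n in *IDX and returns false.  */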
static bool
find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
{
  size_t n = gimple_switch_num_labels (stmt);
  size_t low, high;

  /* Find case label for minimum of the value range or the next one.
     At each iteration we are searching in [low, high - 1].  */

  for (low = start_idx, high = n; high != low; )
    {
      tree t;
      int cmp;
      /* Note that i != high, so we never ask for n.  */
      size_t i = (high + low) / 2;
      t = gimple_switch_label (stmt, i);

      /* Cache the result of comparing CASE_LOW and val.  */
      cmp = tree_int_cst_compare (CASE_LOW (t), val);

      if (cmp == 0)
	{
	  /* Ranges cannot be empty.  */
	  *idx = i;
	  return true;
	}
      else if (cmp > 0)
	high = i;
      else
	{
	  low = i + 1;
	  if (CASE_HIGH (t) != NULL
	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
	    {
	      *idx = i;
	      return true;
	    }
	}
    }

  *idx = high;
  return false;
}
/* Searches the case label vector VEC for the range of CASE_LABELs that is used
   for values between MIN and MAX.  The first index is placed in MIN_IDX.  The
   last index is placed in MAX_IDX.  If the range of CASE_LABELs is empty
   then MAX_IDX < MIN_IDX.
   Returns true if the default label is not needed.  */
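/* For illustration (labels invented): with labels case 1:, case 2: and
   case 3: and MIN == 1, MAX == 3, the label range is continuous, so
   *MIN_IDX and *MAX_IDX delimit those three labels and true is returned
   (the default label is not needed); with only case 1: and case 3: and the
   same MIN/MAX, the gap at 2 means false is returned.  */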
static bool
find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
		       size_t *max_idx)
{
  size_t i, j;
  bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
  bool max_take_default = !find_case_label_index (stmt, i, max, &j);

  if (i == j
      && min_take_default
      && max_take_default)
    {
      /* Only the default case label reached.
	 Return an empty range.  */
      *min_idx = 1;
      *max_idx = 0;
      return false;
    }
  else
    {
      bool take_default = min_take_default || max_take_default;
      tree low, high;
      size_t k;

      if (max_take_default)
	j--;

      /* If the case label range is continuous, we do not need
	 the default case label.  Verify that.  */
      high = CASE_LOW (gimple_switch_label (stmt, i));
      if (CASE_HIGH (gimple_switch_label (stmt, i)))
	high = CASE_HIGH (gimple_switch_label (stmt, i));
      for (k = i + 1; k <= j; ++k)
	{
	  low = CASE_LOW (gimple_switch_label (stmt, k));
	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
	    {
	      take_default = true;
	      break;
	    }
	  high = low;
	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
	    high = CASE_HIGH (gimple_switch_label (stmt, k));
	}

      *min_idx = i;
      *max_idx = j;
      return !take_default;
    }
}
/* Given a SWITCH_STMT, return the case label that encompasses the
   known possible values for the switch operand.  RANGE_OF_OP is a
   range for the known values of the switch operand.  */
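/* For illustration (values invented): if RANGE_OF_OP is [5, 7] and the
   switch has a label case 5 ... 10:, that single label encompasses every
   possible value of the operand and is returned; if no label covers any
   value in RANGE_OF_OP, the default label is returned; otherwise NULL_TREE
   is returned.  */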
static tree
find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
{
  if (range_of_op->undefined_p ()
      || range_of_op->varying_p ())
    return NULL_TREE;

  size_t i, j;
  tree op = gimple_switch_index (switch_stmt);
  tree type = TREE_TYPE (op);
  unsigned prec = TYPE_PRECISION (type);
  signop sign = TYPE_SIGN (type);
  tree tmin = wide_int_to_tree (type, range_of_op->lower_bound ());
  tree tmax = wide_int_to_tree (type, range_of_op->upper_bound ());
  find_case_label_range (switch_stmt, tmin, tmax, &i, &j);
  if (i == j)
    {
      /* Look for exactly one label that encompasses the range of
	 the operand.  */
      tree label = gimple_switch_label (switch_stmt, i);
      tree case_high
	= CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
      wide_int wlow = wi::to_wide (CASE_LOW (label));
      wide_int whigh = wi::to_wide (case_high);
      int_range_max label_range (type,
				 wide_int::from (wlow, prec, sign),
				 wide_int::from (whigh, prec, sign));
      if (!types_compatible_p (label_range.type (), range_of_op->type ()))
	range_cast (label_range, range_of_op->type ());
      label_range.intersect (*range_of_op);
      if (label_range == *range_of_op)
	return label;
    }
  else if (i > j)
    {
      /* If there are no labels at all, take the default.  */
      return gimple_switch_label (switch_stmt, 0);
    }
  else
    {
      /* Otherwise, there are various labels that can encompass
	 the range of operand.  In which case, see if the range of
	 the operand is entirely *outside* the bounds of all the
	 (non-default) case labels.  If so, take the default.  */
      unsigned n = gimple_switch_num_labels (switch_stmt);
      tree min_label = gimple_switch_label (switch_stmt, 1);
      tree max_label = gimple_switch_label (switch_stmt, n - 1);
      tree case_high = CASE_HIGH (max_label);
      if (!case_high)
	case_high = CASE_LOW (max_label);
      int_range_max label_range (TREE_TYPE (CASE_LOW (min_label)),
				 wi::to_wide (CASE_LOW (min_label)),
				 wi::to_wide (case_high));
      if (!types_compatible_p (label_range.type (), range_of_op->type ()))
	range_cast (label_range, range_of_op->type ());
      label_range.intersect (*range_of_op);
      if (label_range.undefined_p ())
	return gimple_switch_label (switch_stmt, 0);
    }
  return NULL_TREE;
}
// This is a ranger based folder which continues to use the dominator
// walk to access the substitute and fold machinery.  Ranges are calculated
// on demand.
class rvrp_folder : public substitute_and_fold_engine
{
public:
  rvrp_folder (gimple_ranger *r) : substitute_and_fold_engine (),
				   m_unreachable (*r),
				   m_simplifier (r, r->non_executable_edge_flag)
  {
    m_ranger = r;
    m_pta = new pointer_equiv_analyzer (m_ranger);
    m_last_bb_stmt = NULL;
  }

  ~rvrp_folder ()
  {
    delete m_pta;
  }

  tree value_of_expr (tree name, gimple *s = NULL) override
  {
    // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
    if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
      return NULL;
    tree ret = m_ranger->value_of_expr (name, s);
    if (!ret && supported_pointer_equiv_p (name))
      ret = m_pta->get_equiv (name);
    return ret;
  }

  tree value_on_edge (edge e, tree name) override
  {
    // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
    if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
      return NULL;
    tree ret = m_ranger->value_on_edge (e, name);
    if (!ret && supported_pointer_equiv_p (name))
      ret = m_pta->get_equiv (name);
    return ret;
  }

  tree value_of_stmt (gimple *s, tree name = NULL) override
  {
    // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
    if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
      return NULL;
    return m_ranger->value_of_stmt (s, name);
  }

  void pre_fold_bb (basic_block bb) override
  {
    m_pta->enter (bb);
    for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	 gsi_next (&gsi))
      m_ranger->register_inferred_ranges (gsi.phi ());
    m_last_bb_stmt = last_nondebug_stmt (bb);
  }

  void post_fold_bb (basic_block bb) override
  {
    m_pta->leave (bb);
    if (cfun->after_inlining)
      m_unreachable.maybe_register_block (bb);
  }

  void pre_fold_stmt (gimple *stmt) override
  {
    m_pta->visit_stmt (stmt);
    // If this is the last stmt and there are inferred ranges, reparse the
    // block for transitive inferred ranges that occur earlier in the block.
    if (stmt == m_last_bb_stmt)
      m_ranger->register_transitive_inferred_ranges (gimple_bb (stmt));
  }

  bool fold_stmt (gimple_stmt_iterator *gsi) override
  {
    bool ret = m_simplifier.simplify (gsi);
    if (!ret)
      ret = m_ranger->fold_stmt (gsi, follow_single_use_edges);
    m_ranger->register_inferred_ranges (gsi_stmt (*gsi));
    return ret;
  }

  remove_unreachable m_unreachable;
private:
  DISABLE_COPY_AND_ASSIGN (rvrp_folder);
  gimple_ranger *m_ranger;
  simplify_using_ranges m_simplifier;
  pointer_equiv_analyzer *m_pta;
  gimple *m_last_bb_stmt;
};
/* Main entry point for a VRP pass using just ranger.  This can be called
   from anywhere to perform a VRP pass, including from EVRP.  */
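/* A minimal usage sketch (hypothetical caller): a single ranger-driven VRP
   pass over the current function, without array bounds warnings and not
   acting as the final VRP pass, would be invoked as

     execute_ranger_vrp (cfun, /@warn_array_bounds_p=@/false,
			 /@final_p=@/false);

   where the "@" markers stand in for the usual argument-name comments.  */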
static unsigned int
execute_ranger_vrp (struct function *fun, bool warn_array_bounds_p,
		    bool final_p)
{
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();
  calculate_dominance_info (CDI_DOMINATORS);

  set_all_edges_as_executable (fun);
  gimple_ranger *ranger = enable_ranger (fun, false);
  rvrp_folder folder (ranger);
  phi_analysis_initialize (ranger->const_query ());
  folder.substitute_and_fold ();
  // Remove tagged builtin-unreachable and maybe update globals.
  folder.m_unreachable.remove_and_update_globals (final_p);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      phi_analysis ().dump (dump_file);
      ranger->dump (dump_file);
    }

  if ((warn_array_bounds || warn_strict_flex_arrays) && warn_array_bounds_p)
    {
      // Set all edges as executable, except those ranger says aren't.
      int non_exec_flag = ranger->non_executable_edge_flag;
      basic_block bb;
      FOR_ALL_BB_FN (bb, fun)
	{
	  edge_iterator ei;
	  edge e;
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (e->flags & non_exec_flag)
	      e->flags &= ~EDGE_EXECUTABLE;
	    else
	      e->flags |= EDGE_EXECUTABLE;
	}
      array_bounds_checker array_checker (fun, ranger);
      array_checker.check ();
    }

  phi_analysis_finalize ();
  disable_ranger (fun);
  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}
namespace {

const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};
const pass_data pass_data_early_vrp =
{
  GIMPLE_PASS, /* type */
  "evrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EARLY_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
};
= 0;
1063 class pass_vrp
: public gimple_opt_pass
1066 pass_vrp (gcc::context
*ctxt
, const pass_data
&data_
)
1067 : gimple_opt_pass (data_
, ctxt
), data (data_
), warn_array_bounds_p (false),
1068 my_pass (vrp_pass_num
++)
1071 /* opt_pass methods: */
1072 opt_pass
* clone () final override
{ return new pass_vrp (m_ctxt
, data
); }
1073 void set_pass_param (unsigned int n
, bool param
) final override
1075 gcc_assert (n
== 0);
1076 warn_array_bounds_p
= param
;
1078 bool gate (function
*) final override
{ return flag_tree_vrp
!= 0; }
1079 unsigned int execute (function
*fun
) final override
1083 return execute_ranger_vrp (fun
, /*warn_array_bounds_p=*/false, false);
1085 return execute_ranger_vrp (fun
, warn_array_bounds_p
, my_pass
== 2);
1089 const pass_data
&data
;
1090 bool warn_array_bounds_p
;
1092 }; // class pass_vrp
const pass_data pass_data_assumptions =
{
  GIMPLE_PASS, /* type */
  "assumptions", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_ASSUMPTIONS, /* tv_id */
  PROP_ssa, /* properties_required */
  PROP_assumptions_done, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_end */
};
class pass_assumptions : public gimple_opt_pass
{
public:
  pass_assumptions (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_assumptions, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *fun) final override { return fun->assume_function; }
  unsigned int execute (function *) final override
    {
      assume_query query;
      if (dump_file)
	fprintf (dump_file, "Assumptions :\n--------------\n");

      for (tree arg = DECL_ARGUMENTS (cfun->decl); arg; arg = DECL_CHAIN (arg))
	{
	  tree name = ssa_default_def (cfun, arg);
	  if (!name || !gimple_range_ssa_p (name))
	    continue;
	  tree type = TREE_TYPE (name);
	  if (!Value_Range::supports_type_p (type))
	    continue;
	  Value_Range assume_range (type);
	  if (query.assume_range_p (assume_range, name))
	    {
	      // Set the global range of NAME to anything calculated.
	      set_range_info (name, assume_range);
	      if (dump_file)
		{
		  print_generic_expr (dump_file, name, TDF_SLIM);
		  fprintf (dump_file, " -> ");
		  assume_range.dump (dump_file);
		  fputc ('\n', dump_file);
		}
	    }
	}
      if (dump_file)
	{
	  fputc ('\n', dump_file);
	  gimple_dump_cfg (dump_file, dump_flags & ~TDF_DETAILS);
	  if (dump_flags & TDF_DETAILS)
	    query.dump (dump_file);
	}
      return TODO_discard_function;
    }

}; // class pass_assumptions
} // anon namespace

gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt, pass_data_vrp);
}

gimple_opt_pass *
make_pass_early_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt, pass_data_early_vrp);
}

gimple_opt_pass *
make_pass_assumptions (gcc::context *ctx)
{
  return new pass_assumptions (ctx);
}