/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "basic-block.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-dump.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
/* Type of value ranges.  See value_range_d for a description of these
   types.  */
enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };

/* Range of values that can be associated with an SSA_NAME after VRP
   has executed.  */
struct value_range_d
{
  /* Lattice value represented by this range.  */
  enum value_range_type type;

  /* Minimum and maximum values represented by this range.  These
     values should be interpreted as follows:

      - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
        be NULL.

      - If TYPE == VR_RANGE then MIN holds the minimum value and
        MAX holds the maximum value of the range [MIN, MAX].

      - If TYPE == ANTI_RANGE the variable is known to NOT
        take any values in the range [MIN, MAX].  */
  tree min;
  tree max;

  /* Set of SSA names whose value ranges are equivalent to this one.
     This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE.  */
  bitmap equiv;
};

typedef struct value_range_d value_range_t;
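/* For illustration, a variable known to lie between 1 and 10 is
   represented as { VR_RANGE, 1, 10, EQUIV }, a pointer known to be
   non-NULL as the anti-range { VR_ANTI_RANGE, 0, 0, EQUIV }, and a
   variable about which nothing is known as { VR_VARYING, NULL, NULL,
   NULL }.  */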
/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
          && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name)));
}

/* Local functions.  */
static int compare_values (tree val1, tree val2);
static int compare_values_warnv (tree val1, tree val2, bool *);
static void vrp_meet (value_range_t *, value_range_t *);
static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
                                                     tree, tree, bool, bool *,
                                                     bool *);
/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus_d
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  struct assert_locus_d *next;
};

typedef struct assert_locus_d *assert_locus_t;

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus_t *asserts_for;

/* Value range array.  After propagation, VR_VALUE[I] holds the range
   of values that SSA name N_I may take.  */
static value_range_t **vr_value;

/* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
   number of executable edges we saw the last time we visited the
   PHI node.  */
static int *vr_phi_edge_counts;

struct switch_update {
  gimple stmt;
  tree vec;
};

static VEC (edge, heap) *to_remove_edges;
DEF_VEC_O(switch_update);
DEF_VEC_ALLOC_O(switch_update, heap);
static VEC (switch_update, heap) *to_update_switch_stmts;
/* Return the maximum value for TYPE.  */

static inline tree
vrp_val_max (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MAX_VALUE (type);
}

/* Return the minimum value for TYPE.  */

static inline tree
vrp_val_min (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MIN_VALUE (type);
}

/* Return whether VAL is equal to the maximum value of its type.  This
   will be true for a positive overflow infinity.  We can't do a
   simple equality comparison with TYPE_MAX_VALUE because C typedefs
   and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
   to the integer constant with the same value in the type.  */

static inline bool
vrp_val_is_max (const_tree val)
{
  tree type_max = vrp_val_max (TREE_TYPE (val));
  return (val == type_max
          || (type_max != NULL_TREE
              && operand_equal_p (val, type_max, 0)));
}

/* Return whether VAL is equal to the minimum value of its type.  This
   will be true for a negative overflow infinity.  */

static inline bool
vrp_val_is_min (const_tree val)
{
  tree type_min = vrp_val_min (TREE_TYPE (val));
  return (val == type_min
          || (type_min != NULL_TREE
              && operand_equal_p (val, type_min, 0)));
}
/* Return whether TYPE should use an overflow infinity distinct from
   TYPE_{MIN,MAX}_VALUE.  We use an overflow infinity value to
   represent a signed overflow during VRP computations.  An infinity
   is distinct from a half-range, which will go from some number to
   TYPE_{MIN,MAX}_VALUE.  */

static inline bool
needs_overflow_infinity (const_tree type)
{
  return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
}

/* Return whether TYPE can support our overflow infinity
   representation: we use the TREE_OVERFLOW flag, which only exists
   for constants.  If TYPE doesn't support this, we don't optimize
   cases which would require signed overflow--we drop them to
   VARYING.  */

static inline bool
supports_overflow_infinity (const_tree type)
{
  tree min = vrp_val_min (type), max = vrp_val_max (type);
#ifdef ENABLE_CHECKING
  gcc_assert (needs_overflow_infinity (type));
#endif
  return (min != NULL_TREE
          && CONSTANT_CLASS_P (min)
          && max != NULL_TREE
          && CONSTANT_CLASS_P (max));
}

/* VAL is the maximum or minimum value of a type.  Return a
   corresponding overflow infinity.  */

static inline tree
make_overflow_infinity (tree val)
{
  gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
  val = copy_node (val);
  TREE_OVERFLOW (val) = 1;
  return val;
}

/* Return a negative overflow infinity for TYPE.  */

static inline tree
negative_overflow_infinity (tree type)
{
  gcc_checking_assert (supports_overflow_infinity (type));
  return make_overflow_infinity (vrp_val_min (type));
}

/* Return a positive overflow infinity for TYPE.  */

static inline tree
positive_overflow_infinity (tree type)
{
  gcc_checking_assert (supports_overflow_infinity (type));
  return make_overflow_infinity (vrp_val_max (type));
}
/* Return whether VAL is a negative overflow infinity.  */

static inline bool
is_negative_overflow_infinity (const_tree val)
{
  return (needs_overflow_infinity (TREE_TYPE (val))
          && CONSTANT_CLASS_P (val)
          && TREE_OVERFLOW (val)
          && vrp_val_is_min (val));
}

/* Return whether VAL is a positive overflow infinity.  */

static inline bool
is_positive_overflow_infinity (const_tree val)
{
  return (needs_overflow_infinity (TREE_TYPE (val))
          && CONSTANT_CLASS_P (val)
          && TREE_OVERFLOW (val)
          && vrp_val_is_max (val));
}

/* Return whether VAL is a positive or negative overflow infinity.  */

static inline bool
is_overflow_infinity (const_tree val)
{
  return (needs_overflow_infinity (TREE_TYPE (val))
          && CONSTANT_CLASS_P (val)
          && TREE_OVERFLOW (val)
          && (vrp_val_is_min (val) || vrp_val_is_max (val)));
}
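/* As an example of this representation: for a 16-bit signed type, a
   positive overflow infinity is a copy of the INTEGER_CST 32767 with
   TREE_OVERFLOW set, so it still compares equal to TYPE_MAX_VALUE
   while remaining distinguishable from an ordinary saturated
   bound.  */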
/* Return whether STMT has a constant rhs that is_overflow_infinity.  */

static bool
stmt_overflow_infinity (gimple stmt)
{
  if (is_gimple_assign (stmt)
      && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
      GIMPLE_SINGLE_RHS)
    return is_overflow_infinity (gimple_assign_rhs1 (stmt));
  return false;
}
/* If VAL is now an overflow infinity, return VAL.  Otherwise, return
   the same value with TREE_OVERFLOW clear.  This can be used to avoid
   confusing a regular value with an overflow value.  */

static tree
avoid_overflow_infinity (tree val)
{
  if (!is_overflow_infinity (val))
    return val;

  if (vrp_val_is_max (val))
    return vrp_val_max (TREE_TYPE (val));

  gcc_checking_assert (vrp_val_is_min (val));
  return vrp_val_min (TREE_TYPE (val));
}
/* Return true if ARG is marked with the nonnull attribute in the
   current function signature.  */

static bool
nonnull_arg_p (const_tree arg)
{
  tree t, attrs, fntype;
  unsigned HOST_WIDE_INT arg_num;

  gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));

  /* The static chain decl is always non null.  */
  if (arg == cfun->static_chain_decl)
    return true;

  fntype = TREE_TYPE (current_function_decl);
  attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));

  /* If "nonnull" wasn't specified, we know nothing about the argument.  */
  if (attrs == NULL_TREE)
    return false;

  /* If "nonnull" applies to all the arguments, then ARG is non-null.  */
  if (TREE_VALUE (attrs) == NULL_TREE)
    return true;

  /* Get the position number for ARG in the function signature.  */
  for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
       t;
       t = DECL_CHAIN (t), arg_num++)
    {
      if (t == arg)
        break;
    }

  gcc_assert (t == arg);

  /* Now see if ARG_NUM is mentioned in the nonnull list.  */
  for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
    {
      if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
        return true;
    }

  return false;
}
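/* For example, given a declaration along the lines of

     void f (char *a, char *b) __attribute__ ((nonnull (2)));

   nonnull_arg_p would return false for A and true for B, since only
   position 2 appears in the attribute's argument list.  */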
/* Set value range VR to VR_VARYING.  */

static inline void
set_value_range_to_varying (value_range_t *vr)
{
  vr->type = VR_VARYING;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}

/* Set value range VR to {T, MIN, MAX, EQUIV}.  */

static void
set_value_range (value_range_t *vr, enum value_range_type t, tree min,
                 tree max, bitmap equiv)
{
#if defined ENABLE_CHECKING
  /* Check the validity of the range.  */
  if (t == VR_RANGE || t == VR_ANTI_RANGE)
    {
      int cmp;

      gcc_assert (min && max);

      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
        gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);

      if (needs_overflow_infinity (TREE_TYPE (min)))
        gcc_assert (!is_overflow_infinity (min)
                    || !is_overflow_infinity (max));
    }

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (min == NULL_TREE && max == NULL_TREE);

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
#endif

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (NULL);

  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
        bitmap_copy (vr->equiv, equiv);
      else
        bitmap_clear (vr->equiv);
    }
}
/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

static void
set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
                                  tree min, tree max, bitmap equiv)
{
  /* Nothing to canonicalize for symbolic or unknown or varying ranges.  */
  if ((t != VR_RANGE
       && t != VR_ANTI_RANGE)
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* Wrong order for min and max, to swap them and the VR type we need
     to adjust them.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one = build_int_cst (TREE_TYPE (min), 1);
      tree tmp = int_const_binop (PLUS_EXPR, max, one, 0);
      max = int_const_binop (MINUS_EXPR, min, one, 0);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
         that again.  But this represents an empty value range, so drop
         to varying in this case.  */
      if (tree_int_cst_lt (max, min))
        {
          set_value_range_to_varying (vr);
          return;
        }

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
    {
      bool is_min = vrp_val_is_min (min);
      bool is_max = vrp_val_is_max (max);

      if (is_min && is_max)
        {
          /* We cannot deal with empty ranges, drop to varying.  */
          set_value_range_to_varying (vr);
          return;
        }
      else if (is_min
               /* As a special exception preserve non-null ranges.  */
               && !(TYPE_UNSIGNED (TREE_TYPE (min))
                    && integer_zerop (max)))
        {
          tree one = build_int_cst (TREE_TYPE (max), 1);
          min = int_const_binop (PLUS_EXPR, max, one, 0);
          max = vrp_val_max (TREE_TYPE (max));
          t = VR_RANGE;
        }
      else if (is_max)
        {
          tree one = build_int_cst (TREE_TYPE (min), 1);
          max = int_const_binop (MINUS_EXPR, min, one, 0);
          min = vrp_val_min (TREE_TYPE (min));
          t = VR_RANGE;
        }
    }

  set_value_range (vr, t, min, max, equiv);
}
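/* For example, on an 8-bit unsigned type the wrapping range [250, 5]
   is canonicalized to the anti-range ~[6, 249], and the anti-range
   ~[0, 255] (which would be empty) is dropped to VR_VARYING.  The
   anti-range ~[0, 0] of an unsigned type is preserved as-is so that
   the non-null property is not lost.  */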
/* Copy value range FROM into value range TO.  */

static inline void
copy_value_range (value_range_t *to, value_range_t *from)
{
  set_value_range (to, from->type, from->min, from->max, from->equiv);
}

/* Set value range VR to a single value.  This function is only called
   with values we get from statements, and exists to clear the
   TREE_OVERFLOW flag so that we don't think we have an overflow
   infinity when we shouldn't.  */

static inline void
set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
{
  gcc_assert (is_gimple_min_invariant (val));
  val = avoid_overflow_infinity (val);
  set_value_range (vr, VR_RANGE, val, val, equiv);
}

/* Set value range VR to a non-negative range of type TYPE.
   OVERFLOW_INFINITY indicates whether to use an overflow infinity
   rather than TYPE_MAX_VALUE; this should be true if we determine
   that the range is nonnegative based on the assumption that signed
   overflow does not occur.  */

static inline void
set_value_range_to_nonnegative (value_range_t *vr, tree type,
                                bool overflow_infinity)
{
  tree zero;

  if (overflow_infinity && !supports_overflow_infinity (type))
    {
      set_value_range_to_varying (vr);
      return;
    }

  zero = build_int_cst (type, 0);
  set_value_range (vr, VR_RANGE, zero,
                   (overflow_infinity
                    ? positive_overflow_infinity (type)
                    : TYPE_MAX_VALUE (type)),
                   vr->equiv);
}

/* Set value range VR to a non-NULL range of type TYPE.  */

static inline void
set_value_range_to_nonnull (value_range_t *vr, tree type)
{
  tree zero = build_int_cst (type, 0);
  set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
}

/* Set value range VR to a NULL range of type TYPE.  */

static inline void
set_value_range_to_null (value_range_t *vr, tree type)
{
  set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
}

/* Set value range VR to a range of a truthvalue of type TYPE.  */

static inline void
set_value_range_to_truthvalue (value_range_t *vr, tree type)
{
  if (TYPE_PRECISION (type) == 1)
    set_value_range_to_varying (vr);
  else
    set_value_range (vr, VR_RANGE,
                     build_int_cst (type, 0), build_int_cst (type, 1),
                     vr->equiv);
}

/* Set value range VR to VR_UNDEFINED.  */

static inline void
set_value_range_to_undefined (value_range_t *vr)
{
  vr->type = VR_UNDEFINED;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}
/* If abs (min) < abs (max), set VR to [-max, max], if
   abs (min) >= abs (max), set VR to [-min, min].  */

static void
abs_extent_range (value_range_t *vr, tree min, tree max)
{
  int cmp;

  gcc_assert (TREE_CODE (min) == INTEGER_CST);
  gcc_assert (TREE_CODE (max) == INTEGER_CST);
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
  gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
  min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
  max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
  if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
    {
      set_value_range_to_varying (vr);
      return;
    }
  cmp = compare_values (min, max);
  if (cmp == -1)
    min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
  else if (cmp == 0 || cmp == 1)
    {
      max = min;
      min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
    }
  else
    {
      set_value_range_to_varying (vr);
      return;
    }
  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
}
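/* For instance, abs_extent_range applied to MIN = -3 and MAX = 5
   yields the range [-5, 5], while MIN = -7 and MAX = 5 yields
   [-7, 7]; negating the minimum of a signed type overflows, in which
   case the range is dropped to VR_VARYING.  */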
/* Return value range information for VAR.

   If we have no value ranges recorded (i.e., VRP is not running), then
   return NULL.  Otherwise create an empty range if none existed for VAR.  */

static value_range_t *
get_value_range (const_tree var)
{
  value_range_t *vr;
  tree sym;
  unsigned ver = SSA_NAME_VERSION (var);

  /* If we have no recorded ranges, then return NULL.  */
  if (! vr_value)
    return NULL;

  vr = vr_value[ver];
  if (vr)
    return vr;

  /* Create a default value range.  */
  vr_value[ver] = vr = XCNEW (value_range_t);

  /* Defer allocating the equivalence set.  */
  vr->equiv = NULL;

  /* If VAR is a default definition, the variable can take any value
     in VAR's type.  */
  sym = SSA_NAME_VAR (var);
  if (SSA_NAME_IS_DEFAULT_DEF (var))
    {
      /* Try to use the "nonnull" attribute to create ~[0, 0]
         anti-ranges for pointers.  Note that this is only valid with
         default definitions of PARM_DECLs.  */
      if (TREE_CODE (sym) == PARM_DECL
          && POINTER_TYPE_P (TREE_TYPE (sym))
          && nonnull_arg_p (sym))
        set_value_range_to_nonnull (vr, TREE_TYPE (sym));
      else
        set_value_range_to_varying (vr);
    }

  return vr;
}
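/* For instance, for the default definition of a PARM_DECL pointer
   that nonnull_arg_p accepts, the range created here is ~[0, 0]; for
   any other default definition it is VR_VARYING.  */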
/* Return true, if VAL1 and VAL2 are equal values for VRP purposes.  */

static inline bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
  if (val1 == val2)
    return true;
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
    return false;
  if (is_overflow_infinity (val1))
    return is_overflow_infinity (val2);
  return true;
}

/* Return true, if the bitmaps B1 and B2 are equal.  */

static inline bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
          || (b1 && b2
              && bitmap_equal_p (b1, b2)));
}

/* Update the value range and equivalence set for variable VAR to
   NEW_VR.  Return true if NEW_VR is different from VAR's previous
   range.

   NOTE: This function assumes that NEW_VR is a temporary value range
   object created for the sole purpose of updating VAR's range.  The
   storage used by the equivalence set from NEW_VR will be freed by
   this function.  Do not call update_value_range when NEW_VR
   is the range object associated with another SSA name.  */

static inline bool
update_value_range (const_tree var, value_range_t *new_vr)
{
  value_range_t *old_vr;
  bool is_new;

  /* Update the value range, if necessary.  */
  old_vr = get_value_range (var);
  is_new = old_vr->type != new_vr->type
           || !vrp_operand_equal_p (old_vr->min, new_vr->min)
           || !vrp_operand_equal_p (old_vr->max, new_vr->max)
           || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);

  if (is_new)
    set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
                     new_vr->equiv);

  BITMAP_FREE (new_vr->equiv);

  return is_new;
}
/* Add VAR and VAR's equivalence set to EQUIV.  This is the central
   point where equivalence processing can be turned on/off.  */

static void
add_equivalence (bitmap *equiv, const_tree var)
{
  unsigned ver = SSA_NAME_VERSION (var);
  value_range_t *vr = vr_value[ver];

  if (*equiv == NULL)
    *equiv = BITMAP_ALLOC (NULL);
  bitmap_set_bit (*equiv, ver);
  if (vr && vr->equiv)
    bitmap_ior_into (*equiv, vr->equiv);
}

/* Return true if VR is ~[0, 0].  */

static inline bool
range_is_nonnull (value_range_t *vr)
{
  return vr->type == VR_ANTI_RANGE
         && integer_zerop (vr->min)
         && integer_zerop (vr->max);
}

/* Return true if VR is [0, 0].  */

static inline bool
range_is_null (value_range_t *vr)
{
  return vr->type == VR_RANGE
         && integer_zerop (vr->min)
         && integer_zerop (vr->max);
}

/* Return true if max and min of VR are INTEGER_CST.  It's not necessarily
   a singleton.  */

static inline bool
range_int_cst_p (value_range_t *vr)
{
  return (vr->type == VR_RANGE
          && TREE_CODE (vr->max) == INTEGER_CST
          && TREE_CODE (vr->min) == INTEGER_CST
          && !TREE_OVERFLOW (vr->max)
          && !TREE_OVERFLOW (vr->min));
}

/* Return true if VR is an INTEGER_CST singleton.  */

static inline bool
range_int_cst_singleton_p (value_range_t *vr)
{
  return (range_int_cst_p (vr)
          && tree_int_cst_equal (vr->min, vr->max));
}

/* Return true if value range VR involves at least one symbol.  */

static inline bool
symbolic_range_p (value_range_t *vr)
{
  return (!is_gimple_min_invariant (vr->min)
          || !is_gimple_min_invariant (vr->max));
}

/* Return true if value range VR uses an overflow infinity.  */

static inline bool
overflow_infinity_range_p (value_range_t *vr)
{
  return (vr->type == VR_RANGE
          && (is_overflow_infinity (vr->min)
              || is_overflow_infinity (vr->max)));
}
/* Return false if we cannot make a valid comparison based on VR;
   this will be the case if it uses an overflow infinity and overflow
   is not undefined (i.e., -fno-strict-overflow is in effect).
   Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
   uses an overflow infinity.  */

static bool
usable_range_p (value_range_t *vr, bool *strict_overflow_p)
{
  gcc_assert (vr->type == VR_RANGE);
  if (is_overflow_infinity (vr->min))
    {
      *strict_overflow_p = true;
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
        return false;
    }
  if (is_overflow_infinity (vr->max))
    {
      *strict_overflow_p = true;
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
        return false;
    }
  return true;
}

/* Like tree_expr_nonnegative_warnv_p, but this function uses value
   ranges obtained so far.  */

static bool
vrp_expr_computes_nonnegative (tree expr, bool *strict_overflow_p)
{
  return (tree_expr_nonnegative_warnv_p (expr, strict_overflow_p)
          || (TREE_CODE (expr) == SSA_NAME
              && ssa_name_nonnegative_p (expr)));
}

/* Return true if the result of assignment STMT is known to be non-negative.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
                                             gimple_expr_type (stmt),
                                             gimple_assign_rhs1 (stmt),
                                             strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
                                              gimple_expr_type (stmt),
                                              gimple_assign_rhs1 (stmt),
                                              gimple_assign_rhs2 (stmt),
                                              strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      gcc_unreachable ();
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
                                              strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}

/* Return true if return value of call STMT is known to be non-negative.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  tree arg0 = gimple_call_num_args (stmt) > 0 ?
    gimple_call_arg (stmt, 0) : NULL_TREE;
  tree arg1 = gimple_call_num_args (stmt) > 1 ?
    gimple_call_arg (stmt, 1) : NULL_TREE;

  return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
                                        gimple_call_fndecl (stmt),
                                        arg0,
                                        arg1,
                                        strict_overflow_p);
}

/* Return true if STMT is known to compute a non-negative value.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
    default:
      return false;
    }
}
/* Return true if the result of assignment STMT is known to be non-zero.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
                                         gimple_expr_type (stmt),
                                         gimple_assign_rhs1 (stmt),
                                         strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
                                          gimple_expr_type (stmt),
                                          gimple_assign_rhs1 (stmt),
                                          gimple_assign_rhs2 (stmt),
                                          strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      gcc_unreachable ();
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
                                          strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}

/* Return true if STMT is known to compute a non-zero value.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      return gimple_alloca_call_p (stmt);
    default:
      gcc_unreachable ();
    }
}

/* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
   obtained so far.  */

static bool
vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
{
  if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
    return true;

  /* If we have an expression of the form &X->a, then the expression
     is nonnull if X is nonnull.  */
  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
    {
      tree expr = gimple_assign_rhs1 (stmt);
      tree base = get_base_address (TREE_OPERAND (expr, 0));

      if (base != NULL_TREE
          && TREE_CODE (base) == MEM_REF
          && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
        {
          value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
          if (range_is_nonnull (vr))
            return true;
        }
    }

  return false;
}

/* Returns true if EXPR is a valid value (as expected by compare_values) --
   a gimple invariant, or SSA_NAME +- CST.  */

static bool
valid_value_p (tree expr)
{
  if (TREE_CODE (expr) == SSA_NAME)
    return true;

  if (TREE_CODE (expr) == PLUS_EXPR
      || TREE_CODE (expr) == MINUS_EXPR)
    return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
            && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);

  return is_gimple_min_invariant (expr);
}
/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */

static inline int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    {
      if (TYPE_UNSIGNED (TREE_TYPE (val)))
        return INT_CST_LT_UNSIGNED (val, val2);
      else
        {
          if (INT_CST_LT (val, val2))
            return 1;
        }
    }
  else
    {
      tree tcmp;

      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      if (!tcmp
          || TREE_CODE (tcmp) != INTEGER_CST)
        return -2;

      if (!integer_zerop (tcmp))
        return 1;
    }

  /* val >= val2, not considering overflow infinity.  */
  if (is_negative_overflow_infinity (val))
    return is_negative_overflow_infinity (val2) ? 0 : 1;
  else if (is_positive_overflow_infinity (val2))
    return is_positive_overflow_infinity (val) ? 0 : 1;

  return 0;
}
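/* For example, operand_less_p (3, 7) returns 1, operand_less_p (7, 3)
   and operand_less_p (3, 3) return 0, and comparing two unrelated
   symbolic addresses returns -2.  */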
/* Compare two values VAL1 and VAL2.  Return

        -2 if VAL1 and VAL2 cannot be compared at compile-time,
        -1 if VAL1 < VAL2,
         0 if VAL1 == VAL2,
        +1 if VAL1 > VAL2, and
        +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

static int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
              == POINTER_TYPE_P (TREE_TYPE (val2)));
  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  if ((TREE_CODE (val1) == SSA_NAME
       || TREE_CODE (val1) == PLUS_EXPR
       || TREE_CODE (val1) == MINUS_EXPR)
      && (TREE_CODE (val2) == SSA_NAME
          || TREE_CODE (val2) == PLUS_EXPR
          || TREE_CODE (val2) == MINUS_EXPR))
    {
      tree n1, c1, n2, c2;
      enum tree_code code1, code2;

      /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
         return -1 or +1 accordingly.  If VAL1 and VAL2 don't use the
         same name, return -2.  */
      if (TREE_CODE (val1) == SSA_NAME)
        {
          code1 = SSA_NAME;
          n1 = val1;
          c1 = NULL_TREE;
        }
      else
        {
          code1 = TREE_CODE (val1);
          n1 = TREE_OPERAND (val1, 0);
          c1 = TREE_OPERAND (val1, 1);
          if (tree_int_cst_sgn (c1) == -1)
            {
              if (is_negative_overflow_infinity (c1))
                return -2;
              c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
              if (!c1)
                return -2;
              code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
            }
        }

      if (TREE_CODE (val2) == SSA_NAME)
        {
          code2 = SSA_NAME;
          n2 = val2;
          c2 = NULL_TREE;
        }
      else
        {
          code2 = TREE_CODE (val2);
          n2 = TREE_OPERAND (val2, 0);
          c2 = TREE_OPERAND (val2, 1);
          if (tree_int_cst_sgn (c2) == -1)
            {
              if (is_negative_overflow_infinity (c2))
                return -2;
              c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
              if (!c2)
                return -2;
              code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
            }
        }

      /* Both values must use the same name.  */
      if (n1 != n2)
        return -2;

      if (code1 == SSA_NAME
          && code2 == SSA_NAME)
        /* NAME == NAME  */
        return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
        return -2;

      if (strict_overflow_p != NULL
          && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
          && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      if (code1 == SSA_NAME)
        {
          if (code2 == PLUS_EXPR)
            /* NAME < NAME + CST  */
            return -1;
          else if (code2 == MINUS_EXPR)
            /* NAME > NAME - CST  */
            return 1;
        }
      else if (code1 == PLUS_EXPR)
        {
          if (code2 == SSA_NAME)
            /* NAME + CST > NAME  */
            return 1;
          else if (code2 == PLUS_EXPR)
            /* NAME + CST1 > NAME + CST2, if CST1 > CST2  */
            return compare_values_warnv (c1, c2, strict_overflow_p);
          else if (code2 == MINUS_EXPR)
            /* NAME + CST1 > NAME - CST2  */
            return 1;
        }
      else if (code1 == MINUS_EXPR)
        {
          if (code2 == SSA_NAME)
            /* NAME - CST < NAME  */
            return -1;
          else if (code2 == PLUS_EXPR)
            /* NAME - CST1 < NAME + CST2  */
            return -1;
          else if (code2 == MINUS_EXPR)
            /* NAME - CST1 > NAME - CST2, if CST1 < CST2.  Notice that
               C1 and C2 are swapped in the call to compare_values.  */
            return compare_values_warnv (c2, c1, strict_overflow_p);
        }

      gcc_unreachable ();
    }
  /* We cannot compare non-constants.  */
  if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values, except for overflow
         infinities.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
        {
          if (strict_overflow_p != NULL)
            *strict_overflow_p = true;
          if (is_negative_overflow_infinity (val1))
            return is_negative_overflow_infinity (val2) ? 0 : -1;
          else if (is_negative_overflow_infinity (val2))
            return 1;
          else if (is_positive_overflow_infinity (val1))
            return is_positive_overflow_infinity (val2) ? 0 : 1;
          else if (is_positive_overflow_infinity (val2))
            return -1;
          return -2;
        }

      return tree_int_cst_compare (val1, val2);
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
        return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
        return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
        return 1;

      /* If VAL1 is different than VAL2, return +2.
         For integer constants we either have already returned -1 or 1
         or they are equivalent.  We still might succeed in proving
         something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
          || TREE_CODE (val2) != INTEGER_CST)
        {
          t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
          if (t && integer_onep (t))
            return 2;
        }

      return -2;
    }
}
/* Compare values like compare_values_warnv, but treat comparisons of
   nonconstants which rely on undefined overflow as incomparable.  */

static int
compare_values (tree val1, tree val2)
{
  bool sop;
  int ret;

  sop = false;
  ret = compare_values_warnv (val1, val2, &sop);
  if (sop
      && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
    ret = -2;
  return ret;
}
/* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX),
          0 if VAL is not inside VR,
         -2 if we cannot tell either way.

   FIXME, the current semantics of this function are a bit quirky
          when taken in the context of VRP.  In here we do not care
          about VR's type.  If VR is the anti-range ~[3, 5] the call
          value_inside_range (4, VR) will return 1.

          This is counter-intuitive in a strict sense, but the callers
          currently expect this.  They are calling the function
          merely to determine whether VR->MIN <= VAL <= VR->MAX.  The
          callers are applying the VR_RANGE/VR_ANTI_RANGE semantics
          themselves.

          This also applies to value_ranges_intersect_p and
          range_includes_zero_p.  The semantics of VR_RANGE and
          VR_ANTI_RANGE should be encoded here, but that also means
          adapting the users of these functions to the new semantics.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

static inline int
value_inside_range (tree val, value_range_t * vr)
{
  int cmp1, cmp2;

  cmp1 = operand_less_p (val, vr->min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return 0;

  cmp2 = operand_less_p (vr->max, val);
  if (cmp2 == -2)
    return -2;

  return !cmp2;
}

/* Return true if value ranges VR0 and VR1 have a non-empty
   intersection.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

static inline bool
value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
{
  /* The value ranges do not intersect if the maximum of the first range is
     less than the minimum of the second range or vice versa.
     When those relations are unknown, we can't do any better.  */
  if (operand_less_p (vr0->max, vr1->min) != 0)
    return false;
  if (operand_less_p (vr1->max, vr0->min) != 0)
    return false;
  return true;
}

/* Return true if VR includes the value zero, false otherwise.  FIXME,
   currently this will return false for an anti-range like ~[-4, 3].
   This will be wrong when the semantics of value_inside_range are
   modified (currently the users of this function expect these
   semantics).  */

static inline bool
range_includes_zero_p (value_range_t *vr)
{
  tree zero;

  gcc_assert (vr->type != VR_UNDEFINED
              && vr->type != VR_VARYING
              && !symbolic_range_p (vr));

  zero = build_int_cst (TREE_TYPE (vr->min), 0);
  return (value_inside_range (zero, vr) == 1);
}
/* Return true if T, an SSA_NAME, is known to be nonnegative.  Return
   false otherwise or if no value range information is available.  */

bool
ssa_name_nonnegative_p (const_tree t)
{
  value_range_t *vr = get_value_range (t);

  if (INTEGRAL_TYPE_P (t)
      && TYPE_UNSIGNED (t))
    return true;

  if (!vr)
    return false;

  /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
     which would return a useful value should be encoded as a VR_RANGE.  */
  if (vr->type == VR_RANGE)
    {
      int result = compare_values (vr->min, integer_zero_node);

      return (result == 0 || result == 1);
    }

  return false;
}

/* If OP has a value range with a single constant value return that,
   otherwise return NULL_TREE.  This returns OP itself if OP is a
   constant.  */

static tree
op_with_constant_singleton_value_range (tree op)
{
  value_range_t *vr;

  if (is_gimple_min_invariant (op))
    return op;

  if (TREE_CODE (op) != SSA_NAME)
    return NULL_TREE;

  vr = get_value_range (op);
  if (vr->type == VR_RANGE
      && operand_equal_p (vr->min, vr->max, 0)
      && is_gimple_min_invariant (vr->min))
    return vr->min;

  return NULL_TREE;
}
/* Extract value range information from an ASSERT_EXPR EXPR and store
   it in *VR_P.  */

static void
extract_range_from_assert (value_range_t *vr_p, tree expr)
{
  tree var, cond, limit, min, max, type;
  value_range_t *var_vr, *limit_vr;
  enum tree_code cond_code;

  var = ASSERT_EXPR_VAR (expr);
  cond = ASSERT_EXPR_COND (expr);

  gcc_assert (COMPARISON_CLASS_P (cond));

  /* Find VAR in the ASSERT_EXPR conditional.  */
  if (var == TREE_OPERAND (cond, 0)
      || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
      || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
    {
      /* If the predicate is of the form VAR COMP LIMIT, then we just
         take LIMIT from the RHS and use the same comparison code.  */
      cond_code = TREE_CODE (cond);
      limit = TREE_OPERAND (cond, 1);
      cond = TREE_OPERAND (cond, 0);
    }
  else
    {
      /* If the predicate is of the form LIMIT COMP VAR, then we need
         to flip around the comparison code to create the proper range
         for VAR.  */
      cond_code = swap_tree_comparison (TREE_CODE (cond));
      limit = TREE_OPERAND (cond, 0);
      cond = TREE_OPERAND (cond, 1);
    }

  limit = avoid_overflow_infinity (limit);

  type = TREE_TYPE (limit);
  gcc_assert (limit != var);

  /* For pointer arithmetic, we only keep track of pointer equality
     and inequality.  */
  if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
    {
      set_value_range_to_varying (vr_p);
      return;
    }

  /* If LIMIT is another SSA name and LIMIT has a range of its own,
     try to use LIMIT's range to avoid creating symbolic ranges
     unnecessarily.  */
  limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;

  /* LIMIT's range is only interesting if it has any useful information.  */
  if (limit_vr
      && (limit_vr->type == VR_UNDEFINED
          || limit_vr->type == VR_VARYING
          || symbolic_range_p (limit_vr)))
    limit_vr = NULL;

  /* Initially, the new range has the same set of equivalences of
     VAR's range.  This will be revised before returning the final
     value.  Since assertions may be chained via mutually exclusive
     predicates, we will need to trim the set of equivalences before
     we are done.  */
  gcc_assert (vr_p->equiv == NULL);
  add_equivalence (&vr_p->equiv, var);

  /* Extract a new range based on the asserted comparison for VAR and
     LIMIT's value range.  Notice that if LIMIT has an anti-range, we
     will only use it for equality comparisons (EQ_EXPR).  For any
     other kind of assertion, we cannot derive a range from LIMIT's
     anti-range that can be used to describe the new range.  For
     instance, ASSERT_EXPR <x_2, x_2 <= b_4>.  If b_4 is ~[2, 10],
     then b_4 takes on the ranges [-INF, 1] and [11, +INF].  There is
     no single range for x_2 that could describe LE_EXPR, so we might
     as well build the range [b_4, +INF] for it.
     One special case we handle is extracting a range from a
     range test encoded as (unsigned)var + CST <= limit.  */
  if (TREE_CODE (cond) == NOP_EXPR
      || TREE_CODE (cond) == PLUS_EXPR)
    {
      if (TREE_CODE (cond) == PLUS_EXPR)
        {
          min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
                             TREE_OPERAND (cond, 1));
          max = int_const_binop (PLUS_EXPR, limit, min, 0);
          cond = TREE_OPERAND (cond, 0);
        }
      else
        {
          min = build_int_cst (TREE_TYPE (var), 0);
          max = limit;
        }

      /* Make sure to not set TREE_OVERFLOW on the final type
         conversion.  We are willingly interpreting large positive
         unsigned values as negative signed values here.  */
      min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
                                   0, false);
      max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
                                   0, false);

      /* We can transform a max, min range to an anti-range or
         vice-versa.  Use set_and_canonicalize_value_range which does
         this, if needed.  */
      if (cond_code == LE_EXPR)
        set_and_canonicalize_value_range (vr_p, VR_RANGE,
                                          min, max, vr_p->equiv);
      else if (cond_code == GT_EXPR)
        set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
                                          min, max, vr_p->equiv);
      else
        gcc_unreachable ();
    }
  else if (cond_code == EQ_EXPR)
    {
      enum value_range_type range_type;

      if (limit_vr)
        {
          range_type = limit_vr->type;
          min = limit_vr->min;
          max = limit_vr->max;
        }
      else
        {
          range_type = VR_RANGE;
          min = limit;
          max = limit;
        }

      set_value_range (vr_p, range_type, min, max, vr_p->equiv);

      /* When asserting the equality VAR == LIMIT and LIMIT is another
         SSA name, the new range will also inherit the equivalence set
         from LIMIT.  */
      if (TREE_CODE (limit) == SSA_NAME)
        add_equivalence (&vr_p->equiv, limit);
    }
  else if (cond_code == NE_EXPR)
    {
      /* As described above, when LIMIT's range is an anti-range and
         this assertion is an inequality (NE_EXPR), then we cannot
         derive anything from the anti-range.  For instance, if
         LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
         not imply that VAR's range is [0, 0].  So, in the case of
         anti-ranges, we just assert the inequality using LIMIT and
         not its anti-range.

         If LIMIT_VR is a range, we can only use it to build a new
         anti-range if LIMIT_VR is a single-valued range.  For
         instance, if LIMIT_VR is [0, 1], the predicate
         VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
         Rather, it means that for value 0 VAR should be ~[0, 0]
         and for value 1, VAR should be ~[1, 1].  We cannot
         represent these ranges.

         The only situation in which we can build a valid
         anti-range is when LIMIT_VR is a single-valued range
         (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX).  In that case,
         build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX].  */
      if (limit_vr
          && limit_vr->type == VR_RANGE
          && compare_values (limit_vr->min, limit_vr->max) == 0)
        {
          min = limit_vr->min;
          max = limit_vr->max;
        }
      else
        {
          /* In any other case, we cannot use LIMIT's range to build a
             valid anti-range.  */
          min = max = limit;
        }

      /* If MIN and MAX cover the whole range for their type, then
         just use the original LIMIT.  */
      if (INTEGRAL_TYPE_P (type)
          && vrp_val_is_min (min)
          && vrp_val_is_max (max))
        min = max = limit;

      set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
    }
  else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      min = TYPE_MIN_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
        max = limit;
      else
        {
          /* If LIMIT_VR is of the form [N1, N2], we need to build the
             range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
             LT_EXPR.  */
          max = limit_vr->max;
        }

      /* If the maximum value forces us to be out of bounds, simply punt.
         It would be pointless to try and do anything more since this
         all should be optimized away above us.  */
      if ((cond_code == LT_EXPR
           && compare_values (max, min) == 0)
          || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
        set_value_range_to_varying (vr_p);
      else
        {
          /* For LT_EXPR, we create the range [MIN, MAX - 1].  */
          if (cond_code == LT_EXPR)
            {
              tree one = build_int_cst (type, 1);
              max = fold_build2 (MINUS_EXPR, type, max, one);
              if (EXPR_P (max))
                TREE_NO_WARNING (max) = 1;
            }

          set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
        }
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      max = TYPE_MAX_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
        min = limit;
      else
        {
          /* If LIMIT_VR is of the form [N1, N2], we need to build the
             range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
             GT_EXPR.  */
          min = limit_vr->min;
        }

      /* If the minimum value forces us to be out of bounds, simply punt.
         It would be pointless to try and do anything more since this
         all should be optimized away above us.  */
      if ((cond_code == GT_EXPR
           && compare_values (min, max) == 0)
          || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
        set_value_range_to_varying (vr_p);
      else
        {
          /* For GT_EXPR, we create the range [MIN + 1, MAX].  */
          if (cond_code == GT_EXPR)
            {
              tree one = build_int_cst (type, 1);
              min = fold_build2 (PLUS_EXPR, type, min, one);
              if (EXPR_P (min))
                TREE_NO_WARNING (min) = 1;
            }

          set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
        }
    }
  else
    gcc_unreachable ();
  /* If VAR already had a known range, it may happen that the new
     range we have computed and VAR's range are not compatible.  For
     instance,

        if (p_5 == NULL)
          p_6 = ASSERT_EXPR <p_5, p_5 == NULL>;
          x_7 = p_6->fld;
          p_8 = ASSERT_EXPR <p_6, p_6 != NULL>;

     While the above comes from a faulty program, it will cause an ICE
     later because p_8 and p_6 will have incompatible ranges and at
     the same time will be considered equivalent.  A similar situation
     would arise from

        if (i_5 > 10)
          i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
          if (i_5 < 5)
            i_7 = ASSERT_EXPR <i_6, i_6 < 5>;

     Again i_6 and i_7 will have incompatible ranges.  It would be
     pointless to try and do anything with i_7's range because
     anything dominated by 'if (i_5 < 5)' will be optimized away.
     Note, due to the way in which simulation proceeds, the statement
     i_7 = ASSERT_EXPR <...> would never be visited because the
     conditional 'if (i_5 < 5)' always evaluates to false.  However,
     this extra check does not hurt and may protect against future
     changes to VRP that may get into a situation similar to the
     NULL pointer dereference example.

     Note that these compatibility tests are only needed when dealing
     with ranges or a mix of range and anti-range.  If VAR_VR and VR_P
     are both anti-ranges, they will always be compatible, because two
     anti-ranges will always have a non-empty intersection.  */

  var_vr = get_value_range (var);

  /* We may need to make adjustments when VR_P and VAR_VR are numeric
     ranges or anti-ranges.  */
  if (vr_p->type == VR_VARYING
      || vr_p->type == VR_UNDEFINED
      || var_vr->type == VR_VARYING
      || var_vr->type == VR_UNDEFINED
      || symbolic_range_p (vr_p)
      || symbolic_range_p (var_vr))
    return;

  if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
    {
      /* If the two ranges have a non-empty intersection, we can
         refine the resulting range.  Since the assert expression
         creates an equivalency and at the same time it asserts a
         predicate, we can take the intersection of the two ranges to
         get better precision.  */
      if (value_ranges_intersect_p (var_vr, vr_p))
        {
          /* Use the larger of the two minimums.  */
          if (compare_values (vr_p->min, var_vr->min) == -1)
            min = var_vr->min;
          else
            min = vr_p->min;

          /* Use the smaller of the two maximums.  */
          if (compare_values (vr_p->max, var_vr->max) == 1)
            max = var_vr->max;
          else
            max = vr_p->max;

          set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
        }
      else
        {
          /* The two ranges do not intersect, set the new range to
             VARYING, because we will not be able to do anything
             meaningful with it.  */
          set_value_range_to_varying (vr_p);
        }
    }
  else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
           || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
    {
      /* A range and an anti-range will cancel each other only if
         their ends are the same.  For instance, in the example above,
         p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
         so VR_P should be set to VR_VARYING.  */
      if (compare_values (var_vr->min, vr_p->min) == 0
          && compare_values (var_vr->max, vr_p->max) == 0)
        set_value_range_to_varying (vr_p);
      else
        {
          tree min, max, anti_min, anti_max, real_min, real_max;
          int cmp;

          /* We want to compute the logical AND of the two ranges;
             there are three cases to consider.

             1. The VR_ANTI_RANGE range is completely within the
                VR_RANGE and the endpoints of the ranges are
                different.  In that case the resulting range
                should be whichever range is more precise.
                Typically that will be the VR_RANGE.

             2. The VR_ANTI_RANGE is completely disjoint from
                the VR_RANGE.  In this case the resulting range
                should be the VR_RANGE.

             3. There is some overlap between the VR_ANTI_RANGE
                and the VR_RANGE.

                3a. If the high limit of the VR_ANTI_RANGE resides
                    within the VR_RANGE, then the result is a new
                    VR_RANGE starting at the high limit of the
                    VR_ANTI_RANGE + 1 and extending to the
                    high limit of the original VR_RANGE.

                3b. If the low limit of the VR_ANTI_RANGE resides
                    within the VR_RANGE, then the result is a new
                    VR_RANGE starting at the low limit of the original
                    VR_RANGE and extending to the low limit of the
                    VR_ANTI_RANGE - 1.  */
          if (vr_p->type == VR_ANTI_RANGE)
            {
              anti_min = vr_p->min;
              anti_max = vr_p->max;
              real_min = var_vr->min;
              real_max = var_vr->max;
            }
          else
            {
              anti_min = var_vr->min;
              anti_max = var_vr->max;
              real_min = vr_p->min;
              real_max = vr_p->max;
            }

          /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
             not including any endpoints.  */
          if (compare_values (anti_max, real_max) == -1
              && compare_values (anti_min, real_min) == 1)
            {
              /* If the range is covering the whole valid range of
                 the type keep the anti-range.  */
              if (!vrp_val_is_min (real_min)
                  || !vrp_val_is_max (real_max))
                set_value_range (vr_p, VR_RANGE, real_min,
                                 real_max, vr_p->equiv);
            }
          /* Case 2, VR_ANTI_RANGE completely disjoint from
             VR_RANGE.  */
          else if (compare_values (anti_min, real_max) == 1
                   || compare_values (anti_max, real_min) == -1)
            {
              set_value_range (vr_p, VR_RANGE, real_min,
                               real_max, vr_p->equiv);
            }
          /* Case 3a, the anti-range extends into the low
             part of the real range.  Thus creating a new
             low bound for the real range.  */
          else if (((cmp = compare_values (anti_max, real_min)) == 1
                    || cmp == 0)
                   && compare_values (anti_max, real_max) == -1)
            {
              gcc_assert (!is_positive_overflow_infinity (anti_max));
              if (needs_overflow_infinity (TREE_TYPE (anti_max))
                  && vrp_val_is_max (anti_max))
                {
                  if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
                    {
                      set_value_range_to_varying (vr_p);
                      return;
                    }
                  min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
                }
              else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
                min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
                                   anti_max,
                                   build_int_cst (TREE_TYPE (var_vr->min), 1));
              else
                min = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
                                   anti_max, size_int (1));
              max = real_max;
              set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
            }
          /* Case 3b, the anti-range extends into the high
             part of the real range.  Thus creating a new
             higher bound for the real range.  */
          else if (compare_values (anti_min, real_min) == 1
                   && ((cmp = compare_values (anti_min, real_max)) == -1
                       || cmp == 0))
            {
              gcc_assert (!is_negative_overflow_infinity (anti_min));
              if (needs_overflow_infinity (TREE_TYPE (anti_min))
                  && vrp_val_is_min (anti_min))
                {
                  if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
                    {
                      set_value_range_to_varying (vr_p);
                      return;
                    }
                  max = negative_overflow_infinity (TREE_TYPE (var_vr->min));
                }
              else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
                max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
                                   anti_min,
                                   build_int_cst (TREE_TYPE (var_vr->min), 1));
              else
                max = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
                                   anti_min, size_int (-1));
              min = real_min;
              set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
            }
        }
    }
}
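/* As an example of the whole routine: for the assertion
   x_4 = ASSERT_EXPR <x_3, x_3 <= 9> where x_3 already has the range
   [0, 100], the LE_EXPR branch first builds [TYPE_MIN, 9] and the
   intersection with x_3's range then narrows x_4 to [0, 9].  */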
/* Extract range information from SSA name VAR and store it in VR.  If
   VAR has an interesting range, use it.  Otherwise, create the
   range [VAR, VAR] and return it.  This is useful in situations where
   we may have conditionals testing values of VARYING names.  For
   instance,

        x_3 = y_5;
        if (x_3 > y_5)
          ...

    Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
    always false.  */

static void
extract_range_from_ssa_name (value_range_t *vr, tree var)
{
  value_range_t *var_vr = get_value_range (var);

  if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
    copy_value_range (vr, var_vr);
  else
    set_value_range (vr, VR_RANGE, var, var, NULL);

  add_equivalence (&vr->equiv, var);
}
/* Wrapper around int_const_binop.  If the operation overflows and we
   are not using wrapping arithmetic, then adjust the result to be
   -INF or +INF depending on CODE, VAL1 and VAL2.  This can return
   NULL_TREE if we need to use an overflow infinity representation but
   the type does not support it.  */

static tree
vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
{
  tree res;

  res = int_const_binop (code, val1, val2, 0);

  /* If we are using unsigned arithmetic, operate symbolically
     on -INF and +INF as int_const_binop only handles signed overflow.  */
  if (TYPE_UNSIGNED (TREE_TYPE (val1)))
    {
      int checkz = compare_values (res, val1);
      bool overflow = false;

      /* Ensure that res = val1 [+*] val2 >= val1
         or that res = val1 - val2 <= val1.  */
      if ((code == PLUS_EXPR
           && !(checkz == 1 || checkz == 0))
          || (code == MINUS_EXPR
              && !(checkz == 0 || checkz == -1)))
        {
          overflow = true;
        }
      /* Checking for multiplication overflow is done by dividing the
         output of the multiplication by the first input of the
         multiplication.  If the result of that division operation is
         not equal to the second input of the multiplication, then the
         multiplication overflowed.  */
      else if (code == MULT_EXPR && !integer_zerop (val1))
        {
          tree tmp = int_const_binop (TRUNC_DIV_EXPR,
                                      res,
                                      val1, 0);
          int check = compare_values (tmp, val2);

          if (check != 0)
            overflow = true;
        }

      if (overflow)
        {
          res = copy_node (res);
          TREE_OVERFLOW (res) = 1;
        }
    }
  else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
    /* If the signed operation wraps then int_const_binop has done
       everything we want.  */
    ;
  else if ((TREE_OVERFLOW (res)
            && !TREE_OVERFLOW (val1)
            && !TREE_OVERFLOW (val2))
           || is_overflow_infinity (val1)
           || is_overflow_infinity (val2))
    {
      /* If the operation overflowed but neither VAL1 nor VAL2 are
         overflown, return -INF or +INF depending on the operation
         and the combination of signs of the operands.  */
      int sgn1 = tree_int_cst_sgn (val1);
      int sgn2 = tree_int_cst_sgn (val2);

      if (needs_overflow_infinity (TREE_TYPE (res))
          && !supports_overflow_infinity (TREE_TYPE (res)))
        return NULL_TREE;

      /* We have to punt on adding infinities of different signs,
         since we can't tell what the sign of the result should be.
         Likewise for subtracting infinities of the same sign.  */
      if (((code == PLUS_EXPR && sgn1 != sgn2)
           || (code == MINUS_EXPR && sgn1 == sgn2))
          && is_overflow_infinity (val1)
          && is_overflow_infinity (val2))
        return NULL_TREE;

      /* Don't try to handle division or shifting of infinities.  */
      if ((code == TRUNC_DIV_EXPR
           || code == FLOOR_DIV_EXPR
           || code == CEIL_DIV_EXPR
           || code == EXACT_DIV_EXPR
           || code == ROUND_DIV_EXPR
           || code == RSHIFT_EXPR)
          && (is_overflow_infinity (val1)
              || is_overflow_infinity (val2)))
        return NULL_TREE;

      /* Notice that we only need to handle the restricted set of
         operations handled by extract_range_from_binary_expr.
         Among them, only multiplication, addition and subtraction
         can yield overflow without overflown operands because we
         are working with integral types only... except in the
         case VAL1 = -INF and VAL2 = -1 which overflows to +INF
         for division too.  */

      /* For multiplication, the sign of the overflow is given
         by the comparison of the signs of the operands.  */
      if ((code == MULT_EXPR && sgn1 == sgn2)
          /* For addition, the operands must be of the same sign
             to yield an overflow.  Its sign is therefore that
             of one of the operands, for example the first.  For
             infinite operands X + -INF is negative, not positive.  */
          || (code == PLUS_EXPR
              && (sgn1 >= 0
                  ? !is_negative_overflow_infinity (val2)
                  : is_positive_overflow_infinity (val2)))
          /* For subtraction, non-infinite operands must be of
             different signs to yield an overflow.  Its sign is
             therefore that of the first operand or the opposite of
             that of the second operand.  A first operand of 0 counts
             as positive here, for the corner case 0 - (-INF), which
             overflows, but must yield +INF.  For infinite operands 0
             - INF is negative, not positive.  */
          || (code == MINUS_EXPR
              && (sgn1 >= 0
                  ? !is_positive_overflow_infinity (val2)
                  : is_negative_overflow_infinity (val2)))
          /* We only get in here with positive shift count, so the
             overflow direction is the same as the sign of val1.
             Actually rshift does not overflow at all, but we only
             handle the case of shifting overflowed -INF and +INF.  */
          || (code == RSHIFT_EXPR
              && sgn1 >= 0)
          /* For division, the only case is -INF / -1 = +INF.  */
          || code == TRUNC_DIV_EXPR
          || code == FLOOR_DIV_EXPR
          || code == CEIL_DIV_EXPR
          || code == EXACT_DIV_EXPR
          || code == ROUND_DIV_EXPR)
        return (needs_overflow_infinity (TREE_TYPE (res))
                ? positive_overflow_infinity (TREE_TYPE (res))
                : TYPE_MAX_VALUE (TREE_TYPE (res)));
      else
        return (needs_overflow_infinity (TREE_TYPE (res))
                ? negative_overflow_infinity (TREE_TYPE (res))
                : TYPE_MIN_VALUE (TREE_TYPE (res)));
    }

  return res;
}
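/* For example, with a signed 8-bit type whose overflow is undefined,
   vrp_int_const_binop (PLUS_EXPR, 100, 100) does not return the
   wrapped value; it returns a positive overflow infinity, since both
   operands are positive (or NULL_TREE if the type cannot represent an
   overflow infinity).  */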
/* For range VR compute two double_int bitmasks.  If a bit is unset in
   *MAY_BE_NONZERO, then that bit is 0 for all numbers in the range;
   otherwise it might be 0 or 1.  If a bit is set in *MUST_BE_NONZERO,
   then that bit is 1 for all numbers in the range; otherwise it might
   be 0 or 1.  */

static bool
zero_nonzero_bits_from_vr (value_range_t *vr, double_int *may_be_nonzero,
                           double_int *must_be_nonzero)
{
  if (range_int_cst_p (vr))
    {
      if (range_int_cst_singleton_p (vr))
        {
          *may_be_nonzero = tree_to_double_int (vr->min);
          *must_be_nonzero = *may_be_nonzero;
          return true;
        }
      if (tree_int_cst_sgn (vr->min) >= 0)
        {
          double_int dmin = tree_to_double_int (vr->min);
          double_int dmax = tree_to_double_int (vr->max);
          double_int xor_mask = double_int_xor (dmin, dmax);
          *may_be_nonzero = double_int_ior (dmin, dmax);
          *must_be_nonzero = double_int_and (dmin, dmax);
          if (xor_mask.high != 0)
            {
              unsigned HOST_WIDE_INT mask
                = ((unsigned HOST_WIDE_INT) 1
                   << floor_log2 (xor_mask.high)) - 1;
              may_be_nonzero->low = ALL_ONES;
              may_be_nonzero->high |= mask;
              must_be_nonzero->low = 0;
              must_be_nonzero->high &= ~mask;
            }
          else if (xor_mask.low != 0)
            {
              unsigned HOST_WIDE_INT mask
                = ((unsigned HOST_WIDE_INT) 1
                   << floor_log2 (xor_mask.low)) - 1;
              may_be_nonzero->low |= mask;
              must_be_nonzero->low &= ~mask;
            }
          return true;
        }
    }
  may_be_nonzero->low = ALL_ONES;
  may_be_nonzero->high = ALL_ONES;
  must_be_nonzero->low = 0;
  must_be_nonzero->high = 0;
  return false;
}
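
/* Illustrative sketch (not part of GCC), kept under "#if 0" so it does
   not affect the build: the same may-be/must-be-nonzero computation over
   a plain unsigned interval [lo, hi].  Bits above the highest bit in
   which the two bounds differ are shared by every value in the interval;
   the bits below it can take either value somewhere in the interval, so
   they are "may be nonzero" but not "must be nonzero".  For example,
   [lo, hi] = [0x12, 0x17] yields may == 0x17 and must == 0x10.  All
   identifiers are hypothetical.  */
#if 0
static void
demo_zero_nonzero_bits (unsigned long long lo, unsigned long long hi,
                        unsigned long long *may_be_nonzero,
                        unsigned long long *must_be_nonzero)
{
  unsigned long long xor_mask = lo ^ hi;

  *may_be_nonzero = lo | hi;
  *must_be_nonzero = lo & hi;
  if (xor_mask != 0)
    {
      unsigned long long mask;
      int msb = 63;

      /* Find the most significant differing bit; everything below it
         is unknown across the interval.  */
      while (!(xor_mask & (1ULL << msb)))
        msb--;
      mask = (1ULL << msb) - 1;
      *may_be_nonzero |= mask;
      *must_be_nonzero &= ~mask;
    }
}
#endif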
/* Extract range information from a binary expression EXPR based on
   the ranges of each of its operands and the expression code.  */

static void
extract_range_from_binary_expr (value_range_t *vr,
                                enum tree_code code,
                                tree expr_type, tree op0, tree op1)
{
  enum value_range_type type;
  tree min, max;
  int cmp;
  value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
  value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUTH_AND_EXPR
      && code != TRUTH_OR_EXPR)
    {
      /* We can still do constant propagation here.  */
      tree const_op0 = op_with_constant_singleton_value_range (op0);
      tree const_op1 = op_with_constant_singleton_value_range (op1);
      if (const_op0 || const_op1)
        {
          tree tem = fold_binary (code, expr_type,
                                  const_op0 ? const_op0 : op0,
                                  const_op1 ? const_op1 : op1);
          if (tem
              && is_gimple_min_invariant (tem)
              && !is_overflow_infinity (tem))
            {
              set_value_range (vr, VR_RANGE, tem, tem, NULL);
              return;
            }
        }
      set_value_range_to_varying (vr);
      return;
    }

  /* Get value ranges for each operand.  For constant operands, create
     a new value range with the operand to simplify processing.  */
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *(get_value_range (op0));
  else if (is_gimple_min_invariant (op0))
    set_value_range_to_value (&vr0, op0, NULL);
  else
    set_value_range_to_varying (&vr0);

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *(get_value_range (op1));
  else if (is_gimple_min_invariant (op1))
    set_value_range_to_value (&vr1, op1, NULL);
  else
    set_value_range_to_varying (&vr1);

  /* If either range is UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED || vr1.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.type;

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_AND_EXPR
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or symbolic range.  Similarly for
     divisions.  TODO, we may be able to derive anti-ranges in
     some cases.  */
  if (code != BIT_AND_EXPR
      && code != TRUTH_AND_EXPR
      && code != TRUTH_OR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && (vr0.type == VR_VARYING
          || vr1.type == VR_VARYING
          || vr0.type != vr1.type
          || symbolic_range_p (&vr0)
          || symbolic_range_p (&vr1)))
    {
      set_value_range_to_varying (vr);
      return;
    }
2250 /* Now evaluate the expression to determine the new range. */
2251 if (POINTER_TYPE_P (expr_type
)
2252 || POINTER_TYPE_P (TREE_TYPE (op0
))
2253 || POINTER_TYPE_P (TREE_TYPE (op1
)))
2255 if (code
== MIN_EXPR
|| code
== MAX_EXPR
)
2257 /* For MIN/MAX expressions with pointers, we only care about
2258 nullness, if both are non null, then the result is nonnull.
2259 If both are null, then the result is null. Otherwise they
2261 if (range_is_nonnull (&vr0
) && range_is_nonnull (&vr1
))
2262 set_value_range_to_nonnull (vr
, expr_type
);
2263 else if (range_is_null (&vr0
) && range_is_null (&vr1
))
2264 set_value_range_to_null (vr
, expr_type
);
2266 set_value_range_to_varying (vr
);
2270 if (code
== POINTER_PLUS_EXPR
)
2272 /* For pointer types, we are really only interested in asserting
2273 whether the expression evaluates to non-NULL. */
2274 if (range_is_nonnull (&vr0
) || range_is_nonnull (&vr1
))
2275 set_value_range_to_nonnull (vr
, expr_type
);
2276 else if (range_is_null (&vr0
) && range_is_null (&vr1
))
2277 set_value_range_to_null (vr
, expr_type
);
2279 set_value_range_to_varying (vr
);
2281 else if (code
== BIT_AND_EXPR
)
2283 /* For pointer types, we are really only interested in asserting
2284 whether the expression evaluates to non-NULL. */
2285 if (range_is_nonnull (&vr0
) && range_is_nonnull (&vr1
))
2286 set_value_range_to_nonnull (vr
, expr_type
);
2287 else if (range_is_null (&vr0
) || range_is_null (&vr1
))
2288 set_value_range_to_null (vr
, expr_type
);
2290 set_value_range_to_varying (vr
);
2298 /* For integer ranges, apply the operation to each end of the
2299 range and see what we end up with. */
2300 if (code
== TRUTH_AND_EXPR
2301 || code
== TRUTH_OR_EXPR
)
2303 /* If one of the operands is zero, we know that the whole
2304 expression evaluates zero. */
2305 if (code
== TRUTH_AND_EXPR
2306 && ((vr0
.type
== VR_RANGE
2307 && integer_zerop (vr0
.min
)
2308 && integer_zerop (vr0
.max
))
2309 || (vr1
.type
== VR_RANGE
2310 && integer_zerop (vr1
.min
)
2311 && integer_zerop (vr1
.max
))))
2314 min
= max
= build_int_cst (expr_type
, 0);
2316 /* If one of the operands is one, we know that the whole
2317 expression evaluates one. */
2318 else if (code
== TRUTH_OR_EXPR
2319 && ((vr0
.type
== VR_RANGE
2320 && integer_onep (vr0
.min
)
2321 && integer_onep (vr0
.max
))
2322 || (vr1
.type
== VR_RANGE
2323 && integer_onep (vr1
.min
)
2324 && integer_onep (vr1
.max
))))
2327 min
= max
= build_int_cst (expr_type
, 1);
2329 else if (vr0
.type
!= VR_VARYING
2330 && vr1
.type
!= VR_VARYING
2331 && vr0
.type
== vr1
.type
2332 && !symbolic_range_p (&vr0
)
2333 && !overflow_infinity_range_p (&vr0
)
2334 && !symbolic_range_p (&vr1
)
2335 && !overflow_infinity_range_p (&vr1
))
2337 /* Boolean expressions cannot be folded with int_const_binop. */
2338 min
= fold_binary (code
, expr_type
, vr0
.min
, vr1
.min
);
2339 max
= fold_binary (code
, expr_type
, vr0
.max
, vr1
.max
);
2343 /* The result of a TRUTH_*_EXPR is always true or false. */
2344 set_value_range_to_truthvalue (vr
, expr_type
);
2348 else if (code
== PLUS_EXPR
2350 || code
== MAX_EXPR
)
2352 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
2353 VR_VARYING. It would take more effort to compute a precise
2354 range for such a case. For example, if we have op0 == 1 and
2355 op1 == -1 with their ranges both being ~[0,0], we would have
2356 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
2357 Note that we are guaranteed to have vr0.type == vr1.type at
2359 if (code
== PLUS_EXPR
&& vr0
.type
== VR_ANTI_RANGE
)
2361 set_value_range_to_varying (vr
);
2365 /* For operations that make the resulting range directly
2366 proportional to the original ranges, apply the operation to
2367 the same end of each range. */
2368 min
= vrp_int_const_binop (code
, vr0
.min
, vr1
.min
);
2369 max
= vrp_int_const_binop (code
, vr0
.max
, vr1
.max
);
2371 /* If both additions overflowed the range kind is still correct.
2372 This happens regularly with subtracting something in unsigned
2374 ??? See PR30318 for all the cases we do not handle. */
2375 if (code
== PLUS_EXPR
2376 && (TREE_OVERFLOW (min
) && !is_overflow_infinity (min
))
2377 && (TREE_OVERFLOW (max
) && !is_overflow_infinity (max
)))
2379 min
= build_int_cst_wide (TREE_TYPE (min
),
2380 TREE_INT_CST_LOW (min
),
2381 TREE_INT_CST_HIGH (min
));
2382 max
= build_int_cst_wide (TREE_TYPE (max
),
2383 TREE_INT_CST_LOW (max
),
2384 TREE_INT_CST_HIGH (max
));
2387 else if (code
== MULT_EXPR
2388 || code
== TRUNC_DIV_EXPR
2389 || code
== FLOOR_DIV_EXPR
2390 || code
== CEIL_DIV_EXPR
2391 || code
== EXACT_DIV_EXPR
2392 || code
== ROUND_DIV_EXPR
2393 || code
== RSHIFT_EXPR
)
2399 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2400 drop to VR_VARYING. It would take more effort to compute a
2401 precise range for such a case. For example, if we have
2402 op0 == 65536 and op1 == 65536 with their ranges both being
2403 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2404 we cannot claim that the product is in ~[0,0]. Note that we
2405 are guaranteed to have vr0.type == vr1.type at this
2407 if (code
== MULT_EXPR
2408 && vr0
.type
== VR_ANTI_RANGE
2409 && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0
)))
2411 set_value_range_to_varying (vr
);
2415 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2416 then drop to VR_VARYING. Outside of this range we get undefined
2417 behavior from the shift operation. We cannot even trust
2418 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2419 shifts, and the operation at the tree level may be widened. */
2420 if (code
== RSHIFT_EXPR
)
2422 if (vr1
.type
== VR_ANTI_RANGE
2423 || !vrp_expr_computes_nonnegative (op1
, &sop
)
2425 (build_int_cst (TREE_TYPE (vr1
.max
),
2426 TYPE_PRECISION (expr_type
) - 1),
2429 set_value_range_to_varying (vr
);
2434 else if ((code
== TRUNC_DIV_EXPR
2435 || code
== FLOOR_DIV_EXPR
2436 || code
== CEIL_DIV_EXPR
2437 || code
== EXACT_DIV_EXPR
2438 || code
== ROUND_DIV_EXPR
)
2439 && (vr0
.type
!= VR_RANGE
|| symbolic_range_p (&vr0
)))
2441 /* For division, if op1 has VR_RANGE but op0 does not, something
2442 can be deduced just from that range. Say [min, max] / [4, max]
2443 gives [min / 4, max / 4] range. */
2444 if (vr1
.type
== VR_RANGE
2445 && !symbolic_range_p (&vr1
)
2446 && !range_includes_zero_p (&vr1
))
2448 vr0
.type
= type
= VR_RANGE
;
2449 vr0
.min
= vrp_val_min (TREE_TYPE (op0
));
2450 vr0
.max
= vrp_val_max (TREE_TYPE (op1
));
2454 set_value_range_to_varying (vr
);
2459 /* For divisions, if flag_non_call_exceptions is true, we must
2460 not eliminate a division by zero. */
2461 if ((code
== TRUNC_DIV_EXPR
2462 || code
== FLOOR_DIV_EXPR
2463 || code
== CEIL_DIV_EXPR
2464 || code
== EXACT_DIV_EXPR
2465 || code
== ROUND_DIV_EXPR
)
2466 && cfun
->can_throw_non_call_exceptions
2467 && (vr1
.type
!= VR_RANGE
2468 || symbolic_range_p (&vr1
)
2469 || range_includes_zero_p (&vr1
)))
2471 set_value_range_to_varying (vr
);
2475 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2476 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2478 if ((code
== TRUNC_DIV_EXPR
2479 || code
== FLOOR_DIV_EXPR
2480 || code
== CEIL_DIV_EXPR
2481 || code
== EXACT_DIV_EXPR
2482 || code
== ROUND_DIV_EXPR
)
2483 && vr0
.type
== VR_RANGE
2484 && (vr1
.type
!= VR_RANGE
2485 || symbolic_range_p (&vr1
)
2486 || range_includes_zero_p (&vr1
)))
2488 tree zero
= build_int_cst (TREE_TYPE (vr0
.min
), 0);
2494 if (vrp_expr_computes_nonnegative (op1
, &sop
) && !sop
)
2496 /* For unsigned division or when divisor is known
2497 to be non-negative, the range has to cover
2498 all numbers from 0 to max for positive max
2499 and all numbers from min to 0 for negative min. */
2500 cmp
= compare_values (vr0
.max
, zero
);
2503 else if (cmp
== 0 || cmp
== 1)
2507 cmp
= compare_values (vr0
.min
, zero
);
2510 else if (cmp
== 0 || cmp
== -1)
2517 /* Otherwise the range is -max .. max or min .. -min
2518 depending on which bound is bigger in absolute value,
2519 as the division can change the sign. */
2520 abs_extent_range (vr
, vr0
.min
, vr0
.max
);
2523 if (type
== VR_VARYING
)
2525 set_value_range_to_varying (vr
);
2530 /* Multiplications and divisions are a bit tricky to handle,
2531 depending on the mix of signs we have in the two ranges, we
2532 need to operate on different values to get the minimum and
2533 maximum values for the new range. One approach is to figure
2534 out all the variations of range combinations and do the
2537 However, this involves several calls to compare_values and it
2538 is pretty convoluted. It's simpler to do the 4 operations
2539 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP
2540 MAX1) and then figure the smallest and largest values to form
2544 gcc_assert ((vr0
.type
== VR_RANGE
2545 || (code
== MULT_EXPR
&& vr0
.type
== VR_ANTI_RANGE
))
2546 && vr0
.type
== vr1
.type
);
2548 /* Compute the 4 cross operations. */
2550 val
[0] = vrp_int_const_binop (code
, vr0
.min
, vr1
.min
);
2551 if (val
[0] == NULL_TREE
)
2554 if (vr1
.max
== vr1
.min
)
2558 val
[1] = vrp_int_const_binop (code
, vr0
.min
, vr1
.max
);
2559 if (val
[1] == NULL_TREE
)
2563 if (vr0
.max
== vr0
.min
)
2567 val
[2] = vrp_int_const_binop (code
, vr0
.max
, vr1
.min
);
2568 if (val
[2] == NULL_TREE
)
2572 if (vr0
.min
== vr0
.max
|| vr1
.min
== vr1
.max
)
2576 val
[3] = vrp_int_const_binop (code
, vr0
.max
, vr1
.max
);
2577 if (val
[3] == NULL_TREE
)
2583 set_value_range_to_varying (vr
);
2587 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2591 for (i
= 1; i
< 4; i
++)
2593 if (!is_gimple_min_invariant (min
)
2594 || (TREE_OVERFLOW (min
) && !is_overflow_infinity (min
))
2595 || !is_gimple_min_invariant (max
)
2596 || (TREE_OVERFLOW (max
) && !is_overflow_infinity (max
)))
2601 if (!is_gimple_min_invariant (val
[i
])
2602 || (TREE_OVERFLOW (val
[i
])
2603 && !is_overflow_infinity (val
[i
])))
2605 /* If we found an overflowed value, set MIN and MAX
2606 to it so that we set the resulting range to
2612 if (compare_values (val
[i
], min
) == -1)
2615 if (compare_values (val
[i
], max
) == 1)
  else if (code == TRUNC_MOD_EXPR)
    {
      bool sop = false;
      if (vr1.type != VR_RANGE
          || symbolic_range_p (&vr1)
          || range_includes_zero_p (&vr1)
          || vrp_val_is_min (vr1.min))
        {
          set_value_range_to_varying (vr);
          return;
        }
      type = VR_RANGE;
      /* Compute MAX <|vr1.min|, |vr1.max|> - 1.  */
      max = fold_unary_to_constant (ABS_EXPR, TREE_TYPE (vr1.min), vr1.min);
      if (tree_int_cst_lt (max, vr1.max))
        max = vr1.max;
      max = int_const_binop (MINUS_EXPR, max, integer_one_node, 0);
      /* If the dividend is non-negative the modulus will be
         non-negative as well.  */
      if (TYPE_UNSIGNED (TREE_TYPE (max))
          || (vrp_expr_computes_nonnegative (op0, &sop) && !sop))
        min = build_int_cst (TREE_TYPE (max), 0);
      else
        min = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (max), max);
    }
  else if (code == MINUS_EXPR)
    {
      /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to
         VR_VARYING.  It would take more effort to compute a precise
         range for such a case.  For example, if we have op0 == 1 and
         op1 == 1 with their ranges both being ~[0,0], we would have
         op0 - op1 == 0, so we cannot claim that the difference is in
         ~[0,0].  Note that we are guaranteed to have
         vr0.type == vr1.type at this point.  */
      if (vr0.type == VR_ANTI_RANGE)
        {
          set_value_range_to_varying (vr);
          return;
        }

      /* For MINUS_EXPR, apply the operation to the opposite ends of
         each range.  */
      min = vrp_int_const_binop (code, vr0.min, vr1.max);
      max = vrp_int_const_binop (code, vr0.max, vr1.min);
    }
2666 else if (code
== BIT_AND_EXPR
|| code
== BIT_IOR_EXPR
)
2668 bool vr0_int_cst_singleton_p
, vr1_int_cst_singleton_p
;
2669 bool int_cst_range0
, int_cst_range1
;
2670 double_int may_be_nonzero0
, may_be_nonzero1
;
2671 double_int must_be_nonzero0
, must_be_nonzero1
;
2673 vr0_int_cst_singleton_p
= range_int_cst_singleton_p (&vr0
);
2674 vr1_int_cst_singleton_p
= range_int_cst_singleton_p (&vr1
);
2675 int_cst_range0
= zero_nonzero_bits_from_vr (&vr0
, &may_be_nonzero0
,
2677 int_cst_range1
= zero_nonzero_bits_from_vr (&vr1
, &may_be_nonzero1
,
2681 if (vr0_int_cst_singleton_p
&& vr1_int_cst_singleton_p
)
2682 min
= max
= int_const_binop (code
, vr0
.max
, vr1
.max
, 0);
2683 else if (!int_cst_range0
&& !int_cst_range1
)
2685 set_value_range_to_varying (vr
);
2688 else if (code
== BIT_AND_EXPR
)
2690 min
= double_int_to_tree (expr_type
,
2691 double_int_and (must_be_nonzero0
,
2693 max
= double_int_to_tree (expr_type
,
2694 double_int_and (may_be_nonzero0
,
2696 if (TREE_OVERFLOW (min
) || tree_int_cst_sgn (min
) < 0)
2698 if (TREE_OVERFLOW (max
) || tree_int_cst_sgn (max
) < 0)
2700 if (int_cst_range0
&& tree_int_cst_sgn (vr0
.min
) >= 0)
2702 if (min
== NULL_TREE
)
2703 min
= build_int_cst (expr_type
, 0);
2704 if (max
== NULL_TREE
|| tree_int_cst_lt (vr0
.max
, max
))
2707 if (int_cst_range1
&& tree_int_cst_sgn (vr1
.min
) >= 0)
2709 if (min
== NULL_TREE
)
2710 min
= build_int_cst (expr_type
, 0);
2711 if (max
== NULL_TREE
|| tree_int_cst_lt (vr1
.max
, max
))
2715 else if (!int_cst_range0
2717 || tree_int_cst_sgn (vr0
.min
) < 0
2718 || tree_int_cst_sgn (vr1
.min
) < 0)
2720 set_value_range_to_varying (vr
);
2725 min
= double_int_to_tree (expr_type
,
2726 double_int_ior (must_be_nonzero0
,
2728 max
= double_int_to_tree (expr_type
,
2729 double_int_ior (may_be_nonzero0
,
2731 if (TREE_OVERFLOW (min
) || tree_int_cst_sgn (min
) < 0)
2734 min
= vrp_int_const_binop (MAX_EXPR
, min
, vr0
.min
);
2735 if (TREE_OVERFLOW (max
) || tree_int_cst_sgn (max
) < 0)
2737 min
= vrp_int_const_binop (MAX_EXPR
, min
, vr1
.min
);
  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  But we do accept an overflow infinity
     representation.  */
  if (min == NULL_TREE
      || !is_gimple_min_invariant (min)
      || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
      || max == NULL_TREE
      || !is_gimple_min_invariant (max)
      || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt if:
     1) [-INF, +INF]
     2) [-INF, +-INF(OVF)]
     3) [+-INF(OVF), +INF]
     4) [+-INF(OVF), +-INF(OVF)]
     We learn nothing when we have INF and INF(OVF) on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF] without
     overflow.  */
  if ((vrp_val_is_min (min) || is_overflow_infinity (min))
      && (vrp_val_is_max (max) || is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
         then the operation caused one of them to wrap around, mark
         the new range VARYING.  */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}
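
/* Illustrative sketch (not part of GCC), kept under "#if 0" so it does
   not affect the build: the "four cross operations" idea used above for
   MULT_EXPR and the division codes, restated over plain long long
   intervals and assuming the corner products themselves do not overflow.
   The bounds of [a_lo, a_hi] * [b_lo, b_hi] are the smallest and largest
   of the four corner products, e.g. [-2, 3] * [4, 5] -> [-10, 15].  All
   identifiers are hypothetical.  */
#if 0
static void
demo_range_mult (long long a_lo, long long a_hi,
                 long long b_lo, long long b_hi,
                 long long *lo, long long *hi)
{
  long long val[4];
  int i;

  /* Compute the four corner products.  */
  val[0] = a_lo * b_lo;
  val[1] = a_lo * b_hi;
  val[2] = a_hi * b_lo;
  val[3] = a_hi * b_hi;

  /* The resulting range is spanned by their minimum and maximum.  */
  *lo = *hi = val[0];
  for (i = 1; i < 4; i++)
    {
      if (val[i] < *lo)
        *lo = val[i];
      if (val[i] > *hi)
        *hi = val[i];
    }
}
#endif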
2785 /* Extract range information from a unary expression EXPR based on
2786 the range of its operand and the expression code. */
2789 extract_range_from_unary_expr (value_range_t
*vr
, enum tree_code code
,
2790 tree type
, tree op0
)
2794 value_range_t vr0
= { VR_UNDEFINED
, NULL_TREE
, NULL_TREE
, NULL
};
2796 /* Refuse to operate on certain unary expressions for which we
2797 cannot easily determine a resulting range. */
2798 if (code
== FIX_TRUNC_EXPR
2799 || code
== FLOAT_EXPR
2800 || code
== BIT_NOT_EXPR
2801 || code
== CONJ_EXPR
)
2803 /* We can still do constant propagation here. */
2804 if ((op0
= op_with_constant_singleton_value_range (op0
)) != NULL_TREE
)
2806 tree tem
= fold_unary (code
, type
, op0
);
2808 && is_gimple_min_invariant (tem
)
2809 && !is_overflow_infinity (tem
))
2811 set_value_range (vr
, VR_RANGE
, tem
, tem
, NULL
);
2815 set_value_range_to_varying (vr
);
2819 /* Get value ranges for the operand. For constant operands, create
2820 a new value range with the operand to simplify processing. */
2821 if (TREE_CODE (op0
) == SSA_NAME
)
2822 vr0
= *(get_value_range (op0
));
2823 else if (is_gimple_min_invariant (op0
))
2824 set_value_range_to_value (&vr0
, op0
, NULL
);
2826 set_value_range_to_varying (&vr0
);
2828 /* If VR0 is UNDEFINED, so is the result. */
2829 if (vr0
.type
== VR_UNDEFINED
)
2831 set_value_range_to_undefined (vr
);
2835 /* Refuse to operate on symbolic ranges, or if neither operand is
2836 a pointer or integral type. */
2837 if ((!INTEGRAL_TYPE_P (TREE_TYPE (op0
))
2838 && !POINTER_TYPE_P (TREE_TYPE (op0
)))
2839 || (vr0
.type
!= VR_VARYING
2840 && symbolic_range_p (&vr0
)))
2842 set_value_range_to_varying (vr
);
2846 /* If the expression involves pointers, we are only interested in
2847 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2848 if (POINTER_TYPE_P (type
) || POINTER_TYPE_P (TREE_TYPE (op0
)))
2853 if (range_is_nonnull (&vr0
)
2854 || (tree_unary_nonzero_warnv_p (code
, type
, op0
, &sop
)
2856 set_value_range_to_nonnull (vr
, type
);
2857 else if (range_is_null (&vr0
))
2858 set_value_range_to_null (vr
, type
);
2860 set_value_range_to_varying (vr
);
2865 /* Handle unary expressions on integer ranges. */
2866 if (CONVERT_EXPR_CODE_P (code
)
2867 && INTEGRAL_TYPE_P (type
)
2868 && INTEGRAL_TYPE_P (TREE_TYPE (op0
)))
2870 tree inner_type
= TREE_TYPE (op0
);
2871 tree outer_type
= type
;
2873 /* If VR0 is varying and we increase the type precision, assume
2874 a full range for the following transformation. */
2875 if (vr0
.type
== VR_VARYING
2876 && TYPE_PRECISION (inner_type
) < TYPE_PRECISION (outer_type
))
2878 vr0
.type
= VR_RANGE
;
2879 vr0
.min
= TYPE_MIN_VALUE (inner_type
);
2880 vr0
.max
= TYPE_MAX_VALUE (inner_type
);
2883 /* If VR0 is a constant range or anti-range and the conversion is
2884 not truncating we can convert the min and max values and
2885 canonicalize the resulting range. Otherwise we can do the
2886 conversion if the size of the range is less than what the
2887 precision of the target type can represent and the range is
2888 not an anti-range. */
2889 if ((vr0
.type
== VR_RANGE
2890 || vr0
.type
== VR_ANTI_RANGE
)
2891 && TREE_CODE (vr0
.min
) == INTEGER_CST
2892 && TREE_CODE (vr0
.max
) == INTEGER_CST
2893 && (!is_overflow_infinity (vr0
.min
)
2894 || (vr0
.type
== VR_RANGE
2895 && TYPE_PRECISION (outer_type
) > TYPE_PRECISION (inner_type
)
2896 && needs_overflow_infinity (outer_type
)
2897 && supports_overflow_infinity (outer_type
)))
2898 && (!is_overflow_infinity (vr0
.max
)
2899 || (vr0
.type
== VR_RANGE
2900 && TYPE_PRECISION (outer_type
) > TYPE_PRECISION (inner_type
)
2901 && needs_overflow_infinity (outer_type
)
2902 && supports_overflow_infinity (outer_type
)))
2903 && (TYPE_PRECISION (outer_type
) >= TYPE_PRECISION (inner_type
)
2904 || (vr0
.type
== VR_RANGE
2905 && integer_zerop (int_const_binop (RSHIFT_EXPR
,
2906 int_const_binop (MINUS_EXPR
, vr0
.max
, vr0
.min
, 0),
2907 size_int (TYPE_PRECISION (outer_type
)), 0)))))
2909 tree new_min
, new_max
;
2910 new_min
= force_fit_type_double (outer_type
,
2911 tree_to_double_int (vr0
.min
),
2913 new_max
= force_fit_type_double (outer_type
,
2914 tree_to_double_int (vr0
.max
),
2916 if (is_overflow_infinity (vr0
.min
))
2917 new_min
= negative_overflow_infinity (outer_type
);
2918 if (is_overflow_infinity (vr0
.max
))
2919 new_max
= positive_overflow_infinity (outer_type
);
2920 set_and_canonicalize_value_range (vr
, vr0
.type
,
2921 new_min
, new_max
, NULL
);
2925 set_value_range_to_varying (vr
);
2929 /* Conversion of a VR_VARYING value to a wider type can result
2930 in a usable range. So wait until after we've handled conversions
2931 before dropping the result to VR_VARYING if we had a source
2932 operand that is VR_VARYING. */
2933 if (vr0
.type
== VR_VARYING
)
2935 set_value_range_to_varying (vr
);
2939 /* Apply the operation to each end of the range and see what we end
2941 if (code
== NEGATE_EXPR
2942 && !TYPE_UNSIGNED (type
))
2944 /* NEGATE_EXPR flips the range around. We need to treat
2945 TYPE_MIN_VALUE specially. */
2946 if (is_positive_overflow_infinity (vr0
.max
))
2947 min
= negative_overflow_infinity (type
);
2948 else if (is_negative_overflow_infinity (vr0
.max
))
2949 min
= positive_overflow_infinity (type
);
2950 else if (!vrp_val_is_min (vr0
.max
))
2951 min
= fold_unary_to_constant (code
, type
, vr0
.max
);
2952 else if (needs_overflow_infinity (type
))
2954 if (supports_overflow_infinity (type
)
2955 && !is_overflow_infinity (vr0
.min
)
2956 && !vrp_val_is_min (vr0
.min
))
2957 min
= positive_overflow_infinity (type
);
2960 set_value_range_to_varying (vr
);
2965 min
= TYPE_MIN_VALUE (type
);
2967 if (is_positive_overflow_infinity (vr0
.min
))
2968 max
= negative_overflow_infinity (type
);
2969 else if (is_negative_overflow_infinity (vr0
.min
))
2970 max
= positive_overflow_infinity (type
);
2971 else if (!vrp_val_is_min (vr0
.min
))
2972 max
= fold_unary_to_constant (code
, type
, vr0
.min
);
2973 else if (needs_overflow_infinity (type
))
2975 if (supports_overflow_infinity (type
))
2976 max
= positive_overflow_infinity (type
);
2979 set_value_range_to_varying (vr
);
2984 max
= TYPE_MIN_VALUE (type
);
2986 else if (code
== NEGATE_EXPR
2987 && TYPE_UNSIGNED (type
))
2989 if (!range_includes_zero_p (&vr0
))
2991 max
= fold_unary_to_constant (code
, type
, vr0
.min
);
2992 min
= fold_unary_to_constant (code
, type
, vr0
.max
);
2996 if (range_is_null (&vr0
))
2997 set_value_range_to_null (vr
, type
);
2999 set_value_range_to_varying (vr
);
3003 else if (code
== ABS_EXPR
3004 && !TYPE_UNSIGNED (type
))
3006 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3008 if (!TYPE_OVERFLOW_UNDEFINED (type
)
3009 && ((vr0
.type
== VR_RANGE
3010 && vrp_val_is_min (vr0
.min
))
3011 || (vr0
.type
== VR_ANTI_RANGE
3012 && !vrp_val_is_min (vr0
.min
)
3013 && !range_includes_zero_p (&vr0
))))
3015 set_value_range_to_varying (vr
);
3019 /* ABS_EXPR may flip the range around, if the original range
3020 included negative values. */
3021 if (is_overflow_infinity (vr0
.min
))
3022 min
= positive_overflow_infinity (type
);
3023 else if (!vrp_val_is_min (vr0
.min
))
3024 min
= fold_unary_to_constant (code
, type
, vr0
.min
);
3025 else if (!needs_overflow_infinity (type
))
3026 min
= TYPE_MAX_VALUE (type
);
3027 else if (supports_overflow_infinity (type
))
3028 min
= positive_overflow_infinity (type
);
3031 set_value_range_to_varying (vr
);
3035 if (is_overflow_infinity (vr0
.max
))
3036 max
= positive_overflow_infinity (type
);
3037 else if (!vrp_val_is_min (vr0
.max
))
3038 max
= fold_unary_to_constant (code
, type
, vr0
.max
);
3039 else if (!needs_overflow_infinity (type
))
3040 max
= TYPE_MAX_VALUE (type
);
3041 else if (supports_overflow_infinity (type
)
3042 /* We shouldn't generate [+INF, +INF] as set_value_range
3043 doesn't like this and ICEs. */
3044 && !is_positive_overflow_infinity (min
))
3045 max
= positive_overflow_infinity (type
);
3048 set_value_range_to_varying (vr
);
3052 cmp
= compare_values (min
, max
);
3054 /* If a VR_ANTI_RANGEs contains zero, then we have
3055 ~[-INF, min(MIN, MAX)]. */
3056 if (vr0
.type
== VR_ANTI_RANGE
)
3058 if (range_includes_zero_p (&vr0
))
3060 /* Take the lower of the two values. */
3064 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3065 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3066 flag_wrapv is set and the original anti-range doesn't include
3067 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3068 if (TYPE_OVERFLOW_WRAPS (type
))
3070 tree type_min_value
= TYPE_MIN_VALUE (type
);
3072 min
= (vr0
.min
!= type_min_value
3073 ? int_const_binop (PLUS_EXPR
, type_min_value
,
3074 integer_one_node
, 0)
3079 if (overflow_infinity_range_p (&vr0
))
3080 min
= negative_overflow_infinity (type
);
3082 min
= TYPE_MIN_VALUE (type
);
3087 /* All else has failed, so create the range [0, INF], even for
3088 flag_wrapv since TYPE_MIN_VALUE is in the original
3090 vr0
.type
= VR_RANGE
;
3091 min
= build_int_cst (type
, 0);
3092 if (needs_overflow_infinity (type
))
3094 if (supports_overflow_infinity (type
))
3095 max
= positive_overflow_infinity (type
);
3098 set_value_range_to_varying (vr
);
3103 max
= TYPE_MAX_VALUE (type
);
3107 /* If the range contains zero then we know that the minimum value in the
3108 range will be zero. */
3109 else if (range_includes_zero_p (&vr0
))
3113 min
= build_int_cst (type
, 0);
3117 /* If the range was reversed, swap MIN and MAX. */
3128 /* Otherwise, operate on each end of the range. */
3129 min
= fold_unary_to_constant (code
, type
, vr0
.min
);
3130 max
= fold_unary_to_constant (code
, type
, vr0
.max
);
3132 if (needs_overflow_infinity (type
))
3134 gcc_assert (code
!= NEGATE_EXPR
&& code
!= ABS_EXPR
);
3136 /* If both sides have overflowed, we don't know
3138 if ((is_overflow_infinity (vr0
.min
)
3139 || TREE_OVERFLOW (min
))
3140 && (is_overflow_infinity (vr0
.max
)
3141 || TREE_OVERFLOW (max
)))
3143 set_value_range_to_varying (vr
);
3147 if (is_overflow_infinity (vr0
.min
))
3149 else if (TREE_OVERFLOW (min
))
3151 if (supports_overflow_infinity (type
))
3152 min
= (tree_int_cst_sgn (min
) >= 0
3153 ? positive_overflow_infinity (TREE_TYPE (min
))
3154 : negative_overflow_infinity (TREE_TYPE (min
)));
3157 set_value_range_to_varying (vr
);
3162 if (is_overflow_infinity (vr0
.max
))
3164 else if (TREE_OVERFLOW (max
))
3166 if (supports_overflow_infinity (type
))
3167 max
= (tree_int_cst_sgn (max
) >= 0
3168 ? positive_overflow_infinity (TREE_TYPE (max
))
3169 : negative_overflow_infinity (TREE_TYPE (max
)));
3172 set_value_range_to_varying (vr
);
3179 cmp
= compare_values (min
, max
);
3180 if (cmp
== -2 || cmp
== 1)
3182 /* If the new range has its limits swapped around (MIN > MAX),
3183 then the operation caused one of them to wrap around, mark
3184 the new range VARYING. */
3185 set_value_range_to_varying (vr
);
3188 set_value_range (vr
, vr0
.type
, min
, max
, NULL
);
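
/* Illustrative sketch (not part of GCC), kept under "#if 0" so it does
   not affect the build: NEGATE_EXPR applied to an interval flips it
   around zero, so the new minimum comes from the old maximum and vice
   versa, e.g. -[3, 7] = [-7, -3].  The real code above additionally has
   to handle the most negative value and overflow infinities; this sketch
   simply assumes the bounds are not LLONG_MIN.  Identifiers are
   hypothetical.  */
#if 0
static void
demo_range_negate (long long lo, long long hi,
                   long long *new_lo, long long *new_hi)
{
  /* Negating swaps and negates the two bounds.  */
  *new_lo = -hi;
  *new_hi = -lo;
}
#endif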
/* Extract range information from a conditional expression EXPR based on
   the ranges of each of its operands and the expression code.  */

static void
extract_range_from_cond_expr (value_range_t *vr, tree expr)
{
  tree op0, op1;
  value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
  value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };

  /* Get value ranges for each operand.  For constant operands, create
     a new value range with the operand to simplify processing.  */
  op0 = COND_EXPR_THEN (expr);
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *(get_value_range (op0));
  else if (is_gimple_min_invariant (op0))
    set_value_range_to_value (&vr0, op0, NULL);
  else
    set_value_range_to_varying (&vr0);

  op1 = COND_EXPR_ELSE (expr);
  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *(get_value_range (op1));
  else if (is_gimple_min_invariant (op1))
    set_value_range_to_value (&vr1, op1, NULL);
  else
    set_value_range_to_varying (&vr1);

  /* The resulting value range is the union of the operand ranges.  */
  vrp_meet (&vr0, &vr1);
  copy_value_range (vr, &vr0);
}
/* Extract range information from a comparison expression EXPR based
   on the range of its operand and the expression code.  */

static void
extract_range_from_comparison (value_range_t *vr, enum tree_code code,
                               tree type, tree op0, tree op1)
{
  bool sop = false;
  tree val;

  val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
                                                 NULL);

  /* A disadvantage of using a special infinity as an overflow
     representation is that we lose the ability to record overflow
     when we don't have an infinity.  So we have to ignore a result
     which relies on overflow.  */

  if (val && !is_overflow_infinity (val) && !sop)
    {
      /* Since this expression was found on the RHS of an assignment,
         its type may be different from _Bool.  Convert VAL to EXPR's
         type.  */
      val = fold_convert (type, val);
      if (is_gimple_min_invariant (val))
        set_value_range_to_value (vr, val, vr->equiv);
      else
        set_value_range (vr, VR_RANGE, val, val, vr->equiv);
    }
  else
    /* The result of a comparison is always true or false.  */
    set_value_range_to_truthvalue (vr, type);
}
/* Try to derive a nonnegative or nonzero range out of STMT relying
   primarily on generic routines in fold in conjunction with range data.
   Store the result in *VR.  */

static void
extract_range_basic (value_range_t *vr, gimple stmt)
{
  bool sop = false;
  tree type = gimple_expr_type (stmt);

  if (INTEGRAL_TYPE_P (type)
      && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
    set_value_range_to_nonnegative (vr, type,
                                    sop || stmt_overflow_infinity (stmt));
  else if (vrp_stmt_computes_nonzero (stmt, &sop)
           && !sop)
    set_value_range_to_nonnull (vr, type);
  else
    set_value_range_to_varying (vr);
}
/* Try to compute a useful range out of assignment STMT and store it
   in *VR.  */

static void
extract_range_from_assignment (value_range_t *vr, gimple stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);

  if (code == ASSERT_EXPR)
    extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
  else if (code == SSA_NAME)
    extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
  else if (TREE_CODE_CLASS (code) == tcc_binary
           || code == TRUTH_AND_EXPR
           || code == TRUTH_OR_EXPR
           || code == TRUTH_XOR_EXPR)
    extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
                                    gimple_expr_type (stmt),
                                    gimple_assign_rhs1 (stmt),
                                    gimple_assign_rhs2 (stmt));
  else if (TREE_CODE_CLASS (code) == tcc_unary)
    extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
                                   gimple_expr_type (stmt),
                                   gimple_assign_rhs1 (stmt));
  else if (code == COND_EXPR)
    extract_range_from_cond_expr (vr, gimple_assign_rhs1 (stmt));
  else if (TREE_CODE_CLASS (code) == tcc_comparison)
    extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
                                   gimple_expr_type (stmt),
                                   gimple_assign_rhs1 (stmt),
                                   gimple_assign_rhs2 (stmt));
  else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
           && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
    set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
  else
    set_value_range_to_varying (vr);

  if (vr->type == VR_VARYING)
    extract_range_basic (vr, stmt);
}
3323 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3324 would be profitable to adjust VR using scalar evolution information
3325 for VAR. If so, update VR with the new limits. */
3328 adjust_range_with_scev (value_range_t
*vr
, struct loop
*loop
,
3329 gimple stmt
, tree var
)
3331 tree init
, step
, chrec
, tmin
, tmax
, min
, max
, type
, tem
;
3332 enum ev_direction dir
;
3334 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3335 better opportunities than a regular range, but I'm not sure. */
3336 if (vr
->type
== VR_ANTI_RANGE
)
3339 chrec
= instantiate_parameters (loop
, analyze_scalar_evolution (loop
, var
));
3341 /* Like in PR19590, scev can return a constant function. */
3342 if (is_gimple_min_invariant (chrec
))
3344 set_value_range_to_value (vr
, chrec
, vr
->equiv
);
3348 if (TREE_CODE (chrec
) != POLYNOMIAL_CHREC
)
3351 init
= initial_condition_in_loop_num (chrec
, loop
->num
);
3352 tem
= op_with_constant_singleton_value_range (init
);
3355 step
= evolution_part_in_loop_num (chrec
, loop
->num
);
3356 tem
= op_with_constant_singleton_value_range (step
);
3360 /* If STEP is symbolic, we can't know whether INIT will be the
3361 minimum or maximum value in the range. Also, unless INIT is
3362 a simple expression, compare_values and possibly other functions
3363 in tree-vrp won't be able to handle it. */
3364 if (step
== NULL_TREE
3365 || !is_gimple_min_invariant (step
)
3366 || !valid_value_p (init
))
3369 dir
= scev_direction (chrec
);
3370 if (/* Do not adjust ranges if we do not know whether the iv increases
3371 or decreases, ... */
3372 dir
== EV_DIR_UNKNOWN
3373 /* ... or if it may wrap. */
3374 || scev_probably_wraps_p (init
, step
, stmt
, get_chrec_loop (chrec
),
3378 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3379 negative_overflow_infinity and positive_overflow_infinity,
3380 because we have concluded that the loop probably does not
3383 type
= TREE_TYPE (var
);
3384 if (POINTER_TYPE_P (type
) || !TYPE_MIN_VALUE (type
))
3385 tmin
= lower_bound_in_type (type
, type
);
3387 tmin
= TYPE_MIN_VALUE (type
);
3388 if (POINTER_TYPE_P (type
) || !TYPE_MAX_VALUE (type
))
3389 tmax
= upper_bound_in_type (type
, type
);
3391 tmax
= TYPE_MAX_VALUE (type
);
3393 /* Try to use estimated number of iterations for the loop to constrain the
3394 final value in the evolution.
3395 We are interested in the number of executions of the latch, while
3396 nb_iterations_upper_bound includes the last execution of the exit test. */
3397 if (TREE_CODE (step
) == INTEGER_CST
3398 && loop
->any_upper_bound
3399 && !double_int_zero_p (loop
->nb_iterations_upper_bound
)
3400 && is_gimple_val (init
)
3401 && (TREE_CODE (init
) != SSA_NAME
3402 || get_value_range (init
)->type
== VR_RANGE
))
3404 value_range_t maxvr
= { VR_UNDEFINED
, NULL_TREE
, NULL_TREE
, NULL
};
3406 bool unsigned_p
= TYPE_UNSIGNED (TREE_TYPE (step
));
3409 dtmp
= double_int_mul_with_sign (tree_to_double_int (step
),
3411 loop
->nb_iterations_upper_bound
,
3413 unsigned_p
, &overflow
);
3414 tem
= double_int_to_tree (TREE_TYPE (init
), dtmp
);
3415 /* If the multiplication overflowed we can't do a meaningful
3417 if (!overflow
&& double_int_equal_p (dtmp
, tree_to_double_int (tem
)))
3419 extract_range_from_binary_expr (&maxvr
, PLUS_EXPR
,
3420 TREE_TYPE (init
), init
, tem
);
3421 /* Likewise if the addition did. */
3422 if (maxvr
.type
== VR_RANGE
)
3430 if (vr
->type
== VR_VARYING
|| vr
->type
== VR_UNDEFINED
)
3435 /* For VARYING or UNDEFINED ranges, just about anything we get
3436 from scalar evolutions should be better. */
3438 if (dir
== EV_DIR_DECREASES
)
3443 /* If we would create an invalid range, then just assume we
3444 know absolutely nothing. This may be over-conservative,
3445 but it's clearly safe, and should happen only in unreachable
3446 parts of code, or for invalid programs. */
3447 if (compare_values (min
, max
) == 1)
3450 set_value_range (vr
, VR_RANGE
, min
, max
, vr
->equiv
);
3452 else if (vr
->type
== VR_RANGE
)
3457 if (dir
== EV_DIR_DECREASES
)
3459 /* INIT is the maximum value. If INIT is lower than VR->MAX
3460 but no smaller than VR->MIN, set VR->MAX to INIT. */
3461 if (compare_values (init
, max
) == -1)
3464 /* According to the loop information, the variable does not
3465 overflow. If we think it does, probably because of an
3466 overflow due to arithmetic on a different INF value,
3468 if (is_negative_overflow_infinity (min
)
3469 || compare_values (min
, tmin
) == -1)
3475 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3476 if (compare_values (init
, min
) == 1)
3479 if (is_positive_overflow_infinity (max
)
3480 || compare_values (tmax
, max
) == -1)
3484 /* If we just created an invalid range with the minimum
3485 greater than the maximum, we fail conservatively.
3486 This should happen only in unreachable
3487 parts of code, or for invalid programs. */
3488 if (compare_values (min
, max
) == 1)
3491 set_value_range (vr
, VR_RANGE
, min
, max
, vr
->equiv
);
/* Return true if VAR may overflow at STMT.  This checks any available
   loop information to see if we can determine that VAR does not
   overflow.  */

static bool
vrp_var_may_overflow (tree var, gimple stmt)
{
  struct loop *l;
  tree chrec, init, step;

  if (current_loops == NULL)
    return true;

  l = loop_containing_stmt (stmt);
  if (l == NULL
      || !loop_outer (l))
    return true;

  chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
  if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
    return true;

  init = initial_condition_in_loop_num (chrec, l->num);
  step = evolution_part_in_loop_num (chrec, l->num);

  if (step == NULL_TREE
      || !is_gimple_min_invariant (step)
      || !valid_value_p (init))
    return true;

  /* If we get here, we know something useful about VAR based on the
     loop information.  If it wraps, it may overflow.  */

  if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
                             true))
    return true;

  if (dump_file && (dump_flags & TDF_DETAILS) != 0)
    {
      print_generic_expr (dump_file, var, 0);
      fprintf (dump_file, ": loop information indicates does not overflow\n");
    }

  return false;
}
3542 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3544 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3545 all the values in the ranges.
3547 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3549 - Return NULL_TREE if it is not always possible to determine the
3550 value of the comparison.
3552 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3553 overflow infinity was used in the test. */
3557 compare_ranges (enum tree_code comp
, value_range_t
*vr0
, value_range_t
*vr1
,
3558 bool *strict_overflow_p
)
3560 /* VARYING or UNDEFINED ranges cannot be compared. */
3561 if (vr0
->type
== VR_VARYING
3562 || vr0
->type
== VR_UNDEFINED
3563 || vr1
->type
== VR_VARYING
3564 || vr1
->type
== VR_UNDEFINED
)
3567 /* Anti-ranges need to be handled separately. */
3568 if (vr0
->type
== VR_ANTI_RANGE
|| vr1
->type
== VR_ANTI_RANGE
)
3570 /* If both are anti-ranges, then we cannot compute any
3572 if (vr0
->type
== VR_ANTI_RANGE
&& vr1
->type
== VR_ANTI_RANGE
)
3575 /* These comparisons are never statically computable. */
3582 /* Equality can be computed only between a range and an
3583 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
3584 if (vr0
->type
== VR_RANGE
)
3586 /* To simplify processing, make VR0 the anti-range. */
3587 value_range_t
*tmp
= vr0
;
3592 gcc_assert (comp
== NE_EXPR
|| comp
== EQ_EXPR
);
3594 if (compare_values_warnv (vr0
->min
, vr1
->min
, strict_overflow_p
) == 0
3595 && compare_values_warnv (vr0
->max
, vr1
->max
, strict_overflow_p
) == 0)
3596 return (comp
== NE_EXPR
) ? boolean_true_node
: boolean_false_node
;
3601 if (!usable_range_p (vr0
, strict_overflow_p
)
3602 || !usable_range_p (vr1
, strict_overflow_p
))
3605 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3606 operands around and change the comparison code. */
3607 if (comp
== GT_EXPR
|| comp
== GE_EXPR
)
3610 comp
= (comp
== GT_EXPR
) ? LT_EXPR
: LE_EXPR
;
3616 if (comp
== EQ_EXPR
)
3618 /* Equality may only be computed if both ranges represent
3619 exactly one value. */
3620 if (compare_values_warnv (vr0
->min
, vr0
->max
, strict_overflow_p
) == 0
3621 && compare_values_warnv (vr1
->min
, vr1
->max
, strict_overflow_p
) == 0)
3623 int cmp_min
= compare_values_warnv (vr0
->min
, vr1
->min
,
3625 int cmp_max
= compare_values_warnv (vr0
->max
, vr1
->max
,
3627 if (cmp_min
== 0 && cmp_max
== 0)
3628 return boolean_true_node
;
3629 else if (cmp_min
!= -2 && cmp_max
!= -2)
3630 return boolean_false_node
;
3632 /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */
3633 else if (compare_values_warnv (vr0
->min
, vr1
->max
,
3634 strict_overflow_p
) == 1
3635 || compare_values_warnv (vr1
->min
, vr0
->max
,
3636 strict_overflow_p
) == 1)
3637 return boolean_false_node
;
3641 else if (comp
== NE_EXPR
)
3645 /* If VR0 is completely to the left or completely to the right
3646 of VR1, they are always different. Notice that we need to
3647 make sure that both comparisons yield similar results to
3648 avoid comparing values that cannot be compared at
3650 cmp1
= compare_values_warnv (vr0
->max
, vr1
->min
, strict_overflow_p
);
3651 cmp2
= compare_values_warnv (vr0
->min
, vr1
->max
, strict_overflow_p
);
3652 if ((cmp1
== -1 && cmp2
== -1) || (cmp1
== 1 && cmp2
== 1))
3653 return boolean_true_node
;
3655 /* If VR0 and VR1 represent a single value and are identical,
3657 else if (compare_values_warnv (vr0
->min
, vr0
->max
,
3658 strict_overflow_p
) == 0
3659 && compare_values_warnv (vr1
->min
, vr1
->max
,
3660 strict_overflow_p
) == 0
3661 && compare_values_warnv (vr0
->min
, vr1
->min
,
3662 strict_overflow_p
) == 0
3663 && compare_values_warnv (vr0
->max
, vr1
->max
,
3664 strict_overflow_p
) == 0)
3665 return boolean_false_node
;
3667 /* Otherwise, they may or may not be different. */
3671 else if (comp
== LT_EXPR
|| comp
== LE_EXPR
)
3675 /* If VR0 is to the left of VR1, return true. */
3676 tst
= compare_values_warnv (vr0
->max
, vr1
->min
, strict_overflow_p
);
3677 if ((comp
== LT_EXPR
&& tst
== -1)
3678 || (comp
== LE_EXPR
&& (tst
== -1 || tst
== 0)))
3680 if (overflow_infinity_range_p (vr0
)
3681 || overflow_infinity_range_p (vr1
))
3682 *strict_overflow_p
= true;
3683 return boolean_true_node
;
3686 /* If VR0 is to the right of VR1, return false. */
3687 tst
= compare_values_warnv (vr0
->min
, vr1
->max
, strict_overflow_p
);
3688 if ((comp
== LT_EXPR
&& (tst
== 0 || tst
== 1))
3689 || (comp
== LE_EXPR
&& tst
== 1))
3691 if (overflow_infinity_range_p (vr0
)
3692 || overflow_infinity_range_p (vr1
))
3693 *strict_overflow_p
= true;
3694 return boolean_false_node
;
3697 /* Otherwise, we don't know. */
3705 /* Given a value range VR, a value VAL and a comparison code COMP, return
3706 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3707 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3708 always returns false. Return NULL_TREE if it is not always
3709 possible to determine the value of the comparison. Also set
3710 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3711 infinity was used in the test. */
3714 compare_range_with_value (enum tree_code comp
, value_range_t
*vr
, tree val
,
3715 bool *strict_overflow_p
)
3717 if (vr
->type
== VR_VARYING
|| vr
->type
== VR_UNDEFINED
)
3720 /* Anti-ranges need to be handled separately. */
3721 if (vr
->type
== VR_ANTI_RANGE
)
3723 /* For anti-ranges, the only predicates that we can compute at
3724 compile time are equality and inequality. */
3731 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3732 if (value_inside_range (val
, vr
) == 1)
3733 return (comp
== NE_EXPR
) ? boolean_true_node
: boolean_false_node
;
3738 if (!usable_range_p (vr
, strict_overflow_p
))
3741 if (comp
== EQ_EXPR
)
3743 /* EQ_EXPR may only be computed if VR represents exactly
3745 if (compare_values_warnv (vr
->min
, vr
->max
, strict_overflow_p
) == 0)
3747 int cmp
= compare_values_warnv (vr
->min
, val
, strict_overflow_p
);
3749 return boolean_true_node
;
3750 else if (cmp
== -1 || cmp
== 1 || cmp
== 2)
3751 return boolean_false_node
;
3753 else if (compare_values_warnv (val
, vr
->min
, strict_overflow_p
) == -1
3754 || compare_values_warnv (vr
->max
, val
, strict_overflow_p
) == -1)
3755 return boolean_false_node
;
3759 else if (comp
== NE_EXPR
)
3761 /* If VAL is not inside VR, then they are always different. */
3762 if (compare_values_warnv (vr
->max
, val
, strict_overflow_p
) == -1
3763 || compare_values_warnv (vr
->min
, val
, strict_overflow_p
) == 1)
3764 return boolean_true_node
;
3766 /* If VR represents exactly one value equal to VAL, then return
3768 if (compare_values_warnv (vr
->min
, vr
->max
, strict_overflow_p
) == 0
3769 && compare_values_warnv (vr
->min
, val
, strict_overflow_p
) == 0)
3770 return boolean_false_node
;
3772 /* Otherwise, they may or may not be different. */
3775 else if (comp
== LT_EXPR
|| comp
== LE_EXPR
)
3779 /* If VR is to the left of VAL, return true. */
3780 tst
= compare_values_warnv (vr
->max
, val
, strict_overflow_p
);
3781 if ((comp
== LT_EXPR
&& tst
== -1)
3782 || (comp
== LE_EXPR
&& (tst
== -1 || tst
== 0)))
3784 if (overflow_infinity_range_p (vr
))
3785 *strict_overflow_p
= true;
3786 return boolean_true_node
;
3789 /* If VR is to the right of VAL, return false. */
3790 tst
= compare_values_warnv (vr
->min
, val
, strict_overflow_p
);
3791 if ((comp
== LT_EXPR
&& (tst
== 0 || tst
== 1))
3792 || (comp
== LE_EXPR
&& tst
== 1))
3794 if (overflow_infinity_range_p (vr
))
3795 *strict_overflow_p
= true;
3796 return boolean_false_node
;
3799 /* Otherwise, we don't know. */
3802 else if (comp
== GT_EXPR
|| comp
== GE_EXPR
)
3806 /* If VR is to the right of VAL, return true. */
3807 tst
= compare_values_warnv (vr
->min
, val
, strict_overflow_p
);
3808 if ((comp
== GT_EXPR
&& tst
== 1)
3809 || (comp
== GE_EXPR
&& (tst
== 0 || tst
== 1)))
3811 if (overflow_infinity_range_p (vr
))
3812 *strict_overflow_p
= true;
3813 return boolean_true_node
;
3816 /* If VR is to the left of VAL, return false. */
3817 tst
= compare_values_warnv (vr
->max
, val
, strict_overflow_p
);
3818 if ((comp
== GT_EXPR
&& (tst
== -1 || tst
== 0))
3819 || (comp
== GE_EXPR
&& tst
== -1))
3821 if (overflow_infinity_range_p (vr
))
3822 *strict_overflow_p
= true;
3823 return boolean_false_node
;
3826 /* Otherwise, we don't know. */
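
/* Illustrative sketch (not part of GCC), kept under "#if 0" so it does
   not affect the build: the kind of three-way answer computed above for
   "range < value", restated over a plain interval [lo, hi].  It returns
   1 when the comparison is always true, 0 when it is always false, and
   -1 when the range cannot decide it; e.g. [3, 7] < 10 is always true,
   while [3, 7] < 5 is unknown.  Identifiers are hypothetical.  */
#if 0
static int
demo_range_lt_value (long long lo, long long hi, long long val)
{
  if (hi < val)
    return 1;   /* Every value in [lo, hi] is below VAL.  */
  if (lo >= val)
    return 0;   /* No value in [lo, hi] is below VAL.  */
  return -1;    /* Some values are below VAL and some are not.  */
}
#endif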
/* Debugging dumps.  */

void dump_value_range (FILE *, value_range_t *);
void debug_value_range (value_range_t *);
void dump_all_value_ranges (FILE *);
void debug_all_value_ranges (void);
void dump_vr_equiv (FILE *, bitmap);
void debug_vr_equiv (bitmap);


/* Dump value range VR to FILE.  */

void
dump_value_range (FILE *file, value_range_t *vr)
{
  if (vr == NULL)
    fprintf (file, "[]");
  else if (vr->type == VR_UNDEFINED)
    fprintf (file, "UNDEFINED");
  else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
    {
      tree type = TREE_TYPE (vr->min);

      fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");

      if (is_negative_overflow_infinity (vr->min))
        fprintf (file, "-INF(OVF)");
      else if (INTEGRAL_TYPE_P (type)
               && !TYPE_UNSIGNED (type)
               && vrp_val_is_min (vr->min))
        fprintf (file, "-INF");
      else
        print_generic_expr (file, vr->min, 0);

      fprintf (file, ", ");

      if (is_positive_overflow_infinity (vr->max))
        fprintf (file, "+INF(OVF)");
      else if (INTEGRAL_TYPE_P (type)
               && vrp_val_is_max (vr->max))
        fprintf (file, "+INF");
      else
        print_generic_expr (file, vr->max, 0);

      fprintf (file, "]");

      if (vr->equiv)
        {
          bitmap_iterator bi;
          unsigned i, c = 0;

          fprintf (file, "  EQUIVALENCES: { ");

          EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
            {
              print_generic_expr (file, ssa_name (i), 0);
              fprintf (file, " ");
              c++;
            }

          fprintf (file, "} (%u elements)", c);
        }
    }
  else if (vr->type == VR_VARYING)
    fprintf (file, "VARYING");
  else
    fprintf (file, "INVALID RANGE");
}


/* Dump value range VR to stderr.  */

DEBUG_FUNCTION void
debug_value_range (value_range_t *vr)
{
  dump_value_range (stderr, vr);
  fprintf (stderr, "\n");
}


/* Dump value ranges of all SSA_NAMEs to FILE.  */

void
dump_all_value_ranges (FILE *file)
{
  size_t i;

  for (i = 0; i < num_ssa_names; i++)
    {
      if (vr_value[i])
        {
          print_generic_expr (file, ssa_name (i), 0);
          fprintf (file, ": ");
          dump_value_range (file, vr_value[i]);
          fprintf (file, "\n");
        }
    }

  fprintf (file, "\n");
}


/* Dump all value ranges to stderr.  */

DEBUG_FUNCTION void
debug_all_value_ranges (void)
{
  dump_all_value_ranges (stderr);
}
/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
   create a new SSA name N and return the assertion assignment
   'V = ASSERT_EXPR <V, V OP W>'.  */

static gimple
build_assert_expr_for (tree cond, tree v)
{
  tree n;
  gimple assertion;

  gcc_assert (TREE_CODE (v) == SSA_NAME);
  n = duplicate_ssa_name (v, NULL);

  if (COMPARISON_CLASS_P (cond))
    {
      tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
      assertion = gimple_build_assign (n, a);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      /* Given !V, build the assignment N = false.  */
      tree op0 = TREE_OPERAND (cond, 0);
      gcc_assert (op0 == v);
      assertion = gimple_build_assign (n, boolean_false_node);
    }
  else if (TREE_CODE (cond) == SSA_NAME)
    {
      /* Given V, build the assignment N = true.  */
      gcc_assert (v == cond);
      assertion = gimple_build_assign (n, boolean_true_node);
    }
  else
    gcc_unreachable ();

  SSA_NAME_DEF_STMT (n) = assertion;

  /* The new ASSERT_EXPR creates a new SSA name that replaces the
     operand of the ASSERT_EXPR.  Register the new name and the old one
     in the replacement table so that we can fix the SSA web after
     adding all the ASSERT_EXPRs.  */
  register_new_name_mapping (n, v);

  return assertion;
}
/* Return true if STMT is a predicate expression involving floating
   point values.  */

static inline bool
fp_predicate (gimple stmt)
{
  GIMPLE_CHECK (stmt, GIMPLE_COND);

  return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
}
/* If the range of values taken by OP can be inferred after STMT executes,
   return the comparison code (COMP_CODE_P) and value (VAL_P) that
   describes the inferred range.  Return true if a range could be
   inferred.  */

static bool
infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
{
  *val_p = NULL_TREE;
  *comp_code_p = ERROR_MARK;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
    return false;

  /* Similarly, don't infer anything from statements that may throw
     exceptions.  */
  if (stmt_could_throw_p (stmt))
    return false;

  /* If STMT is the last statement of a basic block with no
     successors, there is no point inferring anything about any of its
     operands.  We would not be able to find a proper insertion point
     for the assertion, anyway.  */
  if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
    return false;

  /* We can only assume that a pointer dereference will yield
     non-NULL if -fdelete-null-pointer-checks is enabled.  */
  if (flag_delete_null_pointer_checks
      && POINTER_TYPE_P (TREE_TYPE (op))
      && gimple_code (stmt) != GIMPLE_ASM)
    {
      unsigned num_uses, num_loads, num_stores;

      count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
      if (num_loads + num_stores > 0)
        {
          *val_p = build_int_cst (TREE_TYPE (op), 0);
          *comp_code_p = NE_EXPR;
          return true;
        }
    }

  return false;
}
4052 void dump_asserts_for (FILE *, tree
);
4053 void debug_asserts_for (tree
);
4054 void dump_all_asserts (FILE *);
4055 void debug_all_asserts (void);
4057 /* Dump all the registered assertions for NAME to FILE. */
4060 dump_asserts_for (FILE *file
, tree name
)
4064 fprintf (file
, "Assertions to be inserted for ");
4065 print_generic_expr (file
, name
, 0);
4066 fprintf (file
, "\n");
4068 loc
= asserts_for
[SSA_NAME_VERSION (name
)];
4071 fprintf (file
, "\t");
4072 print_gimple_stmt (file
, gsi_stmt (loc
->si
), 0, 0);
4073 fprintf (file
, "\n\tBB #%d", loc
->bb
->index
);
4076 fprintf (file
, "\n\tEDGE %d->%d", loc
->e
->src
->index
,
4077 loc
->e
->dest
->index
);
4078 dump_edge_info (file
, loc
->e
, 0);
4080 fprintf (file
, "\n\tPREDICATE: ");
4081 print_generic_expr (file
, name
, 0);
4082 fprintf (file
, " %s ", tree_code_name
[(int)loc
->comp_code
]);
4083 print_generic_expr (file
, loc
->val
, 0);
4084 fprintf (file
, "\n\n");
4088 fprintf (file
, "\n");
4092 /* Dump all the registered assertions for NAME to stderr. */
4095 debug_asserts_for (tree name
)
4097 dump_asserts_for (stderr
, name
);
4101 /* Dump all the registered assertions for all the names to FILE. */
4104 dump_all_asserts (FILE *file
)
4109 fprintf (file
, "\nASSERT_EXPRs to be inserted\n\n");
4110 EXECUTE_IF_SET_IN_BITMAP (need_assert_for
, 0, i
, bi
)
4111 dump_asserts_for (file
, ssa_name (i
));
4112 fprintf (file
, "\n");
4116 /* Dump all the registered assertions for all the names to stderr. */
4119 debug_all_asserts (void)
4121 dump_all_asserts (stderr
);
4125 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4126 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4127 E->DEST, then register this location as a possible insertion point
4128 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4130 BB, E and SI provide the exact insertion point for the new
4131 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4132 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4133 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4134 must not be NULL. */
register_new_assert_for (tree name, tree expr,
                         enum tree_code comp_code,
                         gimple_stmt_iterator si)
  assert_locus_t n, loc, last_loc;
  basic_block dest_bb;

  gcc_checking_assert (bb == NULL || e == NULL);

  gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
                       && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);

  /* Never build an assert comparing against an integer constant with
     TREE_OVERFLOW set.  This confuses our undefined overflow warning
     machinery.  */
  if (TREE_CODE (val) == INTEGER_CST
      && TREE_OVERFLOW (val))
    val = build_int_cst_wide (TREE_TYPE (val),
                              TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));

  /* The new assertion A will be inserted at BB or E.  We need to
     determine if the new location is dominated by a previously
     registered location for A.  If we are doing an edge insertion,
     assume that A will be inserted at E->DEST.  Note that this is not

     If E is a critical edge, it will be split.  But even if E is
     split, the new block will dominate the same set of blocks that

     The reverse, however, is not true, blocks dominated by E->DEST
     will not be dominated by the new block created to split E.  So,
     if the insertion location is on a critical edge, we will not use
     the new location to move another assertion previously registered
     at a block dominated by E->DEST.  */
  dest_bb = (bb) ? bb : e->dest;

  /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
     VAL at a block dominating DEST_BB, then we don't need to insert a new
     one.  Similarly, if the same assertion already exists at a block
     dominated by DEST_BB and the new location is not on a critical
     edge, then update the existing location for the assertion (i.e.,
     move the assertion up in the dominance tree).

     Note, this is implemented as a simple linked list because there
     should not be more than a handful of assertions registered per
     name.  If this becomes a performance problem, a table hashed by
     COMP_CODE and VAL could be implemented.  */
  loc = asserts_for[SSA_NAME_VERSION (name)];

      if (loc->comp_code == comp_code
              || operand_equal_p (loc->val, val, 0))
          && (loc->expr == expr
              || operand_equal_p (loc->expr, expr, 0)))
          /* If the assertion NAME COMP_CODE VAL has already been
             registered at a basic block that dominates DEST_BB, then
             we don't need to insert the same assertion again.  Note
             that we don't check strict dominance here to avoid
             replicating the same assertion inside the same basic
             block more than once (e.g., when a pointer is
             dereferenced several times inside a block).

             An exception to this rule are edge insertions.  If the
             new assertion is to be inserted on edge E, then it will
             dominate all the other insertions that we may want to
             insert in DEST_BB.  So, if we are doing an edge
             insertion, don't do this dominance check.  */
              && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))

          /* Otherwise, if E is not a critical edge and DEST_BB
             dominates the existing location for the assertion, move
             the assertion up in the dominance tree by updating its
             location information.  */
          if ((e == NULL || !EDGE_CRITICAL_P (e))
              && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))

      /* Update the last node of the list and move to the next one.  */

  /* If we didn't find an assertion already registered for
     NAME COMP_CODE VAL, add a new one at the end of the list of
     assertions associated with NAME.  */
  n = XNEW (struct assert_locus_d);

  n->comp_code = comp_code;

  asserts_for[SSA_NAME_VERSION (name)] = n;
  bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
   Extract a suitable test code and value and store them into *CODE_P and
   *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.

   If no extraction was possible, return FALSE, otherwise return TRUE.

   If INVERT is true, then we invert the result stored into *CODE_P.  */
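
/* Example (added for illustration, not from the original file): the
   predicate "5 < x_2" is normalized to x_2 > 5, since NAME must end up
   as the first operand; with INVERT set the stored code would instead
   describe x_2 <= 5.  */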
extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
                                         tree cond_op0, tree cond_op1,
                                         bool invert, enum tree_code *code_p,
  enum tree_code comp_code;

  /* Otherwise, we have a comparison of the form NAME COMP VAL
     or VAL COMP NAME.  */
  if (name == cond_op1)
      /* If the predicate is of the form VAL COMP NAME, flip
         COMP around because we need to register NAME as the
         first operand in the predicate.  */
      comp_code = swap_tree_comparison (cond_code);

      /* The comparison is of the form NAME COMP VAL, so the
         comparison code remains unchanged.  */
      comp_code = cond_code;

  /* Invert the comparison code as necessary.  */
    comp_code = invert_tree_comparison (comp_code, 0);

  /* VRP does not handle float types.  */
  if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))

  /* Do not register always-false predicates.
     FIXME: this works around a limitation in fold() when dealing with
     enumerations.  Given 'enum { N1, N2 } x;', fold will not
     fold 'if (x > N2)' to 'if (0)'.  */
  if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (val)))
      tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
      tree max = TYPE_MAX_VALUE (TREE_TYPE (val));

      if (comp_code == GT_EXPR
          || compare_values (val, max) == 0))

      if (comp_code == LT_EXPR
          || compare_values (val, min) == 0))

  *code_p = comp_code;
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by BSI.
   Invert the condition COND if INVERT is true.
   Return true if an assertion for NAME could be registered.  */

register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
                            enum tree_code cond_code,
                            tree cond_op0, tree cond_op1, bool invert)
  enum tree_code comp_code;
  bool retval = false;

  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
                                                invert, &comp_code, &val))

  /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
     reachable from E.  */
  if (live_on_edge (e, name)
      && !has_single_use (name))
      register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);

  /* In the case of NAME <= CST and NAME being defined as
     NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
     and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
     This catches range and anti-range tests.  */
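
/* Concrete instance (added for illustration, not from the original
   file): with NAME = (unsigned) NAME2 + 3 and the test NAME <= 7, the
   rule above yields the assertions NAME2 >= -3 and NAME2 <= 4, which is
   how range tests written as "(unsigned) x + 3 <= 7" are decomposed.  */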
  if ((comp_code == LE_EXPR
       || comp_code == GT_EXPR)
      && TREE_CODE (val) == INTEGER_CST
      && TYPE_UNSIGNED (TREE_TYPE (val)))
      gimple def_stmt = SSA_NAME_DEF_STMT (name);
      tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;

      /* Extract CST2 from the (optional) addition.  */
      if (is_gimple_assign (def_stmt)
          && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
          name2 = gimple_assign_rhs1 (def_stmt);
          cst2 = gimple_assign_rhs2 (def_stmt);
          if (TREE_CODE (name2) == SSA_NAME
              && TREE_CODE (cst2) == INTEGER_CST)
            def_stmt = SSA_NAME_DEF_STMT (name2);

      /* Extract NAME2 from the (optional) sign-changing cast.  */
      if (gimple_assign_cast_p (def_stmt))
          if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
              && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
              && (TYPE_PRECISION (gimple_expr_type (def_stmt))
                  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
            name3 = gimple_assign_rhs1 (def_stmt);

      /* If name3 is used later, create an ASSERT_EXPR for it.  */
      if (name3 != NULL_TREE
          && TREE_CODE (name3) == SSA_NAME
          && (cst2 == NULL_TREE
              || TREE_CODE (cst2) == INTEGER_CST)
          && INTEGRAL_TYPE_P (TREE_TYPE (name3))
          && live_on_edge (e, name3)
          && !has_single_use (name3))
          /* Build an expression for the range test.  */
          tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
          if (cst2 != NULL_TREE)
            tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);

              fprintf (dump_file, "Adding assert for ");
              print_generic_expr (dump_file, name3, 0);
              fprintf (dump_file, " from ");
              print_generic_expr (dump_file, tmp, 0);
              fprintf (dump_file, "\n");

          register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);

      /* If name2 is used later, create an ASSERT_EXPR for it.  */
      if (name2 != NULL_TREE
          && TREE_CODE (name2) == SSA_NAME
          && TREE_CODE (cst2) == INTEGER_CST
          && INTEGRAL_TYPE_P (TREE_TYPE (name2))
          && live_on_edge (e, name2)
          && !has_single_use (name2))
          /* Build an expression for the range test.  */
          if (TREE_TYPE (name) != TREE_TYPE (name2))
            tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
          if (cst2 != NULL_TREE)
            tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);

              fprintf (dump_file, "Adding assert for ");
              print_generic_expr (dump_file, name2, 0);
              fprintf (dump_file, " from ");
              print_generic_expr (dump_file, tmp, 0);
              fprintf (dump_file, "\n");

          register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
/* OP is an operand of a truth value expression which is known to have
   a particular value.  Register any asserts for OP and for any
   operands in OP's defining statement.

   If CODE is EQ_EXPR, then we want to register OP is zero (false),
   if CODE is NE_EXPR, then we want to register OP is nonzero (true).  */
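
/* Example (added for illustration, not from the original file): given
   x_3 = a_1 < b_2 and a branch known to take the x_3 != 0 edge, this
   registers x_3 != 0 and then recurses into the defining comparison so
   that assertions for a_1 and b_2 can be registered as well.  */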
register_edge_assert_for_1 (tree op, enum tree_code code,
                            edge e, gimple_stmt_iterator bsi)
  bool retval = false;
  enum tree_code rhs_code;

  /* We only care about SSA_NAMEs.  */
  if (TREE_CODE (op) != SSA_NAME)

  /* We know that OP will have a zero or nonzero value.  If OP is used
     more than once go ahead and register an assert for OP.

     The FOUND_IN_SUBGRAPH support is not helpful in this situation as
     it will always be set for OP (because OP is used in a COND_EXPR in
     */
  if (!has_single_use (op))
      val = build_int_cst (TREE_TYPE (op), 0);
      register_new_assert_for (op, op, code, val, NULL, e, bsi);

  /* Now look at how OP is set.  If it's set from a comparison,
     a truth operation or some bit operations, then we may be able
     to register information about the operands of that assignment.  */
  op_def = SSA_NAME_DEF_STMT (op);
  if (gimple_code (op_def) != GIMPLE_ASSIGN)

  rhs_code = gimple_assign_rhs_code (op_def);

  if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
      bool invert = (code == EQ_EXPR ? true : false);
      tree op0 = gimple_assign_rhs1 (op_def);
      tree op1 = gimple_assign_rhs2 (op_def);

      if (TREE_CODE (op0) == SSA_NAME)
        retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
      if (TREE_CODE (op1) == SSA_NAME)
        retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
  else if ((code == NE_EXPR
            && (gimple_assign_rhs_code (op_def) == TRUTH_AND_EXPR
                || gimple_assign_rhs_code (op_def) == BIT_AND_EXPR))
           && (gimple_assign_rhs_code (op_def) == TRUTH_OR_EXPR
               || gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)))
      /* Recurse on each operand.  */
      retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
      retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
  else if (gimple_assign_rhs_code (op_def) == TRUTH_NOT_EXPR)
      /* Recurse, flipping CODE.  */
      code = invert_tree_comparison (code, false);
      retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
  else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
      /* Recurse through the copy.  */
      retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
  else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
      /* Recurse through the type conversion.  */
      retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by SI.
   Return true if an assertion for NAME could be registered.  */
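
/* Example (added for illustration, not from the original file): for
   "if (x_3 != 0)" with x_3 = a_1 & b_2, both a_1 != 0 and b_2 != 0 can
   be asserted on the true edge; for "if (x_3 == 0)" with x_3 = a_1 | b_2,
   both operands must be zero on the true edge.  */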
register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
                          enum tree_code cond_code, tree cond_op0,
  enum tree_code comp_code;
  bool retval = false;
  bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))

  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,

  /* Register ASSERT_EXPRs for name.  */
  retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
                                        cond_op1, is_else_edge);

  /* If COND is effectively an equality test of an SSA_NAME against
     the value zero or one, then we may be able to assert values
     for SSA_NAMEs which flow into COND.  */

  /* In the case of NAME == 1 or NAME != 0, for TRUTH_AND_EXPR defining
     statement of NAME we can assert both operands of the TRUTH_AND_EXPR
     have nonzero value.  */
  if (((comp_code == EQ_EXPR && integer_onep (val))
       || (comp_code == NE_EXPR && integer_zerop (val))))
      gimple def_stmt = SSA_NAME_DEF_STMT (name);

      if (is_gimple_assign (def_stmt)
          && (gimple_assign_rhs_code (def_stmt) == TRUTH_AND_EXPR
              || gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR))
          tree op0 = gimple_assign_rhs1 (def_stmt);
          tree op1 = gimple_assign_rhs2 (def_stmt);
          retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
          retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);

  /* In the case of NAME == 0 or NAME != 1, for TRUTH_OR_EXPR defining
     statement of NAME we can assert both operands of the TRUTH_OR_EXPR
     have zero value.  */
  if (((comp_code == EQ_EXPR && integer_zerop (val))
       || (comp_code == NE_EXPR && integer_onep (val))))
      gimple def_stmt = SSA_NAME_DEF_STMT (name);

      if (is_gimple_assign (def_stmt)
          && (gimple_assign_rhs_code (def_stmt) == TRUTH_OR_EXPR
              /* For BIT_IOR_EXPR only if NAME == 0 both operands have
                 necessarily zero value.  */
              || (comp_code == EQ_EXPR
                  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR))))
          tree op0 = gimple_assign_rhs1 (def_stmt);
          tree op1 = gimple_assign_rhs2 (def_stmt);
          retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
          retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a COND_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.  */
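
/* Illustration (added; not part of the original sources): for a block
   ending in "if (x_1 < y_2)", the THEN edge may receive
   x_1 = ASSERT_EXPR <x_1, x_1 < y_2> and the ELSE edge
   x_1 = ASSERT_EXPR <x_1, x_1 >= y_2>, each registered only when the
   name is live on that edge.  */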
find_conditional_asserts (basic_block bb, gimple last)
  gimple_stmt_iterator bsi;

  need_assert = false;
  bsi = gsi_for_stmt (last);

  /* Look for uses of the operands in each of the sub-graphs
     rooted at BB.  We need to check each of the outgoing edges
     separately, so that we know what kind of ASSERT_EXPR to
     insert.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
      /* Register the necessary assertions for each operand in the
         conditional predicate.  */
      FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
        need_assert |= register_edge_assert_for (op, e, bsi,
                                                 gimple_cond_code (last),
                                                 gimple_cond_lhs (last),
                                                 gimple_cond_rhs (last));
/* Compare two case labels sorting first by the destination label uid
   and then by the case value.  */

compare_case_labels (const void *p1, const void *p2)
  const_tree const case1 = *(const_tree const*)p1;
  const_tree const case2 = *(const_tree const*)p2;
  unsigned int uid1 = DECL_UID (CASE_LABEL (case1));
  unsigned int uid2 = DECL_UID (CASE_LABEL (case2));

  else if (uid1 == uid2)
      /* Make sure the default label is first in a group.  */
      if (!CASE_LOW (case1))
      else if (!CASE_LOW (case2))
        return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a SWITCH_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.  */
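
/* Illustration (added; not part of the original sources): for
   "switch (i_2)" with a label "case 3 ... 5:", the edge to that label
   can carry the assertions i_2 >= 3 and i_2 <= 5, while a single-valued
   label such as "case 7:" yields i_2 == 7 on its edge.  */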
4700 find_switch_asserts (basic_block bb
, gimple last
)
4703 gimple_stmt_iterator bsi
;
4707 size_t n
= gimple_switch_num_labels(last
);
4708 #if GCC_VERSION >= 4000
4711 /* Work around GCC 3.4 bug (PR 37086). */
4712 volatile unsigned int idx
;
4715 need_assert
= false;
4716 bsi
= gsi_for_stmt (last
);
4717 op
= gimple_switch_index (last
);
4718 if (TREE_CODE (op
) != SSA_NAME
)
4721 /* Build a vector of case labels sorted by destination label. */
4722 vec2
= make_tree_vec (n
);
4723 for (idx
= 0; idx
< n
; ++idx
)
4724 TREE_VEC_ELT (vec2
, idx
) = gimple_switch_label (last
, idx
);
4725 qsort (&TREE_VEC_ELT (vec2
, 0), n
, sizeof (tree
), compare_case_labels
);
4727 for (idx
= 0; idx
< n
; ++idx
)
4730 tree cl
= TREE_VEC_ELT (vec2
, idx
);
4732 min
= CASE_LOW (cl
);
4733 max
= CASE_HIGH (cl
);
4735 /* If there are multiple case labels with the same destination
4736 we need to combine them to a single value range for the edge. */
4738 && CASE_LABEL (cl
) == CASE_LABEL (TREE_VEC_ELT (vec2
, idx
+ 1)))
4740 /* Skip labels until the last of the group. */
4744 && CASE_LABEL (cl
) == CASE_LABEL (TREE_VEC_ELT (vec2
, idx
)));
4747 /* Pick up the maximum of the case label range. */
4748 if (CASE_HIGH (TREE_VEC_ELT (vec2
, idx
)))
4749 max
= CASE_HIGH (TREE_VEC_ELT (vec2
, idx
));
4751 max
= CASE_LOW (TREE_VEC_ELT (vec2
, idx
));
4754 /* Nothing to do if the range includes the default label until we
4755 can register anti-ranges. */
4756 if (min
== NULL_TREE
)
4759 /* Find the edge to register the assert expr on. */
4760 e
= find_edge (bb
, label_to_block (CASE_LABEL (cl
)));
4762 /* Register the necessary assertions for the operand in the
4764 need_assert
|= register_edge_assert_for (op
, e
, bsi
,
4765 max
? GE_EXPR
: EQ_EXPR
,
4767 fold_convert (TREE_TYPE (op
),
4771 need_assert
|= register_edge_assert_for (op
, e
, bsi
, LE_EXPR
,
4773 fold_convert (TREE_TYPE (op
),
/* Traverse all the statements in block BB looking for statements that
   may generate useful assertions for the SSA names in their operands.
   If a statement produces a useful assertion A for name N_i, then the
   list of assertions already generated for N_i is scanned to
   determine if A is actually needed.

   If N_i already had the assertion A at a location dominating the
   current location, then nothing needs to be done.  Otherwise, the
   new location for A is recorded instead.

   1- For every statement S in BB, all the variables used by S are
      added to bitmap FOUND_IN_SUBGRAPH.

   2- If statement S uses an operand N in a way that exposes a known
      value range for N, then if N was not already generated by an
      ASSERT_EXPR, create a new assert location for N.  For instance,
      if N is a pointer and the statement dereferences it, we can
      assume that N is not NULL.

   3- COND_EXPRs are a special case of #2.  We can derive range
      information from the predicate but need to insert different
      ASSERT_EXPRs for each of the sub-graphs rooted at the
      conditional block.  If the last statement of BB is a conditional
      expression of the form 'X op Y', then

      a) Remove X and Y from the set FOUND_IN_SUBGRAPH.

      b) If the conditional is the only entry point to the sub-graph
         corresponding to the THEN_CLAUSE, recurse into it.  On
         return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
         an ASSERT_EXPR is added for the corresponding variable.

      c) Repeat step (b) on the ELSE_CLAUSE.

      d) Mark X and Y in FOUND_IN_SUBGRAPH.

      In this case, an assertion on the THEN clause is useful to
      determine that 'a' is always 9 on that edge.  However, an assertion
      on the ELSE clause would be unnecessary.

   4- If BB does not end in a conditional expression, then we recurse
      into BB's dominator children.

   At the end of the recursive traversal, every SSA name will have a
   list of locations where ASSERT_EXPRs should be added.  When a new
   location for name N is found, it is registered by calling
   register_new_assert_for.  That function keeps track of all the
   registered assertions to prevent adding unnecessary assertions.
   For instance, if a pointer P_4 is dereferenced more than once in a
   dominator tree, only the location dominating all the dereferences of
   P_4 will receive an ASSERT_EXPR.

   If this function returns true, then it means that there are names
   for which we need to generate ASSERT_EXPRs.  Those assertions are
   inserted by process_assert_insertions.  */
4846 find_assert_locations_1 (basic_block bb
, sbitmap live
)
4848 gimple_stmt_iterator si
;
4853 need_assert
= false;
4854 last
= last_stmt (bb
);
4856 /* If BB's last statement is a conditional statement involving integer
4857 operands, determine if we need to add ASSERT_EXPRs. */
4859 && gimple_code (last
) == GIMPLE_COND
4860 && !fp_predicate (last
)
4861 && !ZERO_SSA_OPERANDS (last
, SSA_OP_USE
))
4862 need_assert
|= find_conditional_asserts (bb
, last
);
4864 /* If BB's last statement is a switch statement involving integer
4865 operands, determine if we need to add ASSERT_EXPRs. */
4867 && gimple_code (last
) == GIMPLE_SWITCH
4868 && !ZERO_SSA_OPERANDS (last
, SSA_OP_USE
))
4869 need_assert
|= find_switch_asserts (bb
, last
);
4871 /* Traverse all the statements in BB marking used names and looking
4872 for statements that may infer assertions for their used operands. */
4873 for (si
= gsi_start_bb (bb
); !gsi_end_p (si
); gsi_next (&si
))
4879 stmt
= gsi_stmt (si
);
4881 if (is_gimple_debug (stmt
))
4884 /* See if we can derive an assertion for any of STMT's operands. */
4885 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_USE
)
4888 enum tree_code comp_code
;
4890 /* Mark OP in our live bitmap. */
4891 SET_BIT (live
, SSA_NAME_VERSION (op
));
4893 /* If OP is used in such a way that we can infer a value
4894 range for it, and we don't find a previous assertion for
4895 it, create a new assertion location node for OP. */
4896 if (infer_value_range (stmt
, op
, &comp_code
, &value
))
4898 /* If we are able to infer a nonzero value range for OP,
4899 then walk backwards through the use-def chain to see if OP
4900 was set via a typecast.
4902 If so, then we can also infer a nonzero value range
4903 for the operand of the NOP_EXPR. */
4904 if (comp_code
== NE_EXPR
&& integer_zerop (value
))
4907 gimple def_stmt
= SSA_NAME_DEF_STMT (t
);
4909 while (is_gimple_assign (def_stmt
)
4910 && gimple_assign_rhs_code (def_stmt
) == NOP_EXPR
4912 (gimple_assign_rhs1 (def_stmt
)) == SSA_NAME
4914 (TREE_TYPE (gimple_assign_rhs1 (def_stmt
))))
4916 t
= gimple_assign_rhs1 (def_stmt
);
4917 def_stmt
= SSA_NAME_DEF_STMT (t
);
4919 /* Note we want to register the assert for the
4920 operand of the NOP_EXPR after SI, not after the
4922 if (! has_single_use (t
))
4924 register_new_assert_for (t
, t
, comp_code
, value
,
4931 /* If OP is used only once, namely in this STMT, don't
4932 bother creating an ASSERT_EXPR for it. Such an
4933 ASSERT_EXPR would do nothing but increase compile time. */
4934 if (!has_single_use (op
))
4936 register_new_assert_for (op
, op
, comp_code
, value
,
4944 /* Traverse all PHI nodes in BB marking used operands. */
4945 for (si
= gsi_start_phis (bb
); !gsi_end_p(si
); gsi_next (&si
))
4947 use_operand_p arg_p
;
4949 phi
= gsi_stmt (si
);
4951 FOR_EACH_PHI_ARG (arg_p
, phi
, i
, SSA_OP_USE
)
4953 tree arg
= USE_FROM_PTR (arg_p
);
4954 if (TREE_CODE (arg
) == SSA_NAME
)
4955 SET_BIT (live
, SSA_NAME_VERSION (arg
));
/* Do an RPO walk over the function computing SSA name liveness
   on-the-fly and deciding on assert expressions to insert.
   Returns true if there are assert expressions to be inserted.  */
4967 find_assert_locations (void)
4969 int *rpo
= XCNEWVEC (int, last_basic_block
+ NUM_FIXED_BLOCKS
);
4970 int *bb_rpo
= XCNEWVEC (int, last_basic_block
+ NUM_FIXED_BLOCKS
);
4971 int *last_rpo
= XCNEWVEC (int, last_basic_block
+ NUM_FIXED_BLOCKS
);
4975 live
= XCNEWVEC (sbitmap
, last_basic_block
+ NUM_FIXED_BLOCKS
);
4976 rpo_cnt
= pre_and_rev_post_order_compute (NULL
, rpo
, false);
4977 for (i
= 0; i
< rpo_cnt
; ++i
)
4980 need_asserts
= false;
4981 for (i
= rpo_cnt
-1; i
>= 0; --i
)
4983 basic_block bb
= BASIC_BLOCK (rpo
[i
]);
4989 live
[rpo
[i
]] = sbitmap_alloc (num_ssa_names
);
4990 sbitmap_zero (live
[rpo
[i
]]);
4993 /* Process BB and update the live information with uses in
4995 need_asserts
|= find_assert_locations_1 (bb
, live
[rpo
[i
]]);
4997 /* Merge liveness into the predecessor blocks and free it. */
4998 if (!sbitmap_empty_p (live
[rpo
[i
]]))
5001 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
5003 int pred
= e
->src
->index
;
5004 if (e
->flags
& EDGE_DFS_BACK
)
5009 live
[pred
] = sbitmap_alloc (num_ssa_names
);
5010 sbitmap_zero (live
[pred
]);
5012 sbitmap_a_or_b (live
[pred
], live
[pred
], live
[rpo
[i
]]);
5014 if (bb_rpo
[pred
] < pred_rpo
)
5015 pred_rpo
= bb_rpo
[pred
];
5018 /* Record the RPO number of the last visited block that needs
5019 live information from this block. */
5020 last_rpo
[rpo
[i
]] = pred_rpo
;
5024 sbitmap_free (live
[rpo
[i
]]);
5025 live
[rpo
[i
]] = NULL
;
5028 /* We can free all successors live bitmaps if all their
5029 predecessors have been visited already. */
5030 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
5031 if (last_rpo
[e
->dest
->index
] == i
5032 && live
[e
->dest
->index
])
5034 sbitmap_free (live
[e
->dest
->index
]);
5035 live
[e
->dest
->index
] = NULL
;
5040 XDELETEVEC (bb_rpo
);
5041 XDELETEVEC (last_rpo
);
5042 for (i
= 0; i
< last_basic_block
+ NUM_FIXED_BLOCKS
; ++i
)
5044 sbitmap_free (live
[i
]);
5047 return need_asserts
;
/* Create an ASSERT_EXPR for NAME and insert it in the location
   indicated by LOC.  Return true if we made any edge insertions.  */
5054 process_assert_insertions_for (tree name
, assert_locus_t loc
)
5056 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5063 /* If we have X <=> X do not insert an assert expr for that. */
5064 if (loc
->expr
== loc
->val
)
5067 cond
= build2 (loc
->comp_code
, boolean_type_node
, loc
->expr
, loc
->val
);
5068 assert_stmt
= build_assert_expr_for (cond
, name
);
5071 /* We have been asked to insert the assertion on an edge. This
5072 is used only by COND_EXPR and SWITCH_EXPR assertions. */
5073 gcc_checking_assert (gimple_code (gsi_stmt (loc
->si
)) == GIMPLE_COND
5074 || (gimple_code (gsi_stmt (loc
->si
))
5077 gsi_insert_on_edge (loc
->e
, assert_stmt
);
5081 /* Otherwise, we can insert right after LOC->SI iff the
5082 statement must not be the last statement in the block. */
5083 stmt
= gsi_stmt (loc
->si
);
5084 if (!stmt_ends_bb_p (stmt
))
5086 gsi_insert_after (&loc
->si
, assert_stmt
, GSI_SAME_STMT
);
5090 /* If STMT must be the last statement in BB, we can only insert new
5091 assertions on the non-abnormal edge out of BB. Note that since
5092 STMT is not control flow, there may only be one non-abnormal edge
5094 FOR_EACH_EDGE (e
, ei
, loc
->bb
->succs
)
5095 if (!(e
->flags
& EDGE_ABNORMAL
))
5097 gsi_insert_on_edge (e
, assert_stmt
);
/* Process all the insertions registered for every name N_i registered
   in NEED_ASSERT_FOR.  The list of assertions to be inserted is
   found in ASSERTS_FOR[i].  */
5110 process_assert_insertions (void)
5114 bool update_edges_p
= false;
5115 int num_asserts
= 0;
5117 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5118 dump_all_asserts (dump_file
);
5120 EXECUTE_IF_SET_IN_BITMAP (need_assert_for
, 0, i
, bi
)
5122 assert_locus_t loc
= asserts_for
[i
];
5127 assert_locus_t next
= loc
->next
;
5128 update_edges_p
|= process_assert_insertions_for (ssa_name (i
), loc
);
5136 gsi_commit_edge_inserts ();
5138 statistics_counter_event (cfun
, "Number of ASSERT_EXPR expressions inserted",
/* Traverse the flowgraph looking for conditional jumps to insert range
   expressions.  These range expressions are meant to provide information
   to optimizations that need to reason in terms of value ranges.  They
   will not be expanded into RTL.  For instance, given:

   this pass will transform the code into:

        x = ASSERT_EXPR <x, x < y>

        y = ASSERT_EXPR <y, x <= y>

   The idea is that once copy and constant propagation have run, other
   optimizations will be able to determine what ranges of values 'x' can
   take in different paths of the code, simply by checking the reaching
   definition of 'x'.  */
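
/* A fuller sketch of the transformation (added for illustration; the
   original example in this comment is elided above):

        if (x < y)
          {
            x = ASSERT_EXPR <x, x < y>
            ...
          }

   so that uses of x in the THEN arm see the range implied by the
   guard.  */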
5176 insert_range_assertions (void)
5178 need_assert_for
= BITMAP_ALLOC (NULL
);
5179 asserts_for
= XCNEWVEC (assert_locus_t
, num_ssa_names
);
5181 calculate_dominance_info (CDI_DOMINATORS
);
5183 if (find_assert_locations ())
5185 process_assert_insertions ();
5186 update_ssa (TODO_update_ssa_no_phi
);
5189 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5191 fprintf (dump_file
, "\nSSA form after inserting ASSERT_EXPRs\n");
5192 dump_function_to_file (current_function_decl
, dump_file
, dump_flags
);
5196 BITMAP_FREE (need_assert_for
);
/* Checks one ARRAY_REF in REF, located at LOCUS.  Ignores flexible arrays
   and "struct" hacks.  If VRP can determine that the
   array subscript is a constant, check if it is outside the valid
   range.  If the array subscript is a RANGE, warn if it is
   non-overlapping with the valid range.
   IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.  */
5207 check_array_ref (location_t location
, tree ref
, bool ignore_off_by_one
)
5209 value_range_t
* vr
= NULL
;
5210 tree low_sub
, up_sub
;
5211 tree low_bound
, up_bound
, up_bound_p1
;
5214 if (TREE_NO_WARNING (ref
))
5217 low_sub
= up_sub
= TREE_OPERAND (ref
, 1);
5218 up_bound
= array_ref_up_bound (ref
);
5220 /* Can not check flexible arrays. */
5222 || TREE_CODE (up_bound
) != INTEGER_CST
)
5225 /* Accesses to trailing arrays via pointers may access storage
5226 beyond the types array bounds. */
5227 base
= get_base_address (ref
);
5228 if (base
&& TREE_CODE (base
) == MEM_REF
)
5230 tree cref
, next
= NULL_TREE
;
5232 if (TREE_CODE (TREE_OPERAND (ref
, 0)) != COMPONENT_REF
)
5235 cref
= TREE_OPERAND (ref
, 0);
5236 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref
, 0))) == RECORD_TYPE
)
5237 for (next
= DECL_CHAIN (TREE_OPERAND (cref
, 1));
5238 next
&& TREE_CODE (next
) != FIELD_DECL
;
5239 next
= DECL_CHAIN (next
))
5242 /* If this is the last field in a struct type or a field in a
5243 union type do not warn. */
5248 low_bound
= array_ref_low_bound (ref
);
5249 up_bound_p1
= int_const_binop (PLUS_EXPR
, up_bound
, integer_one_node
, 0);
5251 if (TREE_CODE (low_sub
) == SSA_NAME
)
5253 vr
= get_value_range (low_sub
);
5254 if (vr
->type
== VR_RANGE
|| vr
->type
== VR_ANTI_RANGE
)
5256 low_sub
= vr
->type
== VR_RANGE
? vr
->max
: vr
->min
;
5257 up_sub
= vr
->type
== VR_RANGE
? vr
->min
: vr
->max
;
5261 if (vr
&& vr
->type
== VR_ANTI_RANGE
)
5263 if (TREE_CODE (up_sub
) == INTEGER_CST
5264 && tree_int_cst_lt (up_bound
, up_sub
)
5265 && TREE_CODE (low_sub
) == INTEGER_CST
5266 && tree_int_cst_lt (low_sub
, low_bound
))
5268 warning_at (location
, OPT_Warray_bounds
,
5269 "array subscript is outside array bounds");
5270 TREE_NO_WARNING (ref
) = 1;
5273 else if (TREE_CODE (up_sub
) == INTEGER_CST
5274 && (ignore_off_by_one
5275 ? (tree_int_cst_lt (up_bound
, up_sub
)
5276 && !tree_int_cst_equal (up_bound_p1
, up_sub
))
5277 : (tree_int_cst_lt (up_bound
, up_sub
)
5278 || tree_int_cst_equal (up_bound_p1
, up_sub
))))
5280 warning_at (location
, OPT_Warray_bounds
,
5281 "array subscript is above array bounds");
5282 TREE_NO_WARNING (ref
) = 1;
5284 else if (TREE_CODE (low_sub
) == INTEGER_CST
5285 && tree_int_cst_lt (low_sub
, low_bound
))
5287 warning_at (location
, OPT_Warray_bounds
,
5288 "array subscript is below array bounds");
5289 TREE_NO_WARNING (ref
) = 1;
/* Checks whether the expression T, located at LOCATION, computes the
   address of an ARRAY_REF, and calls check_array_ref on it.  */
5297 search_for_addr_array (tree t
, location_t location
)
5299 while (TREE_CODE (t
) == SSA_NAME
)
5301 gimple g
= SSA_NAME_DEF_STMT (t
);
5303 if (gimple_code (g
) != GIMPLE_ASSIGN
)
5306 if (get_gimple_rhs_class (gimple_assign_rhs_code (g
))
5307 != GIMPLE_SINGLE_RHS
)
5310 t
= gimple_assign_rhs1 (g
);
5314 /* We are only interested in addresses of ARRAY_REF's. */
5315 if (TREE_CODE (t
) != ADDR_EXPR
)
5318 /* Check each ARRAY_REFs in the reference chain. */
5321 if (TREE_CODE (t
) == ARRAY_REF
)
5322 check_array_ref (location
, t
, true /*ignore_off_by_one*/);
5324 t
= TREE_OPERAND (t
, 0);
5326 while (handled_component_p (t
));
5328 if (TREE_CODE (t
) == MEM_REF
5329 && TREE_CODE (TREE_OPERAND (t
, 0)) == ADDR_EXPR
5330 && !TREE_NO_WARNING (t
))
5332 tree tem
= TREE_OPERAND (TREE_OPERAND (t
, 0), 0);
5333 tree low_bound
, up_bound
, el_sz
;
5335 if (TREE_CODE (TREE_TYPE (tem
)) != ARRAY_TYPE
5336 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem
))) == ARRAY_TYPE
5337 || !TYPE_DOMAIN (TREE_TYPE (tem
)))
5340 low_bound
= TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem
)));
5341 up_bound
= TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem
)));
5342 el_sz
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem
)));
5344 || TREE_CODE (low_bound
) != INTEGER_CST
5346 || TREE_CODE (up_bound
) != INTEGER_CST
5348 || TREE_CODE (el_sz
) != INTEGER_CST
)
5351 idx
= mem_ref_offset (t
);
5352 idx
= double_int_sdiv (idx
, tree_to_double_int (el_sz
), TRUNC_DIV_EXPR
);
5353 if (double_int_scmp (idx
, double_int_zero
) < 0)
5355 warning_at (location
, OPT_Warray_bounds
,
5356 "array subscript is below array bounds");
5357 TREE_NO_WARNING (t
) = 1;
5359 else if (double_int_scmp (idx
,
5362 (tree_to_double_int (up_bound
),
5364 (tree_to_double_int (low_bound
))),
5365 double_int_one
)) > 0)
5367 warning_at (location
, OPT_Warray_bounds
,
5368 "array subscript is above array bounds");
5369 TREE_NO_WARNING (t
) = 1;
/* walk_tree() callback that checks if *TP is
   an ARRAY_REF inside an ADDR_EXPR (in which an array
   subscript one outside the valid range is allowed).  Call
   check_array_ref for each ARRAY_REF found.  The location is
   passed in DATA.  */
5381 check_array_bounds (tree
*tp
, int *walk_subtree
, void *data
)
5384 struct walk_stmt_info
*wi
= (struct walk_stmt_info
*) data
;
5385 location_t location
;
5387 if (EXPR_HAS_LOCATION (t
))
5388 location
= EXPR_LOCATION (t
);
5391 location_t
*locp
= (location_t
*) wi
->info
;
5395 *walk_subtree
= TRUE
;
5397 if (TREE_CODE (t
) == ARRAY_REF
)
5398 check_array_ref (location
, t
, false /*ignore_off_by_one*/);
5400 if (TREE_CODE (t
) == MEM_REF
5401 || (TREE_CODE (t
) == RETURN_EXPR
&& TREE_OPERAND (t
, 0)))
5402 search_for_addr_array (TREE_OPERAND (t
, 0), location
);
5404 if (TREE_CODE (t
) == ADDR_EXPR
)
5405 *walk_subtree
= FALSE
;
/* Walk over all statements of all reachable BBs and call check_array_bounds
   on them.  */

check_all_array_refs (void)
5417 gimple_stmt_iterator si
;
5423 bool executable
= false;
5425 /* Skip blocks that were found to be unreachable. */
5426 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
5427 executable
|= !!(e
->flags
& EDGE_EXECUTABLE
);
5431 for (si
= gsi_start_bb (bb
); !gsi_end_p (si
); gsi_next (&si
))
5433 gimple stmt
= gsi_stmt (si
);
5434 struct walk_stmt_info wi
;
5435 if (!gimple_has_location (stmt
))
5438 if (is_gimple_call (stmt
))
5441 size_t n
= gimple_call_num_args (stmt
);
5442 for (i
= 0; i
< n
; i
++)
5444 tree arg
= gimple_call_arg (stmt
, i
);
5445 search_for_addr_array (arg
, gimple_location (stmt
));
5450 memset (&wi
, 0, sizeof (wi
));
5451 wi
.info
= CONST_CAST (void *, (const void *)
5452 gimple_location_ptr (stmt
));
5454 walk_gimple_op (gsi_stmt (si
),
/* Convert range assertion expressions into the implied copies and
   copy propagate away the copies.  Doing the trivial copy propagation
   here avoids the need to run the full copy propagation pass after
   VRP.

   FIXME, this will eventually lead to copy propagation removing the
   names that had useful range information attached to them.  For
   instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
   then N_i will have the range [4, +INF].

   However, by converting the assertion into the implied copy
   operation N_i = N_j, we will then copy-propagate N_j into the uses
   of N_i and lose the range information.  We may want to hold on to
   ASSERT_EXPRs a little while longer as the ranges could be used in
   things like jump threading.

   The problem with keeping ASSERT_EXPRs around is that passes after
   VRP need to handle them appropriately.

   Another approach would be to make the range information a first
   class property of the SSA_NAME so that it can be queried from
   any pass.  This is made somewhat more complex by the need for
   multiple ranges to be associated with one SSA_NAME.  */
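
/* Example of the rewrite performed below (added for illustration, not
   from the original file):

        N_i = ASSERT_EXPR <N_j, N_j > 3>   becomes   N_i = N_j

   after which every immediate use of N_i is replaced by N_j and the
   copy itself is removed.  */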
5487 remove_range_assertions (void)
5490 gimple_stmt_iterator si
;
5492 /* Note that the BSI iterator bump happens at the bottom of the
5493 loop and no bump is necessary if we're removing the statement
5494 referenced by the current BSI. */
5496 for (si
= gsi_start_bb (bb
); !gsi_end_p (si
);)
5498 gimple stmt
= gsi_stmt (si
);
5501 if (is_gimple_assign (stmt
)
5502 && gimple_assign_rhs_code (stmt
) == ASSERT_EXPR
)
5504 tree rhs
= gimple_assign_rhs1 (stmt
);
5506 tree cond
= fold (ASSERT_EXPR_COND (rhs
));
5507 use_operand_p use_p
;
5508 imm_use_iterator iter
;
5510 gcc_assert (cond
!= boolean_false_node
);
5512 /* Propagate the RHS into every use of the LHS. */
5513 var
= ASSERT_EXPR_VAR (rhs
);
5514 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
,
5515 gimple_assign_lhs (stmt
))
5516 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
5518 SET_USE (use_p
, var
);
5519 gcc_assert (TREE_CODE (var
) == SSA_NAME
);
5522 /* And finally, remove the copy, it is not needed. */
5523 gsi_remove (&si
, true);
5524 release_defs (stmt
);
5532 /* Return true if STMT is interesting for VRP. */
5535 stmt_interesting_for_vrp (gimple stmt
)
5537 if (gimple_code (stmt
) == GIMPLE_PHI
5538 && is_gimple_reg (gimple_phi_result (stmt
))
5539 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt
)))
5540 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt
)))))
5542 else if (is_gimple_assign (stmt
) || is_gimple_call (stmt
))
5544 tree lhs
= gimple_get_lhs (stmt
);
5546 /* In general, assignments with virtual operands are not useful
5547 for deriving ranges, with the obvious exception of calls to
5548 builtin functions. */
5549 if (lhs
&& TREE_CODE (lhs
) == SSA_NAME
5550 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
5551 || POINTER_TYPE_P (TREE_TYPE (lhs
)))
5552 && ((is_gimple_call (stmt
)
5553 && gimple_call_fndecl (stmt
) != NULL_TREE
5554 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt
)))
5555 || !gimple_vuse (stmt
)))
5558 else if (gimple_code (stmt
) == GIMPLE_COND
5559 || gimple_code (stmt
) == GIMPLE_SWITCH
)
5566 /* Initialize local data structures for VRP. */
5569 vrp_initialize (void)
5573 vr_value
= XCNEWVEC (value_range_t
*, num_ssa_names
);
5574 vr_phi_edge_counts
= XCNEWVEC (int, num_ssa_names
);
5578 gimple_stmt_iterator si
;
5580 for (si
= gsi_start_phis (bb
); !gsi_end_p (si
); gsi_next (&si
))
5582 gimple phi
= gsi_stmt (si
);
5583 if (!stmt_interesting_for_vrp (phi
))
5585 tree lhs
= PHI_RESULT (phi
);
5586 set_value_range_to_varying (get_value_range (lhs
));
5587 prop_set_simulate_again (phi
, false);
5590 prop_set_simulate_again (phi
, true);
5593 for (si
= gsi_start_bb (bb
); !gsi_end_p (si
); gsi_next (&si
))
5595 gimple stmt
= gsi_stmt (si
);
5597 /* If the statement is a control insn, then we do not
5598 want to avoid simulating the statement once. Failure
5599 to do so means that those edges will never get added. */
5600 if (stmt_ends_bb_p (stmt
))
5601 prop_set_simulate_again (stmt
, true);
5602 else if (!stmt_interesting_for_vrp (stmt
))
5606 FOR_EACH_SSA_TREE_OPERAND (def
, stmt
, i
, SSA_OP_DEF
)
5607 set_value_range_to_varying (get_value_range (def
));
5608 prop_set_simulate_again (stmt
, false);
5611 prop_set_simulate_again (stmt
, true);
/* Visit assignment STMT.  If it produces an interesting range, record
   the SSA name in *OUTPUT_P.  */
5620 static enum ssa_prop_result
5621 vrp_visit_assignment_or_call (gimple stmt
, tree
*output_p
)
5625 enum gimple_code code
= gimple_code (stmt
);
5626 lhs
= gimple_get_lhs (stmt
);
5628 /* We only keep track of ranges in integral and pointer types. */
5629 if (TREE_CODE (lhs
) == SSA_NAME
5630 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
5631 /* It is valid to have NULL MIN/MAX values on a type. See
5632 build_range_type. */
5633 && TYPE_MIN_VALUE (TREE_TYPE (lhs
))
5634 && TYPE_MAX_VALUE (TREE_TYPE (lhs
)))
5635 || POINTER_TYPE_P (TREE_TYPE (lhs
))))
5637 value_range_t new_vr
= { VR_UNDEFINED
, NULL_TREE
, NULL_TREE
, NULL
};
5639 if (code
== GIMPLE_CALL
)
5640 extract_range_basic (&new_vr
, stmt
);
5642 extract_range_from_assignment (&new_vr
, stmt
);
5644 if (update_value_range (lhs
, &new_vr
))
5648 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5650 fprintf (dump_file
, "Found new range for ");
5651 print_generic_expr (dump_file
, lhs
, 0);
5652 fprintf (dump_file
, ": ");
5653 dump_value_range (dump_file
, &new_vr
);
5654 fprintf (dump_file
, "\n\n");
5657 if (new_vr
.type
== VR_VARYING
)
5658 return SSA_PROP_VARYING
;
5660 return SSA_PROP_INTERESTING
;
5663 return SSA_PROP_NOT_INTERESTING
;
5666 /* Every other statement produces no useful ranges. */
5667 FOR_EACH_SSA_TREE_OPERAND (def
, stmt
, iter
, SSA_OP_DEF
)
5668 set_value_range_to_varying (get_value_range (def
));
5670 return SSA_PROP_VARYING
;
/* Helper that gets the value range of the SSA_NAME with version I,
   or a symbolic range containing only the SSA_NAME if its value range
   is varying or undefined.  */
5677 static inline value_range_t
5678 get_vr_for_comparison (int i
)
5680 value_range_t vr
= *(vr_value
[i
]);
5682 /* If name N_i does not have a valid range, use N_i as its own
5683 range. This allows us to compare against names that may
5684 have N_i in their ranges. */
5685 if (vr
.type
== VR_VARYING
|| vr
.type
== VR_UNDEFINED
)
5688 vr
.min
= ssa_name (i
);
5689 vr
.max
= ssa_name (i
);
/* Compare all the value ranges for names equivalent to VAR with VAL
   using comparison code COMP.  Return the same value returned by
   compare_range_with_value, including the setting of
   *STRICT_OVERFLOW_P.  */
5701 compare_name_with_value (enum tree_code comp
, tree var
, tree val
,
5702 bool *strict_overflow_p
)
5708 int used_strict_overflow
;
5710 value_range_t equiv_vr
;
5712 /* Get the set of equivalences for VAR. */
5713 e
= get_value_range (var
)->equiv
;
5715 /* Start at -1. Set it to 0 if we do a comparison without relying
5716 on overflow, or 1 if all comparisons rely on overflow. */
5717 used_strict_overflow
= -1;
5719 /* Compare vars' value range with val. */
5720 equiv_vr
= get_vr_for_comparison (SSA_NAME_VERSION (var
));
5722 retval
= compare_range_with_value (comp
, &equiv_vr
, val
, &sop
);
5724 used_strict_overflow
= sop
? 1 : 0;
5726 /* If the equiv set is empty we have done all work we need to do. */
5730 && used_strict_overflow
> 0)
5731 *strict_overflow_p
= true;
5735 EXECUTE_IF_SET_IN_BITMAP (e
, 0, i
, bi
)
5737 equiv_vr
= get_vr_for_comparison (i
);
5739 t
= compare_range_with_value (comp
, &equiv_vr
, val
, &sop
);
5742 /* If we get different answers from different members
5743 of the equivalence set this check must be in a dead
5744 code region. Folding it to a trap representation
5745 would be correct here. For now just return don't-know. */
5755 used_strict_overflow
= 0;
5756 else if (used_strict_overflow
< 0)
5757 used_strict_overflow
= 1;
5762 && used_strict_overflow
> 0)
5763 *strict_overflow_p
= true;
/* Given a comparison code COMP and names N1 and N2, compare all the
   ranges equivalent to N1 against all the ranges equivalent to N2
   to determine the value of N1 COMP N2.  Return the same value
   returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
   whether we relied on an overflow infinity in the comparison.  */
5777 compare_names (enum tree_code comp
, tree n1
, tree n2
,
5778 bool *strict_overflow_p
)
5782 bitmap_iterator bi1
, bi2
;
5784 int used_strict_overflow
;
5785 static bitmap_obstack
*s_obstack
= NULL
;
5786 static bitmap s_e1
= NULL
, s_e2
= NULL
;
5788 /* Compare the ranges of every name equivalent to N1 against the
5789 ranges of every name equivalent to N2. */
5790 e1
= get_value_range (n1
)->equiv
;
5791 e2
= get_value_range (n2
)->equiv
;
5793 /* Use the fake bitmaps if e1 or e2 are not available. */
5794 if (s_obstack
== NULL
)
5796 s_obstack
= XNEW (bitmap_obstack
);
5797 bitmap_obstack_initialize (s_obstack
);
5798 s_e1
= BITMAP_ALLOC (s_obstack
);
5799 s_e2
= BITMAP_ALLOC (s_obstack
);
5806 /* Add N1 and N2 to their own set of equivalences to avoid
5807 duplicating the body of the loop just to check N1 and N2
5809 bitmap_set_bit (e1
, SSA_NAME_VERSION (n1
));
5810 bitmap_set_bit (e2
, SSA_NAME_VERSION (n2
));
5812 /* If the equivalence sets have a common intersection, then the two
5813 names can be compared without checking their ranges. */
5814 if (bitmap_intersect_p (e1
, e2
))
5816 bitmap_clear_bit (e1
, SSA_NAME_VERSION (n1
));
5817 bitmap_clear_bit (e2
, SSA_NAME_VERSION (n2
));
5819 return (comp
== EQ_EXPR
|| comp
== GE_EXPR
|| comp
== LE_EXPR
)
5821 : boolean_false_node
;
5824 /* Start at -1. Set it to 0 if we do a comparison without relying
5825 on overflow, or 1 if all comparisons rely on overflow. */
5826 used_strict_overflow
= -1;
5828 /* Otherwise, compare all the equivalent ranges. First, add N1 and
5829 N2 to their own set of equivalences to avoid duplicating the body
5830 of the loop just to check N1 and N2 ranges. */
5831 EXECUTE_IF_SET_IN_BITMAP (e1
, 0, i1
, bi1
)
5833 value_range_t vr1
= get_vr_for_comparison (i1
);
5835 t
= retval
= NULL_TREE
;
5836 EXECUTE_IF_SET_IN_BITMAP (e2
, 0, i2
, bi2
)
5840 value_range_t vr2
= get_vr_for_comparison (i2
);
5842 t
= compare_ranges (comp
, &vr1
, &vr2
, &sop
);
5845 /* If we get different answers from different members
5846 of the equivalence set this check must be in a dead
5847 code region. Folding it to a trap representation
5848 would be correct here. For now just return don't-know. */
5852 bitmap_clear_bit (e1
, SSA_NAME_VERSION (n1
));
5853 bitmap_clear_bit (e2
, SSA_NAME_VERSION (n2
));
5859 used_strict_overflow
= 0;
5860 else if (used_strict_overflow
< 0)
5861 used_strict_overflow
= 1;
5867 bitmap_clear_bit (e1
, SSA_NAME_VERSION (n1
));
5868 bitmap_clear_bit (e2
, SSA_NAME_VERSION (n2
));
5869 if (used_strict_overflow
> 0)
5870 *strict_overflow_p
= true;
5875 /* None of the equivalent ranges are useful in computing this
5877 bitmap_clear_bit (e1
, SSA_NAME_VERSION (n1
));
5878 bitmap_clear_bit (e2
, SSA_NAME_VERSION (n2
));
5882 /* Helper function for vrp_evaluate_conditional_warnv. */
5885 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code
,
5887 bool * strict_overflow_p
)
5889 value_range_t
*vr0
, *vr1
;
5891 vr0
= (TREE_CODE (op0
) == SSA_NAME
) ? get_value_range (op0
) : NULL
;
5892 vr1
= (TREE_CODE (op1
) == SSA_NAME
) ? get_value_range (op1
) : NULL
;
5895 return compare_ranges (code
, vr0
, vr1
, strict_overflow_p
);
5896 else if (vr0
&& vr1
== NULL
)
5897 return compare_range_with_value (code
, vr0
, op1
, strict_overflow_p
);
5898 else if (vr0
== NULL
&& vr1
)
5899 return (compare_range_with_value
5900 (swap_tree_comparison (code
), vr1
, op0
, strict_overflow_p
));
5904 /* Helper function for vrp_evaluate_conditional_warnv. */
5907 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code
, tree op0
,
5908 tree op1
, bool use_equiv_p
,
5909 bool *strict_overflow_p
, bool *only_ranges
)
5913 *only_ranges
= true;
5915 /* We only deal with integral and pointer types. */
5916 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0
))
5917 && !POINTER_TYPE_P (TREE_TYPE (op0
)))
5923 && (ret
= vrp_evaluate_conditional_warnv_with_ops_using_ranges
5924 (code
, op0
, op1
, strict_overflow_p
)))
5926 *only_ranges
= false;
5927 if (TREE_CODE (op0
) == SSA_NAME
&& TREE_CODE (op1
) == SSA_NAME
)
5928 return compare_names (code
, op0
, op1
, strict_overflow_p
);
5929 else if (TREE_CODE (op0
) == SSA_NAME
)
5930 return compare_name_with_value (code
, op0
, op1
, strict_overflow_p
);
5931 else if (TREE_CODE (op1
) == SSA_NAME
)
5932 return (compare_name_with_value
5933 (swap_tree_comparison (code
), op1
, op0
, strict_overflow_p
));
5936 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code
, op0
, op1
,
/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
   information.  Return NULL if the conditional cannot be evaluated.
   The ranges of all the names equivalent with the operands in COND
   will be used when trying to compute the value.  If the result is
   based on undefined signed overflow, issue a warning if
   appropriate.  */
5949 vrp_evaluate_conditional (enum tree_code code
, tree op0
, tree op1
, gimple stmt
)
5955 /* Some passes and foldings leak constants with overflow flag set
5956 into the IL. Avoid doing wrong things with these and bail out. */
5957 if ((TREE_CODE (op0
) == INTEGER_CST
5958 && TREE_OVERFLOW (op0
))
5959 || (TREE_CODE (op1
) == INTEGER_CST
5960 && TREE_OVERFLOW (op1
)))
5964 ret
= vrp_evaluate_conditional_warnv_with_ops (code
, op0
, op1
, true, &sop
,
5969 enum warn_strict_overflow_code wc
;
5970 const char* warnmsg
;
5972 if (is_gimple_min_invariant (ret
))
5974 wc
= WARN_STRICT_OVERFLOW_CONDITIONAL
;
5975 warnmsg
= G_("assuming signed overflow does not occur when "
5976 "simplifying conditional to constant");
5980 wc
= WARN_STRICT_OVERFLOW_COMPARISON
;
5981 warnmsg
= G_("assuming signed overflow does not occur when "
5982 "simplifying conditional");
5985 if (issue_strict_overflow_warning (wc
))
5987 location_t location
;
5989 if (!gimple_has_location (stmt
))
5990 location
= input_location
;
5992 location
= gimple_location (stmt
);
5993 warning_at (location
, OPT_Wstrict_overflow
, "%s", warnmsg
);
5997 if (warn_type_limits
5998 && ret
&& only_ranges
5999 && TREE_CODE_CLASS (code
) == tcc_comparison
6000 && TREE_CODE (op0
) == SSA_NAME
)
6002 /* If the comparison is being folded and the operand on the LHS
6003 is being compared against a constant value that is outside of
6004 the natural range of OP0's type, then the predicate will
6005 always fold regardless of the value of OP0. If -Wtype-limits
6006 was specified, emit a warning. */
6007 tree type
= TREE_TYPE (op0
);
6008 value_range_t
*vr0
= get_value_range (op0
);
6010 if (vr0
->type
!= VR_VARYING
6011 && INTEGRAL_TYPE_P (type
)
6012 && vrp_val_is_min (vr0
->min
)
6013 && vrp_val_is_max (vr0
->max
)
6014 && is_gimple_min_invariant (op1
))
6016 location_t location
;
6018 if (!gimple_has_location (stmt
))
6019 location
= input_location
;
6021 location
= gimple_location (stmt
);
6023 warning_at (location
, OPT_Wtype_limits
,
6025 ? G_("comparison always false "
6026 "due to limited range of data type")
6027 : G_("comparison always true "
6028 "due to limited range of data type"));
/* Visit conditional statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */
6041 static enum ssa_prop_result
6042 vrp_visit_cond_stmt (gimple stmt
, edge
*taken_edge_p
)
6047 *taken_edge_p
= NULL
;
6049 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6054 fprintf (dump_file
, "\nVisiting conditional with predicate: ");
6055 print_gimple_stmt (dump_file
, stmt
, 0, 0);
6056 fprintf (dump_file
, "\nWith known ranges\n");
6058 FOR_EACH_SSA_TREE_OPERAND (use
, stmt
, i
, SSA_OP_USE
)
6060 fprintf (dump_file
, "\t");
6061 print_generic_expr (dump_file
, use
, 0);
6062 fprintf (dump_file
, ": ");
6063 dump_value_range (dump_file
, vr_value
[SSA_NAME_VERSION (use
)]);
6066 fprintf (dump_file
, "\n");
6069 /* Compute the value of the predicate COND by checking the known
6070 ranges of each of its operands.
6072 Note that we cannot evaluate all the equivalent ranges here
6073 because those ranges may not yet be final and with the current
6074 propagation strategy, we cannot determine when the value ranges
6075 of the names in the equivalence set have changed.
6077 For instance, given the following code fragment
6081 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
6085 Assume that on the first visit to i_14, i_5 has the temporary
6086 range [8, 8] because the second argument to the PHI function is
6087 not yet executable. We derive the range ~[0, 0] for i_14 and the
6088 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
6089 the first time, since i_14 is equivalent to the range [8, 8], we
6090 determine that the predicate is always false.
6092 On the next round of propagation, i_13 is determined to be
6093 VARYING, which causes i_5 to drop down to VARYING. So, another
6094 visit to i_14 is scheduled. In this second visit, we compute the
6095 exact same range and equivalence set for i_14, namely ~[0, 0] and
6096 { i_5 }. But we did not have the previous range for i_5
6097 registered, so vrp_visit_assignment thinks that the range for
6098 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
6099 is not visited again, which stops propagation from visiting
6100 statements in the THEN clause of that if().
6102 To properly fix this we would need to keep the previous range
6103 value for the names in the equivalence set. This way we would've
6104 discovered that from one visit to the other i_5 changed from
6105 range [8, 8] to VR_VARYING.
6107 However, fixing this apparent limitation may not be worth the
6108 additional checking. Testing on several code bases (GCC, DLV,
6109 MICO, TRAMP3D and SPEC2000) showed that doing this results in
6110 4 more predicates folded in SPEC. */
6113 val
= vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt
),
6114 gimple_cond_lhs (stmt
),
6115 gimple_cond_rhs (stmt
),
6120 *taken_edge_p
= find_taken_edge (gimple_bb (stmt
), val
);
6123 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6125 "\nIgnoring predicate evaluation because "
6126 "it assumes that signed overflow is undefined");
6131 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6133 fprintf (dump_file
, "\nPredicate evaluates to: ");
6134 if (val
== NULL_TREE
)
6135 fprintf (dump_file
, "DON'T KNOW\n");
6137 print_generic_stmt (dump_file
, val
, 0);
6140 return (*taken_edge_p
) ? SSA_PROP_INTERESTING
: SSA_PROP_VARYING
;
/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
   that includes the value VAL.  The search is restricted to the range
   [START_IDX, n - 1] where n is the size of VEC.

   If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
   returned.

   If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
   it is placed in IDX and false is returned.

   If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
   returned.  */
6157 find_case_label_index (gimple stmt
, size_t start_idx
, tree val
, size_t *idx
)
6159 size_t n
= gimple_switch_num_labels (stmt
);
6162 /* Find case label for minimum of the value range or the next one.
6163 At each iteration we are searching in [low, high - 1]. */
6165 for (low
= start_idx
, high
= n
; high
!= low
; )
6169 /* Note that i != high, so we never ask for n. */
6170 size_t i
= (high
+ low
) / 2;
6171 t
= gimple_switch_label (stmt
, i
);
6173 /* Cache the result of comparing CASE_LOW and val. */
6174 cmp
= tree_int_cst_compare (CASE_LOW (t
), val
);
6178 /* Ranges cannot be empty. */
6187 if (CASE_HIGH (t
) != NULL
6188 && tree_int_cst_compare (CASE_HIGH (t
), val
) >= 0)
/* Searches the case label vector VEC for the range of CASE_LABELs that is used
   for values between MIN and MAX.  The first index is placed in MIN_IDX.  The
   last index is placed in MAX_IDX.  If the range of CASE_LABELs is empty
   then MAX_IDX < MIN_IDX.
   Returns true if the default label is not needed.  */
6207 find_case_label_range (gimple stmt
, tree min
, tree max
, size_t *min_idx
,
6211 bool min_take_default
= !find_case_label_index (stmt
, 1, min
, &i
);
6212 bool max_take_default
= !find_case_label_index (stmt
, i
, max
, &j
);
6216 && max_take_default
)
6218 /* Only the default case label reached.
6219 Return an empty range. */
6226 bool take_default
= min_take_default
|| max_take_default
;
6230 if (max_take_default
)
6233 /* If the case label range is continuous, we do not need
6234 the default case label. Verify that. */
6235 high
= CASE_LOW (gimple_switch_label (stmt
, i
));
6236 if (CASE_HIGH (gimple_switch_label (stmt
, i
)))
6237 high
= CASE_HIGH (gimple_switch_label (stmt
, i
));
6238 for (k
= i
+ 1; k
<= j
; ++k
)
6240 low
= CASE_LOW (gimple_switch_label (stmt
, k
));
6241 if (!integer_onep (int_const_binop (MINUS_EXPR
, low
, high
, 0)))
6243 take_default
= true;
6247 if (CASE_HIGH (gimple_switch_label (stmt
, k
)))
6248 high
= CASE_HIGH (gimple_switch_label (stmt
, k
));
6253 return !take_default
;
/* Visit switch statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */
6262 static enum ssa_prop_result
6263 vrp_visit_switch_stmt (gimple stmt
, edge
*taken_edge_p
)
6267 size_t i
= 0, j
= 0;
6270 *taken_edge_p
= NULL
;
6271 op
= gimple_switch_index (stmt
);
6272 if (TREE_CODE (op
) != SSA_NAME
)
6273 return SSA_PROP_VARYING
;
6275 vr
= get_value_range (op
);
6276 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6278 fprintf (dump_file
, "\nVisiting switch expression with operand ");
6279 print_generic_expr (dump_file
, op
, 0);
6280 fprintf (dump_file
, " with known range ");
6281 dump_value_range (dump_file
, vr
);
6282 fprintf (dump_file
, "\n");
6285 if (vr
->type
!= VR_RANGE
6286 || symbolic_range_p (vr
))
6287 return SSA_PROP_VARYING
;
6289 /* Find the single edge that is taken from the switch expression. */
6290 take_default
= !find_case_label_range (stmt
, vr
->min
, vr
->max
, &i
, &j
);
6292 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
6296 gcc_assert (take_default
);
6297 val
= gimple_switch_default_label (stmt
);
6301 /* Check if labels with index i to j and maybe the default label
6302 are all reaching the same label. */
6304 val
= gimple_switch_label (stmt
, i
);
6306 && CASE_LABEL (gimple_switch_default_label (stmt
))
6307 != CASE_LABEL (val
))
6309 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6310 fprintf (dump_file
, " not a single destination for this "
6312 return SSA_PROP_VARYING
;
6314 for (++i
; i
<= j
; ++i
)
6316 if (CASE_LABEL (gimple_switch_label (stmt
, i
)) != CASE_LABEL (val
))
6318 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6319 fprintf (dump_file
, " not a single destination for this "
6321 return SSA_PROP_VARYING
;
6326 *taken_edge_p
= find_edge (gimple_bb (stmt
),
6327 label_to_block (CASE_LABEL (val
)));
6329 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6331 fprintf (dump_file
, " will take edge to ");
6332 print_generic_stmt (dump_file
, CASE_LABEL (val
), 0);
6335 return SSA_PROP_INTERESTING
;
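/* For instance (hypothetical GIMPLE, in the style of the examples used
   elsewhere in this file), given

	i_2 = ASSERT_EXPR <i_1, i_1 == 4>
	switch (i_2) { case 0 ... 3: goto L1; case 4 ... 7: goto L2; ... }

   the range of i_2 is [4, 4], a single case label covers it, and the
   edge to L2 is recorded in *TAKEN_EDGE_P.  */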
/* Evaluate statement STMT.  If the statement produces a useful range,
   return SSA_PROP_INTERESTING and record the SSA name with the
   interesting range into *OUTPUT_P.

   If STMT is a conditional branch and we can determine its truth
   value, the taken edge is recorded in *TAKEN_EDGE_P.

   If STMT produces a varying value, return SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
{
  tree def;
  ssa_op_iter iter;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting statement:\n");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
      fprintf (dump_file, "\n");
    }

  if (!stmt_interesting_for_vrp (stmt))
    gcc_assert (stmt_ends_bb_p (stmt));
  else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
    {
      /* In general, assignments with virtual operands are not useful
         for deriving ranges, with the obvious exception of calls to
         builtin functions.  */
      if ((is_gimple_call (stmt)
           && gimple_call_fndecl (stmt) != NULL_TREE
           && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
          || !gimple_vuse (stmt))
        return vrp_visit_assignment_or_call (stmt, output_p);
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    return vrp_visit_cond_stmt (stmt, taken_edge_p);
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    return vrp_visit_switch_stmt (stmt, taken_edge_p);

  /* All other statements produce nothing of interest for VRP, so mark
     their outputs varying and prevent further simulation.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
    set_value_range_to_varying (get_value_range (def));

  return SSA_PROP_VARYING;
}
/* Meet operation for value ranges.  Given two value ranges VR0 and
   VR1, store in VR0 a range that contains both VR0 and VR1.  This
   may not be the smallest possible such range.  */

static void
vrp_meet (value_range_t *vr0, value_range_t *vr1)
{
  if (vr0->type == VR_UNDEFINED)
    {
      copy_value_range (vr0, vr1);
      return;
    }

  if (vr1->type == VR_UNDEFINED)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr0->type == VR_VARYING)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr1->type == VR_VARYING)
    {
      set_value_range_to_varying (vr0);
      return;
    }

  if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
    {
      int cmp;
      tree min, max;

      /* Compute the convex hull of the ranges.  The lower limit of
         the new range is the minimum of the two ranges.  If they
         cannot be compared, then give up.  */
      cmp = compare_values (vr0->min, vr1->min);
      if (cmp == 0 || cmp == 1)
        min = vr1->min;
      else if (cmp == -1)
        min = vr0->min;
      else
        goto give_up;

      /* Similarly, the upper limit of the new range is the maximum
         of the two ranges.  If they cannot be compared, then
         give up.  */
      cmp = compare_values (vr0->max, vr1->max);
      if (cmp == 0 || cmp == -1)
        max = vr1->max;
      else if (cmp == 1)
        max = vr0->max;
      else
        goto give_up;

      /* Check for useless ranges.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (min))
          && ((vrp_val_is_min (min) || is_overflow_infinity (min))
              && (vrp_val_is_max (max) || is_overflow_infinity (max))))
        goto give_up;

      /* The resulting set of equivalences is the intersection of
         the two sets.  */
      if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
        bitmap_and_into (vr0->equiv, vr1->equiv);
      else if (vr0->equiv && !vr1->equiv)
        bitmap_clear (vr0->equiv);

      set_value_range (vr0, vr0->type, min, max, vr0->equiv);
    }
  else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
    {
      /* Two anti-ranges meet only if their complements intersect.
         Only handle the case of identical ranges.  */
      if (compare_values (vr0->min, vr1->min) == 0
          && compare_values (vr0->max, vr1->max) == 0
          && compare_values (vr0->min, vr0->max) == 0)
        {
          /* The resulting set of equivalences is the intersection of
             the two sets.  */
          if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
            bitmap_and_into (vr0->equiv, vr1->equiv);
          else if (vr0->equiv && !vr1->equiv)
            bitmap_clear (vr0->equiv);
        }
      else
        goto give_up;
    }
  else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
    {
      /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
         only handle the case where the ranges have an empty intersection.
         The result of the meet operation is the anti-range.  */
      if (!symbolic_range_p (vr0)
          && !symbolic_range_p (vr1)
          && !value_ranges_intersect_p (vr0, vr1))
        {
          /* Copy most of VR1 into VR0.  Don't copy VR1's equivalence
             set.  We need to compute the intersection of the two
             equivalence sets.  */
          if (vr1->type == VR_ANTI_RANGE)
            set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);

          /* The resulting set of equivalences is the intersection of
             the two sets.  */
          if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
            bitmap_and_into (vr0->equiv, vr1->equiv);
          else if (vr0->equiv && !vr1->equiv)
            bitmap_clear (vr0->equiv);
        }
      else
        goto give_up;
    }
  else
    gcc_unreachable ();

  return;

give_up:
  /* Failed to find an efficient meet.  Before giving up and setting
     the result to VARYING, see if we can at least derive a useful
     anti-range.  FIXME, all this nonsense about distinguishing
     anti-ranges from ranges is necessary because of the odd
     semantics of range_includes_zero_p and friends.  */
  if (!symbolic_range_p (vr0)
      && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0))
          || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0)))
      && !symbolic_range_p (vr1)
      && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1))
          || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1))))
    {
      set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));

      /* Since this meet operation did not result from the meeting of
         two equivalent names, VR0 cannot have any equivalences.  */
      if (vr0->equiv)
        bitmap_clear (vr0->equiv);
    }
  else
    set_value_range_to_varying (vr0);
}
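/* Two small examples of the meet operation (illustrative values only,
   for an integer type wide enough to hold them):

	[1, 5]  meet  [3, 10]  =>  [1, 10]   convex hull of both ranges
	[1, 5]  meet  ~[0, 0]  =>  ~[0, 0]   the anti-range already
					     contains [1, 5]

   The result is only guaranteed to contain both inputs; it is not
   necessarily the tightest such range.  */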
/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.  */

static enum ssa_prop_result
vrp_visit_phi_node (gimple phi)
{
  size_t i;
  tree lhs = PHI_RESULT (phi);
  value_range_t *lhs_vr = get_value_range (lhs);
  value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
  int edges, old_edges;
  struct loop *l;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  edges = 0;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "\n    Argument #%d (%d -> %d %sexecutable)\n",
                   (int) i, e->src->index, e->dest->index,
                   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
        }

      if (e->flags & EDGE_EXECUTABLE)
        {
          tree arg = PHI_ARG_DEF (phi, i);
          value_range_t vr_arg;

          ++edges;

          if (TREE_CODE (arg) == SSA_NAME)
            {
              vr_arg = *(get_value_range (arg));
            }
          else
            {
              if (is_overflow_infinity (arg))
                {
                  arg = copy_node (arg);
                  TREE_OVERFLOW (arg) = 0;
                }

              vr_arg.type = VR_RANGE;
              vr_arg.min = arg;
              vr_arg.max = arg;
              vr_arg.equiv = NULL;
            }

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "\t");
              print_generic_expr (dump_file, arg, dump_flags);
              fprintf (dump_file, "\n\tValue: ");
              dump_value_range (dump_file, &vr_arg);
              fprintf (dump_file, "\n");
            }

          vrp_meet (&vr_result, &vr_arg);

          if (vr_result.type == VR_VARYING)
            break;
        }
    }

  if (vr_result.type == VR_VARYING)
    goto varying;

  old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
  vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;

  /* To prevent infinite iterations in the algorithm, derive ranges
     when the new value is slightly bigger or smaller than the
     previous one.  We don't do this if we have seen a new executable
     edge; this helps us avoid an overflow infinity for conditionals
     which are not in a loop.  */
  if (edges > 0
      && edges == old_edges)
    {
      int cmp_min = compare_values (lhs_vr->min, vr_result.min);
      int cmp_max = compare_values (lhs_vr->max, vr_result.max);

      /* For non VR_RANGE or for pointers fall back to varying if
         the range changed.  */
      if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
           || POINTER_TYPE_P (TREE_TYPE (lhs)))
          && (cmp_min != 0 || cmp_max != 0))
        goto varying;

      /* If the new minimum is smaller or larger than the previous
         one, go all the way to -INF.  In the first case, to avoid
         iterating millions of times to reach -INF, and in the
         other case to avoid infinite bouncing between different
         minimums.  */
      if (cmp_min > 0 || cmp_min < 0)
        {
          if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
              || !vrp_var_may_overflow (lhs, phi))
            vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
          else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
            vr_result.min =
              negative_overflow_infinity (TREE_TYPE (vr_result.min));
        }

      /* Similarly, if the new maximum is smaller or larger than
         the previous one, go all the way to +INF.  */
      if (cmp_max < 0 || cmp_max > 0)
        {
          if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
              || !vrp_var_may_overflow (lhs, phi))
            vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
          else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
            vr_result.max =
              positive_overflow_infinity (TREE_TYPE (vr_result.max));
        }

      /* If we dropped either bound to +-INF then if this is a loop
         PHI node SCEV may know more about its value-range.  */
      if ((cmp_min > 0 || cmp_min < 0
           || cmp_max < 0 || cmp_max > 0)
          && current_loops
          && (l = loop_containing_stmt (phi))
          && l->header == gimple_bb (phi))
        adjust_range_with_scev (&vr_result, l, phi, lhs);

      /* If we will end up with a (-INF, +INF) range, set it to
         VARYING.  Same if the previous max value was invalid for
         the type and we end up with vr_result.min > vr_result.max.  */
      if ((vrp_val_is_max (vr_result.max)
           && vrp_val_is_min (vr_result.min))
          || compare_values (vr_result.min,
                             vr_result.max) > 0)
        goto varying;
    }

  /* If the new range is different than the previous value, keep
     iterating.  */
  if (update_value_range (lhs, &vr_result))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Found new range for ");
          print_generic_expr (dump_file, lhs, 0);
          fprintf (dump_file, ": ");
          dump_value_range (dump_file, &vr_result);
          fprintf (dump_file, "\n\n");
        }

      return SSA_PROP_INTERESTING;
    }

  /* Nothing changed, don't add outgoing edges.  */
  return SSA_PROP_NOT_INTERESTING;

  /* No match found.  Set the LHS to VARYING.  */
varying:
  set_value_range_to_varying (lhs_vr);
  return SSA_PROP_VARYING;
}
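/* A sketch of the iteration-limiting logic above (not taken from a dump
   file): for a loop counter

	i_1 = PHI <0(entry), i_7(latch)>
	...
	i_7 = i_1 + 1;

   the first visits would produce [0, 0], [0, 1], [0, 2], ...  Rather than
   iterating once per possible value, the code notices that only the
   maximum keeps growing and immediately pushes it to +INF (or to the
   overflow infinity of the type); SCEV may then tighten the bound again
   for loops with a known iteration count.  */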
/* Simplify boolean operations if the source is known
   to be already a boolean.  */

static bool
simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  value_range_t *vr;
  bool sop = false;
  tree val = NULL;
  tree op0, op1;
  bool need_conversion;

  op0 = gimple_assign_rhs1 (stmt);
  if (TYPE_PRECISION (TREE_TYPE (op0)) != 1)
    {
      if (TREE_CODE (op0) != SSA_NAME)
        return false;
      vr = get_value_range (op0);

      val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
      if (!val || !integer_onep (val))
        return false;

      val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
      if (!val || !integer_onep (val))
        return false;
    }

  if (rhs_code == TRUTH_NOT_EXPR)
    {
      rhs_code = NE_EXPR;
      op1 = build_int_cst (TREE_TYPE (op0), 1);
    }
  else
    {
      op1 = gimple_assign_rhs2 (stmt);

      /* Reduce number of cases to handle.  */
      if (is_gimple_min_invariant (op1))
        {
          /* Exclude anything that should have been already folded.  */
          if (rhs_code != EQ_EXPR
              && rhs_code != NE_EXPR
              && rhs_code != TRUTH_XOR_EXPR)
            return false;

          if (!integer_zerop (op1)
              && !integer_onep (op1)
              && !integer_all_onesp (op1))
            return false;

          /* Limit the number of cases we have to consider.  */
          if (rhs_code == EQ_EXPR)
            {
              rhs_code = NE_EXPR;
              op1 = fold_unary (TRUTH_NOT_EXPR, TREE_TYPE (op1), op1);
            }
        }
      else
        {
          /* Punt on A == B as there is no BIT_XNOR_EXPR.  */
          if (rhs_code == EQ_EXPR)
            return false;

          if (TYPE_PRECISION (TREE_TYPE (op1)) != 1)
            {
              vr = get_value_range (op1);
              val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
              if (!val || !integer_onep (val))
                return false;

              val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
              if (!val || !integer_onep (val))
                return false;
            }
        }
    }

  if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
    {
      location_t location;

      if (!gimple_has_location (stmt))
        location = input_location;
      else
        location = gimple_location (stmt);

      if (rhs_code == TRUTH_AND_EXPR || rhs_code == TRUTH_OR_EXPR)
        warning_at (location, OPT_Wstrict_overflow,
                    _("assuming signed overflow does not occur when "
                      "simplifying && or || to & or |"));
      else
        warning_at (location, OPT_Wstrict_overflow,
                    _("assuming signed overflow does not occur when "
                      "simplifying ==, != or ! to identity or ^"));
    }

  need_conversion =
    !useless_type_conversion_p (TREE_TYPE (gimple_assign_lhs (stmt)),
                                TREE_TYPE (op0));

  /* Make sure to not sign-extend -1 as a boolean value.  */
  if (need_conversion
      && !TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_PRECISION (TREE_TYPE (op0)) == 1)
    return false;

  switch (rhs_code)
    {
    case TRUTH_AND_EXPR:
      rhs_code = BIT_AND_EXPR;
      break;
    case TRUTH_OR_EXPR:
      rhs_code = BIT_IOR_EXPR;
      break;
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      if (integer_zerop (op1))
        {
          gimple_assign_set_rhs_with_ops (gsi,
                                          need_conversion ? NOP_EXPR : SSA_NAME,
                                          op0, NULL);
          update_stmt (gsi_stmt (*gsi));
          return true;
        }

      rhs_code = BIT_XOR_EXPR;
      break;
    default:
      gcc_unreachable ();
    }

  if (need_conversion)
    return false;

  gimple_assign_set_rhs_with_ops (gsi, rhs_code, op0, op1);
  update_stmt (gsi_stmt (*gsi));
  return true;
}
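/* For example (hypothetical GIMPLE): if a_3 and b_5 both have the range
   [0, 1], then

	x_6 = a_3 && b_5;	is rewritten as		x_6 = a_3 & b_5;
	y_7 = a_3 != 0;		is rewritten as		y_7 = a_3;

   which is valid because the bitwise forms agree with the truth forms on
   operands known to be 0 or 1.  */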
/* Simplify a division or modulo operator to a right shift or
   bitwise and if the first operand is unsigned or is greater
   than zero and the second operand is an exact power of two.  */

static bool
simplify_div_or_mod_using_ranges (gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree val = NULL;
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));

  if (TYPE_UNSIGNED (TREE_TYPE (op0)))
    {
      val = integer_one_node;
    }
  else
    {
      bool sop = false;

      val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);

      if (val
          && sop
          && integer_onep (val)
          && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
        {
          location_t location;

          if (!gimple_has_location (stmt))
            location = input_location;
          else
            location = gimple_location (stmt);
          warning_at (location, OPT_Wstrict_overflow,
                      "assuming signed overflow does not occur when "
                      "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
        }
    }

  if (val && integer_onep (val))
    {
      tree t;

      if (rhs_code == TRUNC_DIV_EXPR)
        {
          t = build_int_cst (NULL_TREE, tree_log2 (op1));
          gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
          gimple_assign_set_rhs1 (stmt, op0);
          gimple_assign_set_rhs2 (stmt, t);
        }
      else
        {
          t = build_int_cst (TREE_TYPE (op1), 1);
          t = int_const_binop (MINUS_EXPR, op1, t, 0);
          t = fold_convert (TREE_TYPE (op0), t);

          gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
          gimple_assign_set_rhs1 (stmt, op0);
          gimple_assign_set_rhs2 (stmt, t);
        }

      update_stmt (stmt);
      return true;
    }

  return false;
}
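/* A source-level illustration of the transformation (assuming x is known
   to be non-negative, e.g. from a preceding range check):

	q = x / 8;	becomes		q = x >> 3;
	r = x % 8;	becomes		r = x & 7;

   For a signed x that may be negative the rewrite would be wrong, which
   is why the range (or unsignedness) of the first operand is checked
   first.  */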
/* If the operand to an ABS_EXPR is >= 0, then eliminate the
   ABS_EXPR.  If the operand is <= 0, then simplify the
   ABS_EXPR into a NEGATE_EXPR.  */

static bool
simplify_abs_using_ranges (gimple stmt)
{
  tree val = NULL;
  tree op = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (op);
  value_range_t *vr = get_value_range (op);

  if (TYPE_UNSIGNED (type))
    {
      val = integer_zero_node;
    }
  else if (vr)
    {
      bool sop = false;

      val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
      if (!val)
        {
          sop = false;
          val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
                                          &sop);

          if (val)
            {
              if (integer_zerop (val))
                val = integer_one_node;
              else if (integer_onep (val))
                val = integer_zero_node;
            }
        }

      if (val
          && (integer_onep (val) || integer_zerop (val)))
        {
          if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
            {
              location_t location;

              if (!gimple_has_location (stmt))
                location = input_location;
              else
                location = gimple_location (stmt);
              warning_at (location, OPT_Wstrict_overflow,
                          "assuming signed overflow does not occur when "
                          "simplifying %<abs (X)%> to %<X%> or %<-X%>");
            }

          gimple_assign_set_rhs1 (stmt, op);
          if (integer_onep (val))
            gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
          else
            gimple_assign_set_rhs_code (stmt, SSA_NAME);
          update_stmt (stmt);
          return true;
        }
    }

  return false;
}
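/* For instance, if the range of x_2 is [-10, -1] then ABS (x_2) is
   rewritten as -x_2, and if the range is [0, 10] the ABS_EXPR is dropped
   altogether.  (Illustrative ranges only.)  */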
/* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
   If all the bits that are being cleared by & are already
   known to be zero from VR, or all the bits that are being
   set by | are already known to be one from VR, the bit
   operation is redundant.  */

static bool
simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  tree op = NULL_TREE;
  value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
  value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
  double_int may_be_nonzero0, may_be_nonzero1;
  double_int must_be_nonzero0, must_be_nonzero1;
  double_int mask;

  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *(get_value_range (op0));
  else if (is_gimple_min_invariant (op0))
    set_value_range_to_value (&vr0, op0, NULL);
  else
    return false;

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *(get_value_range (op1));
  else if (is_gimple_min_invariant (op1))
    set_value_range_to_value (&vr1, op1, NULL);
  else
    return false;

  if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
    return false;
  if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
    return false;

  switch (gimple_assign_rhs_code (stmt))
    {
    case BIT_AND_EXPR:
      mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
      if (double_int_zero_p (mask))
        {
          op = op0;
          break;
        }
      mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
      if (double_int_zero_p (mask))
        {
          op = op1;
          break;
        }
      break;
    case BIT_IOR_EXPR:
      mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
      if (double_int_zero_p (mask))
        {
          op = op1;
          break;
        }
      mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
      if (double_int_zero_p (mask))
        {
          op = op0;
          break;
        }
      break;
    default:
      gcc_unreachable ();
    }

  if (op == NULL_TREE)
    return false;

  gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
  update_stmt (gsi_stmt (*gsi));
  return true;
}
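/* For example, if the range of x_4 is [0, 15] then every bit above bit 3
   is known to be zero, so

	y_5 = x_4 & 255;	is rewritten as		y_5 = x_4;

   because the & cannot clear any bit that may be set.  The BIT_IOR_EXPR
   case is symmetric for bits already known to be one.  (Illustrative
   values only.)  */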
/* We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
   a known value range VR.

   If there is one and only one value which will satisfy the
   conditional, then return that value.  Else return NULL.  */

static tree
test_for_singularity (enum tree_code cond_code, tree op0,
                      tree op1, value_range_t *vr)
{
  tree min = NULL;
  tree max = NULL;

  /* Extract minimum/maximum values which satisfy the
     conditional as it was written.  */
  if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      /* This should not be negative infinity; there is no overflow
         here.  */
      min = TYPE_MIN_VALUE (TREE_TYPE (op0));

      max = op1;
      if (cond_code == LT_EXPR && !is_overflow_infinity (max))
        {
          tree one = build_int_cst (TREE_TYPE (op0), 1);
          max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
          if (EXPR_P (max))
            TREE_NO_WARNING (max) = 1;
        }
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      /* This should not be positive infinity; there is no overflow
         here.  */
      max = TYPE_MAX_VALUE (TREE_TYPE (op0));

      min = op1;
      if (cond_code == GT_EXPR && !is_overflow_infinity (min))
        {
          tree one = build_int_cst (TREE_TYPE (op0), 1);
          min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
          if (EXPR_P (min))
            TREE_NO_WARNING (min) = 1;
        }
    }

  /* Now refine the minimum and maximum values using any
     value range information we have for op0.  */
  if (min && max)
    {
      if (compare_values (vr->min, min) == 1)
        min = vr->min;
      if (compare_values (vr->max, max) == -1)
        max = vr->max;

      /* If the new min/max values have converged to a single value,
         then there is only one value which can satisfy the condition,
         return that value.  */
      if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
        return min;
    }
  return NULL;
}
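/* Worked example (illustrative values): for the test x_3 <= 4 where the
   range of x_3 is [4, 10], the values satisfying the comparison are
   [TYPE_MIN, 4]; intersecting with [4, 10] leaves [4, 4], so the single
   value 4 is returned and the caller can turn the test into x_3 == 4.  */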
/* Simplify a conditional using a relational operator to an equality
   test if the range information indicates only one value can satisfy
   the original conditional.  */

static bool
simplify_cond_using_ranges (gimple stmt)
{
  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);
  enum tree_code cond_code = gimple_cond_code (stmt);

  if (cond_code != NE_EXPR
      && cond_code != EQ_EXPR
      && TREE_CODE (op0) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && is_gimple_min_invariant (op1))
    {
      value_range_t *vr = get_value_range (op0);

      /* If we have range information for OP0, then we might be
         able to simplify this conditional.  */
      if (vr->type == VR_RANGE)
        {
          tree new_tree = test_for_singularity (cond_code, op0, op1, vr);

          if (new_tree)
            {
              if (dump_file)
                {
                  fprintf (dump_file, "Simplified relational ");
                  print_gimple_stmt (dump_file, stmt, 0, 0);
                  fprintf (dump_file, " into ");
                }

              gimple_cond_set_code (stmt, EQ_EXPR);
              gimple_cond_set_lhs (stmt, op0);
              gimple_cond_set_rhs (stmt, new_tree);

              update_stmt (stmt);

              if (dump_file)
                {
                  print_gimple_stmt (dump_file, stmt, 0, 0);
                  fprintf (dump_file, "\n");
                }

              return true;
            }

          /* Try again after inverting the condition.  We only deal
             with integral types here, so no need to worry about
             issues with inverting FP comparisons.  */
          cond_code = invert_tree_comparison (cond_code, false);
          new_tree = test_for_singularity (cond_code, op0, op1, vr);

          if (new_tree)
            {
              if (dump_file)
                {
                  fprintf (dump_file, "Simplified relational ");
                  print_gimple_stmt (dump_file, stmt, 0, 0);
                  fprintf (dump_file, " into ");
                }

              gimple_cond_set_code (stmt, NE_EXPR);
              gimple_cond_set_lhs (stmt, op0);
              gimple_cond_set_rhs (stmt, new_tree);

              update_stmt (stmt);

              if (dump_file)
                {
                  print_gimple_stmt (dump_file, stmt, 0, 0);
                  fprintf (dump_file, "\n");
                }

              return true;
            }
        }
    }

  return false;
}
/* Simplify a switch statement using the value range of the switch
   argument.  */

static bool
simplify_switch_using_ranges (gimple stmt)
{
  tree op = gimple_switch_index (stmt);
  value_range_t *vr;
  bool take_default;
  edge e;
  edge_iterator ei;
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;

  if (TREE_CODE (op) == SSA_NAME)
    {
      vr = get_value_range (op);

      /* We can only handle integer ranges.  */
      if (vr->type != VR_RANGE
          || symbolic_range_p (vr))
        return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
        {
          i = 1;
          j = 0;
        }
      else
        {
          j = i;
        }
    }
  else
    return false;

  n = gimple_switch_num_labels (stmt);

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  */
  vec2 = make_tree_vec (j - i + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);

  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);

  /* Mark needed edges.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (stmt),
                     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
        {
          e->aux = NULL;
          continue;
        }
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "removing unreachable case label\n");
        }
      VEC_safe_push (edge, heap, to_remove_edges, e);
      e->flags &= ~EDGE_EXECUTABLE;
    }

  /* And queue an update for the stmt.  */
  su.stmt = stmt;
  su.vec = vec2;
  VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
  return false;
}
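/* For instance, if the switch index is known to lie in [3, 5], any case
   label entirely outside that range (say case 0: or case 42:) can never
   be reached; its edge is queued in TO_REMOVE_EDGES and the label vector
   is queued for rewriting in TO_UPDATE_SWITCH_STMTS.  (Illustrative
   values only.)  */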
/* Simplify STMT using ranges if possible.  */

static bool
simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  if (is_gimple_assign (stmt))
    {
      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

      switch (rhs_code)
        {
        case EQ_EXPR:
        case NE_EXPR:
        case TRUTH_NOT_EXPR:
        case TRUTH_AND_EXPR:
        case TRUTH_OR_EXPR:
        case TRUTH_XOR_EXPR:
          /* Transform EQ_EXPR, NE_EXPR, TRUTH_NOT_EXPR into BIT_XOR_EXPR
             or identity if the RHS is zero or one, and the LHS are known
             to be boolean values.  Transform all TRUTH_*_EXPR into
             BIT_*_EXPR if both arguments are known to be boolean values.  */
          if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
            return simplify_truth_ops_using_ranges (gsi, stmt);
          break;

        /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
           and BIT_AND_EXPR respectively if the first operand is greater
           than zero and the second operand is an exact power of two.  */
        case TRUNC_DIV_EXPR:
        case TRUNC_MOD_EXPR:
          if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))
              && integer_pow2p (gimple_assign_rhs2 (stmt)))
            return simplify_div_or_mod_using_ranges (stmt);
          break;

        /* Transform ABS (X) into X or -X as appropriate.  */
        case ABS_EXPR:
          if (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
              && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
            return simplify_abs_using_ranges (stmt);
          break;

        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
          /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
             if all the bits being cleared are already cleared or
             all the bits being set are already set.  */
          if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
            return simplify_bit_ops_using_ranges (gsi, stmt);
          break;

        default:
          break;
        }
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    return simplify_cond_using_ranges (stmt);
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    return simplify_switch_using_ranges (stmt);

  return false;
}
/* If the statement pointed by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  */

static bool
fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple stmt = gsi_stmt (*si);

  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
                                      gimple_assign_rhs1 (stmt),
                                      gimple_assign_rhs2 (stmt),
                                      stmt);
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    val = vrp_evaluate_conditional (gimple_cond_code (stmt),
                                    gimple_cond_lhs (stmt),
                                    gimple_cond_rhs (stmt),
                                    stmt);
  else
    return false;

  if (val)
    {
      if (assignment_p)
        val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
        {
          fprintf (dump_file, "Folding predicate ");
          print_gimple_expr (dump_file, stmt, 0, 0);
          fprintf (dump_file, " to ");
          print_generic_expr (dump_file, val, 0);
          fprintf (dump_file, "\n");
        }

      if (is_gimple_assign (stmt))
        gimple_assign_set_rhs_from_tree (si, val);
      else
        {
          gcc_assert (gimple_code (stmt) == GIMPLE_COND);
          if (integer_zerop (val))
            gimple_cond_make_false (stmt);
          else if (integer_onep (val))
            gimple_cond_make_true (stmt);
          else
            gcc_unreachable ();
        }

      return true;
    }

  return false;
}
/* Callback for substitute_and_fold folding the stmt at *SI.  */

static bool
vrp_fold_stmt (gimple_stmt_iterator *si)
{
  if (fold_predicate_in (si))
    return true;

  return simplify_stmt_using_ranges (si);
}
/* Stack of dest,src equivalency pairs that need to be restored after
   each attempt to thread a block's incoming edge to an outgoing edge.

   A NULL entry is used to mark the end of pairs which need to be
   restored.  */
static VEC(tree,heap) *stack;

/* A trivial wrapper so that we can present the generic jump threading
   code with a simple API for simplifying statements.  STMT is the
   statement we want to simplify, WITHIN_STMT provides the location
   for any overflow warnings.  */

static tree
simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
{
  /* We only use VRP information to simplify conditionals.  This is
     overly conservative, but it's unclear if doing more would be
     worth the compile time cost.  */
  if (gimple_code (stmt) != GIMPLE_COND)
    return NULL;

  return vrp_evaluate_conditional (gimple_cond_code (stmt),
                                   gimple_cond_lhs (stmt),
                                   gimple_cond_rhs (stmt), within_stmt);
}
/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities, i.e.,
   when the block is reached from a specific predecessor, we
   may be able to determine which of the outgoing edges will
   be traversed.  When this optimization applies, we are able
   to avoid conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */

static void
identify_jump_threads (void)
{
  basic_block bb;
  gimple dummy;
  int i;
  edge e;

  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Do not thread across edges we are about to remove.  Just marking
     them as EDGE_DFS_BACK will do.  */
  FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
    e->flags |= EDGE_DFS_BACK;

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  */
  stack = VEC_alloc (tree, heap, 20);

  /* To avoid lots of silly node creation, we create a single
     conditional and just modify it in-place when attempting to
     thread jumps.  */
  dummy = gimple_build_cond (EQ_EXPR,
                             integer_zero_node, integer_zero_node,
                             NULL, NULL);

  /* Walk through all the blocks finding those which present a
     potential jump threading opportunity.  We could set this up
     as a dominator walker and record data during the walk, but
     I doubt it's worth the effort for the classes of jump
     threading opportunities we are trying to identify at this
     point in compilation.  */
  FOR_EACH_BB (bb)
    {
      gimple last;

      /* If the generic jump threading code does not find this block
         interesting, then there is nothing to do.  */
      if (! potentially_threadable_block (bb))
        continue;

      /* We only care about blocks ending in a COND_EXPR.  While there
         may be some value in handling SWITCH_EXPR here, I doubt it's
         terribly important.  */
      last = gsi_stmt (gsi_last_bb (bb));
      if (gimple_code (last) != GIMPLE_COND)
        continue;

      /* We're basically looking for any kind of conditional with
         integral type arguments.  */
      if (TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
          && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
          && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
              || is_gimple_min_invariant (gimple_cond_rhs (last)))
          && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_rhs (last))))
        {
          edge_iterator ei;

          /* We've got a block with multiple predecessors and multiple
             successors which also ends in a suitable conditional.  For
             each predecessor, see if we can thread it to a specific
             successor.  */
          FOR_EACH_EDGE (e, ei, bb->preds)
            {
              /* Do not thread across back edges or abnormal edges
                 in the CFG.  */
              if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
                continue;

              thread_across_edge (dummy, e, true, &stack,
                                  simplify_stmt_for_jump_threading);
            }
        }
    }

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
}
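/* A sketch of the kind of opportunity handed to the generic threader
   (hypothetical CFG): if block B ends in "if (x_2 > 10)" and one of its
   predecessors is only reached when x_2 has the range [20, 30], then from
   that predecessor the conditional is known to be true and the edge can
   be threaded directly to the THEN destination.  */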
/* We identified all the jump threading opportunities earlier, but could
   not transform the CFG at that time.  This routine transforms the
   CFG and arranges for the dominator tree to be rebuilt if necessary.

   Note the SSA graph update will occur during the normal TODO
   processing by the pass manager.  */
static void
finalize_jump_threads (void)
{
  thread_through_all_blocks (false);
  VEC_free (tree, heap, stack);
}
/* Traverse all the blocks folding conditionals with known ranges.  */

static void
vrp_finalize (void)
{
  size_t i;
  unsigned num = num_ssa_names;

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  substitute_and_fold (op_with_constant_singleton_value_range,
                       vrp_fold_stmt, false);

  if (warn_array_bounds)
    check_all_array_refs ();

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads ();

  /* Free allocated memory.  */
  for (i = 0; i < num; i++)
    if (vr_value[i])
      {
        BITMAP_FREE (vr_value[i]->equiv);
        free (vr_value[i]);
      }

  free (vr_value);
  free (vr_phi_edge_counts);

  /* So that we can distinguish between VRP data being available
     and not available.  */
  vr_value = NULL;
  vr_phi_edge_counts = NULL;
}
/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA names
   have equivalent, though distinct, ranges.  For instance,

	1  x_9 = p_3->a;
	2  p_4 = ASSERT_EXPR <p_3, p_3 != 0>
	3  if (p_4 == q_2)
	4    p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
	5  endif
	6  if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO, the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */
static unsigned int
execute_vrp (void)
{
  int i;
  edge e;
  switch_update *su;

  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* Estimate number of iterations - but do not use undefined behavior
     for this.  We can't do this lazily as other functions may compute
     this using undefined behavior.  */
  free_numbers_of_iterations_estimates ();
  estimate_numbers_of_iterations (false);

  insert_range_assertions ();

  to_remove_edges = VEC_alloc (edge, heap, 10);
  to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
  threadedge_initialize_values ();

  vrp_initialize ();
  ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
  vrp_finalize ();

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  finalize_jump_threads ();

  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
    remove_edge (e);
  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (switch_update, to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
        gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
         make sure to make it a real default label again.  This ensures
         optimal expansion.  */
      label = gimple_switch_default_label (su->stmt);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  if (VEC_length (edge, to_remove_edges) > 0)
    free_dominance_info (CDI_DOMINATORS);

  VEC_free (edge, heap, to_remove_edges);
  VEC_free (switch_update, heap, to_update_switch_stmts);
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}

static bool
gate_vrp (void)
{
  return flag_tree_vrp != 0;
}
struct gimple_opt_pass pass_vrp =
{
 {
  GIMPLE_PASS,
  "vrp",				/* name */
  gate_vrp,				/* gate */
  execute_vrp,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_VRP,				/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_cleanup_cfg
    | TODO_ggc_collect
    | TODO_verify_ssa
    | TODO_dump_func
    | TODO_update_ssa			/* todo_flags_finish */
 }
};