gcc/tree-vrp.c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 Free Software Foundation, Inc.
4 Contributed by Diego Novillo <dnovillo@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "ggc.h"
27 #include "flags.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "tree-flow.h"
31 #include "tree-pass.h"
32 #include "tree-dump.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "intl.h"
36 #include "cfgloop.h"
37 #include "tree-scalar-evolution.h"
38 #include "tree-ssa-propagate.h"
39 #include "tree-chrec.h"
40 #include "gimple-fold.h"
41 #include "expr.h"
42 #include "optabs.h"
45 /* Type of value ranges. See value_range_d for a description of these
46 types. */
47 enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
49 /* Range of values that can be associated with an SSA_NAME after VRP
50 has executed. */
51 struct value_range_d
53 /* Lattice value represented by this range. */
54 enum value_range_type type;
56 /* Minimum and maximum values represented by this range. These
57 values should be interpreted as follows:
59 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
60 be NULL.
62 - If TYPE == VR_RANGE then MIN holds the minimum value and
63 MAX holds the maximum value of the range [MIN, MAX].
65 - If TYPE == ANTI_RANGE the variable is known to NOT
66 take any values in the range [MIN, MAX]. */
67 tree min;
68 tree max;
70 /* Set of SSA names whose value ranges are equivalent to this one.
71 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
72 bitmap equiv;
75 typedef struct value_range_d value_range_t;
77 #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
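/* Editorial sketch, not part of tree-vrp.c: the lattice above modelled on
   plain ints, to make the VR_RANGE / VR_ANTI_RANGE semantics concrete.
   The names toy_vr and toy_vr_contains are invented for illustration.  */

#include <stdbool.h>

enum toy_vr_type { TOY_UNDEFINED, TOY_RANGE, TOY_ANTI_RANGE, TOY_VARYING };

struct toy_vr
{
  enum toy_vr_type type;
  long min, max;	/* Only meaningful for TOY_RANGE / TOY_ANTI_RANGE.  */
};

/* Return true if VAL is a possible value for a name with range *VR.  */
static bool
toy_vr_contains (const struct toy_vr *vr, long val)
{
  switch (vr->type)
    {
    case TOY_UNDEFINED:
      return false;				/* Empty set of values.  */
    case TOY_RANGE:
      return val >= vr->min && val <= vr->max;	/* [min, max].  */
    case TOY_ANTI_RANGE:
      return val < vr->min || val > vr->max;	/* ~[min, max].  */
    default:
      return true;				/* TOY_VARYING: anything.  */
    }
}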
79 /* Set of SSA names found live during the RPO traversal of the function
80 for still active basic-blocks. */
81 static sbitmap *live;
83 /* Return true if the SSA name NAME is live on the edge E. */
85 static bool
86 live_on_edge (edge e, tree name)
88 return (live[e->dest->index]
89 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name)));
92 /* Local functions. */
93 static int compare_values (tree val1, tree val2);
94 static int compare_values_warnv (tree val1, tree val2, bool *);
95 static void vrp_meet (value_range_t *, value_range_t *);
96 static void vrp_intersect_ranges (value_range_t *, value_range_t *);
97 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
98 tree, tree, bool, bool *,
99 bool *);
101 /* Location information for ASSERT_EXPRs. Each instance of this
102 structure describes an ASSERT_EXPR for an SSA name. Since a single
103 SSA name may have more than one assertion associated with it, these
104 locations are kept in a linked list attached to the corresponding
105 SSA name. */
106 struct assert_locus_d
108 /* Basic block where the assertion would be inserted. */
109 basic_block bb;
111 /* Some assertions need to be inserted on an edge (e.g., assertions
112 generated by COND_EXPRs). In those cases, BB will be NULL. */
113 edge e;
115 /* Pointer to the statement that generated this assertion. */
116 gimple_stmt_iterator si;
118 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
119 enum tree_code comp_code;
121 /* Value being compared against. */
122 tree val;
124 /* Expression to compare. */
125 tree expr;
127 /* Next node in the linked list. */
128 struct assert_locus_d *next;
131 typedef struct assert_locus_d *assert_locus_t;
133 /* If bit I is present, it means that SSA name N_i has a list of
134 assertions that should be inserted in the IL. */
135 static bitmap need_assert_for;
137 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
138 holds a list of ASSERT_LOCUS_T nodes that describe where
139 ASSERT_EXPRs for SSA name N_I should be inserted. */
140 static assert_locus_t *asserts_for;
142 /* Value range array. After propagation, VR_VALUE[I] holds the range
143 of values that SSA name N_I may take. */
144 static unsigned num_vr_values;
145 static value_range_t **vr_value;
146 static bool values_propagated;
148 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
149 number of executable edges we saw the last time we visited the
150 node. */
151 static int *vr_phi_edge_counts;
153 typedef struct {
154 gimple stmt;
155 tree vec;
156 } switch_update;
158 static VEC (edge, heap) *to_remove_edges;
159 DEF_VEC_O(switch_update);
160 DEF_VEC_ALLOC_O(switch_update, heap);
161 static VEC (switch_update, heap) *to_update_switch_stmts;
164 /* Return the maximum value for TYPE. */
166 static inline tree
167 vrp_val_max (const_tree type)
169 if (!INTEGRAL_TYPE_P (type))
170 return NULL_TREE;
172 return TYPE_MAX_VALUE (type);
175 /* Return the minimum value for TYPE. */
177 static inline tree
178 vrp_val_min (const_tree type)
180 if (!INTEGRAL_TYPE_P (type))
181 return NULL_TREE;
183 return TYPE_MIN_VALUE (type);
186 /* Return whether VAL is equal to the maximum value of its type. This
187 will be true for a positive overflow infinity. We can't do a
188 simple equality comparison with TYPE_MAX_VALUE because C typedefs
189 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
190 to the integer constant with the same value in the type. */
192 static inline bool
193 vrp_val_is_max (const_tree val)
195 tree type_max = vrp_val_max (TREE_TYPE (val));
196 return (val == type_max
197 || (type_max != NULL_TREE
198 && operand_equal_p (val, type_max, 0)));
201 /* Return whether VAL is equal to the minimum value of its type. This
202 will be true for a negative overflow infinity. */
204 static inline bool
205 vrp_val_is_min (const_tree val)
207 tree type_min = vrp_val_min (TREE_TYPE (val));
208 return (val == type_min
209 || (type_min != NULL_TREE
210 && operand_equal_p (val, type_min, 0)));
214 /* Return whether TYPE should use an overflow infinity distinct from
215 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
216 represent a signed overflow during VRP computations. An infinity
217 is distinct from a half-range, which will go from some number to
218 TYPE_{MIN,MAX}_VALUE. */
220 static inline bool
221 needs_overflow_infinity (const_tree type)
223 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
226 /* Return whether TYPE can support our overflow infinity
227 representation: we use the TREE_OVERFLOW flag, which only exists
228 for constants. If TYPE doesn't support this, we don't optimize
229 cases which would require signed overflow--we drop them to
230 VARYING. */
232 static inline bool
233 supports_overflow_infinity (const_tree type)
235 tree min = vrp_val_min (type), max = vrp_val_max (type);
236 #ifdef ENABLE_CHECKING
237 gcc_assert (needs_overflow_infinity (type));
238 #endif
239 return (min != NULL_TREE
240 && CONSTANT_CLASS_P (min)
241 && max != NULL_TREE
242 && CONSTANT_CLASS_P (max));
245 /* VAL is the maximum or minimum value of a type. Return a
246 corresponding overflow infinity. */
248 static inline tree
249 make_overflow_infinity (tree val)
251 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
252 val = copy_node (val);
253 TREE_OVERFLOW (val) = 1;
254 return val;
257 /* Return a negative overflow infinity for TYPE. */
259 static inline tree
260 negative_overflow_infinity (tree type)
262 gcc_checking_assert (supports_overflow_infinity (type));
263 return make_overflow_infinity (vrp_val_min (type));
266 /* Return a positive overflow infinity for TYPE. */
268 static inline tree
269 positive_overflow_infinity (tree type)
271 gcc_checking_assert (supports_overflow_infinity (type));
272 return make_overflow_infinity (vrp_val_max (type));
275 /* Return whether VAL is a negative overflow infinity. */
277 static inline bool
278 is_negative_overflow_infinity (const_tree val)
280 return (needs_overflow_infinity (TREE_TYPE (val))
281 && CONSTANT_CLASS_P (val)
282 && TREE_OVERFLOW (val)
283 && vrp_val_is_min (val));
286 /* Return whether VAL is a positive overflow infinity. */
288 static inline bool
289 is_positive_overflow_infinity (const_tree val)
291 return (needs_overflow_infinity (TREE_TYPE (val))
292 && CONSTANT_CLASS_P (val)
293 && TREE_OVERFLOW (val)
294 && vrp_val_is_max (val));
297 /* Return whether VAL is a positive or negative overflow infinity. */
299 static inline bool
300 is_overflow_infinity (const_tree val)
302 return (needs_overflow_infinity (TREE_TYPE (val))
303 && CONSTANT_CLASS_P (val)
304 && TREE_OVERFLOW (val)
305 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
308 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
310 static inline bool
311 stmt_overflow_infinity (gimple stmt)
313 if (is_gimple_assign (stmt)
314 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
315 GIMPLE_SINGLE_RHS)
316 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
317 return false;
 320 /* If VAL is not an overflow infinity, return VAL unchanged.  Otherwise,
 321    return the corresponding extreme value of its type without TREE_OVERFLOW
 322    set, to avoid confusing a regular value with an overflow value. */
324 static inline tree
325 avoid_overflow_infinity (tree val)
327 if (!is_overflow_infinity (val))
328 return val;
330 if (vrp_val_is_max (val))
331 return vrp_val_max (TREE_TYPE (val));
332 else
334 gcc_checking_assert (vrp_val_is_min (val));
335 return vrp_val_min (TREE_TYPE (val));
340 /* Return true if ARG is marked with the nonnull attribute in the
341 current function signature. */
343 static bool
344 nonnull_arg_p (const_tree arg)
346 tree t, attrs, fntype;
347 unsigned HOST_WIDE_INT arg_num;
349 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
351 /* The static chain decl is always non null. */
352 if (arg == cfun->static_chain_decl)
353 return true;
355 fntype = TREE_TYPE (current_function_decl);
356 for (attrs = TYPE_ATTRIBUTES (fntype); attrs; attrs = TREE_CHAIN (attrs))
358 attrs = lookup_attribute ("nonnull", attrs);
360 /* If "nonnull" wasn't specified, we know nothing about the argument. */
361 if (attrs == NULL_TREE)
362 return false;
364 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
365 if (TREE_VALUE (attrs) == NULL_TREE)
366 return true;
368 /* Get the position number for ARG in the function signature. */
369 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
371 t = DECL_CHAIN (t), arg_num++)
373 if (t == arg)
374 break;
377 gcc_assert (t == arg);
379 /* Now see if ARG_NUM is mentioned in the nonnull list. */
380 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
382 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
383 return true;
387 return false;
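/* Editorial sketch, not part of tree-vrp.c: the two forms of the "nonnull"
   attribute that the code above distinguishes.  With no argument list the
   attribute covers every pointer parameter; with a list it names 1-based
   parameter positions, which is what the compare_tree_int loop matches
   ARG_NUM against.  The declarations below are only illustrative.  */

extern void copy_all (void *dst, const void *src)
  __attribute__ ((nonnull));		/* All pointer arguments non-null.  */
extern void copy_dst (void *dst, const void *src)
  __attribute__ ((nonnull (1)));	/* Only argument 1 is non-null.  */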
391 /* Set value range VR to VR_UNDEFINED. */
393 static inline void
394 set_value_range_to_undefined (value_range_t *vr)
396 vr->type = VR_UNDEFINED;
397 vr->min = vr->max = NULL_TREE;
398 if (vr->equiv)
399 bitmap_clear (vr->equiv);
403 /* Set value range VR to VR_VARYING. */
405 static inline void
406 set_value_range_to_varying (value_range_t *vr)
408 vr->type = VR_VARYING;
409 vr->min = vr->max = NULL_TREE;
410 if (vr->equiv)
411 bitmap_clear (vr->equiv);
415 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
417 static void
418 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
419 tree max, bitmap equiv)
421 #if defined ENABLE_CHECKING
422 /* Check the validity of the range. */
423 if (t == VR_RANGE || t == VR_ANTI_RANGE)
425 int cmp;
427 gcc_assert (min && max);
429 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
430 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
432 cmp = compare_values (min, max);
433 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
435 if (needs_overflow_infinity (TREE_TYPE (min)))
436 gcc_assert (!is_overflow_infinity (min)
437 || !is_overflow_infinity (max));
440 if (t == VR_UNDEFINED || t == VR_VARYING)
441 gcc_assert (min == NULL_TREE && max == NULL_TREE);
443 if (t == VR_UNDEFINED || t == VR_VARYING)
444 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
445 #endif
447 vr->type = t;
448 vr->min = min;
449 vr->max = max;
451 /* Since updating the equivalence set involves deep copying the
452 bitmaps, only do it if absolutely necessary. */
453 if (vr->equiv == NULL
454 && equiv != NULL)
455 vr->equiv = BITMAP_ALLOC (NULL);
457 if (equiv != vr->equiv)
459 if (equiv && !bitmap_empty_p (equiv))
460 bitmap_copy (vr->equiv, equiv);
461 else
462 bitmap_clear (vr->equiv);
467 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
468 This means adjusting T, MIN and MAX representing the case of a
469 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
 470    as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
471 In corner cases where MAX+1 or MIN-1 wraps this will fall back
472 to varying.
473 This routine exists to ease canonicalization in the case where we
474 extract ranges from var + CST op limit. */
476 static void
477 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
478 tree min, tree max, bitmap equiv)
480 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
481 if (t == VR_UNDEFINED)
483 set_value_range_to_undefined (vr);
484 return;
486 else if (t == VR_VARYING)
488 set_value_range_to_varying (vr);
489 return;
492 /* Nothing to canonicalize for symbolic ranges. */
493 if (TREE_CODE (min) != INTEGER_CST
494 || TREE_CODE (max) != INTEGER_CST)
496 set_value_range (vr, t, min, max, equiv);
497 return;
 500   /* MIN and MAX are in the wrong order (a wrapping range); adjust the
 501      bounds and flip the VR type so the same set of values is kept. */
502 if (tree_int_cst_lt (max, min))
504 tree one = build_int_cst (TREE_TYPE (min), 1);
505 tree tmp = int_const_binop (PLUS_EXPR, max, one);
506 max = int_const_binop (MINUS_EXPR, min, one);
507 min = tmp;
509 /* There's one corner case, if we had [C+1, C] before we now have
510 that again. But this represents an empty value range, so drop
511 to varying in this case. */
512 if (tree_int_cst_lt (max, min))
514 set_value_range_to_varying (vr);
515 return;
518 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
521 /* Anti-ranges that can be represented as ranges should be so. */
522 if (t == VR_ANTI_RANGE)
524 bool is_min = vrp_val_is_min (min);
525 bool is_max = vrp_val_is_max (max);
527 if (is_min && is_max)
529 /* We cannot deal with empty ranges, drop to varying.
530 ??? This could be VR_UNDEFINED instead. */
531 set_value_range_to_varying (vr);
532 return;
534 else if (is_min
535 /* As a special exception preserve non-null ranges. */
536 && !(TYPE_UNSIGNED (TREE_TYPE (min))
537 && integer_zerop (max)))
539 tree one = build_int_cst (TREE_TYPE (max), 1);
540 min = int_const_binop (PLUS_EXPR, max, one);
541 max = vrp_val_max (TREE_TYPE (max));
542 t = VR_RANGE;
544 else if (is_max)
546 tree one = build_int_cst (TREE_TYPE (min), 1);
547 max = int_const_binop (MINUS_EXPR, min, one);
548 min = vrp_val_min (TREE_TYPE (min));
549 t = VR_RANGE;
553 /* Drop [-INF(OVF), +INF(OVF)] to varying. */
554 if (needs_overflow_infinity (TREE_TYPE (min))
555 && is_overflow_infinity (min)
556 && is_overflow_infinity (max))
558 set_value_range_to_varying (vr);
559 return;
562 set_value_range (vr, t, min, max, equiv);
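/* Editorial sketch, not part of tree-vrp.c: the MAX < MIN rewrite above,
   shown on a toy 8-bit unsigned type.  A "range" [200, 10] really means
   [200, 255] U [0, 10]; swapping the bounds and flipping range <->
   anti-range gives ~[11, 199], which covers exactly the same values.
   The helper names are invented for illustration.  */

#include <stdbool.h>
#include <stdint.h>

/* Membership in the wrapped range [200, 255] U [0, 10].  */
static bool
in_wrapped_range (uint8_t val)
{
  return val >= 200 || val <= 10;
}

/* Membership in the canonical anti-range ~[11, 199], i.e.
   ~[10 + 1, 200 - 1].  Agrees with in_wrapped_range for every
   uint8_t value.  */
static bool
in_canonical_anti_range (uint8_t val)
{
  return !(val >= 11 && val <= 199);
}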
565 /* Copy value range FROM into value range TO. */
567 static inline void
568 copy_value_range (value_range_t *to, value_range_t *from)
570 set_value_range (to, from->type, from->min, from->max, from->equiv);
573 /* Set value range VR to a single value. This function is only called
574 with values we get from statements, and exists to clear the
575 TREE_OVERFLOW flag so that we don't think we have an overflow
576 infinity when we shouldn't. */
578 static inline void
579 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
581 gcc_assert (is_gimple_min_invariant (val));
582 val = avoid_overflow_infinity (val);
583 set_value_range (vr, VR_RANGE, val, val, equiv);
586 /* Set value range VR to a non-negative range of type TYPE.
587 OVERFLOW_INFINITY indicates whether to use an overflow infinity
588 rather than TYPE_MAX_VALUE; this should be true if we determine
589 that the range is nonnegative based on the assumption that signed
590 overflow does not occur. */
592 static inline void
593 set_value_range_to_nonnegative (value_range_t *vr, tree type,
594 bool overflow_infinity)
596 tree zero;
598 if (overflow_infinity && !supports_overflow_infinity (type))
600 set_value_range_to_varying (vr);
601 return;
604 zero = build_int_cst (type, 0);
605 set_value_range (vr, VR_RANGE, zero,
606 (overflow_infinity
607 ? positive_overflow_infinity (type)
608 : TYPE_MAX_VALUE (type)),
609 vr->equiv);
612 /* Set value range VR to a non-NULL range of type TYPE. */
614 static inline void
615 set_value_range_to_nonnull (value_range_t *vr, tree type)
617 tree zero = build_int_cst (type, 0);
618 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
622 /* Set value range VR to a NULL range of type TYPE. */
624 static inline void
625 set_value_range_to_null (value_range_t *vr, tree type)
627 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
631 /* Set value range VR to a range of a truthvalue of type TYPE. */
633 static inline void
634 set_value_range_to_truthvalue (value_range_t *vr, tree type)
636 if (TYPE_PRECISION (type) == 1)
637 set_value_range_to_varying (vr);
638 else
639 set_value_range (vr, VR_RANGE,
640 build_int_cst (type, 0), build_int_cst (type, 1),
641 vr->equiv);
 645 /* If abs (min) < abs (max), set VR to [-max, max]; if
 646    abs (min) >= abs (max), set VR to [-min, min]. */
648 static void
649 abs_extent_range (value_range_t *vr, tree min, tree max)
651 int cmp;
653 gcc_assert (TREE_CODE (min) == INTEGER_CST);
654 gcc_assert (TREE_CODE (max) == INTEGER_CST);
655 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
656 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
657 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
658 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
659 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
661 set_value_range_to_varying (vr);
662 return;
664 cmp = compare_values (min, max);
665 if (cmp == -1)
666 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
667 else if (cmp == 0 || cmp == 1)
669 max = min;
670 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
672 else
674 set_value_range_to_varying (vr);
675 return;
677 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
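/* Editorial sketch, not part of tree-vrp.c: the symmetric extent that
   abs_extent_range computes, on plain longs.  Given a signed range
   [min, max] it produces [-m, m] with m = MAX (|min|, |max|), the
   smallest symmetric interval containing the input.  Overflow of the
   most negative value, which the real code handles by dropping to
   VARYING, is ignored here.  The names are invented for illustration.  */

#include <stdlib.h>

struct toy_extent { long min, max; };

static struct toy_extent
toy_abs_extent (long min, long max)
{
  long m = labs (min) >= labs (max) ? labs (min) : labs (max);
  struct toy_extent r = { -m, m };
  return r;
}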
681 /* Return value range information for VAR.
 683    If we have no value ranges recorded (i.e., VRP is not running), then
684 return NULL. Otherwise create an empty range if none existed for VAR. */
686 static value_range_t *
687 get_value_range (const_tree var)
689 static const struct value_range_d vr_const_varying
690 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
691 value_range_t *vr;
692 tree sym;
693 unsigned ver = SSA_NAME_VERSION (var);
695 /* If we have no recorded ranges, then return NULL. */
696 if (! vr_value)
697 return NULL;
699 /* If we query the range for a new SSA name return an unmodifiable VARYING.
700 We should get here at most from the substitute-and-fold stage which
701 will never try to change values. */
702 if (ver >= num_vr_values)
703 return CONST_CAST (value_range_t *, &vr_const_varying);
705 vr = vr_value[ver];
706 if (vr)
707 return vr;
709 /* After propagation finished do not allocate new value-ranges. */
710 if (values_propagated)
711 return CONST_CAST (value_range_t *, &vr_const_varying);
713 /* Create a default value range. */
714 vr_value[ver] = vr = XCNEW (value_range_t);
716 /* Defer allocating the equivalence set. */
717 vr->equiv = NULL;
719 /* If VAR is a default definition of a parameter, the variable can
720 take any value in VAR's type. */
721 if (SSA_NAME_IS_DEFAULT_DEF (var))
723 sym = SSA_NAME_VAR (var);
724 if (TREE_CODE (sym) == PARM_DECL)
726 /* Try to use the "nonnull" attribute to create ~[0, 0]
727 anti-ranges for pointers. Note that this is only valid with
728 default definitions of PARM_DECLs. */
729 if (POINTER_TYPE_P (TREE_TYPE (sym))
730 && nonnull_arg_p (sym))
731 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
732 else
733 set_value_range_to_varying (vr);
735 else if (TREE_CODE (sym) == RESULT_DECL
736 && DECL_BY_REFERENCE (sym))
737 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
740 return vr;
743 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
745 static inline bool
746 vrp_operand_equal_p (const_tree val1, const_tree val2)
748 if (val1 == val2)
749 return true;
750 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
751 return false;
752 if (is_overflow_infinity (val1))
753 return is_overflow_infinity (val2);
754 return true;
757 /* Return true, if the bitmaps B1 and B2 are equal. */
759 static inline bool
760 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
762 return (b1 == b2
763 || ((!b1 || bitmap_empty_p (b1))
764 && (!b2 || bitmap_empty_p (b2)))
765 || (b1 && b2
766 && bitmap_equal_p (b1, b2)));
769 /* Update the value range and equivalence set for variable VAR to
770 NEW_VR. Return true if NEW_VR is different from VAR's previous
771 value.
773 NOTE: This function assumes that NEW_VR is a temporary value range
774 object created for the sole purpose of updating VAR's range. The
775 storage used by the equivalence set from NEW_VR will be freed by
776 this function. Do not call update_value_range when NEW_VR
777 is the range object associated with another SSA name. */
779 static inline bool
780 update_value_range (const_tree var, value_range_t *new_vr)
782 value_range_t *old_vr;
783 bool is_new;
785 /* Update the value range, if necessary. */
786 old_vr = get_value_range (var);
787 is_new = old_vr->type != new_vr->type
788 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
789 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
790 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
792 if (is_new)
793 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
794 new_vr->equiv);
796 BITMAP_FREE (new_vr->equiv);
798 return is_new;
802 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
803 point where equivalence processing can be turned on/off. */
805 static void
806 add_equivalence (bitmap *equiv, const_tree var)
808 unsigned ver = SSA_NAME_VERSION (var);
809 value_range_t *vr = vr_value[ver];
811 if (*equiv == NULL)
812 *equiv = BITMAP_ALLOC (NULL);
813 bitmap_set_bit (*equiv, ver);
814 if (vr && vr->equiv)
815 bitmap_ior_into (*equiv, vr->equiv);
819 /* Return true if VR is ~[0, 0]. */
821 static inline bool
822 range_is_nonnull (value_range_t *vr)
824 return vr->type == VR_ANTI_RANGE
825 && integer_zerop (vr->min)
826 && integer_zerop (vr->max);
830 /* Return true if VR is [0, 0]. */
832 static inline bool
833 range_is_null (value_range_t *vr)
835 return vr->type == VR_RANGE
836 && integer_zerop (vr->min)
837 && integer_zerop (vr->max);
 840 /* Return true if max and min of VR are INTEGER_CST.  It's not necessarily
 841    a singleton. */
843 static inline bool
844 range_int_cst_p (value_range_t *vr)
846 return (vr->type == VR_RANGE
847 && TREE_CODE (vr->max) == INTEGER_CST
848 && TREE_CODE (vr->min) == INTEGER_CST);
 851 /* Return true if VR is an INTEGER_CST singleton. */
853 static inline bool
854 range_int_cst_singleton_p (value_range_t *vr)
856 return (range_int_cst_p (vr)
857 && !TREE_OVERFLOW (vr->min)
858 && !TREE_OVERFLOW (vr->max)
859 && tree_int_cst_equal (vr->min, vr->max));
862 /* Return true if value range VR involves at least one symbol. */
864 static inline bool
865 symbolic_range_p (value_range_t *vr)
867 return (!is_gimple_min_invariant (vr->min)
868 || !is_gimple_min_invariant (vr->max));
871 /* Return true if value range VR uses an overflow infinity. */
873 static inline bool
874 overflow_infinity_range_p (value_range_t *vr)
876 return (vr->type == VR_RANGE
877 && (is_overflow_infinity (vr->min)
878 || is_overflow_infinity (vr->max)));
 881 /* Return false if we cannot make a valid comparison based on VR;
882 this will be the case if it uses an overflow infinity and overflow
883 is not undefined (i.e., -fno-strict-overflow is in effect).
884 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
885 uses an overflow infinity. */
887 static bool
888 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
890 gcc_assert (vr->type == VR_RANGE);
891 if (is_overflow_infinity (vr->min))
893 *strict_overflow_p = true;
894 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
895 return false;
897 if (is_overflow_infinity (vr->max))
899 *strict_overflow_p = true;
900 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
901 return false;
903 return true;
 907 /* Return true if the result of assignment STMT is known to be non-negative.
908 If the return value is based on the assumption that signed overflow is
909 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
910 *STRICT_OVERFLOW_P.*/
912 static bool
913 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
915 enum tree_code code = gimple_assign_rhs_code (stmt);
916 switch (get_gimple_rhs_class (code))
918 case GIMPLE_UNARY_RHS:
919 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
920 gimple_expr_type (stmt),
921 gimple_assign_rhs1 (stmt),
922 strict_overflow_p);
923 case GIMPLE_BINARY_RHS:
924 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
925 gimple_expr_type (stmt),
926 gimple_assign_rhs1 (stmt),
927 gimple_assign_rhs2 (stmt),
928 strict_overflow_p);
929 case GIMPLE_TERNARY_RHS:
930 return false;
931 case GIMPLE_SINGLE_RHS:
932 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
933 strict_overflow_p);
934 case GIMPLE_INVALID_RHS:
935 gcc_unreachable ();
936 default:
937 gcc_unreachable ();
 941 /* Return true if the return value of call STMT is known to be non-negative.
942 If the return value is based on the assumption that signed overflow is
943 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
944 *STRICT_OVERFLOW_P.*/
946 static bool
947 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
949 tree arg0 = gimple_call_num_args (stmt) > 0 ?
950 gimple_call_arg (stmt, 0) : NULL_TREE;
951 tree arg1 = gimple_call_num_args (stmt) > 1 ?
952 gimple_call_arg (stmt, 1) : NULL_TREE;
954 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
955 gimple_call_fndecl (stmt),
956 arg0,
957 arg1,
958 strict_overflow_p);
 961 /* Return true if STMT is known to compute a non-negative value.
962 If the return value is based on the assumption that signed overflow is
963 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
964 *STRICT_OVERFLOW_P.*/
966 static bool
967 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
969 switch (gimple_code (stmt))
971 case GIMPLE_ASSIGN:
972 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
973 case GIMPLE_CALL:
974 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
975 default:
976 gcc_unreachable ();
 980 /* Return true if the result of assignment STMT is known to be non-zero.
981 If the return value is based on the assumption that signed overflow is
982 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
983 *STRICT_OVERFLOW_P.*/
985 static bool
986 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
988 enum tree_code code = gimple_assign_rhs_code (stmt);
989 switch (get_gimple_rhs_class (code))
991 case GIMPLE_UNARY_RHS:
992 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
993 gimple_expr_type (stmt),
994 gimple_assign_rhs1 (stmt),
995 strict_overflow_p);
996 case GIMPLE_BINARY_RHS:
997 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
998 gimple_expr_type (stmt),
999 gimple_assign_rhs1 (stmt),
1000 gimple_assign_rhs2 (stmt),
1001 strict_overflow_p);
1002 case GIMPLE_TERNARY_RHS:
1003 return false;
1004 case GIMPLE_SINGLE_RHS:
1005 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
1006 strict_overflow_p);
1007 case GIMPLE_INVALID_RHS:
1008 gcc_unreachable ();
1009 default:
1010 gcc_unreachable ();
1014 /* Return true if STMT is known to compute a non-zero value.
1015 If the return value is based on the assumption that signed overflow is
1016 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1017 *STRICT_OVERFLOW_P.*/
1019 static bool
1020 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
1022 switch (gimple_code (stmt))
1024 case GIMPLE_ASSIGN:
1025 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
1026 case GIMPLE_CALL:
1027 return gimple_alloca_call_p (stmt);
1028 default:
1029 gcc_unreachable ();
1033 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1034 obtained so far. */
1036 static bool
1037 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
1039 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1040 return true;
1042 /* If we have an expression of the form &X->a, then the expression
1043 is nonnull if X is nonnull. */
1044 if (is_gimple_assign (stmt)
1045 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1047 tree expr = gimple_assign_rhs1 (stmt);
1048 tree base = get_base_address (TREE_OPERAND (expr, 0));
1050 if (base != NULL_TREE
1051 && TREE_CODE (base) == MEM_REF
1052 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1054 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
1055 if (range_is_nonnull (vr))
1056 return true;
1060 return false;
1063 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1064 a gimple invariant, or SSA_NAME +- CST. */
1066 static bool
1067 valid_value_p (tree expr)
1069 if (TREE_CODE (expr) == SSA_NAME)
1070 return true;
1072 if (TREE_CODE (expr) == PLUS_EXPR
1073 || TREE_CODE (expr) == MINUS_EXPR)
1074 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1075 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1077 return is_gimple_min_invariant (expr);
1080 /* Return
1081 1 if VAL < VAL2
1082 0 if !(VAL < VAL2)
1083 -2 if those are incomparable. */
1084 static inline int
1085 operand_less_p (tree val, tree val2)
1087 /* LT is folded faster than GE and others. Inline the common case. */
1088 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1090 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1091 return INT_CST_LT_UNSIGNED (val, val2);
1092 else
1094 if (INT_CST_LT (val, val2))
1095 return 1;
1098 else
1100 tree tcmp;
1102 fold_defer_overflow_warnings ();
1104 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1106 fold_undefer_and_ignore_overflow_warnings ();
1108 if (!tcmp
1109 || TREE_CODE (tcmp) != INTEGER_CST)
1110 return -2;
1112 if (!integer_zerop (tcmp))
1113 return 1;
1116 /* val >= val2, not considering overflow infinity. */
1117 if (is_negative_overflow_infinity (val))
1118 return is_negative_overflow_infinity (val2) ? 0 : 1;
1119 else if (is_positive_overflow_infinity (val2))
1120 return is_positive_overflow_infinity (val) ? 0 : 1;
1122 return 0;
1125 /* Compare two values VAL1 and VAL2. Return
1127 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1128 -1 if VAL1 < VAL2,
1129 0 if VAL1 == VAL2,
1130 +1 if VAL1 > VAL2, and
1131 +2 if VAL1 != VAL2
1133 This is similar to tree_int_cst_compare but supports pointer values
1134 and values that cannot be compared at compile time.
1136 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1137 true if the return value is only valid if we assume that signed
1138 overflow is undefined. */
1140 static int
1141 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1143 if (val1 == val2)
1144 return 0;
1146 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1147 both integers. */
1148 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1149 == POINTER_TYPE_P (TREE_TYPE (val2)));
1150 /* Convert the two values into the same type. This is needed because
1151 sizetype causes sign extension even for unsigned types. */
1152 val2 = fold_convert (TREE_TYPE (val1), val2);
1153 STRIP_USELESS_TYPE_CONVERSION (val2);
1155 if ((TREE_CODE (val1) == SSA_NAME
1156 || TREE_CODE (val1) == PLUS_EXPR
1157 || TREE_CODE (val1) == MINUS_EXPR)
1158 && (TREE_CODE (val2) == SSA_NAME
1159 || TREE_CODE (val2) == PLUS_EXPR
1160 || TREE_CODE (val2) == MINUS_EXPR))
1162 tree n1, c1, n2, c2;
1163 enum tree_code code1, code2;
1165 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1166 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1167 same name, return -2. */
1168 if (TREE_CODE (val1) == SSA_NAME)
1170 code1 = SSA_NAME;
1171 n1 = val1;
1172 c1 = NULL_TREE;
1174 else
1176 code1 = TREE_CODE (val1);
1177 n1 = TREE_OPERAND (val1, 0);
1178 c1 = TREE_OPERAND (val1, 1);
1179 if (tree_int_cst_sgn (c1) == -1)
1181 if (is_negative_overflow_infinity (c1))
1182 return -2;
1183 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1184 if (!c1)
1185 return -2;
1186 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1190 if (TREE_CODE (val2) == SSA_NAME)
1192 code2 = SSA_NAME;
1193 n2 = val2;
1194 c2 = NULL_TREE;
1196 else
1198 code2 = TREE_CODE (val2);
1199 n2 = TREE_OPERAND (val2, 0);
1200 c2 = TREE_OPERAND (val2, 1);
1201 if (tree_int_cst_sgn (c2) == -1)
1203 if (is_negative_overflow_infinity (c2))
1204 return -2;
1205 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1206 if (!c2)
1207 return -2;
1208 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1212 /* Both values must use the same name. */
1213 if (n1 != n2)
1214 return -2;
1216 if (code1 == SSA_NAME
1217 && code2 == SSA_NAME)
1218 /* NAME == NAME */
1219 return 0;
1221 /* If overflow is defined we cannot simplify more. */
1222 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1223 return -2;
1225 if (strict_overflow_p != NULL
1226 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1227 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1228 *strict_overflow_p = true;
1230 if (code1 == SSA_NAME)
1232 if (code2 == PLUS_EXPR)
1233 /* NAME < NAME + CST */
1234 return -1;
1235 else if (code2 == MINUS_EXPR)
1236 /* NAME > NAME - CST */
1237 return 1;
1239 else if (code1 == PLUS_EXPR)
1241 if (code2 == SSA_NAME)
1242 /* NAME + CST > NAME */
1243 return 1;
1244 else if (code2 == PLUS_EXPR)
1245 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1246 return compare_values_warnv (c1, c2, strict_overflow_p);
1247 else if (code2 == MINUS_EXPR)
1248 /* NAME + CST1 > NAME - CST2 */
1249 return 1;
1251 else if (code1 == MINUS_EXPR)
1253 if (code2 == SSA_NAME)
1254 /* NAME - CST < NAME */
1255 return -1;
1256 else if (code2 == PLUS_EXPR)
1257 /* NAME - CST1 < NAME + CST2 */
1258 return -1;
1259 else if (code2 == MINUS_EXPR)
1260 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1261 C1 and C2 are swapped in the call to compare_values. */
1262 return compare_values_warnv (c2, c1, strict_overflow_p);
1265 gcc_unreachable ();
1268 /* We cannot compare non-constants. */
1269 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1270 return -2;
1272 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1274 /* We cannot compare overflowed values, except for overflow
1275 infinities. */
1276 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1278 if (strict_overflow_p != NULL)
1279 *strict_overflow_p = true;
1280 if (is_negative_overflow_infinity (val1))
1281 return is_negative_overflow_infinity (val2) ? 0 : -1;
1282 else if (is_negative_overflow_infinity (val2))
1283 return 1;
1284 else if (is_positive_overflow_infinity (val1))
1285 return is_positive_overflow_infinity (val2) ? 0 : 1;
1286 else if (is_positive_overflow_infinity (val2))
1287 return -1;
1288 return -2;
1291 return tree_int_cst_compare (val1, val2);
1293 else
1295 tree t;
1297 /* First see if VAL1 and VAL2 are not the same. */
1298 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1299 return 0;
1301 /* If VAL1 is a lower address than VAL2, return -1. */
1302 if (operand_less_p (val1, val2) == 1)
1303 return -1;
1305 /* If VAL1 is a higher address than VAL2, return +1. */
1306 if (operand_less_p (val2, val1) == 1)
1307 return 1;
1309 /* If VAL1 is different than VAL2, return +2.
1310 For integer constants we either have already returned -1 or 1
1311 or they are equivalent. We still might succeed in proving
1312 something about non-trivial operands. */
1313 if (TREE_CODE (val1) != INTEGER_CST
1314 || TREE_CODE (val2) != INTEGER_CST)
1316 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1317 if (t && integer_onep (t))
1318 return 2;
1321 return -2;
1325 /* Compare values like compare_values_warnv, but treat comparisons of
1326 nonconstants which rely on undefined overflow as incomparable. */
1328 static int
1329 compare_values (tree val1, tree val2)
1331 bool sop;
1332 int ret;
1334 sop = false;
1335 ret = compare_values_warnv (val1, val2, &sop);
1336 if (sop
1337 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1338 ret = -2;
1339 return ret;
1343 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1344 0 if VAL is not inside [MIN, MAX],
1345 -2 if we cannot tell either way.
1347 Benchmark compile/20001226-1.c compilation time after changing this
1348 function. */
1350 static inline int
1351 value_inside_range (tree val, tree min, tree max)
1353 int cmp1, cmp2;
1355 cmp1 = operand_less_p (val, min);
1356 if (cmp1 == -2)
1357 return -2;
1358 if (cmp1 == 1)
1359 return 0;
1361 cmp2 = operand_less_p (max, val);
1362 if (cmp2 == -2)
1363 return -2;
1365 return !cmp2;
1369 /* Return true if value ranges VR0 and VR1 have a non-empty
1370 intersection.
1372 Benchmark compile/20001226-1.c compilation time after changing this
1373 function.
1376 static inline bool
1377 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1379 /* The value ranges do not intersect if the maximum of the first range is
1380 less than the minimum of the second range or vice versa.
1381 When those relations are unknown, we can't do any better. */
1382 if (operand_less_p (vr0->max, vr1->min) != 0)
1383 return false;
1384 if (operand_less_p (vr1->max, vr0->min) != 0)
1385 return false;
1386 return true;
1390 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1391 include the value zero, -2 if we cannot tell. */
1393 static inline int
1394 range_includes_zero_p (tree min, tree max)
1396 tree zero = build_int_cst (TREE_TYPE (min), 0);
1397 return value_inside_range (zero, min, max);
1400 /* Return true if *VR is known to only contain nonnegative values. */
1402 static inline bool
1403 value_range_nonnegative_p (value_range_t *vr)
1405 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1406 which would return a useful value should be encoded as a
1407 VR_RANGE. */
1408 if (vr->type == VR_RANGE)
1410 int result = compare_values (vr->min, integer_zero_node);
1411 return (result == 0 || result == 1);
1414 return false;
1417 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1418 false otherwise or if no value range information is available. */
1420 bool
1421 ssa_name_nonnegative_p (const_tree t)
1423 value_range_t *vr = get_value_range (t);
1425 if (INTEGRAL_TYPE_P (t)
1426 && TYPE_UNSIGNED (t))
1427 return true;
1429 if (!vr)
1430 return false;
1432 return value_range_nonnegative_p (vr);
1435 /* If *VR has a value range that is a single constant value, return that,
1436 otherwise return NULL_TREE. */
1438 static tree
1439 value_range_constant_singleton (value_range_t *vr)
1441 if (vr->type == VR_RANGE
1442 && operand_equal_p (vr->min, vr->max, 0)
1443 && is_gimple_min_invariant (vr->min))
1444 return vr->min;
1446 return NULL_TREE;
1449 /* If OP has a value range with a single constant value return that,
1450 otherwise return NULL_TREE. This returns OP itself if OP is a
1451 constant. */
1453 static tree
1454 op_with_constant_singleton_value_range (tree op)
1456 if (is_gimple_min_invariant (op))
1457 return op;
1459 if (TREE_CODE (op) != SSA_NAME)
1460 return NULL_TREE;
1462 return value_range_constant_singleton (get_value_range (op));
1465 /* Return true if op is in a boolean [0, 1] value-range. */
1467 static bool
1468 op_with_boolean_value_range_p (tree op)
1470 value_range_t *vr;
1472 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1473 return true;
1475 if (integer_zerop (op)
1476 || integer_onep (op))
1477 return true;
1479 if (TREE_CODE (op) != SSA_NAME)
1480 return false;
1482 vr = get_value_range (op);
1483 return (vr->type == VR_RANGE
1484 && integer_zerop (vr->min)
1485 && integer_onep (vr->max));
1488 /* Extract value range information from an ASSERT_EXPR EXPR and store
1489 it in *VR_P. */
1491 static void
1492 extract_range_from_assert (value_range_t *vr_p, tree expr)
1494 tree var, cond, limit, min, max, type;
1495 value_range_t *limit_vr;
1496 enum tree_code cond_code;
1498 var = ASSERT_EXPR_VAR (expr);
1499 cond = ASSERT_EXPR_COND (expr);
1501 gcc_assert (COMPARISON_CLASS_P (cond));
1503 /* Find VAR in the ASSERT_EXPR conditional. */
1504 if (var == TREE_OPERAND (cond, 0)
1505 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1506 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1508 /* If the predicate is of the form VAR COMP LIMIT, then we just
1509 take LIMIT from the RHS and use the same comparison code. */
1510 cond_code = TREE_CODE (cond);
1511 limit = TREE_OPERAND (cond, 1);
1512 cond = TREE_OPERAND (cond, 0);
1514 else
1516 /* If the predicate is of the form LIMIT COMP VAR, then we need
1517 to flip around the comparison code to create the proper range
1518 for VAR. */
1519 cond_code = swap_tree_comparison (TREE_CODE (cond));
1520 limit = TREE_OPERAND (cond, 0);
1521 cond = TREE_OPERAND (cond, 1);
1524 limit = avoid_overflow_infinity (limit);
1526 type = TREE_TYPE (var);
1527 gcc_assert (limit != var);
1529 /* For pointer arithmetic, we only keep track of pointer equality
1530 and inequality. */
1531 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1533 set_value_range_to_varying (vr_p);
1534 return;
1537 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1538 try to use LIMIT's range to avoid creating symbolic ranges
1539 unnecessarily. */
1540 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1542 /* LIMIT's range is only interesting if it has any useful information. */
1543 if (limit_vr
1544 && (limit_vr->type == VR_UNDEFINED
1545 || limit_vr->type == VR_VARYING
1546 || symbolic_range_p (limit_vr)))
1547 limit_vr = NULL;
1549 /* Initially, the new range has the same set of equivalences of
1550 VAR's range. This will be revised before returning the final
1551 value. Since assertions may be chained via mutually exclusive
1552 predicates, we will need to trim the set of equivalences before
1553 we are done. */
1554 gcc_assert (vr_p->equiv == NULL);
1555 add_equivalence (&vr_p->equiv, var);
1557 /* Extract a new range based on the asserted comparison for VAR and
1558 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1559 will only use it for equality comparisons (EQ_EXPR). For any
1560 other kind of assertion, we cannot derive a range from LIMIT's
1561 anti-range that can be used to describe the new range. For
1562 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1563 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1564 no single range for x_2 that could describe LE_EXPR, so we might
1565 as well build the range [b_4, +INF] for it.
1566 One special case we handle is extracting a range from a
1567 range test encoded as (unsigned)var + CST <= limit. */
1568 if (TREE_CODE (cond) == NOP_EXPR
1569 || TREE_CODE (cond) == PLUS_EXPR)
1571 if (TREE_CODE (cond) == PLUS_EXPR)
1573 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1574 TREE_OPERAND (cond, 1));
1575 max = int_const_binop (PLUS_EXPR, limit, min);
1576 cond = TREE_OPERAND (cond, 0);
1578 else
1580 min = build_int_cst (TREE_TYPE (var), 0);
1581 max = limit;
1584 /* Make sure to not set TREE_OVERFLOW on the final type
1585 conversion. We are willingly interpreting large positive
1586      unsigned values as negative signed values here. */
1587 min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
1588 0, false);
1589 max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
1590 0, false);
1592 /* We can transform a max, min range to an anti-range or
1593 vice-versa. Use set_and_canonicalize_value_range which does
1594 this for us. */
1595 if (cond_code == LE_EXPR)
1596 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1597 min, max, vr_p->equiv);
1598 else if (cond_code == GT_EXPR)
1599 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1600 min, max, vr_p->equiv);
1601 else
1602 gcc_unreachable ();
1604 else if (cond_code == EQ_EXPR)
1606 enum value_range_type range_type;
1608 if (limit_vr)
1610 range_type = limit_vr->type;
1611 min = limit_vr->min;
1612 max = limit_vr->max;
1614 else
1616 range_type = VR_RANGE;
1617 min = limit;
1618 max = limit;
1621 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1623 /* When asserting the equality VAR == LIMIT and LIMIT is another
1624 SSA name, the new range will also inherit the equivalence set
1625 from LIMIT. */
1626 if (TREE_CODE (limit) == SSA_NAME)
1627 add_equivalence (&vr_p->equiv, limit);
1629 else if (cond_code == NE_EXPR)
1631 /* As described above, when LIMIT's range is an anti-range and
1632 this assertion is an inequality (NE_EXPR), then we cannot
1633 derive anything from the anti-range. For instance, if
1634 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1635 not imply that VAR's range is [0, 0]. So, in the case of
1636 anti-ranges, we just assert the inequality using LIMIT and
1637 not its anti-range.
1639 If LIMIT_VR is a range, we can only use it to build a new
1640 anti-range if LIMIT_VR is a single-valued range. For
1641 instance, if LIMIT_VR is [0, 1], the predicate
1642 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1643 Rather, it means that for value 0 VAR should be ~[0, 0]
1644 and for value 1, VAR should be ~[1, 1]. We cannot
1645 represent these ranges.
1647 The only situation in which we can build a valid
1648 anti-range is when LIMIT_VR is a single-valued range
1649 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1650 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1651 if (limit_vr
1652 && limit_vr->type == VR_RANGE
1653 && compare_values (limit_vr->min, limit_vr->max) == 0)
1655 min = limit_vr->min;
1656 max = limit_vr->max;
1658 else
1660 /* In any other case, we cannot use LIMIT's range to build a
1661 valid anti-range. */
1662 min = max = limit;
1665 /* If MIN and MAX cover the whole range for their type, then
1666 just use the original LIMIT. */
1667 if (INTEGRAL_TYPE_P (type)
1668 && vrp_val_is_min (min)
1669 && vrp_val_is_max (max))
1670 min = max = limit;
1672 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
1674 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1676 min = TYPE_MIN_VALUE (type);
1678 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1679 max = limit;
1680 else
1682 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1683 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1684 LT_EXPR. */
1685 max = limit_vr->max;
1688 /* If the maximum value forces us to be out of bounds, simply punt.
1689 It would be pointless to try and do anything more since this
1690 all should be optimized away above us. */
1691 if ((cond_code == LT_EXPR
1692 && compare_values (max, min) == 0)
1693 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1694 set_value_range_to_varying (vr_p);
1695 else
1697 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1698 if (cond_code == LT_EXPR)
1700 if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1701 && !TYPE_UNSIGNED (TREE_TYPE (max)))
1702 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1703 build_int_cst (TREE_TYPE (max), -1));
1704 else
1705 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1706 build_int_cst (TREE_TYPE (max), 1));
1707 if (EXPR_P (max))
1708 TREE_NO_WARNING (max) = 1;
1711 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1714 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1716 max = TYPE_MAX_VALUE (type);
1718 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1719 min = limit;
1720 else
1722 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1723 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1724 GT_EXPR. */
1725 min = limit_vr->min;
1728 /* If the minimum value forces us to be out of bounds, simply punt.
1729 It would be pointless to try and do anything more since this
1730 all should be optimized away above us. */
1731 if ((cond_code == GT_EXPR
1732 && compare_values (min, max) == 0)
1733 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1734 set_value_range_to_varying (vr_p);
1735 else
1737 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1738 if (cond_code == GT_EXPR)
1740 if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1741 && !TYPE_UNSIGNED (TREE_TYPE (min)))
1742 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1743 build_int_cst (TREE_TYPE (min), -1));
1744 else
1745 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1746 build_int_cst (TREE_TYPE (min), 1));
1747 if (EXPR_P (min))
1748 TREE_NO_WARNING (min) = 1;
1751 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1754 else
1755 gcc_unreachable ();
1757 /* Finally intersect the new range with what we already know about var. */
1758 vrp_intersect_ranges (vr_p, get_value_range (var));
1762 /* Extract range information from SSA name VAR and store it in VR. If
1763 VAR has an interesting range, use it. Otherwise, create the
1764 range [VAR, VAR] and return it. This is useful in situations where
1765 we may have conditionals testing values of VARYING names. For
1766 instance,
1768 x_3 = y_5;
1769 if (x_3 > y_5)
1772 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1773 always false. */
1775 static void
1776 extract_range_from_ssa_name (value_range_t *vr, tree var)
1778 value_range_t *var_vr = get_value_range (var);
1780 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1781 copy_value_range (vr, var_vr);
1782 else
1783 set_value_range (vr, VR_RANGE, var, var, NULL);
1785 add_equivalence (&vr->equiv, var);
1789 /* Wrapper around int_const_binop. If the operation overflows and we
1790 are not using wrapping arithmetic, then adjust the result to be
1791 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1792 NULL_TREE if we need to use an overflow infinity representation but
1793 the type does not support it. */
1795 static tree
1796 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1798 tree res;
1800 res = int_const_binop (code, val1, val2);
1802 /* If we are using unsigned arithmetic, operate symbolically
1803 on -INF and +INF as int_const_binop only handles signed overflow. */
1804 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1806 int checkz = compare_values (res, val1);
1807 bool overflow = false;
1809 /* Ensure that res = val1 [+*] val2 >= val1
1810 or that res = val1 - val2 <= val1. */
1811 if ((code == PLUS_EXPR
1812 && !(checkz == 1 || checkz == 0))
1813 || (code == MINUS_EXPR
1814 && !(checkz == 0 || checkz == -1)))
1816 overflow = true;
1818 /* Checking for multiplication overflow is done by dividing the
1819 output of the multiplication by the first input of the
1820 multiplication. If the result of that division operation is
1821 not equal to the second input of the multiplication, then the
1822 multiplication overflowed. */
1823 else if (code == MULT_EXPR && !integer_zerop (val1))
1825 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1826 res,
1827 val1);
1828 int check = compare_values (tmp, val2);
1830 if (check != 0)
1831 overflow = true;
1834 if (overflow)
1836 res = copy_node (res);
1837 TREE_OVERFLOW (res) = 1;
1841 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1842     /* If the signed operation wraps then int_const_binop has done
1843 everything we want. */
1845 else if ((TREE_OVERFLOW (res)
1846 && !TREE_OVERFLOW (val1)
1847 && !TREE_OVERFLOW (val2))
1848 || is_overflow_infinity (val1)
1849 || is_overflow_infinity (val2))
1851 /* If the operation overflowed but neither VAL1 nor VAL2 are
1852 overflown, return -INF or +INF depending on the operation
1853 and the combination of signs of the operands. */
1854 int sgn1 = tree_int_cst_sgn (val1);
1855 int sgn2 = tree_int_cst_sgn (val2);
1857 if (needs_overflow_infinity (TREE_TYPE (res))
1858 && !supports_overflow_infinity (TREE_TYPE (res)))
1859 return NULL_TREE;
1861 /* We have to punt on adding infinities of different signs,
1862 since we can't tell what the sign of the result should be.
1863 Likewise for subtracting infinities of the same sign. */
1864 if (((code == PLUS_EXPR && sgn1 != sgn2)
1865 || (code == MINUS_EXPR && sgn1 == sgn2))
1866 && is_overflow_infinity (val1)
1867 && is_overflow_infinity (val2))
1868 return NULL_TREE;
1870 /* Don't try to handle division or shifting of infinities. */
1871 if ((code == TRUNC_DIV_EXPR
1872 || code == FLOOR_DIV_EXPR
1873 || code == CEIL_DIV_EXPR
1874 || code == EXACT_DIV_EXPR
1875 || code == ROUND_DIV_EXPR
1876 || code == RSHIFT_EXPR)
1877 && (is_overflow_infinity (val1)
1878 || is_overflow_infinity (val2)))
1879 return NULL_TREE;
1881 /* Notice that we only need to handle the restricted set of
1882 operations handled by extract_range_from_binary_expr.
1883 Among them, only multiplication, addition and subtraction
1884 can yield overflow without overflown operands because we
1885 are working with integral types only... except in the
1886 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1887 for division too. */
1889 /* For multiplication, the sign of the overflow is given
1890 by the comparison of the signs of the operands. */
1891 if ((code == MULT_EXPR && sgn1 == sgn2)
1892 /* For addition, the operands must be of the same sign
1893 to yield an overflow. Its sign is therefore that
1894 of one of the operands, for example the first. For
1895 infinite operands X + -INF is negative, not positive. */
1896 || (code == PLUS_EXPR
1897 && (sgn1 >= 0
1898 ? !is_negative_overflow_infinity (val2)
1899 : is_positive_overflow_infinity (val2)))
1900 /* For subtraction, non-infinite operands must be of
1901 different signs to yield an overflow. Its sign is
1902 therefore that of the first operand or the opposite of
1903 that of the second operand. A first operand of 0 counts
1904 as positive here, for the corner case 0 - (-INF), which
1905 overflows, but must yield +INF. For infinite operands 0
1906 - INF is negative, not positive. */
1907 || (code == MINUS_EXPR
1908 && (sgn1 >= 0
1909 ? !is_positive_overflow_infinity (val2)
1910 : is_negative_overflow_infinity (val2)))
1911 /* We only get in here with positive shift count, so the
1912 overflow direction is the same as the sign of val1.
1913 Actually rshift does not overflow at all, but we only
1914 handle the case of shifting overflowed -INF and +INF. */
1915 || (code == RSHIFT_EXPR
1916 && sgn1 >= 0)
1917 /* For division, the only case is -INF / -1 = +INF. */
1918 || code == TRUNC_DIV_EXPR
1919 || code == FLOOR_DIV_EXPR
1920 || code == CEIL_DIV_EXPR
1921 || code == EXACT_DIV_EXPR
1922 || code == ROUND_DIV_EXPR)
1923 return (needs_overflow_infinity (TREE_TYPE (res))
1924 ? positive_overflow_infinity (TREE_TYPE (res))
1925 : TYPE_MAX_VALUE (TREE_TYPE (res)));
1926 else
1927 return (needs_overflow_infinity (TREE_TYPE (res))
1928 ? negative_overflow_infinity (TREE_TYPE (res))
1929 : TYPE_MIN_VALUE (TREE_TYPE (res)));
1932 return res;
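/* Editorial sketch, not part of tree-vrp.c: the divide-back overflow test
   that vrp_int_const_binop uses for unsigned multiplication, on plain
   uint32_t.  If a * b wrapped, then (a * b) / a no longer equals b (for
   a != 0), which is the check done above with TRUNC_DIV_EXPR and
   compare_values.  The function name is invented for illustration.  */

#include <stdbool.h>
#include <stdint.h>

static bool
umul32_overflows (uint32_t a, uint32_t b)
{
  uint32_t prod = a * b;	   /* Unsigned wrap-around is well defined.  */
  return a != 0 && prod / a != b;  /* Divide back and compare.  */
}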
1936 /* For range VR compute two double_int bitmasks.  If a bit is unset in
1937    *MAY_BE_NONZERO, that bit is 0 for every number in the range;
1938    otherwise it may be either 0 or 1.  If a bit is set in
1939    *MUST_BE_NONZERO, that bit is 1 for every number in the range;
1940    otherwise it may be either 0 or 1. */
1942 static bool
1943 zero_nonzero_bits_from_vr (value_range_t *vr,
1944 double_int *may_be_nonzero,
1945 double_int *must_be_nonzero)
1947 *may_be_nonzero = double_int_minus_one;
1948 *must_be_nonzero = double_int_zero;
1949 if (!range_int_cst_p (vr)
1950 || TREE_OVERFLOW (vr->min)
1951 || TREE_OVERFLOW (vr->max))
1952 return false;
1954 if (range_int_cst_singleton_p (vr))
1956 *may_be_nonzero = tree_to_double_int (vr->min);
1957 *must_be_nonzero = *may_be_nonzero;
1959 else if (tree_int_cst_sgn (vr->min) >= 0
1960 || tree_int_cst_sgn (vr->max) < 0)
1962 double_int dmin = tree_to_double_int (vr->min);
1963 double_int dmax = tree_to_double_int (vr->max);
1964 double_int xor_mask = double_int_xor (dmin, dmax);
1965 *may_be_nonzero = double_int_ior (dmin, dmax);
1966 *must_be_nonzero = double_int_and (dmin, dmax);
1967 if (xor_mask.high != 0)
1969 unsigned HOST_WIDE_INT mask
1970 = ((unsigned HOST_WIDE_INT) 1
1971 << floor_log2 (xor_mask.high)) - 1;
1972 may_be_nonzero->low = ALL_ONES;
1973 may_be_nonzero->high |= mask;
1974 must_be_nonzero->low = 0;
1975 must_be_nonzero->high &= ~mask;
1977 else if (xor_mask.low != 0)
1979 unsigned HOST_WIDE_INT mask
1980 = ((unsigned HOST_WIDE_INT) 1
1981 << floor_log2 (xor_mask.low)) - 1;
1982 may_be_nonzero->low |= mask;
1983 must_be_nonzero->low &= ~mask;
1987 return true;
1990 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
1991 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
1992 false otherwise. If *AR can be represented with a single range
1993 *VR1 will be VR_UNDEFINED. */
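/* For instance, for a 32-bit int, ~[3, 5] becomes [INT_MIN, 2] and
   [6, INT_MAX], while ~[INT_MIN, 5] fits in the single range
   [6, INT_MAX] and *VR1 is left VR_UNDEFINED.  */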
1995 static bool
1996 ranges_from_anti_range (value_range_t *ar,
1997 value_range_t *vr0, value_range_t *vr1)
1999 tree type = TREE_TYPE (ar->min);
2001 vr0->type = VR_UNDEFINED;
2002 vr1->type = VR_UNDEFINED;
2004 if (ar->type != VR_ANTI_RANGE
2005 || TREE_CODE (ar->min) != INTEGER_CST
2006 || TREE_CODE (ar->max) != INTEGER_CST
2007 || !vrp_val_min (type)
2008 || !vrp_val_max (type))
2009 return false;
2011 if (!vrp_val_is_min (ar->min))
2013 vr0->type = VR_RANGE;
2014 vr0->min = vrp_val_min (type);
2015 vr0->max
2016 = double_int_to_tree (type,
2017 double_int_sub (tree_to_double_int (ar->min),
2018 double_int_one));
2020 if (!vrp_val_is_max (ar->max))
2022 vr1->type = VR_RANGE;
2023 vr1->min
2024 = double_int_to_tree (type,
2025 double_int_add (tree_to_double_int (ar->max),
2026 double_int_one));
2027 vr1->max = vrp_val_max (type);
2029 if (vr0->type == VR_UNDEFINED)
2031 *vr0 = *vr1;
2032 vr1->type = VR_UNDEFINED;
2035 return vr0->type != VR_UNDEFINED;
2038 /* Helper to extract a value-range *VR for a multiplicative operation
2039 *VR0 CODE *VR1. */
2041 static void
2042 extract_range_from_multiplicative_op_1 (value_range_t *vr,
2043 enum tree_code code,
2044 value_range_t *vr0, value_range_t *vr1)
2046 enum value_range_type type;
2047 tree val[4];
2048 size_t i;
2049 tree min, max;
2050 bool sop;
2051 int cmp;
2053 /* Multiplications, divisions and shifts are a bit tricky to handle,
2054 depending on the mix of signs we have in the two ranges, we
2055 need to operate on different values to get the minimum and
2056 maximum values for the new range. One approach is to figure
2057 out all the variations of range combinations and do the
2058 operations.
2060 However, this involves several calls to compare_values and it
2061 is pretty convoluted. It's simpler to do the 4 operations
2062 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
2063 and then figure the smallest and largest values to form
2064 the new range. */
2065 gcc_assert (code == MULT_EXPR
2066 || code == TRUNC_DIV_EXPR
2067 || code == FLOOR_DIV_EXPR
2068 || code == CEIL_DIV_EXPR
2069 || code == EXACT_DIV_EXPR
2070 || code == ROUND_DIV_EXPR
2071 || code == RSHIFT_EXPR);
2072 gcc_assert ((vr0->type == VR_RANGE
2073 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2074 && vr0->type == vr1->type);
2076 type = vr0->type;
2078 /* Compute the 4 cross operations. */
2079 sop = false;
2080 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2081 if (val[0] == NULL_TREE)
2082 sop = true;
2084 if (vr1->max == vr1->min)
2085 val[1] = NULL_TREE;
2086 else
2088 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2089 if (val[1] == NULL_TREE)
2090 sop = true;
2093 if (vr0->max == vr0->min)
2094 val[2] = NULL_TREE;
2095 else
2097 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2098 if (val[2] == NULL_TREE)
2099 sop = true;
2102 if (vr0->min == vr0->max || vr1->min == vr1->max)
2103 val[3] = NULL_TREE;
2104 else
2106 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2107 if (val[3] == NULL_TREE)
2108 sop = true;
2111 if (sop)
2113 set_value_range_to_varying (vr);
2114 return;
2117 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2118 of VAL[i]. */
2119 min = val[0];
2120 max = val[0];
2121 for (i = 1; i < 4; i++)
2123 if (!is_gimple_min_invariant (min)
2124 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2125 || !is_gimple_min_invariant (max)
2126 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2127 break;
2129 if (val[i])
2131 if (!is_gimple_min_invariant (val[i])
2132 || (TREE_OVERFLOW (val[i])
2133 && !is_overflow_infinity (val[i])))
2135 /* If we found an overflowed value, set MIN and MAX
2136 to it so that we set the resulting range to
2137 VARYING. */
2138 min = max = val[i];
2139 break;
2142 if (compare_values (val[i], min) == -1)
2143 min = val[i];
2145 if (compare_values (val[i], max) == 1)
2146 max = val[i];
2150 /* If either MIN or MAX overflowed, then set the resulting range to
2151 VARYING. But we do accept an overflow infinity
2152 representation. */
2153 if (min == NULL_TREE
2154 || !is_gimple_min_invariant (min)
2155 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2156 || max == NULL_TREE
2157 || !is_gimple_min_invariant (max)
2158 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2160 set_value_range_to_varying (vr);
2161 return;
2164 /* We punt if:
2165 1) [-INF, +INF]
2166 2) [-INF, +-INF(OVF)]
2167 3) [+-INF(OVF), +INF]
2168 4) [+-INF(OVF), +-INF(OVF)]
2169 We learn nothing when we have INF and INF(OVF) on both sides.
2170 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2171 overflow. */
2172 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2173 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2175 set_value_range_to_varying (vr);
2176 return;
2179 cmp = compare_values (min, max);
2180 if (cmp == -2 || cmp == 1)
2182 /* If the new range has its limits swapped around (MIN > MAX),
2183 then the operation caused one of them to wrap around, mark
2184 the new range VARYING. */
2185 set_value_range_to_varying (vr);
2187 else
2188 set_value_range (vr, type, min, max, NULL);
2191 /* Some quadruple precision helpers. */
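/* Each quantity is a pair of double_ints (LOW, HIGH) with HIGH holding
   the more significant half, so a comparison looks at HIGH first and
   falls back to an unsigned comparison of LOW.  */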
2192 static int
2193 quad_int_cmp (double_int l0, double_int h0,
2194 double_int l1, double_int h1, bool uns)
2196 int c = double_int_cmp (h0, h1, uns);
2197 if (c != 0) return c;
2198 return double_int_ucmp (l0, l1);
2201 static void
2202 quad_int_pair_sort (double_int *l0, double_int *h0,
2203 double_int *l1, double_int *h1, bool uns)
2205 if (quad_int_cmp (*l0, *h0, *l1, *h1, uns) > 0)
2207 double_int tmp;
2208 tmp = *l0; *l0 = *l1; *l1 = tmp;
2209 tmp = *h0; *h0 = *h1; *h1 = tmp;
2213 /* Extract range information from a binary operation CODE based on
2214 the ranges of each of its operands, *VR0 and *VR1 with resulting
2215 type EXPR_TYPE. The resulting range is stored in *VR. */
2217 static void
2218 extract_range_from_binary_expr_1 (value_range_t *vr,
2219 enum tree_code code, tree expr_type,
2220 value_range_t *vr0_, value_range_t *vr1_)
2222 value_range_t vr0 = *vr0_, vr1 = *vr1_;
2223 value_range_t vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
2224 enum value_range_type type;
2225 tree min = NULL_TREE, max = NULL_TREE;
2226 int cmp;
2228 if (!INTEGRAL_TYPE_P (expr_type)
2229 && !POINTER_TYPE_P (expr_type))
2231 set_value_range_to_varying (vr);
2232 return;
2235 /* Not all binary expressions can be applied to ranges in a
2236 meaningful way. Handle only arithmetic operations. */
2237 if (code != PLUS_EXPR
2238 && code != MINUS_EXPR
2239 && code != POINTER_PLUS_EXPR
2240 && code != MULT_EXPR
2241 && code != TRUNC_DIV_EXPR
2242 && code != FLOOR_DIV_EXPR
2243 && code != CEIL_DIV_EXPR
2244 && code != EXACT_DIV_EXPR
2245 && code != ROUND_DIV_EXPR
2246 && code != TRUNC_MOD_EXPR
2247 && code != RSHIFT_EXPR
2248 && code != LSHIFT_EXPR
2249 && code != MIN_EXPR
2250 && code != MAX_EXPR
2251 && code != BIT_AND_EXPR
2252 && code != BIT_IOR_EXPR
2253 && code != BIT_XOR_EXPR)
2255 set_value_range_to_varying (vr);
2256 return;
2259 /* If both ranges are UNDEFINED, so is the result. */
2260 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
2262 set_value_range_to_undefined (vr);
2263 return;
2265 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2266 code. At some point we may want to special-case operations that
2267 have UNDEFINED result for all or some value-ranges of the not UNDEFINED
2268 operand. */
2269 else if (vr0.type == VR_UNDEFINED)
2270 set_value_range_to_varying (&vr0);
2271 else if (vr1.type == VR_UNDEFINED)
2272 set_value_range_to_varying (&vr1);
2274 /* Now canonicalize anti-ranges to ranges when they are not symbolic
2275 and express ~[] op X as ([]' op X) U ([]'' op X). */
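/* E.g. ~[0, 0] + [1, 1] is evaluated as
   ([-INF, -1] + [1, 1]) U ([1, +INF] + [1, 1]).  */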
2276 if (vr0.type == VR_ANTI_RANGE
2277 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2279 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
2280 if (vrtem1.type != VR_UNDEFINED)
2282 value_range_t vrres = VR_INITIALIZER;
2283 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2284 &vrtem1, vr1_);
2285 vrp_meet (vr, &vrres);
2287 return;
2289 /* Likewise for X op ~[]. */
2290 if (vr1.type == VR_ANTI_RANGE
2291 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
2293 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
2294 if (vrtem1.type != VR_UNDEFINED)
2296 value_range_t vrres = VR_INITIALIZER;
2297 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2298 vr0_, &vrtem1);
2299 vrp_meet (vr, &vrres);
2301 return;
2304 /* The type of the resulting value range defaults to VR0.TYPE. */
2305 type = vr0.type;
2307 /* Refuse to operate on VARYING ranges, ranges of different kinds
2308 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2309 because we may be able to derive a useful range even if one of
2310 the operands is VR_VARYING or symbolic range. Similarly for
2311 divisions. TODO, we may be able to derive anti-ranges in
2312 some cases. */
2313 if (code != BIT_AND_EXPR
2314 && code != BIT_IOR_EXPR
2315 && code != TRUNC_DIV_EXPR
2316 && code != FLOOR_DIV_EXPR
2317 && code != CEIL_DIV_EXPR
2318 && code != EXACT_DIV_EXPR
2319 && code != ROUND_DIV_EXPR
2320 && code != TRUNC_MOD_EXPR
2321 && (vr0.type == VR_VARYING
2322 || vr1.type == VR_VARYING
2323 || vr0.type != vr1.type
2324 || symbolic_range_p (&vr0)
2325 || symbolic_range_p (&vr1)))
2327 set_value_range_to_varying (vr);
2328 return;
2331 /* Now evaluate the expression to determine the new range. */
2332 if (POINTER_TYPE_P (expr_type))
2334 if (code == MIN_EXPR || code == MAX_EXPR)
2336 /* For MIN/MAX expressions with pointers, we only care about
2337 nullness, if both are non null, then the result is nonnull.
2338 If both are null, then the result is null. Otherwise they
2339 are varying. */
2340 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2341 set_value_range_to_nonnull (vr, expr_type);
2342 else if (range_is_null (&vr0) && range_is_null (&vr1))
2343 set_value_range_to_null (vr, expr_type);
2344 else
2345 set_value_range_to_varying (vr);
2347 else if (code == POINTER_PLUS_EXPR)
2349 /* For pointer types, we are really only interested in asserting
2350 whether the expression evaluates to non-NULL. */
2351 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2352 set_value_range_to_nonnull (vr, expr_type);
2353 else if (range_is_null (&vr0) && range_is_null (&vr1))
2354 set_value_range_to_null (vr, expr_type);
2355 else
2356 set_value_range_to_varying (vr);
2358 else if (code == BIT_AND_EXPR)
2360 /* For pointer types, we are really only interested in asserting
2361 whether the expression evaluates to non-NULL. */
2362 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2363 set_value_range_to_nonnull (vr, expr_type);
2364 else if (range_is_null (&vr0) || range_is_null (&vr1))
2365 set_value_range_to_null (vr, expr_type);
2366 else
2367 set_value_range_to_varying (vr);
2369 else
2370 set_value_range_to_varying (vr);
2372 return;
2375 /* For integer ranges, apply the operation to each end of the
2376 range and see what we end up with. */
2377 if (code == PLUS_EXPR || code == MINUS_EXPR)
2379 /* If we have a PLUS_EXPR with two VR_RANGE integer constant
2380 ranges compute the precise range for such case if possible. */
2381 if (range_int_cst_p (&vr0)
2382 && range_int_cst_p (&vr1)
2383 /* We need as many bits as the possibly unsigned inputs. */
2384 && TYPE_PRECISION (expr_type) <= HOST_BITS_PER_DOUBLE_INT)
2386 double_int min0 = tree_to_double_int (vr0.min);
2387 double_int max0 = tree_to_double_int (vr0.max);
2388 double_int min1 = tree_to_double_int (vr1.min);
2389 double_int max1 = tree_to_double_int (vr1.max);
2390 bool uns = TYPE_UNSIGNED (expr_type);
2391 double_int type_min
2392 = double_int_min_value (TYPE_PRECISION (expr_type), uns);
2393 double_int type_max
2394 = double_int_max_value (TYPE_PRECISION (expr_type), uns);
2395 double_int dmin, dmax;
2396 int min_ovf = 0;
2397 int max_ovf = 0;
2399 if (code == PLUS_EXPR)
2401 dmin = double_int_add (min0, min1);
2402 dmax = double_int_add (max0, max1);
2404 /* Check for overflow in double_int. */
2405 if (double_int_cmp (min1, double_int_zero, uns)
2406 != double_int_cmp (dmin, min0, uns))
2407 min_ovf = double_int_cmp (min0, dmin, uns);
2408 if (double_int_cmp (max1, double_int_zero, uns)
2409 != double_int_cmp (dmax, max0, uns))
2410 max_ovf = double_int_cmp (max0, dmax, uns);
2412 else /* if (code == MINUS_EXPR) */
2414 dmin = double_int_sub (min0, max1);
2415 dmax = double_int_sub (max0, min1);
2417 if (double_int_cmp (double_int_zero, max1, uns)
2418 != double_int_cmp (dmin, min0, uns))
2419 min_ovf = double_int_cmp (min0, max1, uns);
2420 if (double_int_cmp (double_int_zero, min1, uns)
2421 != double_int_cmp (dmax, max0, uns))
2422 max_ovf = double_int_cmp (max0, min1, uns);
2425 /* For non-wrapping arithmetic look at possibly smaller
2426 value-ranges of the type. */
2427 if (!TYPE_OVERFLOW_WRAPS (expr_type))
2429 if (vrp_val_min (expr_type))
2430 type_min = tree_to_double_int (vrp_val_min (expr_type));
2431 if (vrp_val_max (expr_type))
2432 type_max = tree_to_double_int (vrp_val_max (expr_type));
2435 /* Check for type overflow. */
2436 if (min_ovf == 0)
2438 if (double_int_cmp (dmin, type_min, uns) == -1)
2439 min_ovf = -1;
2440 else if (double_int_cmp (dmin, type_max, uns) == 1)
2441 min_ovf = 1;
2443 if (max_ovf == 0)
2445 if (double_int_cmp (dmax, type_min, uns) == -1)
2446 max_ovf = -1;
2447 else if (double_int_cmp (dmax, type_max, uns) == 1)
2448 max_ovf = 1;
2451 if (TYPE_OVERFLOW_WRAPS (expr_type))
2453 /* If overflow wraps, truncate the values and adjust the
2454 range kind and bounds appropriately. */
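/* E.g. for unsigned char, [250, 254] + [1, 10] overflows only in its
   upper bound, so the result below is the anti-range ~[9, 250],
   i.e. [251, 255] U [0, 8].  */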
2455 double_int tmin
2456 = double_int_ext (dmin, TYPE_PRECISION (expr_type), uns);
2457 double_int tmax
2458 = double_int_ext (dmax, TYPE_PRECISION (expr_type), uns);
2459 if (min_ovf == max_ovf)
2461 /* No overflow or both overflow or underflow. The
2462 range kind stays VR_RANGE. */
2463 min = double_int_to_tree (expr_type, tmin);
2464 max = double_int_to_tree (expr_type, tmax);
2466 else if (min_ovf == -1
2467 && max_ovf == 1)
2469 /* Underflow and overflow, drop to VR_VARYING. */
2470 set_value_range_to_varying (vr);
2471 return;
2473 else
2475 /* Min underflow or max overflow. The range kind
2476 changes to VR_ANTI_RANGE. */
2477 double_int tem = tmin;
2478 gcc_assert ((min_ovf == -1 && max_ovf == 0)
2479 || (max_ovf == 1 && min_ovf == 0));
2480 type = VR_ANTI_RANGE;
2481 tmin = double_int_add (tmax, double_int_one);
2482 tmax = double_int_add (tem, double_int_minus_one);
2483 /* If the anti-range would cover nothing, drop to varying.
2484 Likewise if the anti-range bounds are outside of the
2485 type's values. */
2486 if (double_int_cmp (tmin, tmax, uns) > 0
2487 || double_int_cmp (tmin, type_min, uns) < 0
2488 || double_int_cmp (tmax, type_max, uns) > 0)
2490 set_value_range_to_varying (vr);
2491 return;
2493 min = double_int_to_tree (expr_type, tmin);
2494 max = double_int_to_tree (expr_type, tmax);
2497 else
2499 /* If overflow does not wrap, saturate to the type's min/max
2500 value. */
2501 if (min_ovf == -1)
2503 if (needs_overflow_infinity (expr_type)
2504 && supports_overflow_infinity (expr_type))
2505 min = negative_overflow_infinity (expr_type);
2506 else
2507 min = double_int_to_tree (expr_type, type_min);
2509 else if (min_ovf == 1)
2511 if (needs_overflow_infinity (expr_type)
2512 && supports_overflow_infinity (expr_type))
2513 min = positive_overflow_infinity (expr_type);
2514 else
2515 min = double_int_to_tree (expr_type, type_max);
2517 else
2518 min = double_int_to_tree (expr_type, dmin);
2520 if (max_ovf == -1)
2522 if (needs_overflow_infinity (expr_type)
2523 && supports_overflow_infinity (expr_type))
2524 max = negative_overflow_infinity (expr_type);
2525 else
2526 max = double_int_to_tree (expr_type, type_min);
2528 else if (max_ovf == 1)
2530 if (needs_overflow_infinity (expr_type)
2531 && supports_overflow_infinity (expr_type))
2532 max = positive_overflow_infinity (expr_type);
2533 else
2534 max = double_int_to_tree (expr_type, type_max);
2536 else
2537 max = double_int_to_tree (expr_type, dmax);
2539 if (needs_overflow_infinity (expr_type)
2540 && supports_overflow_infinity (expr_type))
2542 if (is_negative_overflow_infinity (vr0.min)
2543 || (code == PLUS_EXPR
2544 ? is_negative_overflow_infinity (vr1.min)
2545 : is_positive_overflow_infinity (vr1.max)))
2546 min = negative_overflow_infinity (expr_type);
2547 if (is_positive_overflow_infinity (vr0.max)
2548 || (code == PLUS_EXPR
2549 ? is_positive_overflow_infinity (vr1.max)
2550 : is_negative_overflow_infinity (vr1.min)))
2551 max = positive_overflow_infinity (expr_type);
2554 else
2556 /* For other cases, for example if we have a PLUS_EXPR with two
2557 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
2558 to compute a precise range for such a case.
2559 ??? General even mixed range kind operations can be expressed
2560 by for example transforming ~[3, 5] + [1, 2] to range-only
2561 operations and a union primitive:
2562 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
2563 [-INF+1, 4] U [6, +INF(OVF)]
2564 though usually the union is not exactly representable with
2565 a single range or anti-range as the above is
2566 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
2567 but one could use a scheme similar to equivalences for this. */
2568 set_value_range_to_varying (vr);
2569 return;
2572 else if (code == MIN_EXPR
2573 || code == MAX_EXPR)
2575 if (vr0.type == VR_ANTI_RANGE)
2577 /* For MIN_EXPR and MAX_EXPR with two VR_ANTI_RANGEs,
2578 the resulting VR_ANTI_RANGE is the same - intersection
2579 of the two ranges. */
2580 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
2581 max = vrp_int_const_binop (MIN_EXPR, vr0.max, vr1.max);
2583 else
2585 /* For operations that make the resulting range directly
2586 proportional to the original ranges, apply the operation to
2587 the same end of each range. */
2588 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2589 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2592 else if (code == MULT_EXPR)
2594 /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
2595 drop to varying. */
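/* The idea: canonicalize "high" unsigned intervals as negative signed
   ones, compute the four double-wide cross products, sort them, and
   give up only if max - min covers the whole precision; the final
   set_and_canonicalize_value_range call then picks a range or
   anti-range for the possibly wrapped result.  */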
2596 if (range_int_cst_p (&vr0)
2597 && range_int_cst_p (&vr1)
2598 && TYPE_OVERFLOW_WRAPS (expr_type))
2600 double_int min0, max0, min1, max1, sizem1, size;
2601 double_int prod0l, prod0h, prod1l, prod1h,
2602 prod2l, prod2h, prod3l, prod3h;
2603 bool uns0, uns1, uns;
2605 sizem1 = double_int_max_value (TYPE_PRECISION (expr_type), true);
2606 size = double_int_add (sizem1, double_int_one);
2608 min0 = tree_to_double_int (vr0.min);
2609 max0 = tree_to_double_int (vr0.max);
2610 min1 = tree_to_double_int (vr1.min);
2611 max1 = tree_to_double_int (vr1.max);
2613 uns0 = TYPE_UNSIGNED (expr_type);
2614 uns1 = uns0;
2616 /* Canonicalize the intervals. */
2617 if (TYPE_UNSIGNED (expr_type))
2619 double_int min2 = double_int_sub (size, min0);
2620 if (double_int_cmp (min2, max0, true) < 0)
2622 min0 = double_int_neg (min2);
2623 max0 = double_int_sub (max0, size);
2624 uns0 = false;
2627 min2 = double_int_sub (size, min1);
2628 if (double_int_cmp (min2, max1, true) < 0)
2630 min1 = double_int_neg (min2);
2631 max1 = double_int_sub (max1, size);
2632 uns1 = false;
2635 uns = uns0 & uns1;
2637 mul_double_wide_with_sign (min0.low, min0.high,
2638 min1.low, min1.high,
2639 &prod0l.low, &prod0l.high,
2640 &prod0h.low, &prod0h.high, true);
2641 if (!uns0 && double_int_negative_p (min0))
2642 prod0h = double_int_sub (prod0h, min1);
2643 if (!uns1 && double_int_negative_p (min1))
2644 prod0h = double_int_sub (prod0h, min0);
2646 mul_double_wide_with_sign (min0.low, min0.high,
2647 max1.low, max1.high,
2648 &prod1l.low, &prod1l.high,
2649 &prod1h.low, &prod1h.high, true);
2650 if (!uns0 && double_int_negative_p (min0))
2651 prod1h = double_int_sub (prod1h, max1);
2652 if (!uns1 && double_int_negative_p (max1))
2653 prod1h = double_int_sub (prod1h, min0);
2655 mul_double_wide_with_sign (max0.low, max0.high,
2656 min1.low, min1.high,
2657 &prod2l.low, &prod2l.high,
2658 &prod2h.low, &prod2h.high, true);
2659 if (!uns0 && double_int_negative_p (max0))
2660 prod2h = double_int_sub (prod2h, min1);
2661 if (!uns1 && double_int_negative_p (min1))
2662 prod2h = double_int_sub (prod2h, max0);
2664 mul_double_wide_with_sign (max0.low, max0.high,
2665 max1.low, max1.high,
2666 &prod3l.low, &prod3l.high,
2667 &prod3h.low, &prod3h.high, true);
2668 if (!uns0 && double_int_negative_p (max0))
2669 prod3h = double_int_sub (prod3h, max1);
2670 if (!uns1 && double_int_negative_p (max1))
2671 prod3h = double_int_sub (prod3h, max0);
2673 /* Sort the 4 products. */
2674 quad_int_pair_sort (&prod0l, &prod0h, &prod3l, &prod3h, uns);
2675 quad_int_pair_sort (&prod1l, &prod1h, &prod2l, &prod2h, uns);
2676 quad_int_pair_sort (&prod0l, &prod0h, &prod1l, &prod1h, uns);
2677 quad_int_pair_sort (&prod2l, &prod2h, &prod3l, &prod3h, uns);
2679 /* Max - min. */
2680 if (double_int_zero_p (prod0l))
2682 prod1l = double_int_zero;
2683 prod1h = double_int_neg (prod0h);
2685 else
2687 prod1l = double_int_neg (prod0l);
2688 prod1h = double_int_not (prod0h);
2690 prod2l = double_int_add (prod3l, prod1l);
2691 prod2h = double_int_add (prod3h, prod1h);
2692 if (double_int_ucmp (prod2l, prod3l) < 0)
2693 prod2h = double_int_add (prod2h, double_int_one); /* carry */
2695 if (!double_int_zero_p (prod2h)
2696 || double_int_cmp (prod2l, sizem1, true) >= 0)
2698 /* The range covers all values. */
2699 set_value_range_to_varying (vr);
2700 return;
2703 /* The following should handle the wrapping and selecting
2704 VR_ANTI_RANGE for us. */
2705 min = double_int_to_tree (expr_type, prod0l);
2706 max = double_int_to_tree (expr_type, prod3l);
2707 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
2708 return;
2711 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2712 drop to VR_VARYING. It would take more effort to compute a
2713 precise range for such a case. For example, if we have
2714 op0 == 65536 and op1 == 65536 with their ranges both being
2715 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2716 we cannot claim that the product is in ~[0,0]. Note that we
2717 are guaranteed to have vr0.type == vr1.type at this
2718 point. */
2719 if (vr0.type == VR_ANTI_RANGE
2720 && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2722 set_value_range_to_varying (vr);
2723 return;
2726 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2727 return;
2729 else if (code == RSHIFT_EXPR
2730 || code == LSHIFT_EXPR)
2732 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2733 then drop to VR_VARYING. Outside of this range we get undefined
2734 behavior from the shift operation. We cannot even trust
2735 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2736 shifts, and the operation at the tree level may be widened. */
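/* E.g. [1, 4] >> [1, 2] is handled like the other multiplicative
   operations and yields [0, 2]; a shift-count range such as [0, 37]
   on a 32-bit type drops to VR_VARYING below.  */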
2737 if (range_int_cst_p (&vr1)
2738 && compare_tree_int (vr1.min, 0) >= 0
2739 && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
2741 if (code == RSHIFT_EXPR)
2743 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2744 return;
2746 /* We can map lshifts by constants to MULT_EXPR handling. */
2747 else if (code == LSHIFT_EXPR
2748 && range_int_cst_singleton_p (&vr1))
2750 bool saved_flag_wrapv;
2751 value_range_t vr1p = VR_INITIALIZER;
2752 vr1p.type = VR_RANGE;
2753 vr1p.min
2754 = double_int_to_tree (expr_type,
2755 double_int_lshift
2756 (double_int_one,
2757 TREE_INT_CST_LOW (vr1.min),
2758 TYPE_PRECISION (expr_type),
2759 false));
2760 vr1p.max = vr1p.min;
2761 /* We have to use a wrapping multiply though as signed overflow
2762 on lshifts is implementation defined in C89. */
2763 saved_flag_wrapv = flag_wrapv;
2764 flag_wrapv = 1;
2765 extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
2766 &vr0, &vr1p);
2767 flag_wrapv = saved_flag_wrapv;
2768 return;
2771 set_value_range_to_varying (vr);
2772 return;
2774 else if (code == TRUNC_DIV_EXPR
2775 || code == FLOOR_DIV_EXPR
2776 || code == CEIL_DIV_EXPR
2777 || code == EXACT_DIV_EXPR
2778 || code == ROUND_DIV_EXPR)
2780 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2782 /* For division, if op1 has VR_RANGE but op0 does not, something
2783 can be deduced just from that range. Say [min, max] / [4, max]
2784 gives [min / 4, max / 4] range. */
2785 if (vr1.type == VR_RANGE
2786 && !symbolic_range_p (&vr1)
2787 && range_includes_zero_p (vr1.min, vr1.max) == 0)
2789 vr0.type = type = VR_RANGE;
2790 vr0.min = vrp_val_min (expr_type);
2791 vr0.max = vrp_val_max (expr_type);
2793 else
2795 set_value_range_to_varying (vr);
2796 return;
2800 /* For divisions, if the function can throw non-call exceptions, we
2801 must not eliminate a possible division by zero. */
2802 if (cfun->can_throw_non_call_exceptions
2803 && (vr1.type != VR_RANGE
2804 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2806 set_value_range_to_varying (vr);
2807 return;
2810 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2811 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2812 include 0. */
2813 if (vr0.type == VR_RANGE
2814 && (vr1.type != VR_RANGE
2815 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2817 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2818 int cmp;
2820 min = NULL_TREE;
2821 max = NULL_TREE;
2822 if (TYPE_UNSIGNED (expr_type)
2823 || value_range_nonnegative_p (&vr1))
2825 /* For unsigned division or when divisor is known
2826 to be non-negative, the range has to cover
2827 all numbers from 0 to max for positive max
2828 and all numbers from min to 0 for negative min. */
2829 cmp = compare_values (vr0.max, zero);
2830 if (cmp == -1)
2831 max = zero;
2832 else if (cmp == 0 || cmp == 1)
2833 max = vr0.max;
2834 else
2835 type = VR_VARYING;
2836 cmp = compare_values (vr0.min, zero);
2837 if (cmp == 1)
2838 min = zero;
2839 else if (cmp == 0 || cmp == -1)
2840 min = vr0.min;
2841 else
2842 type = VR_VARYING;
2844 else
2846 /* Otherwise the range is -max .. max or min .. -min
2847 depending on which bound is bigger in absolute value,
2848 as the division can change the sign. */
2849 abs_extent_range (vr, vr0.min, vr0.max);
2850 return;
2852 if (type == VR_VARYING)
2854 set_value_range_to_varying (vr);
2855 return;
2858 else
2860 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2861 return;
2864 else if (code == TRUNC_MOD_EXPR)
2866 if (vr1.type != VR_RANGE
2867 || range_includes_zero_p (vr1.min, vr1.max) != 0
2868 || vrp_val_is_min (vr1.min))
2870 set_value_range_to_varying (vr);
2871 return;
2873 type = VR_RANGE;
2874 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */
2875 max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min);
2876 if (tree_int_cst_lt (max, vr1.max))
2877 max = vr1.max;
2878 max = int_const_binop (MINUS_EXPR, max, integer_one_node);
2879 /* If the dividend is non-negative the modulus will be
2880 non-negative as well. */
2881 if (TYPE_UNSIGNED (expr_type)
2882 || value_range_nonnegative_p (&vr0))
2883 min = build_int_cst (TREE_TYPE (max), 0);
2884 else
2885 min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max);
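/* So, for instance, X % [3, 7] is narrowed to [-6, 6], or to [0, 6]
   when the dividend is known to be non-negative.  */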
2887 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
2889 bool int_cst_range0, int_cst_range1;
2890 double_int may_be_nonzero0, may_be_nonzero1;
2891 double_int must_be_nonzero0, must_be_nonzero1;
2893 int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
2894 &must_be_nonzero0);
2895 int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
2896 &must_be_nonzero1);
2898 type = VR_RANGE;
2899 if (code == BIT_AND_EXPR)
2901 double_int dmax;
2902 min = double_int_to_tree (expr_type,
2903 double_int_and (must_be_nonzero0,
2904 must_be_nonzero1));
2905 dmax = double_int_and (may_be_nonzero0, may_be_nonzero1);
2906 /* If both input ranges contain only negative values we can
2907 truncate the result range maximum to the minimum of the
2908 input range maxima. */
2909 if (int_cst_range0 && int_cst_range1
2910 && tree_int_cst_sgn (vr0.max) < 0
2911 && tree_int_cst_sgn (vr1.max) < 0)
2913 dmax = double_int_min (dmax, tree_to_double_int (vr0.max),
2914 TYPE_UNSIGNED (expr_type));
2915 dmax = double_int_min (dmax, tree_to_double_int (vr1.max),
2916 TYPE_UNSIGNED (expr_type));
2918 /* If either input range contains only non-negative values
2919 we can truncate the result range maximum to the respective
2920 maximum of the input range. */
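/* E.g. X & [0, 15] is narrowed to [0, 15] even when nothing is known
   about X.  */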
2921 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
2922 dmax = double_int_min (dmax, tree_to_double_int (vr0.max),
2923 TYPE_UNSIGNED (expr_type));
2924 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
2925 dmax = double_int_min (dmax, tree_to_double_int (vr1.max),
2926 TYPE_UNSIGNED (expr_type));
2927 max = double_int_to_tree (expr_type, dmax);
2929 else if (code == BIT_IOR_EXPR)
2931 double_int dmin;
2932 max = double_int_to_tree (expr_type,
2933 double_int_ior (may_be_nonzero0,
2934 may_be_nonzero1));
2935 dmin = double_int_ior (must_be_nonzero0, must_be_nonzero1);
2936 /* If the input ranges contain only positive values we can
2937 truncate the minimum of the result range to the maximum
2938 of the input range minima. */
2939 if (int_cst_range0 && int_cst_range1
2940 && tree_int_cst_sgn (vr0.min) >= 0
2941 && tree_int_cst_sgn (vr1.min) >= 0)
2943 dmin = double_int_max (dmin, tree_to_double_int (vr0.min),
2944 TYPE_UNSIGNED (expr_type));
2945 dmin = double_int_max (dmin, tree_to_double_int (vr1.min),
2946 TYPE_UNSIGNED (expr_type));
2948 /* If either input range contains only negative values
2949 we can truncate the minimum of the result range to the
2950 respective input range minimum. */
2951 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
2952 dmin = double_int_max (dmin, tree_to_double_int (vr0.min),
2953 TYPE_UNSIGNED (expr_type));
2954 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
2955 dmin = double_int_max (dmin, tree_to_double_int (vr1.min),
2956 TYPE_UNSIGNED (expr_type));
2957 min = double_int_to_tree (expr_type, dmin);
2959 else if (code == BIT_XOR_EXPR)
2961 double_int result_zero_bits, result_one_bits;
2962 result_zero_bits
2963 = double_int_ior (double_int_and (must_be_nonzero0,
2964 must_be_nonzero1),
2965 double_int_not
2966 (double_int_ior (may_be_nonzero0,
2967 may_be_nonzero1)));
2968 result_one_bits
2969 = double_int_ior (double_int_and
2970 (must_be_nonzero0,
2971 double_int_not (may_be_nonzero1)),
2972 double_int_and
2973 (must_be_nonzero1,
2974 double_int_not (may_be_nonzero0)));
2975 max = double_int_to_tree (expr_type,
2976 double_int_not (result_zero_bits));
2977 min = double_int_to_tree (expr_type, result_one_bits);
2978 /* If the range has all positive or all negative values the
2979 result is better than VARYING. */
2980 if (tree_int_cst_sgn (min) < 0
2981 || tree_int_cst_sgn (max) >= 0)
2983 else
2984 max = min = NULL_TREE;
2987 else
2988 gcc_unreachable ();
2990 /* If either MIN or MAX overflowed, then set the resulting range to
2991 VARYING. But we do accept an overflow infinity
2992 representation. */
2993 if (min == NULL_TREE
2994 || !is_gimple_min_invariant (min)
2995 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2996 || max == NULL_TREE
2997 || !is_gimple_min_invariant (max)
2998 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
3000 set_value_range_to_varying (vr);
3001 return;
3004 /* We punt if:
3005 1) [-INF, +INF]
3006 2) [-INF, +-INF(OVF)]
3007 3) [+-INF(OVF), +INF]
3008 4) [+-INF(OVF), +-INF(OVF)]
3009 We learn nothing when we have INF and INF(OVF) on both sides.
3010 Note that we do accept [-INF, -INF] and [+INF, +INF] without
3011 overflow. */
3012 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
3013 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
3015 set_value_range_to_varying (vr);
3016 return;
3019 cmp = compare_values (min, max);
3020 if (cmp == -2 || cmp == 1)
3022 /* If the new range has its limits swapped around (MIN > MAX),
3023 then the operation caused one of them to wrap around, mark
3024 the new range VARYING. */
3025 set_value_range_to_varying (vr);
3027 else
3028 set_value_range (vr, type, min, max, NULL);
3031 /* Extract range information from a binary expression OP0 CODE OP1 based on
3032 the ranges of each of its operands with resulting type EXPR_TYPE.
3033 The resulting range is stored in *VR. */
3035 static void
3036 extract_range_from_binary_expr (value_range_t *vr,
3037 enum tree_code code,
3038 tree expr_type, tree op0, tree op1)
3040 value_range_t vr0 = VR_INITIALIZER;
3041 value_range_t vr1 = VR_INITIALIZER;
3043 /* Get value ranges for each operand. For constant operands, create
3044 a new value range with the operand to simplify processing. */
3045 if (TREE_CODE (op0) == SSA_NAME)
3046 vr0 = *(get_value_range (op0));
3047 else if (is_gimple_min_invariant (op0))
3048 set_value_range_to_value (&vr0, op0, NULL);
3049 else
3050 set_value_range_to_varying (&vr0);
3052 if (TREE_CODE (op1) == SSA_NAME)
3053 vr1 = *(get_value_range (op1));
3054 else if (is_gimple_min_invariant (op1))
3055 set_value_range_to_value (&vr1, op1, NULL);
3056 else
3057 set_value_range_to_varying (&vr1);
3059 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
3062 /* Extract range information from a unary operation CODE based on
3063 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
3064 The resulting range is stored in *VR. */
3066 static void
3067 extract_range_from_unary_expr_1 (value_range_t *vr,
3068 enum tree_code code, tree type,
3069 value_range_t *vr0_, tree op0_type)
3071 value_range_t vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
3073 /* VRP only operates on integral and pointer types. */
3074 if (!(INTEGRAL_TYPE_P (op0_type)
3075 || POINTER_TYPE_P (op0_type))
3076 || !(INTEGRAL_TYPE_P (type)
3077 || POINTER_TYPE_P (type)))
3079 set_value_range_to_varying (vr);
3080 return;
3083 /* If VR0 is UNDEFINED, so is the result. */
3084 if (vr0.type == VR_UNDEFINED)
3086 set_value_range_to_undefined (vr);
3087 return;
3090 /* Handle operations that we express in terms of others. */
3091 if (code == PAREN_EXPR)
3093 /* PAREN_EXPR is a simple copy. */
3094 copy_value_range (vr, &vr0);
3095 return;
3097 else if (code == NEGATE_EXPR)
3099 /* -X is simply 0 - X, so re-use existing code that also handles
3100 anti-ranges fine. */
3101 value_range_t zero = VR_INITIALIZER;
3102 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3103 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3104 return;
3106 else if (code == BIT_NOT_EXPR)
3108 /* ~X is simply -1 - X, so re-use existing code that also handles
3109 anti-ranges fine. */
3110 value_range_t minusone = VR_INITIALIZER;
3111 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3112 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3113 type, &minusone, &vr0);
3114 return;
3117 /* Now canonicalize anti-ranges to ranges when they are not symbolic
3118 and express op ~[] as (op []') U (op []''). */
3119 if (vr0.type == VR_ANTI_RANGE
3120 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
3122 extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
3123 if (vrtem1.type != VR_UNDEFINED)
3125 value_range_t vrres = VR_INITIALIZER;
3126 extract_range_from_unary_expr_1 (&vrres, code, type,
3127 &vrtem1, op0_type);
3128 vrp_meet (vr, &vrres);
3130 return;
3133 if (CONVERT_EXPR_CODE_P (code))
3135 tree inner_type = op0_type;
3136 tree outer_type = type;
3138 /* If the expression evaluates to a pointer, we are only interested in
3139 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
3140 if (POINTER_TYPE_P (type))
3142 if (range_is_nonnull (&vr0))
3143 set_value_range_to_nonnull (vr, type);
3144 else if (range_is_null (&vr0))
3145 set_value_range_to_null (vr, type);
3146 else
3147 set_value_range_to_varying (vr);
3148 return;
3151 /* If VR0 is varying and we increase the type precision, assume
3152 a full range for the following transformation. */
3153 if (vr0.type == VR_VARYING
3154 && INTEGRAL_TYPE_P (inner_type)
3155 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
3157 vr0.type = VR_RANGE;
3158 vr0.min = TYPE_MIN_VALUE (inner_type);
3159 vr0.max = TYPE_MAX_VALUE (inner_type);
3162 /* If VR0 is a constant range or anti-range and the conversion is
3163 not truncating we can convert the min and max values and
3164 canonicalize the resulting range. Otherwise we can do the
3165 conversion if the size of the range is less than what the
3166 precision of the target type can represent and the range is
3167 not an anti-range. */
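/* E.g. the int range [0, 200] converted to unsigned char keeps
   [0, 200] because its span fits in 8 bits, whereas [0, 300] does not
   qualify and falls back to VR_VARYING.  */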
3168 if ((vr0.type == VR_RANGE
3169 || vr0.type == VR_ANTI_RANGE)
3170 && TREE_CODE (vr0.min) == INTEGER_CST
3171 && TREE_CODE (vr0.max) == INTEGER_CST
3172 && (!is_overflow_infinity (vr0.min)
3173 || (vr0.type == VR_RANGE
3174 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3175 && needs_overflow_infinity (outer_type)
3176 && supports_overflow_infinity (outer_type)))
3177 && (!is_overflow_infinity (vr0.max)
3178 || (vr0.type == VR_RANGE
3179 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3180 && needs_overflow_infinity (outer_type)
3181 && supports_overflow_infinity (outer_type)))
3182 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
3183 || (vr0.type == VR_RANGE
3184 && integer_zerop (int_const_binop (RSHIFT_EXPR,
3185 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
3186 size_int (TYPE_PRECISION (outer_type)))))))
3188 tree new_min, new_max;
3189 if (is_overflow_infinity (vr0.min))
3190 new_min = negative_overflow_infinity (outer_type);
3191 else
3192 new_min = force_fit_type_double (outer_type,
3193 tree_to_double_int (vr0.min),
3194 0, false);
3195 if (is_overflow_infinity (vr0.max))
3196 new_max = positive_overflow_infinity (outer_type);
3197 else
3198 new_max = force_fit_type_double (outer_type,
3199 tree_to_double_int (vr0.max),
3200 0, false);
3201 set_and_canonicalize_value_range (vr, vr0.type,
3202 new_min, new_max, NULL);
3203 return;
3206 set_value_range_to_varying (vr);
3207 return;
3209 else if (code == ABS_EXPR)
3211 tree min, max;
3212 int cmp;
3214 /* Pass through vr0 in the easy cases. */
3215 if (TYPE_UNSIGNED (type)
3216 || value_range_nonnegative_p (&vr0))
3218 copy_value_range (vr, &vr0);
3219 return;
3222 /* For the remaining varying or symbolic ranges we can't do anything
3223 useful. */
3224 if (vr0.type == VR_VARYING
3225 || symbolic_range_p (&vr0))
3227 set_value_range_to_varying (vr);
3228 return;
3231 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3232 useful range. */
3233 if (!TYPE_OVERFLOW_UNDEFINED (type)
3234 && ((vr0.type == VR_RANGE
3235 && vrp_val_is_min (vr0.min))
3236 || (vr0.type == VR_ANTI_RANGE
3237 && !vrp_val_is_min (vr0.min))))
3239 set_value_range_to_varying (vr);
3240 return;
3243 /* ABS_EXPR may flip the range around, if the original range
3244 included negative values. */
3245 if (is_overflow_infinity (vr0.min))
3246 min = positive_overflow_infinity (type);
3247 else if (!vrp_val_is_min (vr0.min))
3248 min = fold_unary_to_constant (code, type, vr0.min);
3249 else if (!needs_overflow_infinity (type))
3250 min = TYPE_MAX_VALUE (type);
3251 else if (supports_overflow_infinity (type))
3252 min = positive_overflow_infinity (type);
3253 else
3255 set_value_range_to_varying (vr);
3256 return;
3259 if (is_overflow_infinity (vr0.max))
3260 max = positive_overflow_infinity (type);
3261 else if (!vrp_val_is_min (vr0.max))
3262 max = fold_unary_to_constant (code, type, vr0.max);
3263 else if (!needs_overflow_infinity (type))
3264 max = TYPE_MAX_VALUE (type);
3265 else if (supports_overflow_infinity (type)
3266 /* We shouldn't generate [+INF, +INF] as set_value_range
3267 doesn't like this and ICEs. */
3268 && !is_positive_overflow_infinity (min))
3269 max = positive_overflow_infinity (type);
3270 else
3272 set_value_range_to_varying (vr);
3273 return;
3276 cmp = compare_values (min, max);
3278 /* If a VR_ANTI_RANGE contains zero, then we have
3279 ~[-INF, min(MIN, MAX)]. */
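/* E.g. (when signed overflow is undefined) ABS (~[-2, 3]) becomes
   ~[-INF, 2], i.e. the result is known to be at least 3.  */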
3280 if (vr0.type == VR_ANTI_RANGE)
3282 if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3284 /* Take the lower of the two values. */
3285 if (cmp != 1)
3286 max = min;
3288 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3289 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3290 flag_wrapv is set and the original anti-range doesn't include
3291 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3292 if (TYPE_OVERFLOW_WRAPS (type))
3294 tree type_min_value = TYPE_MIN_VALUE (type);
3296 min = (vr0.min != type_min_value
3297 ? int_const_binop (PLUS_EXPR, type_min_value,
3298 integer_one_node)
3299 : type_min_value);
3301 else
3303 if (overflow_infinity_range_p (&vr0))
3304 min = negative_overflow_infinity (type);
3305 else
3306 min = TYPE_MIN_VALUE (type);
3309 else
3311 /* All else has failed, so create the range [0, INF], even for
3312 flag_wrapv since TYPE_MIN_VALUE is in the original
3313 anti-range. */
3314 vr0.type = VR_RANGE;
3315 min = build_int_cst (type, 0);
3316 if (needs_overflow_infinity (type))
3318 if (supports_overflow_infinity (type))
3319 max = positive_overflow_infinity (type);
3320 else
3322 set_value_range_to_varying (vr);
3323 return;
3326 else
3327 max = TYPE_MAX_VALUE (type);
3331 /* If the range contains zero then we know that the minimum value in the
3332 range will be zero. */
3333 else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3335 if (cmp == 1)
3336 max = min;
3337 min = build_int_cst (type, 0);
3339 else
3341 /* If the range was reversed, swap MIN and MAX. */
3342 if (cmp == 1)
3344 tree t = min;
3345 min = max;
3346 max = t;
3350 cmp = compare_values (min, max);
3351 if (cmp == -2 || cmp == 1)
3353 /* If the new range has its limits swapped around (MIN > MAX),
3354 then the operation caused one of them to wrap around, mark
3355 the new range VARYING. */
3356 set_value_range_to_varying (vr);
3358 else
3359 set_value_range (vr, vr0.type, min, max, NULL);
3360 return;
3363 /* For unhandled operations fall back to varying. */
3364 set_value_range_to_varying (vr);
3365 return;
3369 /* Extract range information from a unary expression CODE OP0 based on
3370 the range of its operand with resulting type TYPE.
3371 The resulting range is stored in *VR. */
3373 static void
3374 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
3375 tree type, tree op0)
3377 value_range_t vr0 = VR_INITIALIZER;
3379 /* Get value ranges for the operand. For constant operands, create
3380 a new value range with the operand to simplify processing. */
3381 if (TREE_CODE (op0) == SSA_NAME)
3382 vr0 = *(get_value_range (op0));
3383 else if (is_gimple_min_invariant (op0))
3384 set_value_range_to_value (&vr0, op0, NULL);
3385 else
3386 set_value_range_to_varying (&vr0);
3388 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3392 /* Extract range information from a conditional expression STMT based on
3393 the ranges of each of its operands and the expression code. */
3395 static void
3396 extract_range_from_cond_expr (value_range_t *vr, gimple stmt)
3398 tree op0, op1;
3399 value_range_t vr0 = VR_INITIALIZER;
3400 value_range_t vr1 = VR_INITIALIZER;
3402 /* Get value ranges for each operand. For constant operands, create
3403 a new value range with the operand to simplify processing. */
3404 op0 = gimple_assign_rhs2 (stmt);
3405 if (TREE_CODE (op0) == SSA_NAME)
3406 vr0 = *(get_value_range (op0));
3407 else if (is_gimple_min_invariant (op0))
3408 set_value_range_to_value (&vr0, op0, NULL);
3409 else
3410 set_value_range_to_varying (&vr0);
3412 op1 = gimple_assign_rhs3 (stmt);
3413 if (TREE_CODE (op1) == SSA_NAME)
3414 vr1 = *(get_value_range (op1));
3415 else if (is_gimple_min_invariant (op1))
3416 set_value_range_to_value (&vr1, op1, NULL);
3417 else
3418 set_value_range_to_varying (&vr1);
3420 /* The resulting value range is the union of the operand ranges. */
3421 copy_value_range (vr, &vr0);
3422 vrp_meet (vr, &vr1);
3426 /* Extract range information from a comparison expression EXPR based
3427 on the ranges of its operands and the expression code. */
3429 static void
3430 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3431 tree type, tree op0, tree op1)
3433 bool sop = false;
3434 tree val;
3436 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3437 NULL);
3439 /* A disadvantage of using a special infinity as an overflow
3440 representation is that we lose the ability to record overflow
3441 when we don't have an infinity. So we have to ignore a result
3442 which relies on overflow. */
3444 if (val && !is_overflow_infinity (val) && !sop)
3446 /* Since this expression was found on the RHS of an assignment,
3447 its type may be different from _Bool. Convert VAL to EXPR's
3448 type. */
3449 val = fold_convert (type, val);
3450 if (is_gimple_min_invariant (val))
3451 set_value_range_to_value (vr, val, vr->equiv);
3452 else
3453 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3455 else
3456 /* The result of a comparison is always true or false. */
3457 set_value_range_to_truthvalue (vr, type);
3460 /* Try to derive a nonnegative or nonzero range out of STMT relying
3461 primarily on generic routines in fold in conjunction with range data.
3462 Store the result in *VR. */
3464 static void
3465 extract_range_basic (value_range_t *vr, gimple stmt)
3467 bool sop = false;
3468 tree type = gimple_expr_type (stmt);
3470 if (INTEGRAL_TYPE_P (type)
3471 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3472 set_value_range_to_nonnegative (vr, type,
3473 sop || stmt_overflow_infinity (stmt));
3474 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3475 && !sop)
3476 set_value_range_to_nonnull (vr, type);
3477 else
3478 set_value_range_to_varying (vr);
3482 /* Try to compute a useful range out of assignment STMT and store it
3483 in *VR. */
3485 static void
3486 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3488 enum tree_code code = gimple_assign_rhs_code (stmt);
3490 if (code == ASSERT_EXPR)
3491 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3492 else if (code == SSA_NAME)
3493 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3494 else if (TREE_CODE_CLASS (code) == tcc_binary)
3495 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3496 gimple_expr_type (stmt),
3497 gimple_assign_rhs1 (stmt),
3498 gimple_assign_rhs2 (stmt));
3499 else if (TREE_CODE_CLASS (code) == tcc_unary)
3500 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3501 gimple_expr_type (stmt),
3502 gimple_assign_rhs1 (stmt));
3503 else if (code == COND_EXPR)
3504 extract_range_from_cond_expr (vr, stmt);
3505 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3506 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3507 gimple_expr_type (stmt),
3508 gimple_assign_rhs1 (stmt),
3509 gimple_assign_rhs2 (stmt));
3510 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3511 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3512 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3513 else
3514 set_value_range_to_varying (vr);
3516 if (vr->type == VR_VARYING)
3517 extract_range_basic (vr, stmt);
3520 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3521 would be profitable to adjust VR using scalar evolution information
3522 for VAR. If so, update VR with the new limits. */
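/* For instance, an induction variable starting at 0 with step 1 in a
   loop whose latch is known to execute at most 9 times can have its
   range narrowed to [0, 9].  */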
3524 static void
3525 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3526 gimple stmt, tree var)
3528 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3529 enum ev_direction dir;
3531 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3532 better opportunities than a regular range, but I'm not sure. */
3533 if (vr->type == VR_ANTI_RANGE)
3534 return;
3536 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3538 /* Like in PR19590, scev can return a constant function. */
3539 if (is_gimple_min_invariant (chrec))
3541 set_value_range_to_value (vr, chrec, vr->equiv);
3542 return;
3545 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3546 return;
3548 init = initial_condition_in_loop_num (chrec, loop->num);
3549 tem = op_with_constant_singleton_value_range (init);
3550 if (tem)
3551 init = tem;
3552 step = evolution_part_in_loop_num (chrec, loop->num);
3553 tem = op_with_constant_singleton_value_range (step);
3554 if (tem)
3555 step = tem;
3557 /* If STEP is symbolic, we can't know whether INIT will be the
3558 minimum or maximum value in the range. Also, unless INIT is
3559 a simple expression, compare_values and possibly other functions
3560 in tree-vrp won't be able to handle it. */
3561 if (step == NULL_TREE
3562 || !is_gimple_min_invariant (step)
3563 || !valid_value_p (init))
3564 return;
3566 dir = scev_direction (chrec);
3567 if (/* Do not adjust ranges if we do not know whether the iv increases
3568 or decreases, ... */
3569 dir == EV_DIR_UNKNOWN
3570 /* ... or if it may wrap. */
3571 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3572 true))
3573 return;
3575 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3576 negative_overflow_infinity and positive_overflow_infinity,
3577 because we have concluded that the loop probably does not
3578 wrap. */
3580 type = TREE_TYPE (var);
3581 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3582 tmin = lower_bound_in_type (type, type);
3583 else
3584 tmin = TYPE_MIN_VALUE (type);
3585 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3586 tmax = upper_bound_in_type (type, type);
3587 else
3588 tmax = TYPE_MAX_VALUE (type);
3590 /* Try to use estimated number of iterations for the loop to constrain the
3591 final value in the evolution. */
3592 if (TREE_CODE (step) == INTEGER_CST
3593 && is_gimple_val (init)
3594 && (TREE_CODE (init) != SSA_NAME
3595 || get_value_range (init)->type == VR_RANGE))
3597 double_int nit;
3599 /* We are only entering here for loop header PHI nodes, so using
3600 the number of latch executions is the correct thing to use. */
3601 if (max_loop_iterations (loop, &nit))
3603 value_range_t maxvr = VR_INITIALIZER;
3604 double_int dtmp;
3605 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
3606 int overflow = 0;
3608 dtmp = double_int_mul_with_sign (tree_to_double_int (step), nit,
3609 unsigned_p, &overflow);
3610 /* If the multiplication overflowed we can't do a meaningful
3611 adjustment. Likewise if the result doesn't fit in the type
3612 of the induction variable. For a signed type we have to
3613 check whether the result has the expected signedness which
3614 is that of the step as number of iterations is unsigned. */
3615 if (!overflow
3616 && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
3617 && (unsigned_p
3618 || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
3620 tem = double_int_to_tree (TREE_TYPE (init), dtmp);
3621 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
3622 TREE_TYPE (init), init, tem);
3623 /* Likewise if the addition did. */
3624 if (maxvr.type == VR_RANGE)
3626 tmin = maxvr.min;
3627 tmax = maxvr.max;
3633 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3635 min = tmin;
3636 max = tmax;
3638 /* For VARYING or UNDEFINED ranges, just about anything we get
3639 from scalar evolutions should be better. */
3641 if (dir == EV_DIR_DECREASES)
3642 max = init;
3643 else
3644 min = init;
3646 /* If we would create an invalid range, then just assume we
3647 know absolutely nothing. This may be over-conservative,
3648 but it's clearly safe, and should happen only in unreachable
3649 parts of code, or for invalid programs. */
3650 if (compare_values (min, max) == 1)
3651 return;
3653 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3655 else if (vr->type == VR_RANGE)
3657 min = vr->min;
3658 max = vr->max;
3660 if (dir == EV_DIR_DECREASES)
3662 /* INIT is the maximum value. If INIT is lower than VR->MAX
3663 but no smaller than VR->MIN, set VR->MAX to INIT. */
3664 if (compare_values (init, max) == -1)
3665 max = init;
3667 /* According to the loop information, the variable does not
3668 overflow. If we think it does, probably because of an
3669 overflow due to arithmetic on a different INF value,
3670 reset now. */
3671 if (is_negative_overflow_infinity (min)
3672 || compare_values (min, tmin) == -1)
3673 min = tmin;
3676 else
3678 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3679 if (compare_values (init, min) == 1)
3680 min = init;
3682 if (is_positive_overflow_infinity (max)
3683 || compare_values (tmax, max) == -1)
3684 max = tmax;
3687 /* If we just created an invalid range with the minimum
3688 greater than the maximum, we fail conservatively.
3689 This should happen only in unreachable
3690 parts of code, or for invalid programs. */
3691 if (compare_values (min, max) == 1)
3692 return;
3694 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3698 /* Return true if VAR may overflow at STMT. This checks any available
3699 loop information to see if we can determine that VAR does not
3700 overflow. */
3702 static bool
3703 vrp_var_may_overflow (tree var, gimple stmt)
3705 struct loop *l;
3706 tree chrec, init, step;
3708 if (current_loops == NULL)
3709 return true;
3711 l = loop_containing_stmt (stmt);
3712 if (l == NULL
3713 || !loop_outer (l))
3714 return true;
3716 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3717 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3718 return true;
3720 init = initial_condition_in_loop_num (chrec, l->num);
3721 step = evolution_part_in_loop_num (chrec, l->num);
3723 if (step == NULL_TREE
3724 || !is_gimple_min_invariant (step)
3725 || !valid_value_p (init))
3726 return true;
3728 /* If we get here, we know something useful about VAR based on the
3729 loop information. If it wraps, it may overflow. */
3731 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3732 true))
3733 return true;
3735 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3737 print_generic_expr (dump_file, var, 0);
3738 fprintf (dump_file, ": loop information indicates does not overflow\n");
3741 return false;
3745 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3747 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3748 all the values in the ranges.
3750 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3752 - Return NULL_TREE if it is not always possible to determine the
3753 value of the comparison.
3755 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3756 overflow infinity was used in the test. */
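/* E.g. [1, 5] < [6, 10] is always true, whereas [1, 5] < [3, 10]
   cannot be decided and yields NULL_TREE.  */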
3759 static tree
3760 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
3761 bool *strict_overflow_p)
3763 /* VARYING or UNDEFINED ranges cannot be compared. */
3764 if (vr0->type == VR_VARYING
3765 || vr0->type == VR_UNDEFINED
3766 || vr1->type == VR_VARYING
3767 || vr1->type == VR_UNDEFINED)
3768 return NULL_TREE;
3770 /* Anti-ranges need to be handled separately. */
3771 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
3773 /* If both are anti-ranges, then we cannot compute any
3774 comparison. */
3775 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
3776 return NULL_TREE;
3778 /* These comparisons are never statically computable. */
3779 if (comp == GT_EXPR
3780 || comp == GE_EXPR
3781 || comp == LT_EXPR
3782 || comp == LE_EXPR)
3783 return NULL_TREE;
3785 /* Equality can be computed only between a range and an
3786 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
3787 if (vr0->type == VR_RANGE)
3789 /* To simplify processing, make VR0 the anti-range. */
3790 value_range_t *tmp = vr0;
3791 vr0 = vr1;
3792 vr1 = tmp;
3795 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
3797 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
3798 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
3799 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3801 return NULL_TREE;
3804 if (!usable_range_p (vr0, strict_overflow_p)
3805 || !usable_range_p (vr1, strict_overflow_p))
3806 return NULL_TREE;
3808 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3809 operands around and change the comparison code. */
3810 if (comp == GT_EXPR || comp == GE_EXPR)
3812 value_range_t *tmp;
3813 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3814 tmp = vr0;
3815 vr0 = vr1;
3816 vr1 = tmp;
3819 if (comp == EQ_EXPR)
3821 /* Equality may only be computed if both ranges represent
3822 exactly one value. */
3823 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3824 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3826 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3827 strict_overflow_p);
3828 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3829 strict_overflow_p);
3830 if (cmp_min == 0 && cmp_max == 0)
3831 return boolean_true_node;
3832 else if (cmp_min != -2 && cmp_max != -2)
3833 return boolean_false_node;
3835 /* If [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] do not intersect, then V0 != V1. */
3836 else if (compare_values_warnv (vr0->min, vr1->max,
3837 strict_overflow_p) == 1
3838 || compare_values_warnv (vr1->min, vr0->max,
3839 strict_overflow_p) == 1)
3840 return boolean_false_node;
3842 return NULL_TREE;
3844 else if (comp == NE_EXPR)
3846 int cmp1, cmp2;
3848 /* If VR0 is completely to the left or completely to the right
3849 of VR1, they are always different. Notice that we need to
3850 make sure that both comparisons yield similar results to
3851 avoid comparing values that cannot be compared at
3852 compile-time. */
3853 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3854 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3855 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3856 return boolean_true_node;
3858 /* If VR0 and VR1 represent a single value and are identical,
3859 return false. */
3860 else if (compare_values_warnv (vr0->min, vr0->max,
3861 strict_overflow_p) == 0
3862 && compare_values_warnv (vr1->min, vr1->max,
3863 strict_overflow_p) == 0
3864 && compare_values_warnv (vr0->min, vr1->min,
3865 strict_overflow_p) == 0
3866 && compare_values_warnv (vr0->max, vr1->max,
3867 strict_overflow_p) == 0)
3868 return boolean_false_node;
3870 /* Otherwise, they may or may not be different. */
3871 else
3872 return NULL_TREE;
3874 else if (comp == LT_EXPR || comp == LE_EXPR)
3876 int tst;
3878 /* If VR0 is to the left of VR1, return true. */
3879 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3880 if ((comp == LT_EXPR && tst == -1)
3881 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3883 if (overflow_infinity_range_p (vr0)
3884 || overflow_infinity_range_p (vr1))
3885 *strict_overflow_p = true;
3886 return boolean_true_node;
3889 /* If VR0 is to the right of VR1, return false. */
3890 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3891 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3892 || (comp == LE_EXPR && tst == 1))
3894 if (overflow_infinity_range_p (vr0)
3895 || overflow_infinity_range_p (vr1))
3896 *strict_overflow_p = true;
3897 return boolean_false_node;
3900 /* Otherwise, we don't know. */
3901 return NULL_TREE;
3904 gcc_unreachable ();
3908 /* Given a value range VR, a value VAL and a comparison code COMP, return
3909 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3910 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3911 always returns false. Return NULL_TREE if it is not always
3912 possible to determine the value of the comparison. Also set
3913 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3914 infinity was used in the test. */
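/* Illustrative example (added for exposition, not part of the original
   sources): with VR = [0, 9] and VAL = 10, COMP == LT_EXPR yields
   boolean_true_node since VR->max compares below VAL; with VR = [0, 10]
   neither direction can be decided and NULL_TREE is returned.  */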
3916 static tree
3917 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
3918 bool *strict_overflow_p)
3920 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3921 return NULL_TREE;
3923 /* Anti-ranges need to be handled separately. */
3924 if (vr->type == VR_ANTI_RANGE)
3926 /* For anti-ranges, the only predicates that we can compute at
3927 compile time are equality and inequality. */
3928 if (comp == GT_EXPR
3929 || comp == GE_EXPR
3930 || comp == LT_EXPR
3931 || comp == LE_EXPR)
3932 return NULL_TREE;
3934 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3935 if (value_inside_range (val, vr->min, vr->max) == 1)
3936 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3938 return NULL_TREE;
3941 if (!usable_range_p (vr, strict_overflow_p))
3942 return NULL_TREE;
3944 if (comp == EQ_EXPR)
3946 /* EQ_EXPR may only be computed if VR represents exactly
3947 one value. */
3948 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
3950 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
3951 if (cmp == 0)
3952 return boolean_true_node;
3953 else if (cmp == -1 || cmp == 1 || cmp == 2)
3954 return boolean_false_node;
3956 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
3957 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
3958 return boolean_false_node;
3960 return NULL_TREE;
3962 else if (comp == NE_EXPR)
3964 /* If VAL is not inside VR, then they are always different. */
3965 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
3966 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
3967 return boolean_true_node;
3969 /* If VR represents exactly one value equal to VAL, then return
3970 false. */
3971 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3972 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
3973 return boolean_false_node;
3975 /* Otherwise, they may or may not be different. */
3976 return NULL_TREE;
3978 else if (comp == LT_EXPR || comp == LE_EXPR)
3980 int tst;
3982 /* If VR is to the left of VAL, return true. */
3983 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3984 if ((comp == LT_EXPR && tst == -1)
3985 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3987 if (overflow_infinity_range_p (vr))
3988 *strict_overflow_p = true;
3989 return boolean_true_node;
3992 /* If VR is to the right of VAL, return false. */
3993 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3994 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3995 || (comp == LE_EXPR && tst == 1))
3997 if (overflow_infinity_range_p (vr))
3998 *strict_overflow_p = true;
3999 return boolean_false_node;
4002 /* Otherwise, we don't know. */
4003 return NULL_TREE;
4005 else if (comp == GT_EXPR || comp == GE_EXPR)
4007 int tst;
4009 /* If VR is to the right of VAL, return true. */
4010 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4011 if ((comp == GT_EXPR && tst == 1)
4012 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
4014 if (overflow_infinity_range_p (vr))
4015 *strict_overflow_p = true;
4016 return boolean_true_node;
4019 /* If VR is to the left of VAL, return false. */
4020 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4021 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
4022 || (comp == GE_EXPR && tst == -1))
4024 if (overflow_infinity_range_p (vr))
4025 *strict_overflow_p = true;
4026 return boolean_false_node;
4029 /* Otherwise, we don't know. */
4030 return NULL_TREE;
4033 gcc_unreachable ();
4037 /* Debugging dumps. */
4039 void dump_value_range (FILE *, value_range_t *);
4040 void debug_value_range (value_range_t *);
4041 void dump_all_value_ranges (FILE *);
4042 void debug_all_value_ranges (void);
4043 void dump_vr_equiv (FILE *, bitmap);
4044 void debug_vr_equiv (bitmap);
4047 /* Dump value range VR to FILE. */
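/* Example of the output format (added for exposition, not part of the
   original sources): a range prints as "[1, 10]", an anti-range as
   "~[0, 0]", and the special lattice values as "UNDEFINED" or "VARYING";
   "-INF"/"+INF" denote the type's extreme values and "(OVF)" marks an
   overflow infinity, e.g. "[0, +INF(OVF)]".  */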
4049 void
4050 dump_value_range (FILE *file, value_range_t *vr)
4052 if (vr == NULL)
4053 fprintf (file, "[]");
4054 else if (vr->type == VR_UNDEFINED)
4055 fprintf (file, "UNDEFINED");
4056 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4058 tree type = TREE_TYPE (vr->min);
4060 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
4062 if (is_negative_overflow_infinity (vr->min))
4063 fprintf (file, "-INF(OVF)");
4064 else if (INTEGRAL_TYPE_P (type)
4065 && !TYPE_UNSIGNED (type)
4066 && vrp_val_is_min (vr->min))
4067 fprintf (file, "-INF");
4068 else
4069 print_generic_expr (file, vr->min, 0);
4071 fprintf (file, ", ");
4073 if (is_positive_overflow_infinity (vr->max))
4074 fprintf (file, "+INF(OVF)");
4075 else if (INTEGRAL_TYPE_P (type)
4076 && vrp_val_is_max (vr->max))
4077 fprintf (file, "+INF");
4078 else
4079 print_generic_expr (file, vr->max, 0);
4081 fprintf (file, "]");
4083 if (vr->equiv)
4085 bitmap_iterator bi;
4086 unsigned i, c = 0;
4088 fprintf (file, " EQUIVALENCES: { ");
4090 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4092 print_generic_expr (file, ssa_name (i), 0);
4093 fprintf (file, " ");
4094 c++;
4097 fprintf (file, "} (%u elements)", c);
4100 else if (vr->type == VR_VARYING)
4101 fprintf (file, "VARYING");
4102 else
4103 fprintf (file, "INVALID RANGE");
4107 /* Dump value range VR to stderr. */
4109 DEBUG_FUNCTION void
4110 debug_value_range (value_range_t *vr)
4112 dump_value_range (stderr, vr);
4113 fprintf (stderr, "\n");
4117 /* Dump value ranges of all SSA_NAMEs to FILE. */
4119 void
4120 dump_all_value_ranges (FILE *file)
4122 size_t i;
4124 for (i = 0; i < num_vr_values; i++)
4126 if (vr_value[i])
4128 print_generic_expr (file, ssa_name (i), 0);
4129 fprintf (file, ": ");
4130 dump_value_range (file, vr_value[i]);
4131 fprintf (file, "\n");
4135 fprintf (file, "\n");
4139 /* Dump all value ranges to stderr. */
4141 DEBUG_FUNCTION void
4142 debug_all_value_ranges (void)
4144 dump_all_value_ranges (stderr);
4148 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4149 create a new SSA name N and return the assertion assignment
4150 'V = ASSERT_EXPR <V, V OP W>'. */
4152 static gimple
4153 build_assert_expr_for (tree cond, tree v)
4155 tree a;
4156 gimple assertion;
4158 gcc_assert (TREE_CODE (v) == SSA_NAME
4159 && COMPARISON_CLASS_P (cond));
4161 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
4162 assertion = gimple_build_assign (NULL_TREE, a);
4164 /* The new ASSERT_EXPR creates a new SSA name that replaces the
4165 operand of the ASSERT_EXPR. Create it so the new name and the old one
4166 are registered in the replacement table so that we can fix the SSA web
4167 after adding all the ASSERT_EXPRs. */
4168 create_new_def_for (v, assertion, NULL);
4170 return assertion;
4174 /* Return true if STMT is a conditional predicate involving floating
4175 point values. */
4177 static inline bool
4178 fp_predicate (gimple stmt)
4180 GIMPLE_CHECK (stmt, GIMPLE_COND);
4182 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4186 /* If the range of values taken by OP can be inferred after STMT executes,
4187 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4188 describe the inferred range. Return true if a range could be
4189 inferred. */
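/* Illustrative example (added for exposition, not part of the original
   sources; p_7 is a made-up SSA name): for a store such as *p_7 = 0,
   with -fdelete-null-pointer-checks enabled, the dereference lets us
   infer p_7 != 0, so *COMP_CODE_P is set to NE_EXPR and *VAL_P to a
   null pointer constant of p_7's type.  */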
4191 static bool
4192 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
4194 *val_p = NULL_TREE;
4195 *comp_code_p = ERROR_MARK;
4197 /* Do not attempt to infer anything in names that flow through
4198 abnormal edges. */
4199 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4200 return false;
4202 /* Similarly, don't infer anything from statements that may throw
4203 exceptions. */
4204 if (stmt_could_throw_p (stmt))
4205 return false;
4207 /* If STMT is the last statement of a basic block with no
4208 successors, there is no point inferring anything about any of its
4209 operands. We would not be able to find a proper insertion point
4210 for the assertion, anyway. */
4211 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4212 return false;
4214 /* We can only assume that a pointer dereference will yield
4215 non-NULL if -fdelete-null-pointer-checks is enabled. */
4216 if (flag_delete_null_pointer_checks
4217 && POINTER_TYPE_P (TREE_TYPE (op))
4218 && gimple_code (stmt) != GIMPLE_ASM)
4220 unsigned num_uses, num_loads, num_stores;
4222 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
4223 if (num_loads + num_stores > 0)
4225 *val_p = build_int_cst (TREE_TYPE (op), 0);
4226 *comp_code_p = NE_EXPR;
4227 return true;
4231 return false;
4235 void dump_asserts_for (FILE *, tree);
4236 void debug_asserts_for (tree);
4237 void dump_all_asserts (FILE *);
4238 void debug_all_asserts (void);
4240 /* Dump all the registered assertions for NAME to FILE. */
4242 void
4243 dump_asserts_for (FILE *file, tree name)
4245 assert_locus_t loc;
4247 fprintf (file, "Assertions to be inserted for ");
4248 print_generic_expr (file, name, 0);
4249 fprintf (file, "\n");
4251 loc = asserts_for[SSA_NAME_VERSION (name)];
4252 while (loc)
4254 fprintf (file, "\t");
4255 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4256 fprintf (file, "\n\tBB #%d", loc->bb->index);
4257 if (loc->e)
4259 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4260 loc->e->dest->index);
4261 dump_edge_info (file, loc->e, dump_flags, 0);
4263 fprintf (file, "\n\tPREDICATE: ");
4264 print_generic_expr (file, name, 0);
4265 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
4266 print_generic_expr (file, loc->val, 0);
4267 fprintf (file, "\n\n");
4268 loc = loc->next;
4271 fprintf (file, "\n");
4275 /* Dump all the registered assertions for NAME to stderr. */
4277 DEBUG_FUNCTION void
4278 debug_asserts_for (tree name)
4280 dump_asserts_for (stderr, name);
4284 /* Dump all the registered assertions for all the names to FILE. */
4286 void
4287 dump_all_asserts (FILE *file)
4289 unsigned i;
4290 bitmap_iterator bi;
4292 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4293 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4294 dump_asserts_for (file, ssa_name (i));
4295 fprintf (file, "\n");
4299 /* Dump all the registered assertions for all the names to stderr. */
4301 DEBUG_FUNCTION void
4302 debug_all_asserts (void)
4304 dump_all_asserts (stderr);
4308 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4309 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4310 E->DEST, then register this location as a possible insertion point
4311 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4313 BB, E and SI provide the exact insertion point for the new
4314 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4315 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4316 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4317 must not be NULL. */
4319 static void
4320 register_new_assert_for (tree name, tree expr,
4321 enum tree_code comp_code,
4322 tree val,
4323 basic_block bb,
4324 edge e,
4325 gimple_stmt_iterator si)
4327 assert_locus_t n, loc, last_loc;
4328 basic_block dest_bb;
4330 gcc_checking_assert (bb == NULL || e == NULL);
4332 if (e == NULL)
4333 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4334 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4336 /* Never build an assert comparing against an integer constant with
4337 TREE_OVERFLOW set. This confuses our undefined overflow warning
4338 machinery. */
4339 if (TREE_CODE (val) == INTEGER_CST
4340 && TREE_OVERFLOW (val))
4341 val = build_int_cst_wide (TREE_TYPE (val),
4342 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
4344 /* The new assertion A will be inserted at BB or E. We need to
4345 determine if the new location is dominated by a previously
4346 registered location for A. If we are doing an edge insertion,
4347 assume that A will be inserted at E->DEST. Note that this is not
4348 necessarily true.
4350 If E is a critical edge, it will be split. But even if E is
4351 split, the new block will dominate the same set of blocks that
4352 E->DEST dominates.
4354 The reverse, however, is not true: blocks dominated by E->DEST
4355 will not be dominated by the new block created to split E. So,
4356 if the insertion location is on a critical edge, we will not use
4357 the new location to move another assertion previously registered
4358 at a block dominated by E->DEST. */
4359 dest_bb = (bb) ? bb : e->dest;
4361 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4362 VAL at a block dominating DEST_BB, then we don't need to insert a new
4363 one. Similarly, if the same assertion already exists at a block
4364 dominated by DEST_BB and the new location is not on a critical
4365 edge, then update the existing location for the assertion (i.e.,
4366 move the assertion up in the dominance tree).
4368 Note, this is implemented as a simple linked list because there
4369 should not be more than a handful of assertions registered per
4370 name. If this becomes a performance problem, a table hashed by
4371 COMP_CODE and VAL could be implemented. */
4372 loc = asserts_for[SSA_NAME_VERSION (name)];
4373 last_loc = loc;
4374 while (loc)
4376 if (loc->comp_code == comp_code
4377 && (loc->val == val
4378 || operand_equal_p (loc->val, val, 0))
4379 && (loc->expr == expr
4380 || operand_equal_p (loc->expr, expr, 0)))
4382 /* If the assertion NAME COMP_CODE VAL has already been
4383 registered at a basic block that dominates DEST_BB, then
4384 we don't need to insert the same assertion again. Note
4385 that we don't check strict dominance here to avoid
4386 replicating the same assertion inside the same basic
4387 block more than once (e.g., when a pointer is
4388 dereferenced several times inside a block).
4390 An exception to this rule is edge insertions. If the
4391 new assertion is to be inserted on edge E, then it will
4392 dominate all the other insertions that we may want to
4393 insert in DEST_BB. So, if we are doing an edge
4394 insertion, don't do this dominance check. */
4395 if (e == NULL
4396 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))
4397 return;
4399 /* Otherwise, if E is not a critical edge and DEST_BB
4400 dominates the existing location for the assertion, move
4401 the assertion up in the dominance tree by updating its
4402 location information. */
4403 if ((e == NULL || !EDGE_CRITICAL_P (e))
4404 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4406 loc->bb = dest_bb;
4407 loc->e = e;
4408 loc->si = si;
4409 return;
4413 /* Update the last node of the list and move to the next one. */
4414 last_loc = loc;
4415 loc = loc->next;
4418 /* If we didn't find an assertion already registered for
4419 NAME COMP_CODE VAL, add a new one at the end of the list of
4420 assertions associated with NAME. */
4421 n = XNEW (struct assert_locus_d);
4422 n->bb = dest_bb;
4423 n->e = e;
4424 n->si = si;
4425 n->comp_code = comp_code;
4426 n->val = val;
4427 n->expr = expr;
4428 n->next = NULL;
4430 if (last_loc)
4431 last_loc->next = n;
4432 else
4433 asserts_for[SSA_NAME_VERSION (name)] = n;
4435 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4438 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4439 Extract a suitable test code and value and store them into *CODE_P and
4440 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4442 If no extraction was possible, return FALSE, otherwise return TRUE.
4444 If INVERT is true, then we invert the result stored into *CODE_P. */
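/* Illustrative example (added for exposition, not part of the original
   sources; x_3 is a made-up SSA name): for the condition 10 < x_3 with
   NAME == x_3, the comparison is flipped so that NAME comes first,
   giving *CODE_P == GT_EXPR and *VAL_P == 10; with INVERT true the
   code is additionally inverted, yielding LE_EXPR.  */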
4446 static bool
4447 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4448 tree cond_op0, tree cond_op1,
4449 bool invert, enum tree_code *code_p,
4450 tree *val_p)
4452 enum tree_code comp_code;
4453 tree val;
4455 /* We have a comparison of the form NAME COMP VAL
4456 or VAL COMP NAME. */
4457 if (name == cond_op1)
4459 /* If the predicate is of the form VAL COMP NAME, flip
4460 COMP around because we need to register NAME as the
4461 first operand in the predicate. */
4462 comp_code = swap_tree_comparison (cond_code);
4463 val = cond_op0;
4465 else
4467 /* The comparison is of the form NAME COMP VAL, so the
4468 comparison code remains unchanged. */
4469 comp_code = cond_code;
4470 val = cond_op1;
4473 /* Invert the comparison code as necessary. */
4474 if (invert)
4475 comp_code = invert_tree_comparison (comp_code, 0);
4477 /* VRP does not handle float types. */
4478 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4479 return false;
4481 /* Do not register always-false predicates.
4482 FIXME: this works around a limitation in fold() when dealing with
4483 enumerations. Given 'enum { N1, N2 } x;', fold will not
4484 fold 'if (x > N2)' to 'if (0)'. */
4485 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4486 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4488 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4489 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4491 if (comp_code == GT_EXPR
4492 && (!max
4493 || compare_values (val, max) == 0))
4494 return false;
4496 if (comp_code == LT_EXPR
4497 && (!min
4498 || compare_values (val, min) == 0))
4499 return false;
4501 *code_p = comp_code;
4502 *val_p = val;
4503 return true;
4506 /* Find the smallest RES such that RES > VAL && (RES & MASK) == RES, if any
4507 (otherwise return VAL). VAL and MASK must be zero-extended for
4508 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
4509 (to transform signed values into unsigned) and at the end xor
4510 SGNBIT back. */
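/* Illustrative example (added for exposition, not part of the original
   sources): with VAL == 5 (0b0101), MASK == 0b1010 and SGNBIT == 0, the
   values RES with (RES & MASK) == RES are 0, 2, 8 and 10, so the
   smallest one above VAL is 8, which is what masked_increment returns;
   when no such value exists, VAL itself is returned.  */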
4512 static double_int
4513 masked_increment (double_int val, double_int mask, double_int sgnbit,
4514 unsigned int prec)
4516 double_int bit = double_int_one, res;
4517 unsigned int i;
4519 val = double_int_xor (val, sgnbit);
4520 for (i = 0; i < prec; i++, bit = double_int_add (bit, bit))
4522 res = mask;
4523 if (double_int_zero_p (double_int_and (res, bit)))
4524 continue;
4525 res = double_int_sub (bit, double_int_one);
4526 res = double_int_and_not (double_int_add (val, bit), res);
4527 res = double_int_and (res, mask);
4528 if (double_int_ucmp (res, val) > 0)
4529 return double_int_xor (res, sgnbit);
4531 return double_int_xor (val, sgnbit);
4534 /* Try to register an edge assertion for SSA name NAME on edge E for
4535 the condition COND contributing to the conditional jump pointed to by BSI.
4536 Invert the condition COND if INVERT is true.
4537 Return true if an assertion for NAME could be registered. */
4539 static bool
4540 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4541 enum tree_code cond_code,
4542 tree cond_op0, tree cond_op1, bool invert)
4544 tree val;
4545 enum tree_code comp_code;
4546 bool retval = false;
4548 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4549 cond_op0,
4550 cond_op1,
4551 invert, &comp_code, &val))
4552 return false;
4554 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4555 reachable from E. */
4556 if (live_on_edge (e, name)
4557 && !has_single_use (name))
4559 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4560 retval = true;
4563 /* In the case of NAME <= CST and NAME being defined as
4564 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4565 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4566 This catches range and anti-range tests. */
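  /* Illustrative example (added for exposition, not part of the original
     sources; the SSA names are made up): for

         tmp_5 = (unsigned int) x_3 + 1;
         if (tmp_5 <= 4) ...

     an additional assertion with the expression (unsigned int) x_3 + 1
     and the predicate <= 4 is registered for x_3 on the THEN edge, which
     effectively constrains the signed x_3 to [-1, 3].  */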
4567 if ((comp_code == LE_EXPR
4568 || comp_code == GT_EXPR)
4569 && TREE_CODE (val) == INTEGER_CST
4570 && TYPE_UNSIGNED (TREE_TYPE (val)))
4572 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4573 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4575 /* Extract CST2 from the (optional) addition. */
4576 if (is_gimple_assign (def_stmt)
4577 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4579 name2 = gimple_assign_rhs1 (def_stmt);
4580 cst2 = gimple_assign_rhs2 (def_stmt);
4581 if (TREE_CODE (name2) == SSA_NAME
4582 && TREE_CODE (cst2) == INTEGER_CST)
4583 def_stmt = SSA_NAME_DEF_STMT (name2);
4586 /* Extract NAME2 from the (optional) sign-changing cast. */
4587 if (gimple_assign_cast_p (def_stmt))
4589 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4590 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4591 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4592 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4593 name3 = gimple_assign_rhs1 (def_stmt);
4596 /* If name3 is used later, create an ASSERT_EXPR for it. */
4597 if (name3 != NULL_TREE
4598 && TREE_CODE (name3) == SSA_NAME
4599 && (cst2 == NULL_TREE
4600 || TREE_CODE (cst2) == INTEGER_CST)
4601 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4602 && live_on_edge (e, name3)
4603 && !has_single_use (name3))
4605 tree tmp;
4607 /* Build an expression for the range test. */
4608 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4609 if (cst2 != NULL_TREE)
4610 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4612 if (dump_file)
4614 fprintf (dump_file, "Adding assert for ");
4615 print_generic_expr (dump_file, name3, 0);
4616 fprintf (dump_file, " from ");
4617 print_generic_expr (dump_file, tmp, 0);
4618 fprintf (dump_file, "\n");
4621 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4623 retval = true;
4626 /* If name2 is used later, create an ASSERT_EXPR for it. */
4627 if (name2 != NULL_TREE
4628 && TREE_CODE (name2) == SSA_NAME
4629 && TREE_CODE (cst2) == INTEGER_CST
4630 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4631 && live_on_edge (e, name2)
4632 && !has_single_use (name2))
4634 tree tmp;
4636 /* Build an expression for the range test. */
4637 tmp = name2;
4638 if (TREE_TYPE (name) != TREE_TYPE (name2))
4639 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4640 if (cst2 != NULL_TREE)
4641 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4643 if (dump_file)
4645 fprintf (dump_file, "Adding assert for ");
4646 print_generic_expr (dump_file, name2, 0);
4647 fprintf (dump_file, " from ");
4648 print_generic_expr (dump_file, tmp, 0);
4649 fprintf (dump_file, "\n");
4652 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4654 retval = true;
4658 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
4659 && TREE_CODE (val) == INTEGER_CST)
4661 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4662 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
4663 tree val2 = NULL_TREE;
4664 double_int mask = double_int_zero;
4665 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
4667 /* Add asserts for NAME cmp CST and NAME being defined
4668 as NAME = (int) NAME2. */
4669 if (!TYPE_UNSIGNED (TREE_TYPE (val))
4670 && (comp_code == LE_EXPR || comp_code == LT_EXPR
4671 || comp_code == GT_EXPR || comp_code == GE_EXPR)
4672 && gimple_assign_cast_p (def_stmt))
4674 name2 = gimple_assign_rhs1 (def_stmt);
4675 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4676 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4677 && TYPE_UNSIGNED (TREE_TYPE (name2))
4678 && prec == TYPE_PRECISION (TREE_TYPE (name2))
4679 && (comp_code == LE_EXPR || comp_code == GT_EXPR
4680 || !tree_int_cst_equal (val,
4681 TYPE_MIN_VALUE (TREE_TYPE (val))))
4682 && live_on_edge (e, name2)
4683 && !has_single_use (name2))
4685 tree tmp, cst;
4686 enum tree_code new_comp_code = comp_code;
4688 cst = fold_convert (TREE_TYPE (name2),
4689 TYPE_MIN_VALUE (TREE_TYPE (val)));
4690 /* Build an expression for the range test. */
4691 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
4692 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
4693 fold_convert (TREE_TYPE (name2), val));
4694 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4696 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
4697 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
4698 build_int_cst (TREE_TYPE (name2), 1));
4701 if (dump_file)
4703 fprintf (dump_file, "Adding assert for ");
4704 print_generic_expr (dump_file, name2, 0);
4705 fprintf (dump_file, " from ");
4706 print_generic_expr (dump_file, tmp, 0);
4707 fprintf (dump_file, "\n");
4710 register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
4711 e, bsi);
4713 retval = true;
4717 /* Add asserts for NAME cmp CST and NAME being defined as
4718 NAME = NAME2 >> CST2.
4720 Extract CST2 from the right shift. */
4721 if (is_gimple_assign (def_stmt)
4722 && gimple_assign_rhs_code (def_stmt) == RSHIFT_EXPR)
4724 name2 = gimple_assign_rhs1 (def_stmt);
4725 cst2 = gimple_assign_rhs2 (def_stmt);
4726 if (TREE_CODE (name2) == SSA_NAME
4727 && host_integerp (cst2, 1)
4728 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4729 && IN_RANGE (tree_low_cst (cst2, 1), 1, prec - 1)
4730 && prec <= HOST_BITS_PER_DOUBLE_INT
4731 && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
4732 && live_on_edge (e, name2)
4733 && !has_single_use (name2))
4735 mask = double_int_mask (tree_low_cst (cst2, 1));
4736 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
4739 if (val2 != NULL_TREE
4740 && TREE_CODE (val2) == INTEGER_CST
4741 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
4742 TREE_TYPE (val),
4743 val2, cst2), val))
4745 enum tree_code new_comp_code = comp_code;
4746 tree tmp, new_val;
4748 tmp = name2;
4749 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
4751 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
4753 tree type = build_nonstandard_integer_type (prec, 1);
4754 tmp = build1 (NOP_EXPR, type, name2);
4755 val2 = fold_convert (type, val2);
4757 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
4758 new_val = double_int_to_tree (TREE_TYPE (tmp), mask);
4759 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
4761 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4762 new_val = val2;
4763 else
4765 double_int maxval
4766 = double_int_max_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
4767 mask = double_int_ior (tree_to_double_int (val2), mask);
4768 if (double_int_equal_p (mask, maxval))
4769 new_val = NULL_TREE;
4770 else
4771 new_val = double_int_to_tree (TREE_TYPE (val2), mask);
4774 if (new_val)
4776 if (dump_file)
4778 fprintf (dump_file, "Adding assert for ");
4779 print_generic_expr (dump_file, name2, 0);
4780 fprintf (dump_file, " from ");
4781 print_generic_expr (dump_file, tmp, 0);
4782 fprintf (dump_file, "\n");
4785 register_new_assert_for (name2, tmp, new_comp_code, new_val,
4786 NULL, e, bsi);
4787 retval = true;
4791 /* Add asserts for NAME cmp CST and NAME being defined as
4792 NAME = NAME2 & CST2.
4794 Extract CST2 from the and. */
4795 names[0] = NULL_TREE;
4796 names[1] = NULL_TREE;
4797 cst2 = NULL_TREE;
4798 if (is_gimple_assign (def_stmt)
4799 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
4801 name2 = gimple_assign_rhs1 (def_stmt);
4802 cst2 = gimple_assign_rhs2 (def_stmt);
4803 if (TREE_CODE (name2) == SSA_NAME
4804 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4805 && TREE_CODE (cst2) == INTEGER_CST
4806 && !integer_zerop (cst2)
4807 && prec <= HOST_BITS_PER_DOUBLE_INT
4808 && (prec > 1
4809 || TYPE_UNSIGNED (TREE_TYPE (val))))
4811 gimple def_stmt2 = SSA_NAME_DEF_STMT (name2);
4812 if (gimple_assign_cast_p (def_stmt2))
4814 names[1] = gimple_assign_rhs1 (def_stmt2);
4815 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
4816 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
4817 || (TYPE_PRECISION (TREE_TYPE (name2))
4818 != TYPE_PRECISION (TREE_TYPE (names[1])))
4819 || !live_on_edge (e, names[1])
4820 || has_single_use (names[1]))
4821 names[1] = NULL_TREE;
4823 if (live_on_edge (e, name2)
4824 && !has_single_use (name2))
4825 names[0] = name2;
4828 if (names[0] || names[1])
4830 double_int minv, maxv = double_int_zero, valv, cst2v;
4831 double_int tem, sgnbit;
4832 bool valid_p = false, valn = false, cst2n = false;
4833 enum tree_code ccode = comp_code;
4835 valv = double_int_zext (tree_to_double_int (val), prec);
4836 cst2v = double_int_zext (tree_to_double_int (cst2), prec);
4837 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
4839 valn = double_int_negative_p (double_int_sext (valv, prec));
4840 cst2n = double_int_negative_p (double_int_sext (cst2v, prec));
4842 /* If CST2 doesn't have most significant bit set,
4843 but VAL is negative, we have comparison like
4844 if ((x & 0x123) > -4) (always true). Just give up. */
4845 if (!cst2n && valn)
4846 ccode = ERROR_MARK;
4847 if (cst2n)
4848 sgnbit = double_int_zext (double_int_lshift (double_int_one,
4849 prec - 1, prec,
4850 false), prec);
4851 else
4852 sgnbit = double_int_zero;
4853 minv = double_int_and (valv, cst2v);
4854 switch (ccode)
4856 case EQ_EXPR:
4857 /* Minimum unsigned value for equality is VAL & CST2
4858 (should be equal to VAL, otherwise we probably should
4859 have folded the comparison into false) and
4860 maximum unsigned value is VAL | ~CST2. */
4861 maxv = double_int_ior (valv, double_int_not (cst2v));
4862 maxv = double_int_zext (maxv, prec);
4863 valid_p = true;
4864 break;
4865 case NE_EXPR:
4866 tem = double_int_ior (valv, double_int_not (cst2v));
4867 tem = double_int_zext (tem, prec);
4868 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
4869 if (double_int_zero_p (valv))
4871 cst2n = false;
4872 sgnbit = double_int_zero;
4873 goto gt_expr;
4875 /* If (VAL | ~CST2) is all ones, handle it as
4876 (X & CST2) < VAL. */
4877 if (double_int_equal_p (tem, double_int_mask (prec)))
4879 cst2n = false;
4880 valn = false;
4881 sgnbit = double_int_zero;
4882 goto lt_expr;
4884 if (!cst2n
4885 && double_int_negative_p (double_int_sext (cst2v, prec)))
4886 sgnbit = double_int_zext (double_int_lshift (double_int_one,
4887 prec - 1, prec,
4888 false), prec);
4889 if (!double_int_zero_p (sgnbit))
4891 if (double_int_equal_p (valv, sgnbit))
4893 cst2n = true;
4894 valn = true;
4895 goto gt_expr;
4897 if (double_int_equal_p (tem, double_int_mask (prec - 1)))
4899 cst2n = true;
4900 goto lt_expr;
4902 if (!cst2n)
4903 sgnbit = double_int_zero;
4905 break;
4906 case GE_EXPR:
4907 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
4908 is VAL and maximum unsigned value is ~0. For signed
4909 comparison, if CST2 doesn't have most significant bit
4910 set, handle it similarly. If CST2 has MSB set,
4911 the minimum is the same, and maximum is ~0U/2. */
4912 if (!double_int_equal_p (minv, valv))
4914 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
4915 VAL. */
4916 minv = masked_increment (valv, cst2v, sgnbit, prec);
4917 if (double_int_equal_p (minv, valv))
4918 break;
4920 maxv = double_int_mask (prec - (cst2n ? 1 : 0));
4921 valid_p = true;
4922 break;
4923 case GT_EXPR:
4924 gt_expr:
4925 /* Find out smallest MINV where MINV > VAL
4926 && (MINV & CST2) == MINV, if any. If VAL is signed and
4927 CST2 has MSB set, compute it biased by 1 << (prec - 1). */
4928 minv = masked_increment (valv, cst2v, sgnbit, prec);
4929 if (double_int_equal_p (minv, valv))
4930 break;
4931 maxv = double_int_mask (prec - (cst2n ? 1 : 0));
4932 valid_p = true;
4933 break;
4934 case LE_EXPR:
4935 /* Minimum unsigned value for <= is 0 and maximum
4936 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
4937 Otherwise, find smallest VAL2 where VAL2 > VAL
4938 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
4939 as maximum.
4940 For signed comparison, if CST2 doesn't have most
4941 significant bit set, handle it similarly. If CST2 has
4942 MSB set, the maximum is the same and minimum is INT_MIN. */
4943 if (double_int_equal_p (minv, valv))
4944 maxv = valv;
4945 else
4947 maxv = masked_increment (valv, cst2v, sgnbit, prec);
4948 if (double_int_equal_p (maxv, valv))
4949 break;
4950 maxv = double_int_sub (maxv, double_int_one);
4952 maxv = double_int_ior (maxv, double_int_not (cst2v));
4953 maxv = double_int_zext (maxv, prec);
4954 minv = sgnbit;
4955 valid_p = true;
4956 break;
4957 case LT_EXPR:
4958 lt_expr:
4959 /* Minimum unsigned value for < is 0 and maximum
4960 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
4961 Otherwise, find smallest VAL2 where VAL2 > VAL
4962 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
4963 as maximum.
4964 For signed comparison, if CST2 doesn't have most
4965 significant bit set, handle it similarly. If CST2 has
4966 MSB set, the maximum is the same and minimum is INT_MIN. */
4967 if (double_int_equal_p (minv, valv))
4969 if (double_int_equal_p (valv, sgnbit))
4970 break;
4971 maxv = valv;
4973 else
4975 maxv = masked_increment (valv, cst2v, sgnbit, prec);
4976 if (double_int_equal_p (maxv, valv))
4977 break;
4979 maxv = double_int_sub (maxv, double_int_one);
4980 maxv = double_int_ior (maxv, double_int_not (cst2v));
4981 maxv = double_int_zext (maxv, prec);
4982 minv = sgnbit;
4983 valid_p = true;
4984 break;
4985 default:
4986 break;
4988 if (valid_p
4989 && !double_int_equal_p (double_int_zext (double_int_sub (maxv,
4990 minv),
4991 prec),
4992 double_int_mask (prec)))
4994 tree tmp, new_val, type;
4995 int i;
4997 for (i = 0; i < 2; i++)
4998 if (names[i])
5000 double_int maxv2 = maxv;
5001 tmp = names[i];
5002 type = TREE_TYPE (names[i]);
5003 if (!TYPE_UNSIGNED (type))
5005 type = build_nonstandard_integer_type (prec, 1);
5006 tmp = build1 (NOP_EXPR, type, names[i]);
5008 if (!double_int_zero_p (minv))
5010 tmp = build2 (PLUS_EXPR, type, tmp,
5011 double_int_to_tree (type,
5012 double_int_neg (minv)));
5013 maxv2 = double_int_sub (maxv, minv);
5015 new_val = double_int_to_tree (type, maxv2);
5017 if (dump_file)
5019 fprintf (dump_file, "Adding assert for ");
5020 print_generic_expr (dump_file, names[i], 0);
5021 fprintf (dump_file, " from ");
5022 print_generic_expr (dump_file, tmp, 0);
5023 fprintf (dump_file, "\n");
5026 register_new_assert_for (names[i], tmp, LE_EXPR,
5027 new_val, NULL, e, bsi);
5028 retval = true;
5034 return retval;
5037 /* OP is an operand of a truth value expression which is known to have
5038 a particular value. Register any asserts for OP and for any
5039 operands in OP's defining statement.
5041 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
5042 if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */
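/* Illustrative example (added for exposition, not part of the original
   sources; the SSA names are made up): if OP is op_6, defined as
   op_6 = a_2 < b_3, and CODE is NE_EXPR (op_6 is known to be true),
   then asserts derived from the comparison a_2 < b_3 are registered
   for a_2 and b_3 as well.  */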
5044 static bool
5045 register_edge_assert_for_1 (tree op, enum tree_code code,
5046 edge e, gimple_stmt_iterator bsi)
5048 bool retval = false;
5049 gimple op_def;
5050 tree val;
5051 enum tree_code rhs_code;
5053 /* We only care about SSA_NAMEs. */
5054 if (TREE_CODE (op) != SSA_NAME)
5055 return false;
5057 /* We know that OP will have a zero or nonzero value. If OP is used
5058 more than once go ahead and register an assert for OP.
5060 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
5061 it will always be set for OP (because OP is used in a COND_EXPR in
5062 the subgraph). */
5063 if (!has_single_use (op))
5065 val = build_int_cst (TREE_TYPE (op), 0);
5066 register_new_assert_for (op, op, code, val, NULL, e, bsi);
5067 retval = true;
5070 /* Now look at how OP is set. If it's set from a comparison,
5071 a truth operation or some bit operations, then we may be able
5072 to register information about the operands of that assignment. */
5073 op_def = SSA_NAME_DEF_STMT (op);
5074 if (gimple_code (op_def) != GIMPLE_ASSIGN)
5075 return retval;
5077 rhs_code = gimple_assign_rhs_code (op_def);
5079 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5081 bool invert = (code == EQ_EXPR ? true : false);
5082 tree op0 = gimple_assign_rhs1 (op_def);
5083 tree op1 = gimple_assign_rhs2 (op_def);
5085 if (TREE_CODE (op0) == SSA_NAME)
5086 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
5087 invert);
5088 if (TREE_CODE (op1) == SSA_NAME)
5089 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
5090 invert);
5092 else if ((code == NE_EXPR
5093 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5094 || (code == EQ_EXPR
5095 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5097 /* Recurse on each operand. */
5098 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5099 code, e, bsi);
5100 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
5101 code, e, bsi);
5103 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5104 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5106 /* Recurse, flipping CODE. */
5107 code = invert_tree_comparison (code, false);
5108 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5109 code, e, bsi);
5111 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5113 /* Recurse through the copy. */
5114 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5115 code, e, bsi);
5117 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5119 /* Recurse through the type conversion. */
5120 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5121 code, e, bsi);
5124 return retval;
5127 /* Try to register an edge assertion for SSA name NAME on edge E for
5128 the condition COND contributing to the conditional jump pointed to by SI.
5129 Return true if an assertion for NAME could be registered. */
5131 static bool
5132 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
5133 enum tree_code cond_code, tree cond_op0,
5134 tree cond_op1)
5136 tree val;
5137 enum tree_code comp_code;
5138 bool retval = false;
5139 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
5141 /* Do not attempt to infer anything in names that flow through
5142 abnormal edges. */
5143 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
5144 return false;
5146 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5147 cond_op0, cond_op1,
5148 is_else_edge,
5149 &comp_code, &val))
5150 return false;
5152 /* Register ASSERT_EXPRs for name. */
5153 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
5154 cond_op1, is_else_edge);
5157 /* If COND is effectively an equality test of an SSA_NAME against
5158 the value zero or one, then we may be able to assert values
5159 for SSA_NAMEs which flow into COND. */
5161 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
5162 statement of NAME we can assert both operands of the BIT_AND_EXPR
5163 have nonzero value. */
5164 if (((comp_code == EQ_EXPR && integer_onep (val))
5165 || (comp_code == NE_EXPR && integer_zerop (val))))
5167 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5169 if (is_gimple_assign (def_stmt)
5170 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
5172 tree op0 = gimple_assign_rhs1 (def_stmt);
5173 tree op1 = gimple_assign_rhs2 (def_stmt);
5174 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
5175 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
5179 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
5180 statement of NAME we can assert both operands of the BIT_IOR_EXPR
5181 have zero value. */
5182 if (((comp_code == EQ_EXPR && integer_zerop (val))
5183 || (comp_code == NE_EXPR && integer_onep (val))))
5185 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5187 /* For a BIT_IOR_EXPR, both operands are necessarily zero only if
5188 NAME == 0, or if the type precision is one (NAME != 1 then implies NAME == 0). */
5189 if (is_gimple_assign (def_stmt)
5190 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
5191 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
5192 || comp_code == EQ_EXPR)))
5194 tree op0 = gimple_assign_rhs1 (def_stmt);
5195 tree op1 = gimple_assign_rhs2 (def_stmt);
5196 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
5197 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
5201 return retval;
5205 /* Determine whether the outgoing edges of BB should receive an
5206 ASSERT_EXPR for each of the operands of BB's LAST statement.
5207 The last statement of BB must be a COND_EXPR.
5209 If any of the sub-graphs rooted at BB have an interesting use of
5210 the predicate operands, an assert location node is added to the
5211 list of assertions for the corresponding operands. */
5213 static bool
5214 find_conditional_asserts (basic_block bb, gimple last)
5216 bool need_assert;
5217 gimple_stmt_iterator bsi;
5218 tree op;
5219 edge_iterator ei;
5220 edge e;
5221 ssa_op_iter iter;
5223 need_assert = false;
5224 bsi = gsi_for_stmt (last);
5226 /* Look for uses of the operands in each of the sub-graphs
5227 rooted at BB. We need to check each of the outgoing edges
5228 separately, so that we know what kind of ASSERT_EXPR to
5229 insert. */
5230 FOR_EACH_EDGE (e, ei, bb->succs)
5232 if (e->dest == bb)
5233 continue;
5235 /* Register the necessary assertions for each operand in the
5236 conditional predicate. */
5237 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
5239 need_assert |= register_edge_assert_for (op, e, bsi,
5240 gimple_cond_code (last),
5241 gimple_cond_lhs (last),
5242 gimple_cond_rhs (last));
5246 return need_assert;
5249 struct case_info
5251 tree expr;
5252 basic_block bb;
5255 /* Compare two case labels sorting first by the destination bb index
5256 and then by the case value. */
5258 static int
5259 compare_case_labels (const void *p1, const void *p2)
5261 const struct case_info *ci1 = (const struct case_info *) p1;
5262 const struct case_info *ci2 = (const struct case_info *) p2;
5263 int idx1 = ci1->bb->index;
5264 int idx2 = ci2->bb->index;
5266 if (idx1 < idx2)
5267 return -1;
5268 else if (idx1 == idx2)
5270 /* Make sure the default label is first in a group. */
5271 if (!CASE_LOW (ci1->expr))
5272 return -1;
5273 else if (!CASE_LOW (ci2->expr))
5274 return 1;
5275 else
5276 return tree_int_cst_compare (CASE_LOW (ci1->expr),
5277 CASE_LOW (ci2->expr));
5279 else
5280 return 1;
5283 /* Determine whether the outgoing edges of BB should receive an
5284 ASSERT_EXPR for each of the operands of BB's LAST statement.
5285 The last statement of BB must be a SWITCH_EXPR.
5287 If any of the sub-graphs rooted at BB have an interesting use of
5288 the predicate operands, an assert location node is added to the
5289 list of assertions for the corresponding operands. */
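/* Illustrative example (added for exposition, not part of the original
   sources; x_1 is a made-up SSA name): for switch (x_1) with a label
   case 3 ... 7:, the assertions x_1 >= 3 and x_1 <= 7 are registered on
   the edge leading to that label's block; labels sharing a destination
   are first merged into a single range.  */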
5291 static bool
5292 find_switch_asserts (basic_block bb, gimple last)
5294 bool need_assert;
5295 gimple_stmt_iterator bsi;
5296 tree op;
5297 edge e;
5298 struct case_info *ci;
5299 size_t n = gimple_switch_num_labels (last);
5300 #if GCC_VERSION >= 4000
5301 unsigned int idx;
5302 #else
5303 /* Work around GCC 3.4 bug (PR 37086). */
5304 volatile unsigned int idx;
5305 #endif
5307 need_assert = false;
5308 bsi = gsi_for_stmt (last);
5309 op = gimple_switch_index (last);
5310 if (TREE_CODE (op) != SSA_NAME)
5311 return false;
5313 /* Build a vector of case labels sorted by destination label. */
5314 ci = XNEWVEC (struct case_info, n);
5315 for (idx = 0; idx < n; ++idx)
5317 ci[idx].expr = gimple_switch_label (last, idx);
5318 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
5320 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
5322 for (idx = 0; idx < n; ++idx)
5324 tree min, max;
5325 tree cl = ci[idx].expr;
5326 basic_block cbb = ci[idx].bb;
5328 min = CASE_LOW (cl);
5329 max = CASE_HIGH (cl);
5331 /* If there are multiple case labels with the same destination
5332 we need to combine them to a single value range for the edge. */
5333 if (idx + 1 < n && cbb == ci[idx + 1].bb)
5335 /* Skip labels until the last of the group. */
5336 do {
5337 ++idx;
5338 } while (idx < n && cbb == ci[idx].bb);
5339 --idx;
5341 /* Pick up the maximum of the case label range. */
5342 if (CASE_HIGH (ci[idx].expr))
5343 max = CASE_HIGH (ci[idx].expr);
5344 else
5345 max = CASE_LOW (ci[idx].expr);
5348 /* Nothing to do if the range includes the default label until we
5349 can register anti-ranges. */
5350 if (min == NULL_TREE)
5351 continue;
5353 /* Find the edge to register the assert expr on. */
5354 e = find_edge (bb, cbb);
5356 /* Register the necessary assertions for the operand in the
5357 SWITCH_EXPR. */
5358 need_assert |= register_edge_assert_for (op, e, bsi,
5359 max ? GE_EXPR : EQ_EXPR,
5360 op,
5361 fold_convert (TREE_TYPE (op),
5362 min));
5363 if (max)
5365 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
5366 op,
5367 fold_convert (TREE_TYPE (op),
5368 max));
5372 XDELETEVEC (ci);
5373 return need_assert;
5377 /* Traverse all the statements in block BB looking for statements that
5378 may generate useful assertions for the SSA names in their operand.
5379 If a statement produces a useful assertion A for name N_i, then the
5380 list of assertions already generated for N_i is scanned to
5381 determine if A is actually needed.
5383 If N_i already had the assertion A at a location dominating the
5384 current location, then nothing needs to be done. Otherwise, the
5385 new location for A is recorded instead.
5387 1- For every statement S in BB, all the variables used by S are
5388 added to bitmap FOUND_IN_SUBGRAPH.
5390 2- If statement S uses an operand N in a way that exposes a known
5391 value range for N, then if N was not already generated by an
5392 ASSERT_EXPR, create a new assert location for N. For instance,
5393 if N is a pointer and the statement dereferences it, we can
5394 assume that N is not NULL.
5396 3- COND_EXPRs are a special case of #2. We can derive range
5397 information from the predicate but need to insert different
5398 ASSERT_EXPRs for each of the sub-graphs rooted at the
5399 conditional block. If the last statement of BB is a conditional
5400 expression of the form 'X op Y', then
5402 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
5404 b) If the conditional is the only entry point to the sub-graph
5405 corresponding to the THEN_CLAUSE, recurse into it. On
5406 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
5407 an ASSERT_EXPR is added for the corresponding variable.
5409 c) Repeat step (b) on the ELSE_CLAUSE.
5411 d) Mark X and Y in FOUND_IN_SUBGRAPH.
5413 For instance,
5415 if (a == 9)
5416 b = a;
5417 else
5418 b = c + 1;
5420 In this case, an assertion on the THEN clause is useful to
5421 determine that 'a' is always 9 on that edge. However, an assertion
5422 on the ELSE clause would be unnecessary.
5424 4- If BB does not end in a conditional expression, then we recurse
5425 into BB's dominator children.
5427 At the end of the recursive traversal, every SSA name will have a
5428 list of locations where ASSERT_EXPRs should be added. When a new
5429 location for name N is found, it is registered by calling
5430 register_new_assert_for. That function keeps track of all the
5431 registered assertions to prevent adding unnecessary assertions.
5432 For instance, if a pointer P_4 is dereferenced more than once in a
5433 dominator tree, only the location dominating all the dereference of
5434 P_4 will receive an ASSERT_EXPR.
5436 If this function returns true, then it means that there are names
5437 for which we need to generate ASSERT_EXPRs. Those assertions are
5438 inserted by process_assert_insertions. */
5440 static bool
5441 find_assert_locations_1 (basic_block bb, sbitmap live)
5443 gimple_stmt_iterator si;
5444 gimple last;
5445 gimple phi;
5446 bool need_assert;
5448 need_assert = false;
5449 last = last_stmt (bb);
5451 /* If BB's last statement is a conditional statement involving integer
5452 operands, determine if we need to add ASSERT_EXPRs. */
5453 if (last
5454 && gimple_code (last) == GIMPLE_COND
5455 && !fp_predicate (last)
5456 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
5457 need_assert |= find_conditional_asserts (bb, last);
5459 /* If BB's last statement is a switch statement involving integer
5460 operands, determine if we need to add ASSERT_EXPRs. */
5461 if (last
5462 && gimple_code (last) == GIMPLE_SWITCH
5463 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
5464 need_assert |= find_switch_asserts (bb, last);
5466 /* Traverse all the statements in BB marking used names and looking
5467 for statements that may infer assertions for their used operands. */
5468 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5470 gimple stmt;
5471 tree op;
5472 ssa_op_iter i;
5474 stmt = gsi_stmt (si);
5476 if (is_gimple_debug (stmt))
5477 continue;
5479 /* See if we can derive an assertion for any of STMT's operands. */
5480 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5482 tree value;
5483 enum tree_code comp_code;
5485 /* Mark OP in our live bitmap. */
5486 SET_BIT (live, SSA_NAME_VERSION (op));
5488 /* If OP is used in such a way that we can infer a value
5489 range for it, and we don't find a previous assertion for
5490 it, create a new assertion location node for OP. */
5491 if (infer_value_range (stmt, op, &comp_code, &value))
5493 /* If we are able to infer a nonzero value range for OP,
5494 then walk backwards through the use-def chain to see if OP
5495 was set via a typecast.
5497 If so, then we can also infer a nonzero value range
5498 for the operand of the NOP_EXPR. */
5499 if (comp_code == NE_EXPR && integer_zerop (value))
5501 tree t = op;
5502 gimple def_stmt = SSA_NAME_DEF_STMT (t);
5504 while (is_gimple_assign (def_stmt)
5505 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
5506 && TREE_CODE
5507 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
5508 && POINTER_TYPE_P
5509 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
5511 t = gimple_assign_rhs1 (def_stmt);
5512 def_stmt = SSA_NAME_DEF_STMT (t);
5514 /* Note we want to register the assert for the
5515 operand of the NOP_EXPR after SI, not after the
5516 conversion. */
5517 if (! has_single_use (t))
5519 register_new_assert_for (t, t, comp_code, value,
5520 bb, NULL, si);
5521 need_assert = true;
5526 /* If OP is used only once, namely in this STMT, don't
5527 bother creating an ASSERT_EXPR for it. Such an
5528 ASSERT_EXPR would do nothing but increase compile time. */
5529 if (!has_single_use (op))
5531 register_new_assert_for (op, op, comp_code, value,
5532 bb, NULL, si);
5533 need_assert = true;
5539 /* Traverse all PHI nodes in BB marking used operands. */
5540 for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si))
5542 use_operand_p arg_p;
5543 ssa_op_iter i;
5544 phi = gsi_stmt (si);
5546 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
5548 tree arg = USE_FROM_PTR (arg_p);
5549 if (TREE_CODE (arg) == SSA_NAME)
5550 SET_BIT (live, SSA_NAME_VERSION (arg));
5554 return need_assert;
5557 /* Do an RPO walk over the function computing SSA name liveness
5558 on-the-fly and deciding on assert expressions to insert.
5559 Returns true if there are assert expressions to be inserted. */
5561 static bool
5562 find_assert_locations (void)
5564 int *rpo = XNEWVEC (int, last_basic_block);
5565 int *bb_rpo = XNEWVEC (int, last_basic_block);
5566 int *last_rpo = XCNEWVEC (int, last_basic_block);
5567 int rpo_cnt, i;
5568 bool need_asserts;
5570 live = XCNEWVEC (sbitmap, last_basic_block);
5571 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
5572 for (i = 0; i < rpo_cnt; ++i)
5573 bb_rpo[rpo[i]] = i;
5575 need_asserts = false;
5576 for (i = rpo_cnt - 1; i >= 0; --i)
5578 basic_block bb = BASIC_BLOCK (rpo[i]);
5579 edge e;
5580 edge_iterator ei;
5582 if (!live[rpo[i]])
5584 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
5585 sbitmap_zero (live[rpo[i]]);
5588 /* Process BB and update the live information with uses in
5589 this block. */
5590 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
5592 /* Merge liveness into the predecessor blocks and free it. */
5593 if (!sbitmap_empty_p (live[rpo[i]]))
5595 int pred_rpo = i;
5596 FOR_EACH_EDGE (e, ei, bb->preds)
5598 int pred = e->src->index;
5599 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
5600 continue;
5602 if (!live[pred])
5604 live[pred] = sbitmap_alloc (num_ssa_names);
5605 sbitmap_zero (live[pred]);
5607 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]);
5609 if (bb_rpo[pred] < pred_rpo)
5610 pred_rpo = bb_rpo[pred];
5613 /* Record the RPO number of the last visited block that needs
5614 live information from this block. */
5615 last_rpo[rpo[i]] = pred_rpo;
5617 else
5619 sbitmap_free (live[rpo[i]]);
5620 live[rpo[i]] = NULL;
5623 /* We can free all successors live bitmaps if all their
5624 predecessors have been visited already. */
5625 FOR_EACH_EDGE (e, ei, bb->succs)
5626 if (last_rpo[e->dest->index] == i
5627 && live[e->dest->index])
5629 sbitmap_free (live[e->dest->index]);
5630 live[e->dest->index] = NULL;
5634 XDELETEVEC (rpo);
5635 XDELETEVEC (bb_rpo);
5636 XDELETEVEC (last_rpo);
5637 for (i = 0; i < last_basic_block; ++i)
5638 if (live[i])
5639 sbitmap_free (live[i]);
5640 XDELETEVEC (live);
5642 return need_asserts;
5645 /* Create an ASSERT_EXPR for NAME and insert it in the location
5646 indicated by LOC. Return true if we made any edge insertions. */
5648 static bool
5649 process_assert_insertions_for (tree name, assert_locus_t loc)
5651 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5652 gimple stmt;
5653 tree cond;
5654 gimple assert_stmt;
5655 edge_iterator ei;
5656 edge e;
5658 /* If we have X <=> X do not insert an assert expr for that. */
5659 if (loc->expr == loc->val)
5660 return false;
5662 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
5663 assert_stmt = build_assert_expr_for (cond, name);
5664 if (loc->e)
5666 /* We have been asked to insert the assertion on an edge. This
5667 is used only by COND_EXPR and SWITCH_EXPR assertions. */
5668 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
5669 || (gimple_code (gsi_stmt (loc->si))
5670 == GIMPLE_SWITCH));
5672 gsi_insert_on_edge (loc->e, assert_stmt);
5673 return true;
5676 /* Otherwise, we can insert right after LOC->SI, provided the
5677 statement does not end the basic block. */
5678 stmt = gsi_stmt (loc->si);
5679 if (!stmt_ends_bb_p (stmt))
5681 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
5682 return false;
5685 /* If STMT must be the last statement in BB, we can only insert new
5686 assertions on the non-abnormal edge out of BB. Note that since
5687 STMT is not control flow, there may only be one non-abnormal edge
5688 out of BB. */
5689 FOR_EACH_EDGE (e, ei, loc->bb->succs)
5690 if (!(e->flags & EDGE_ABNORMAL))
5692 gsi_insert_on_edge (e, assert_stmt);
5693 return true;
5696 gcc_unreachable ();
5700 /* Process all the insertions registered for every name N_i registered
5701 in NEED_ASSERT_FOR. The list of assertions to be inserted are
5702 found in ASSERTS_FOR[i]. */
5704 static void
5705 process_assert_insertions (void)
5707 unsigned i;
5708 bitmap_iterator bi;
5709 bool update_edges_p = false;
5710 int num_asserts = 0;
5712 if (dump_file && (dump_flags & TDF_DETAILS))
5713 dump_all_asserts (dump_file);
5715 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
5717 assert_locus_t loc = asserts_for[i];
5718 gcc_assert (loc);
5720 while (loc)
5722 assert_locus_t next = loc->next;
5723 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
5724 free (loc);
5725 loc = next;
5726 num_asserts++;
5730 if (update_edges_p)
5731 gsi_commit_edge_inserts ();
5733 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
5734 num_asserts);
5738 /* Traverse the flowgraph looking for conditional jumps to insert range
5739 expressions. These range expressions are meant to provide information
5740 to optimizations that need to reason in terms of value ranges. They
5741 will not be expanded into RTL. For instance, given:
5743 x = ...
5744 y = ...
5745 if (x < y)
5746 y = x - 2;
5747 else
5748 x = y + 3;
5750 this pass will transform the code into:
5752 x = ...
5753 y = ...
5754 if (x < y)
5756 x = ASSERT_EXPR <x, x < y>
5757 y = x - 2
5759 else
5761 y = ASSERT_EXPR <y, x <= y>
5762 x = y + 3
5765 The idea is that once copy and constant propagation have run, other
5766 optimizations will be able to determine what ranges of values can 'x'
5767 take in different paths of the code, simply by checking the reaching
5768 definition of 'x'. */
5770 static void
5771 insert_range_assertions (void)
5773 need_assert_for = BITMAP_ALLOC (NULL);
5774 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
5776 calculate_dominance_info (CDI_DOMINATORS);
5778 if (find_assert_locations ())
5780 process_assert_insertions ();
5781 update_ssa (TODO_update_ssa_no_phi);
5784 if (dump_file && (dump_flags & TDF_DETAILS))
5786 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
5787 dump_function_to_file (current_function_decl, dump_file, dump_flags);
5790 free (asserts_for);
5791 BITMAP_FREE (need_assert_for);
5794 /* Checks one ARRAY_REF in REF, located at LOCATION. Ignores flexible arrays
5795 and "struct" hacks. If VRP can determine that the
5796 array subscript is a constant, check whether it is outside the valid
5797 range. If the array subscript is a RANGE, warn if it does not
5798 overlap the valid range.
5799 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
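/* As an illustration, given

     int a[10];

   a read of a[10] is diagnosed as "array subscript is above array
   bounds" and a[-1] as "below array bounds", while taking the address
   &a[10] is accepted because IGNORE_OFF_BY_ONE permits an index one
   past the last element.  */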
5801 static void
5802 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
5804 value_range_t* vr = NULL;
5805 tree low_sub, up_sub;
5806 tree low_bound, up_bound, up_bound_p1;
5807 tree base;
5809 if (TREE_NO_WARNING (ref))
5810 return;
5812 low_sub = up_sub = TREE_OPERAND (ref, 1);
5813 up_bound = array_ref_up_bound (ref);
5815 /* Cannot check flexible arrays. */
5816 if (!up_bound
5817 || TREE_CODE (up_bound) != INTEGER_CST)
5818 return;
5820 /* Accesses to trailing arrays via pointers may access storage
5821 beyond the type's array bounds. */
5822 base = get_base_address (ref);
5823 if (base && TREE_CODE (base) == MEM_REF)
5825 tree cref, next = NULL_TREE;
5827 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
5828 return;
5830 cref = TREE_OPERAND (ref, 0);
5831 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
5832 for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
5833 next && TREE_CODE (next) != FIELD_DECL;
5834 next = DECL_CHAIN (next))
5837 /* If this is the last field in a struct type or a field in a
5838 union type do not warn. */
5839 if (!next)
5840 return;
5843 low_bound = array_ref_low_bound (ref);
5844 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);
5846 if (TREE_CODE (low_sub) == SSA_NAME)
5848 vr = get_value_range (low_sub);
5849 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
5851 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
5852 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
5856 if (vr && vr->type == VR_ANTI_RANGE)
5858 if (TREE_CODE (up_sub) == INTEGER_CST
5859 && tree_int_cst_lt (up_bound, up_sub)
5860 && TREE_CODE (low_sub) == INTEGER_CST
5861 && tree_int_cst_lt (low_sub, low_bound))
5863 warning_at (location, OPT_Warray_bounds,
5864 "array subscript is outside array bounds");
5865 TREE_NO_WARNING (ref) = 1;
5868 else if (TREE_CODE (up_sub) == INTEGER_CST
5869 && (ignore_off_by_one
5870 ? (tree_int_cst_lt (up_bound, up_sub)
5871 && !tree_int_cst_equal (up_bound_p1, up_sub))
5872 : (tree_int_cst_lt (up_bound, up_sub)
5873 || tree_int_cst_equal (up_bound_p1, up_sub))))
5875 warning_at (location, OPT_Warray_bounds,
5876 "array subscript is above array bounds");
5877 TREE_NO_WARNING (ref) = 1;
5879 else if (TREE_CODE (low_sub) == INTEGER_CST
5880 && tree_int_cst_lt (low_sub, low_bound))
5882 warning_at (location, OPT_Warray_bounds,
5883 "array subscript is below array bounds");
5884 TREE_NO_WARNING (ref) = 1;
5888 /* Checks whether the expression T, located at LOCATION, computes the
5889 address of an ARRAY_REF, and calls check_array_ref on it. */
5891 static void
5892 search_for_addr_array (tree t, location_t location)
5894 while (TREE_CODE (t) == SSA_NAME)
5896 gimple g = SSA_NAME_DEF_STMT (t);
5898 if (gimple_code (g) != GIMPLE_ASSIGN)
5899 return;
5901 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
5902 != GIMPLE_SINGLE_RHS)
5903 return;
5905 t = gimple_assign_rhs1 (g);
5909 /* We are only interested in addresses of ARRAY_REF's. */
5910 if (TREE_CODE (t) != ADDR_EXPR)
5911 return;
5913 /* Check each ARRAY_REF in the reference chain. */
5916 if (TREE_CODE (t) == ARRAY_REF)
5917 check_array_ref (location, t, true /*ignore_off_by_one*/);
5919 t = TREE_OPERAND (t, 0);
5921 while (handled_component_p (t));
5923 if (TREE_CODE (t) == MEM_REF
5924 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
5925 && !TREE_NO_WARNING (t))
5927 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
5928 tree low_bound, up_bound, el_sz;
5929 double_int idx;
5930 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
5931 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
5932 || !TYPE_DOMAIN (TREE_TYPE (tem)))
5933 return;
5935 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5936 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5937 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
5938 if (!low_bound
5939 || TREE_CODE (low_bound) != INTEGER_CST
5940 || !up_bound
5941 || TREE_CODE (up_bound) != INTEGER_CST
5942 || !el_sz
5943 || TREE_CODE (el_sz) != INTEGER_CST)
5944 return;
5946 idx = mem_ref_offset (t);
5947 idx = double_int_sdiv (idx, tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
5948 if (double_int_scmp (idx, double_int_zero) < 0)
5950 warning_at (location, OPT_Warray_bounds,
5951 "array subscript is below array bounds");
5952 TREE_NO_WARNING (t) = 1;
5954 else if (double_int_scmp (idx,
5955 double_int_add
5956 (double_int_add
5957 (tree_to_double_int (up_bound),
5958 double_int_neg
5959 (tree_to_double_int (low_bound))),
5960 double_int_one)) > 0)
5962 warning_at (location, OPT_Warray_bounds,
5963 "array subscript is above array bounds");
5964 TREE_NO_WARNING (t) = 1;
5969 /* walk_tree() callback that checks if *TP is
5970 an ARRAY_REF inside an ADDR_EXPR (in which case an array
5971 subscript one past the valid range is allowed). Calls
5972 check_array_ref for each ARRAY_REF found. The location is
5973 passed in DATA. */
5975 static tree
5976 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5978 tree t = *tp;
5979 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5980 location_t location;
5982 if (EXPR_HAS_LOCATION (t))
5983 location = EXPR_LOCATION (t);
5984 else
5986 location_t *locp = (location_t *) wi->info;
5987 location = *locp;
5990 *walk_subtree = TRUE;
5992 if (TREE_CODE (t) == ARRAY_REF)
5993 check_array_ref (location, t, false /*ignore_off_by_one*/);
5995 if (TREE_CODE (t) == MEM_REF
5996 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
5997 search_for_addr_array (TREE_OPERAND (t, 0), location);
5999 if (TREE_CODE (t) == ADDR_EXPR)
6000 *walk_subtree = FALSE;
6002 return NULL_TREE;
6005 /* Walk over all statements of all reachable BBs and call check_array_bounds
6006 on them. */
6008 static void
6009 check_all_array_refs (void)
6011 basic_block bb;
6012 gimple_stmt_iterator si;
6014 FOR_EACH_BB (bb)
6016 edge_iterator ei;
6017 edge e;
6018 bool executable = false;
6020 /* Skip blocks that were found to be unreachable. */
6021 FOR_EACH_EDGE (e, ei, bb->preds)
6022 executable |= !!(e->flags & EDGE_EXECUTABLE);
6023 if (!executable)
6024 continue;
6026 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6028 gimple stmt = gsi_stmt (si);
6029 struct walk_stmt_info wi;
6030 if (!gimple_has_location (stmt))
6031 continue;
6033 if (is_gimple_call (stmt))
6035 size_t i;
6036 size_t n = gimple_call_num_args (stmt);
6037 for (i = 0; i < n; i++)
6039 tree arg = gimple_call_arg (stmt, i);
6040 search_for_addr_array (arg, gimple_location (stmt));
6043 else
6045 memset (&wi, 0, sizeof (wi));
6046 wi.info = CONST_CAST (void *, (const void *)
6047 gimple_location_ptr (stmt));
6049 walk_gimple_op (gsi_stmt (si),
6050 check_array_bounds,
6051 &wi);
6057 /* Convert range assertion expressions into the implied copies and
6058 copy propagate away the copies. Doing the trivial copy propagation
6059 here avoids the need to run the full copy propagation pass after
6060 VRP.
6062 FIXME, this will eventually lead to copy propagation removing the
6063 names that had useful range information attached to them. For
6064 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
6065 then N_i will have the range [3, +INF].
6067 However, by converting the assertion into the implied copy
6068 operation N_i = N_j, we will then copy-propagate N_j into the uses
6069 of N_i and lose the range information. We may want to hold on to
6070 ASSERT_EXPRs a little while longer as the ranges could be used in
6071 things like jump threading.
6073 The problem with keeping ASSERT_EXPRs around is that passes after
6074 VRP need to handle them appropriately.
6076 Another approach would be to make the range information a first
6077 class property of the SSA_NAME so that it can be queried from
6078 any pass. This is made somewhat more complex by the need for
6079 multiple ranges to be associated with one SSA_NAME. */
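/* As an illustration, the sequence

     x_4 = ASSERT_EXPR <x_3, x_3 > 3>
     y_5 = x_4 + 1;

   becomes

     y_5 = x_3 + 1;

   once every use of x_4 has been replaced by x_3 and the assertion
   statement has been removed.  */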
6081 static void
6082 remove_range_assertions (void)
6084 basic_block bb;
6085 gimple_stmt_iterator si;
6087 /* Note that the BSI iterator bump happens at the bottom of the
6088 loop and no bump is necessary if we're removing the statement
6089 referenced by the current BSI. */
6090 FOR_EACH_BB (bb)
6091 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
6093 gimple stmt = gsi_stmt (si);
6094 gimple use_stmt;
6096 if (is_gimple_assign (stmt)
6097 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
6099 tree rhs = gimple_assign_rhs1 (stmt);
6100 tree var;
6101 tree cond = fold (ASSERT_EXPR_COND (rhs));
6102 use_operand_p use_p;
6103 imm_use_iterator iter;
6105 gcc_assert (cond != boolean_false_node);
6107 /* Propagate the RHS into every use of the LHS. */
6108 var = ASSERT_EXPR_VAR (rhs);
6109 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
6110 gimple_assign_lhs (stmt))
6111 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
6113 SET_USE (use_p, var);
6114 gcc_assert (TREE_CODE (var) == SSA_NAME);
6117 /* And finally, remove the copy, it is not needed. */
6118 gsi_remove (&si, true);
6119 release_defs (stmt);
6121 else
6122 gsi_next (&si);
6127 /* Return true if STMT is interesting for VRP. */
6129 static bool
6130 stmt_interesting_for_vrp (gimple stmt)
6132 if (gimple_code (stmt) == GIMPLE_PHI)
6134 tree res = gimple_phi_result (stmt);
6135 return (!virtual_operand_p (res)
6136 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
6137 || POINTER_TYPE_P (TREE_TYPE (res))));
6139 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6141 tree lhs = gimple_get_lhs (stmt);
6143 /* In general, assignments with virtual operands are not useful
6144 for deriving ranges, with the obvious exception of calls to
6145 builtin functions. */
6146 if (lhs && TREE_CODE (lhs) == SSA_NAME
6147 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6148 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6149 && ((is_gimple_call (stmt)
6150 && gimple_call_fndecl (stmt) != NULL_TREE
6151 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
6152 || !gimple_vuse (stmt)))
6153 return true;
6155 else if (gimple_code (stmt) == GIMPLE_COND
6156 || gimple_code (stmt) == GIMPLE_SWITCH)
6157 return true;
6159 return false;
6163 /* Initialize local data structures for VRP. */
6165 static void
6166 vrp_initialize (void)
6168 basic_block bb;
6170 values_propagated = false;
6171 num_vr_values = num_ssa_names;
6172 vr_value = XCNEWVEC (value_range_t *, num_vr_values);
6173 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
6175 FOR_EACH_BB (bb)
6177 gimple_stmt_iterator si;
6179 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
6181 gimple phi = gsi_stmt (si);
6182 if (!stmt_interesting_for_vrp (phi))
6184 tree lhs = PHI_RESULT (phi);
6185 set_value_range_to_varying (get_value_range (lhs));
6186 prop_set_simulate_again (phi, false);
6188 else
6189 prop_set_simulate_again (phi, true);
6192 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6194 gimple stmt = gsi_stmt (si);
6196 /* If the statement is a control statement, we must
6197 simulate it at least once; otherwise, its outgoing
6198 edges will never get added. */
6199 if (stmt_ends_bb_p (stmt))
6200 prop_set_simulate_again (stmt, true);
6201 else if (!stmt_interesting_for_vrp (stmt))
6203 ssa_op_iter i;
6204 tree def;
6205 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
6206 set_value_range_to_varying (get_value_range (def));
6207 prop_set_simulate_again (stmt, false);
6209 else
6210 prop_set_simulate_again (stmt, true);
6215 /* Return the singleton value of NAME's value-range, or NAME itself if the range is not a singleton. */
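/* As an illustration, if x_3 has the value range [7, 7], vrp_valueize
   returns the constant 7, allowing gimple_fold_stmt_to_constant to fold
   a statement such as y_4 = x_3 * 2 directly to 14.  */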
6217 static inline tree
6218 vrp_valueize (tree name)
6220 if (TREE_CODE (name) == SSA_NAME)
6222 value_range_t *vr = get_value_range (name);
6223 if (vr->type == VR_RANGE
6224 && (vr->min == vr->max
6225 || operand_equal_p (vr->min, vr->max, 0)))
6226 return vr->min;
6228 return name;
6231 /* Visit assignment STMT. If it produces an interesting range, record
6232 the SSA name in *OUTPUT_P. */
6234 static enum ssa_prop_result
6235 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
6237 tree def, lhs;
6238 ssa_op_iter iter;
6239 enum gimple_code code = gimple_code (stmt);
6240 lhs = gimple_get_lhs (stmt);
6242 /* We only keep track of ranges in integral and pointer types. */
6243 if (TREE_CODE (lhs) == SSA_NAME
6244 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6245 /* It is valid to have NULL MIN/MAX values on a type. See
6246 build_range_type. */
6247 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
6248 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
6249 || POINTER_TYPE_P (TREE_TYPE (lhs))))
6251 value_range_t new_vr = VR_INITIALIZER;
6253 /* Try folding the statement to a constant first. */
6254 tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
6255 if (tem && !is_overflow_infinity (tem))
6256 set_value_range (&new_vr, VR_RANGE, tem, tem, NULL);
6257 /* Then dispatch to value-range extracting functions. */
6258 else if (code == GIMPLE_CALL)
6259 extract_range_basic (&new_vr, stmt);
6260 else
6261 extract_range_from_assignment (&new_vr, stmt);
6263 if (update_value_range (lhs, &new_vr))
6265 *output_p = lhs;
6267 if (dump_file && (dump_flags & TDF_DETAILS))
6269 fprintf (dump_file, "Found new range for ");
6270 print_generic_expr (dump_file, lhs, 0);
6271 fprintf (dump_file, ": ");
6272 dump_value_range (dump_file, &new_vr);
6273 fprintf (dump_file, "\n\n");
6276 if (new_vr.type == VR_VARYING)
6277 return SSA_PROP_VARYING;
6279 return SSA_PROP_INTERESTING;
6282 return SSA_PROP_NOT_INTERESTING;
6285 /* Every other statement produces no useful ranges. */
6286 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6287 set_value_range_to_varying (get_value_range (def));
6289 return SSA_PROP_VARYING;
6292 /* Helper that gets the value range of the SSA_NAME with version I,
6293 or a symbolic range containing only that SSA_NAME if its value range
6294 is varying or undefined. */
6296 static inline value_range_t
6297 get_vr_for_comparison (int i)
6299 value_range_t vr = *get_value_range (ssa_name (i));
6301 /* If name N_i does not have a valid range, use N_i as its own
6302 range. This allows us to compare against names that may
6303 have N_i in their ranges. */
6304 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
6306 vr.type = VR_RANGE;
6307 vr.min = ssa_name (i);
6308 vr.max = ssa_name (i);
6311 return vr;
6314 /* Compare all the value ranges for names equivalent to VAR with VAL
6315 using comparison code COMP. Return the same value returned by
6316 compare_range_with_value, including the setting of
6317 *STRICT_OVERFLOW_P. */
6319 static tree
6320 compare_name_with_value (enum tree_code comp, tree var, tree val,
6321 bool *strict_overflow_p)
6323 bitmap_iterator bi;
6324 unsigned i;
6325 bitmap e;
6326 tree retval, t;
6327 int used_strict_overflow;
6328 bool sop;
6329 value_range_t equiv_vr;
6331 /* Get the set of equivalences for VAR. */
6332 e = get_value_range (var)->equiv;
6334 /* Start at -1. Set it to 0 if we do a comparison without relying
6335 on overflow, or 1 if all comparisons rely on overflow. */
6336 used_strict_overflow = -1;
6338 /* Compare vars' value range with val. */
6339 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
6340 sop = false;
6341 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
6342 if (retval)
6343 used_strict_overflow = sop ? 1 : 0;
6345 /* If the equiv set is empty we have done all work we need to do. */
6346 if (e == NULL)
6348 if (retval
6349 && used_strict_overflow > 0)
6350 *strict_overflow_p = true;
6351 return retval;
6354 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
6356 equiv_vr = get_vr_for_comparison (i);
6357 sop = false;
6358 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
6359 if (t)
6361 /* If we get different answers from different members
6362 of the equivalence set this check must be in a dead
6363 code region. Folding it to a trap representation
6364 would be correct here. For now just return don't-know. */
6365 if (retval != NULL
6366 && t != retval)
6368 retval = NULL_TREE;
6369 break;
6371 retval = t;
6373 if (!sop)
6374 used_strict_overflow = 0;
6375 else if (used_strict_overflow < 0)
6376 used_strict_overflow = 1;
6380 if (retval
6381 && used_strict_overflow > 0)
6382 *strict_overflow_p = true;
6384 return retval;
6388 /* Given a comparison code COMP and names N1 and N2, compare all the
6389 ranges equivalent to N1 against all the ranges equivalent to N2
6390 to determine the value of N1 COMP N2. Return the same value
6391 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
6392 whether we relied on an overflow infinity in the comparison. */
6395 static tree
6396 compare_names (enum tree_code comp, tree n1, tree n2,
6397 bool *strict_overflow_p)
6399 tree t, retval;
6400 bitmap e1, e2;
6401 bitmap_iterator bi1, bi2;
6402 unsigned i1, i2;
6403 int used_strict_overflow;
6404 static bitmap_obstack *s_obstack = NULL;
6405 static bitmap s_e1 = NULL, s_e2 = NULL;
6407 /* Compare the ranges of every name equivalent to N1 against the
6408 ranges of every name equivalent to N2. */
6409 e1 = get_value_range (n1)->equiv;
6410 e2 = get_value_range (n2)->equiv;
6412 /* Use the fake bitmaps if e1 or e2 are not available. */
6413 if (s_obstack == NULL)
6415 s_obstack = XNEW (bitmap_obstack);
6416 bitmap_obstack_initialize (s_obstack);
6417 s_e1 = BITMAP_ALLOC (s_obstack);
6418 s_e2 = BITMAP_ALLOC (s_obstack);
6420 if (e1 == NULL)
6421 e1 = s_e1;
6422 if (e2 == NULL)
6423 e2 = s_e2;
6425 /* Add N1 and N2 to their own set of equivalences to avoid
6426 duplicating the body of the loop just to check N1 and N2
6427 ranges. */
6428 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
6429 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
6431 /* If the equivalence sets have a common intersection, then the two
6432 names can be compared without checking their ranges. */
6433 if (bitmap_intersect_p (e1, e2))
6435 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6436 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6438 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
6439 ? boolean_true_node
6440 : boolean_false_node;
6443 /* Start at -1. Set it to 0 if we do a comparison without relying
6444 on overflow, or 1 if all comparisons rely on overflow. */
6445 used_strict_overflow = -1;
6447 /* Otherwise, compare all the equivalent ranges. N1 and N2 were
6448 already added to their own equivalence sets above, so the loops
6449 below also cover the ranges of N1 and N2 themselves. */
6450 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
6452 value_range_t vr1 = get_vr_for_comparison (i1);
6454 t = retval = NULL_TREE;
6455 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
6457 bool sop = false;
6459 value_range_t vr2 = get_vr_for_comparison (i2);
6461 t = compare_ranges (comp, &vr1, &vr2, &sop);
6462 if (t)
6464 /* If we get different answers from different members
6465 of the equivalence set this check must be in a dead
6466 code region. Folding it to a trap representation
6467 would be correct here. For now just return don't-know. */
6468 if (retval != NULL
6469 && t != retval)
6471 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6472 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6473 return NULL_TREE;
6475 retval = t;
6477 if (!sop)
6478 used_strict_overflow = 0;
6479 else if (used_strict_overflow < 0)
6480 used_strict_overflow = 1;
6484 if (retval)
6486 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6487 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6488 if (used_strict_overflow > 0)
6489 *strict_overflow_p = true;
6490 return retval;
6494 /* None of the equivalent ranges are useful in computing this
6495 comparison. */
6496 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6497 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6498 return NULL_TREE;
6501 /* Helper function for vrp_evaluate_conditional_warnv. */
6503 static tree
6504 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
6505 tree op0, tree op1,
6506 bool * strict_overflow_p)
6508 value_range_t *vr0, *vr1;
6510 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
6511 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
6513 if (vr0 && vr1)
6514 return compare_ranges (code, vr0, vr1, strict_overflow_p);
6515 else if (vr0 && vr1 == NULL)
6516 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
6517 else if (vr0 == NULL && vr1)
6518 return (compare_range_with_value
6519 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
6520 return NULL;
6523 /* Helper function for vrp_evaluate_conditional_warnv. */
6525 static tree
6526 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
6527 tree op1, bool use_equiv_p,
6528 bool *strict_overflow_p, bool *only_ranges)
6530 tree ret;
6531 if (only_ranges)
6532 *only_ranges = true;
6534 /* We only deal with integral and pointer types. */
6535 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
6536 && !POINTER_TYPE_P (TREE_TYPE (op0)))
6537 return NULL_TREE;
6539 if (use_equiv_p)
6541 if (only_ranges
6542 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
6543 (code, op0, op1, strict_overflow_p)))
6544 return ret;
6545 *only_ranges = false;
6546 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
6547 return compare_names (code, op0, op1, strict_overflow_p);
6548 else if (TREE_CODE (op0) == SSA_NAME)
6549 return compare_name_with_value (code, op0, op1, strict_overflow_p);
6550 else if (TREE_CODE (op1) == SSA_NAME)
6551 return (compare_name_with_value
6552 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
6554 else
6555 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
6556 strict_overflow_p);
6557 return NULL_TREE;
6560 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
6561 information. Return NULL if the conditional can not be evaluated.
6562 The ranges of all the names equivalent with the operands in COND
6563 will be used when trying to compute the value. If the result is
6564 based on undefined signed overflow, issue a warning if
6565 appropriate. */
6567 static tree
6568 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
6570 bool sop;
6571 tree ret;
6572 bool only_ranges;
6574 /* Some passes and foldings leak constants with overflow flag set
6575 into the IL. Avoid doing wrong things with these and bail out. */
6576 if ((TREE_CODE (op0) == INTEGER_CST
6577 && TREE_OVERFLOW (op0))
6578 || (TREE_CODE (op1) == INTEGER_CST
6579 && TREE_OVERFLOW (op1)))
6580 return NULL_TREE;
6582 sop = false;
6583 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
6584 &only_ranges);
6586 if (ret && sop)
6588 enum warn_strict_overflow_code wc;
6589 const char* warnmsg;
6591 if (is_gimple_min_invariant (ret))
6593 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
6594 warnmsg = G_("assuming signed overflow does not occur when "
6595 "simplifying conditional to constant");
6597 else
6599 wc = WARN_STRICT_OVERFLOW_COMPARISON;
6600 warnmsg = G_("assuming signed overflow does not occur when "
6601 "simplifying conditional");
6604 if (issue_strict_overflow_warning (wc))
6606 location_t location;
6608 if (!gimple_has_location (stmt))
6609 location = input_location;
6610 else
6611 location = gimple_location (stmt);
6612 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
6616 if (warn_type_limits
6617 && ret && only_ranges
6618 && TREE_CODE_CLASS (code) == tcc_comparison
6619 && TREE_CODE (op0) == SSA_NAME)
6621 /* If the comparison is being folded and the operand on the LHS
6622 is being compared against a constant value that is outside of
6623 the natural range of OP0's type, then the predicate will
6624 always fold regardless of the value of OP0. If -Wtype-limits
6625 was specified, emit a warning. */
6626 tree type = TREE_TYPE (op0);
6627 value_range_t *vr0 = get_value_range (op0);
6629 if (vr0->type != VR_VARYING
6630 && INTEGRAL_TYPE_P (type)
6631 && vrp_val_is_min (vr0->min)
6632 && vrp_val_is_max (vr0->max)
6633 && is_gimple_min_invariant (op1))
6635 location_t location;
6637 if (!gimple_has_location (stmt))
6638 location = input_location;
6639 else
6640 location = gimple_location (stmt);
6642 warning_at (location, OPT_Wtype_limits,
6643 integer_zerop (ret)
6644 ? G_("comparison always false "
6645 "due to limited range of data type")
6646 : G_("comparison always true "
6647 "due to limited range of data type"));
6651 return ret;
6655 /* Visit conditional statement STMT. If we can determine which edge
6656 will be taken out of STMT's basic block, record it in
6657 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6658 SSA_PROP_VARYING. */
6660 static enum ssa_prop_result
6661 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
6663 tree val;
6664 bool sop;
6666 *taken_edge_p = NULL;
6668 if (dump_file && (dump_flags & TDF_DETAILS))
6670 tree use;
6671 ssa_op_iter i;
6673 fprintf (dump_file, "\nVisiting conditional with predicate: ");
6674 print_gimple_stmt (dump_file, stmt, 0, 0);
6675 fprintf (dump_file, "\nWith known ranges\n");
6677 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
6679 fprintf (dump_file, "\t");
6680 print_generic_expr (dump_file, use, 0);
6681 fprintf (dump_file, ": ");
6682 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
6685 fprintf (dump_file, "\n");
6688 /* Compute the value of the predicate COND by checking the known
6689 ranges of each of its operands.
6691 Note that we cannot evaluate all the equivalent ranges here
6692 because those ranges may not yet be final and with the current
6693 propagation strategy, we cannot determine when the value ranges
6694 of the names in the equivalence set have changed.
6696 For instance, given the following code fragment
6698 i_5 = PHI <8, i_13>
6700 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
6701 if (i_14 == 1)
6704 Assume that on the first visit to i_14, i_5 has the temporary
6705 range [8, 8] because the second argument to the PHI function is
6706 not yet executable. We derive the range ~[0, 0] for i_14 and the
6707 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
6708 the first time, since i_14 is equivalent to the range [8, 8], we
6709 determine that the predicate is always false.
6711 On the next round of propagation, i_13 is determined to be
6712 VARYING, which causes i_5 to drop down to VARYING. So, another
6713 visit to i_14 is scheduled. In this second visit, we compute the
6714 exact same range and equivalence set for i_14, namely ~[0, 0] and
6715 { i_5 }. But we did not have the previous range for i_5
6716 registered, so vrp_visit_assignment thinks that the range for
6717 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
6718 is not visited again, which stops propagation from visiting
6719 statements in the THEN clause of that if().
6721 To properly fix this we would need to keep the previous range
6722 value for the names in the equivalence set. This way we would've
6723 discovered that from one visit to the other i_5 changed from
6724 range [8, 8] to VR_VARYING.
6726 However, fixing this apparent limitation may not be worth the
6727 additional checking. Testing on several code bases (GCC, DLV,
6728 MICO, TRAMP3D and SPEC2000) showed that doing this results in
6729 4 more predicates folded in SPEC. */
6730 sop = false;
6732 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
6733 gimple_cond_lhs (stmt),
6734 gimple_cond_rhs (stmt),
6735 false, &sop, NULL);
6736 if (val)
6738 if (!sop)
6739 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
6740 else
6742 if (dump_file && (dump_flags & TDF_DETAILS))
6743 fprintf (dump_file,
6744 "\nIgnoring predicate evaluation because "
6745 "it assumes that signed overflow is undefined");
6746 val = NULL_TREE;
6750 if (dump_file && (dump_flags & TDF_DETAILS))
6752 fprintf (dump_file, "\nPredicate evaluates to: ");
6753 if (val == NULL_TREE)
6754 fprintf (dump_file, "DON'T KNOW\n");
6755 else
6756 print_generic_stmt (dump_file, val, 0);
6759 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
6762 /* Searches the case labels of switch statement STMT for the index *IDX of
6763 the CASE_LABEL that includes the value VAL. The search is restricted to
6764 the range [START_IDX, n - 1] where n is the number of labels.
6766 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
6767 returned.
6769 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
6770 its index is placed in IDX and false is returned.
6772 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
6773 returned. */
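/* As an illustration, for a switch with the labels

     case 1:  case 3 ... 5:  case 9:

   at indices 1, 2 and 3 (index 0 is the default label, so n is 4),
   a query for VAL == 4 stores 2 in *IDX and returns true, a query for
   VAL == 6 stores 3 in *IDX and returns false, and a query for
   VAL == 10 stores 4 in *IDX and returns false.  */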
6775 static bool
6776 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
6778 size_t n = gimple_switch_num_labels (stmt);
6779 size_t low, high;
6781 /* Find case label for minimum of the value range or the next one.
6782 At each iteration we are searching in [low, high - 1]. */
6784 for (low = start_idx, high = n; high != low; )
6786 tree t;
6787 int cmp;
6788 /* Note that i != high, so we never ask for n. */
6789 size_t i = (high + low) / 2;
6790 t = gimple_switch_label (stmt, i);
6792 /* Cache the result of comparing CASE_LOW and val. */
6793 cmp = tree_int_cst_compare (CASE_LOW (t), val);
6795 if (cmp == 0)
6797 /* Ranges cannot be empty. */
6798 *idx = i;
6799 return true;
6801 else if (cmp > 0)
6802 high = i;
6803 else
6805 low = i + 1;
6806 if (CASE_HIGH (t) != NULL
6807 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
6809 *idx = i;
6810 return true;
6815 *idx = high;
6816 return false;
6819 /* Searches the case labels of switch statement STMT for the range of
6820 CASE_LABELs that is used for values between MIN and MAX. The first index
6821 is placed in MIN_IDX and the last index in MAX_IDX. If the range of
6822 CASE_LABELs is empty then MAX_IDX < MIN_IDX.
6823 Returns true if the default label is not needed. */
6825 static bool
6826 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
6827 size_t *max_idx)
6829 size_t i, j;
6830 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
6831 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
6833 if (i == j
6834 && min_take_default
6835 && max_take_default)
6837 /* Only the default case label reached.
6838 Return an empty range. */
6839 *min_idx = 1;
6840 *max_idx = 0;
6841 return false;
6843 else
6845 bool take_default = min_take_default || max_take_default;
6846 tree low, high;
6847 size_t k;
6849 if (max_take_default)
6850 j--;
6852 /* If the case label range is continuous, we do not need
6853 the default case label. Verify that. */
6854 high = CASE_LOW (gimple_switch_label (stmt, i));
6855 if (CASE_HIGH (gimple_switch_label (stmt, i)))
6856 high = CASE_HIGH (gimple_switch_label (stmt, i));
6857 for (k = i + 1; k <= j; ++k)
6859 low = CASE_LOW (gimple_switch_label (stmt, k));
6860 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
6862 take_default = true;
6863 break;
6865 high = low;
6866 if (CASE_HIGH (gimple_switch_label (stmt, k)))
6867 high = CASE_HIGH (gimple_switch_label (stmt, k));
6870 *min_idx = i;
6871 *max_idx = j;
6872 return !take_default;
6876 /* Searches the case labels of switch statement STMT for the ranges of
6877 CASE_LABELs used in range VR. The indices are placed in MIN_IDX1, MAX_IDX1,
6878 MIN_IDX2 and MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
6879 Returns true if the default label is not needed. */
6881 static bool
6882 find_case_label_ranges (gimple stmt, value_range_t *vr, size_t *min_idx1,
6883 size_t *max_idx1, size_t *min_idx2,
6884 size_t *max_idx2)
6886 size_t i, j, k, l;
6887 unsigned int n = gimple_switch_num_labels (stmt);
6888 bool take_default;
6889 tree case_low, case_high;
6890 tree min = vr->min, max = vr->max;
6892 gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
6894 take_default = !find_case_label_range (stmt, min, max, &i, &j);
6896 /* Set second range to empty. */
6897 *min_idx2 = 1;
6898 *max_idx2 = 0;
6900 if (vr->type == VR_RANGE)
6902 *min_idx1 = i;
6903 *max_idx1 = j;
6904 return !take_default;
6907 /* Set first range to all case labels. */
6908 *min_idx1 = 1;
6909 *max_idx1 = n - 1;
6911 if (i > j)
6912 return false;
6914 /* Make sure all the values of case labels [i , j] are contained in
6915 range [MIN, MAX]. */
6916 case_low = CASE_LOW (gimple_switch_label (stmt, i));
6917 case_high = CASE_HIGH (gimple_switch_label (stmt, j));
6918 if (tree_int_cst_compare (case_low, min) < 0)
6919 i += 1;
6920 if (case_high != NULL_TREE
6921 && tree_int_cst_compare (max, case_high) < 0)
6922 j -= 1;
6924 if (i > j)
6925 return false;
6927 /* If the range spans case labels [i, j], the corresponding anti-range spans
6928 the labels [1, i - 1] and [j + 1, n - 1]. */
6929 k = j + 1;
6930 l = n - 1;
6931 if (k > l)
6933 k = 1;
6934 l = 0;
6937 j = i - 1;
6938 i = 1;
6939 if (i > j)
6941 i = k;
6942 j = l;
6943 k = 1;
6944 l = 0;
6947 *min_idx1 = i;
6948 *max_idx1 = j;
6949 *min_idx2 = k;
6950 *max_idx2 = l;
6951 return false;
6954 /* Visit switch statement STMT. If we can determine which edge
6955 will be taken out of STMT's basic block, record it in
6956 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6957 SSA_PROP_VARYING. */
6959 static enum ssa_prop_result
6960 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
6962 tree op, val;
6963 value_range_t *vr;
6964 size_t i = 0, j = 0, k, l;
6965 bool take_default;
6967 *taken_edge_p = NULL;
6968 op = gimple_switch_index (stmt);
6969 if (TREE_CODE (op) != SSA_NAME)
6970 return SSA_PROP_VARYING;
6972 vr = get_value_range (op);
6973 if (dump_file && (dump_flags & TDF_DETAILS))
6975 fprintf (dump_file, "\nVisiting switch expression with operand ");
6976 print_generic_expr (dump_file, op, 0);
6977 fprintf (dump_file, " with known range ");
6978 dump_value_range (dump_file, vr);
6979 fprintf (dump_file, "\n");
6982 if ((vr->type != VR_RANGE
6983 && vr->type != VR_ANTI_RANGE)
6984 || symbolic_range_p (vr))
6985 return SSA_PROP_VARYING;
6987 /* Find the single edge that is taken from the switch expression. */
6988 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
6990 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
6991 label. */
6992 if (j < i)
6994 gcc_assert (take_default);
6995 val = gimple_switch_default_label (stmt);
6997 else
6999 /* Check if labels with index i to j and maybe the default label
7000 are all reaching the same label. */
7002 val = gimple_switch_label (stmt, i);
7003 if (take_default
7004 && CASE_LABEL (gimple_switch_default_label (stmt))
7005 != CASE_LABEL (val))
7007 if (dump_file && (dump_flags & TDF_DETAILS))
7008 fprintf (dump_file, " not a single destination for this "
7009 "range\n");
7010 return SSA_PROP_VARYING;
7012 for (++i; i <= j; ++i)
7014 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
7016 if (dump_file && (dump_flags & TDF_DETAILS))
7017 fprintf (dump_file, " not a single destination for this "
7018 "range\n");
7019 return SSA_PROP_VARYING;
7022 for (; k <= l; ++k)
7024 if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
7026 if (dump_file && (dump_flags & TDF_DETAILS))
7027 fprintf (dump_file, " not a single destination for this "
7028 "range\n");
7029 return SSA_PROP_VARYING;
7034 *taken_edge_p = find_edge (gimple_bb (stmt),
7035 label_to_block (CASE_LABEL (val)));
7037 if (dump_file && (dump_flags & TDF_DETAILS))
7039 fprintf (dump_file, " will take edge to ");
7040 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
7043 return SSA_PROP_INTERESTING;
7047 /* Evaluate statement STMT. If the statement produces a useful range,
7048 return SSA_PROP_INTERESTING and record the SSA name with the
7049 interesting range into *OUTPUT_P.
7051 If STMT is a conditional branch and we can determine its truth
7052 value, the taken edge is recorded in *TAKEN_EDGE_P.
7054 If STMT produces a varying value, return SSA_PROP_VARYING. */
7056 static enum ssa_prop_result
7057 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
7059 tree def;
7060 ssa_op_iter iter;
7062 if (dump_file && (dump_flags & TDF_DETAILS))
7064 fprintf (dump_file, "\nVisiting statement:\n");
7065 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
7066 fprintf (dump_file, "\n");
7069 if (!stmt_interesting_for_vrp (stmt))
7070 gcc_assert (stmt_ends_bb_p (stmt));
7071 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7073 /* In general, assignments with virtual operands are not useful
7074 for deriving ranges, with the obvious exception of calls to
7075 builtin functions. */
7076 if ((is_gimple_call (stmt)
7077 && gimple_call_fndecl (stmt) != NULL_TREE
7078 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
7079 || !gimple_vuse (stmt))
7080 return vrp_visit_assignment_or_call (stmt, output_p);
7082 else if (gimple_code (stmt) == GIMPLE_COND)
7083 return vrp_visit_cond_stmt (stmt, taken_edge_p);
7084 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7085 return vrp_visit_switch_stmt (stmt, taken_edge_p);
7087 /* All other statements produce nothing of interest for VRP, so mark
7088 their outputs varying and prevent further simulation. */
7089 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
7090 set_value_range_to_varying (get_value_range (def));
7092 return SSA_PROP_VARYING;
7095 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7096 { VR1TYPE, VR1MIN, VR1MAX } and store the result
7097 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
7098 possible such range. The resulting range is not canonicalized. */
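/* Some illustrative unions, assuming a 32-bit signed type:

     [1, 5]  union  [3, 8]               ->  [1, 8]
     [1, 5]  union  [10, 20]             ->  [1, 20]      (convex hull)
     [INT_MIN, 3]  union  [10, INT_MAX]  ->  ~[4, 9]
     ~[0, 0]  union  [0, 0]              ->  VR_VARYING   (give_up)  */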
7100 static void
7101 union_ranges (enum value_range_type *vr0type,
7102 tree *vr0min, tree *vr0max,
7103 enum value_range_type vr1type,
7104 tree vr1min, tree vr1max)
7106 bool mineq = operand_equal_p (*vr0min, vr1min, 0);
7107 bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
7109 /* [] is vr0, () is vr1 in the following classification comments. */
7110 if (mineq && maxeq)
7112 /* [( )] */
7113 if (*vr0type == vr1type)
7114 /* Nothing to do for equal ranges. */
7116 else if ((*vr0type == VR_RANGE
7117 && vr1type == VR_ANTI_RANGE)
7118 || (*vr0type == VR_ANTI_RANGE
7119 && vr1type == VR_RANGE))
7121 /* For anti-range with range union the result is varying. */
7122 goto give_up;
7124 else
7125 gcc_unreachable ();
7127 else if (operand_less_p (*vr0max, vr1min) == 1
7128 || operand_less_p (vr1max, *vr0min) == 1)
7130 /* [ ] ( ) or ( ) [ ]
7131 If the ranges have an empty intersection, the result of the union
7132 operation is the anti-range, or, if both are anti-ranges,
7133 it covers everything. */
7134 if (*vr0type == VR_ANTI_RANGE
7135 && vr1type == VR_ANTI_RANGE)
7136 goto give_up;
7137 else if (*vr0type == VR_ANTI_RANGE
7138 && vr1type == VR_RANGE)
7140 else if (*vr0type == VR_RANGE
7141 && vr1type == VR_ANTI_RANGE)
7143 *vr0type = vr1type;
7144 *vr0min = vr1min;
7145 *vr0max = vr1max;
7147 else if (*vr0type == VR_RANGE
7148 && vr1type == VR_RANGE)
7150 /* The result is the convex hull of both ranges. */
7151 if (operand_less_p (*vr0max, vr1min) == 1)
7153 /* If the result can be an anti-range, create one. */
7154 if (TREE_CODE (*vr0max) == INTEGER_CST
7155 && TREE_CODE (vr1min) == INTEGER_CST
7156 && vrp_val_is_min (*vr0min)
7157 && vrp_val_is_max (vr1max))
7159 tree min = int_const_binop (PLUS_EXPR,
7160 *vr0max, integer_one_node);
7161 tree max = int_const_binop (MINUS_EXPR,
7162 vr1min, integer_one_node);
7163 if (!operand_less_p (max, min))
7165 *vr0type = VR_ANTI_RANGE;
7166 *vr0min = min;
7167 *vr0max = max;
7169 else
7170 *vr0max = vr1max;
7172 else
7173 *vr0max = vr1max;
7175 else
7177 /* If the result can be an anti-range, create one. */
7178 if (TREE_CODE (vr1max) == INTEGER_CST
7179 && TREE_CODE (*vr0min) == INTEGER_CST
7180 && vrp_val_is_min (vr1min)
7181 && vrp_val_is_max (*vr0max))
7183 tree min = int_const_binop (PLUS_EXPR,
7184 vr1max, integer_one_node);
7185 tree max = int_const_binop (MINUS_EXPR,
7186 *vr0min, integer_one_node);
7187 if (!operand_less_p (max, min))
7189 *vr0type = VR_ANTI_RANGE;
7190 *vr0min = min;
7191 *vr0max = max;
7193 else
7194 *vr0min = vr1min;
7196 else
7197 *vr0min = vr1min;
7200 else
7201 gcc_unreachable ();
7203 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
7204 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
7206 /* [ ( ) ] or [( ) ] or [ ( )] */
7207 if (*vr0type == VR_RANGE
7208 && vr1type == VR_RANGE)
7210 else if (*vr0type == VR_ANTI_RANGE
7211 && vr1type == VR_ANTI_RANGE)
7213 *vr0type = vr1type;
7214 *vr0min = vr1min;
7215 *vr0max = vr1max;
7217 else if (*vr0type == VR_ANTI_RANGE
7218 && vr1type == VR_RANGE)
7220 /* Arbitrarily choose the right or left gap. */
7221 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
7222 *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
7223 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
7224 *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7225 else
7226 goto give_up;
7228 else if (*vr0type == VR_RANGE
7229 && vr1type == VR_ANTI_RANGE)
7230 /* The result covers everything. */
7231 goto give_up;
7232 else
7233 gcc_unreachable ();
7235 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
7236 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
7238 /* ( [ ] ) or ([ ] ) or ( [ ]) */
7239 if (*vr0type == VR_RANGE
7240 && vr1type == VR_RANGE)
7242 *vr0type = vr1type;
7243 *vr0min = vr1min;
7244 *vr0max = vr1max;
7246 else if (*vr0type == VR_ANTI_RANGE
7247 && vr1type == VR_ANTI_RANGE)
7249 else if (*vr0type == VR_RANGE
7250 && vr1type == VR_ANTI_RANGE)
7252 *vr0type = VR_ANTI_RANGE;
7253 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
7255 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
7256 *vr0min = vr1min;
7258 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
7260 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
7261 *vr0max = vr1max;
7263 else
7264 goto give_up;
7266 else if (*vr0type == VR_ANTI_RANGE
7267 && vr1type == VR_RANGE)
7268 /* The result covers everything. */
7269 goto give_up;
7270 else
7271 gcc_unreachable ();
7273 else if ((operand_less_p (vr1min, *vr0max) == 1
7274 || operand_equal_p (vr1min, *vr0max, 0))
7275 && operand_less_p (*vr0min, vr1min) == 1)
7277 /* [ ( ] ) or [ ]( ) */
7278 if (*vr0type == VR_RANGE
7279 && vr1type == VR_RANGE)
7280 *vr0max = vr1max;
7281 else if (*vr0type == VR_ANTI_RANGE
7282 && vr1type == VR_ANTI_RANGE)
7283 *vr0min = vr1min;
7284 else if (*vr0type == VR_ANTI_RANGE
7285 && vr1type == VR_RANGE)
7287 if (TREE_CODE (vr1min) == INTEGER_CST)
7288 *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
7289 else
7290 goto give_up;
7292 else if (*vr0type == VR_RANGE
7293 && vr1type == VR_ANTI_RANGE)
7295 if (TREE_CODE (*vr0max) == INTEGER_CST)
7297 *vr0type = vr1type;
7298 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
7299 *vr0max = vr1max;
7301 else
7302 goto give_up;
7304 else
7305 gcc_unreachable ();
7307 else if ((operand_less_p (*vr0min, vr1max) == 1
7308 || operand_equal_p (*vr0min, vr1max, 0))
7309 && operand_less_p (vr1min, *vr0min) == 1)
7311 /* ( [ ) ] or ( )[ ] */
7312 if (*vr0type == VR_RANGE
7313 && vr1type == VR_RANGE)
7314 *vr0min = vr1min;
7315 else if (*vr0type == VR_ANTI_RANGE
7316 && vr1type == VR_ANTI_RANGE)
7317 *vr0max = vr1max;
7318 else if (*vr0type == VR_ANTI_RANGE
7319 && vr1type == VR_RANGE)
7321 if (TREE_CODE (vr1max) == INTEGER_CST)
7322 *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7323 else
7324 goto give_up;
7326 else if (*vr0type == VR_RANGE
7327 && vr1type == VR_ANTI_RANGE)
7329 if (TREE_CODE (*vr0min) == INTEGER_CST)
7331 *vr0type = vr1type;
7332 *vr0min = vr1min;
7333 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
7335 else
7336 goto give_up;
7338 else
7339 gcc_unreachable ();
7341 else
7342 goto give_up;
7344 return;
7346 give_up:
7347 *vr0type = VR_VARYING;
7348 *vr0min = NULL_TREE;
7349 *vr0max = NULL_TREE;
7352 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7353 { VR1TYPE, VR1MIN, VR1MAX } and store the result
7354 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
7355 possible such range. The resulting range is not canonicalized. */
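/* Some illustrative intersections:

     [1, 10]  intersect  [5, 20]   ->  [5, 10]
     [0, 10]  intersect  ~[0, 4]   ->  [5, 10]
     [1, 5]   intersect  [10, 20]  ->  VR_UNDEFINED  (empty)
     ~[1, 3]  intersect  ~[4, 6]   ->  ~[1, 6]       (adjacent anti-ranges)  */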
7357 static void
7358 intersect_ranges (enum value_range_type *vr0type,
7359 tree *vr0min, tree *vr0max,
7360 enum value_range_type vr1type,
7361 tree vr1min, tree vr1max)
7363 bool mineq = operand_equal_p (*vr0min, vr1min, 0);
7364 bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
7366 /* [] is vr0, () is vr1 in the following classification comments. */
7367 if (mineq && maxeq)
7369 /* [( )] */
7370 if (*vr0type == vr1type)
7371 /* Nothing to do for equal ranges. */
7373 else if ((*vr0type == VR_RANGE
7374 && vr1type == VR_ANTI_RANGE)
7375 || (*vr0type == VR_ANTI_RANGE
7376 && vr1type == VR_RANGE))
7378 /* For anti-range with range intersection the result is empty. */
7379 *vr0type = VR_UNDEFINED;
7380 *vr0min = NULL_TREE;
7381 *vr0max = NULL_TREE;
7383 else
7384 gcc_unreachable ();
7386 else if (operand_less_p (*vr0max, vr1min) == 1
7387 || operand_less_p (vr1max, *vr0min) == 1)
7389 /* [ ] ( ) or ( ) [ ]
7390 If the ranges have an empty intersection, the result of the
7391 intersect operation is the range for intersecting an
7392 anti-range with a range or empty when intersecting two ranges. */
7393 if (*vr0type == VR_RANGE
7394 && vr1type == VR_ANTI_RANGE)
7396 else if (*vr0type == VR_ANTI_RANGE
7397 && vr1type == VR_RANGE)
7399 *vr0type = vr1type;
7400 *vr0min = vr1min;
7401 *vr0max = vr1max;
7403 else if (*vr0type == VR_RANGE
7404 && vr1type == VR_RANGE)
7406 *vr0type = VR_UNDEFINED;
7407 *vr0min = NULL_TREE;
7408 *vr0max = NULL_TREE;
7410 else if (*vr0type == VR_ANTI_RANGE
7411 && vr1type == VR_ANTI_RANGE)
7413 /* If the anti-ranges are adjacent to each other merge them. */
7414 if (TREE_CODE (*vr0max) == INTEGER_CST
7415 && TREE_CODE (vr1min) == INTEGER_CST
7416 && operand_less_p (*vr0max, vr1min) == 1
7417 && integer_onep (int_const_binop (MINUS_EXPR,
7418 vr1min, *vr0max)))
7419 *vr0max = vr1max;
7420 else if (TREE_CODE (vr1max) == INTEGER_CST
7421 && TREE_CODE (*vr0min) == INTEGER_CST
7422 && operand_less_p (vr1max, *vr0min) == 1
7423 && integer_onep (int_const_binop (MINUS_EXPR,
7424 *vr0min, vr1max)))
7425 *vr0min = vr1min;
7426 /* Else arbitrarily take VR0. */
7429 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
7430 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
7432 /* [ ( ) ] or [( ) ] or [ ( )] */
7433 if (*vr0type == VR_RANGE
7434 && vr1type == VR_RANGE)
7436 /* If both are ranges the result is the inner one. */
7437 *vr0type = vr1type;
7438 *vr0min = vr1min;
7439 *vr0max = vr1max;
7441 else if (*vr0type == VR_RANGE
7442 && vr1type == VR_ANTI_RANGE)
7444 /* Choose the right gap if the left one is empty. */
7445 if (mineq)
7447 if (TREE_CODE (vr1max) == INTEGER_CST)
7448 *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7449 else
7450 *vr0min = vr1max;
7452 /* Choose the left gap if the right one is empty. */
7453 else if (maxeq)
7455 if (TREE_CODE (vr1min) == INTEGER_CST)
7456 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
7457 integer_one_node);
7458 else
7459 *vr0max = vr1min;
7461 /* Choose the anti-range if the range is effectively varying. */
7462 else if (vrp_val_is_min (*vr0min)
7463 && vrp_val_is_max (*vr0max))
7465 *vr0type = vr1type;
7466 *vr0min = vr1min;
7467 *vr0max = vr1max;
7469 /* Else choose the range. */
7471 else if (*vr0type == VR_ANTI_RANGE
7472 && vr1type == VR_ANTI_RANGE)
7473 /* If both are anti-ranges the result is the outer one. */
7475 else if (*vr0type == VR_ANTI_RANGE
7476 && vr1type == VR_RANGE)
7478 /* The intersection is empty. */
7479 *vr0type = VR_UNDEFINED;
7480 *vr0min = NULL_TREE;
7481 *vr0max = NULL_TREE;
7483 else
7484 gcc_unreachable ();
7486 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
7487 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
7489 /* ( [ ] ) or ([ ] ) or ( [ ]) */
7490 if (*vr0type == VR_RANGE
7491 && vr1type == VR_RANGE)
7492 /* Choose the inner range. */
7494 else if (*vr0type == VR_ANTI_RANGE
7495 && vr1type == VR_RANGE)
7497 /* Choose the right gap if the left is empty. */
7498 if (mineq)
7500 *vr0type = VR_RANGE;
7501 if (TREE_CODE (*vr0max) == INTEGER_CST)
7502 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
7503 integer_one_node);
7504 else
7505 *vr0min = *vr0max;
7506 *vr0max = vr1max;
7508 /* Choose the left gap if the right is empty. */
7509 else if (maxeq)
7511 *vr0type = VR_RANGE;
7512 if (TREE_CODE (*vr0min) == INTEGER_CST)
7513 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
7514 integer_one_node);
7515 else
7516 *vr0max = *vr0min;
7517 *vr0min = vr1min;
7519 /* Choose the anti-range if the range is effectively varying. */
7520 else if (vrp_val_is_min (vr1min)
7521 && vrp_val_is_max (vr1max))
7523 /* Else choose the range. */
7524 else
7526 *vr0type = vr1type;
7527 *vr0min = vr1min;
7528 *vr0max = vr1max;
7531 else if (*vr0type == VR_ANTI_RANGE
7532 && vr1type == VR_ANTI_RANGE)
7534 /* If both are anti-ranges the result is the outer one. */
7535 *vr0type = vr1type;
7536 *vr0min = vr1min;
7537 *vr0max = vr1max;
7539 else if (vr1type == VR_ANTI_RANGE
7540 && *vr0type == VR_RANGE)
7542 /* The intersection is empty. */
7543 *vr0type = VR_UNDEFINED;
7544 *vr0min = NULL_TREE;
7545 *vr0max = NULL_TREE;
7547 else
7548 gcc_unreachable ();
7550 else if ((operand_less_p (vr1min, *vr0max) == 1
7551 || operand_equal_p (vr1min, *vr0max, 0))
7552 && operand_less_p (*vr0min, vr1min) == 1)
7554 /* [ ( ] ) or [ ]( ) */
7555 if (*vr0type == VR_ANTI_RANGE
7556 && vr1type == VR_ANTI_RANGE)
7557 *vr0max = vr1max;
7558 else if (*vr0type == VR_RANGE
7559 && vr1type == VR_RANGE)
7560 *vr0min = vr1min;
7561 else if (*vr0type == VR_RANGE
7562 && vr1type == VR_ANTI_RANGE)
7564 if (TREE_CODE (vr1min) == INTEGER_CST)
7565 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
7566 integer_one_node);
7567 else
7568 *vr0max = vr1min;
7570 else if (*vr0type == VR_ANTI_RANGE
7571 && vr1type == VR_RANGE)
7573 *vr0type = VR_RANGE;
7574 if (TREE_CODE (*vr0max) == INTEGER_CST)
7575 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
7576 integer_one_node);
7577 else
7578 *vr0min = *vr0max;
7579 *vr0max = vr1max;
7581 else
7582 gcc_unreachable ();
7584 else if ((operand_less_p (*vr0min, vr1max) == 1
7585 || operand_equal_p (*vr0min, vr1max, 0))
7586 && operand_less_p (vr1min, *vr0min) == 1)
7588 /* ( [ ) ] or ( )[ ] */
7589 if (*vr0type == VR_ANTI_RANGE
7590 && vr1type == VR_ANTI_RANGE)
7591 *vr0min = vr1min;
7592 else if (*vr0type == VR_RANGE
7593 && vr1type == VR_RANGE)
7594 *vr0max = vr1max;
7595 else if (*vr0type == VR_RANGE
7596 && vr1type == VR_ANTI_RANGE)
7598 if (TREE_CODE (vr1max) == INTEGER_CST)
7599 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
7600 integer_one_node);
7601 else
7602 *vr0min = vr1max;
7604 else if (*vr0type == VR_ANTI_RANGE
7605 && vr1type == VR_RANGE)
7607 *vr0type = VR_RANGE;
7608 if (TREE_CODE (*vr0min) == INTEGER_CST)
7609 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
7610 integer_one_node);
7611 else
7612 *vr0max = *vr0min;
7613 *vr0min = vr1min;
7615 else
7616 gcc_unreachable ();
7619 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
7620 the result of the intersection. That's always a conservatively
7621 correct estimate. */
7623 return;
7627 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
7628 in *VR0. This may not be the smallest possible such range. */
7630 static void
7631 vrp_intersect_ranges_1 (value_range_t *vr0, value_range_t *vr1)
7633 value_range_t saved;
7635 /* If either range is VR_VARYING the other one wins. */
7636 if (vr1->type == VR_VARYING)
7637 return;
7638 if (vr0->type == VR_VARYING)
7640 copy_value_range (vr0, vr1);
7641 return;
7644 /* When either range is VR_UNDEFINED the resulting range is
7645 VR_UNDEFINED, too. */
7646 if (vr0->type == VR_UNDEFINED)
7647 return;
7648 if (vr1->type == VR_UNDEFINED)
7650 set_value_range_to_undefined (vr0);
7651 return;
7654 /* Save the original vr0 so we can return it as conservative intersection
7655 result when our worker turns things to varying. */
7656 saved = *vr0;
7657 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
7658 vr1->type, vr1->min, vr1->max);
7659 /* Make sure to canonicalize the result though as the inversion of a
7660 VR_RANGE can still be a VR_RANGE. */
7661 set_and_canonicalize_value_range (vr0, vr0->type,
7662 vr0->min, vr0->max, vr0->equiv);
7663 /* If that failed, use the saved original VR0. */
7664 if (vr0->type == VR_VARYING)
7666 *vr0 = saved;
7667 return;
7669 /* If the result is VR_UNDEFINED there is no need to mess with
7670 the equivalencies. */
7671 if (vr0->type == VR_UNDEFINED)
7672 return;
7674 /* The resulting set of equivalences for range intersection is the union of
7675 the two sets. */
7676 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
7677 bitmap_ior_into (vr0->equiv, vr1->equiv);
7678 else if (vr1->equiv && !vr0->equiv)
7679 bitmap_copy (vr0->equiv, vr1->equiv);
7682 static void
7683 vrp_intersect_ranges (value_range_t *vr0, value_range_t *vr1)
7685 if (dump_file && (dump_flags & TDF_DETAILS))
7687 fprintf (dump_file, "Intersecting\n ");
7688 dump_value_range (dump_file, vr0);
7689 fprintf (dump_file, "\nand\n ");
7690 dump_value_range (dump_file, vr1);
7691 fprintf (dump_file, "\n");
7693 vrp_intersect_ranges_1 (vr0, vr1);
7694 if (dump_file && (dump_flags & TDF_DETAILS))
7696 fprintf (dump_file, "to\n ");
7697 dump_value_range (dump_file, vr0);
7698 fprintf (dump_file, "\n");
7702 /* Meet operation for value ranges. Given two value ranges VR0 and
7703 VR1, store in VR0 a range that contains both VR0 and VR1. This
7704 may not be the smallest possible such range. */
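/* As an illustration, meeting [1, 5] with [10, 20] yields [1, 20]: the
   exact union cannot be represented as a single range, so the convex
   hull is kept. The equivalence set of the result is the intersection
   of the two input sets, since only names equivalent on both incoming
   paths remain equivalent after the meet.  */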
7706 static void
7707 vrp_meet_1 (value_range_t *vr0, value_range_t *vr1)
7709 value_range_t saved;
7711 if (vr0->type == VR_UNDEFINED)
7713 /* Drop equivalences. See PR53465. */
7714 set_value_range (vr0, vr1->type, vr1->min, vr1->max, NULL);
7715 return;
7718 if (vr1->type == VR_UNDEFINED)
7720 /* VR0 already has the resulting range, just drop equivalences.
7721 See PR53465. */
7722 if (vr0->equiv)
7723 bitmap_clear (vr0->equiv);
7724 return;
7727 if (vr0->type == VR_VARYING)
7729 /* Nothing to do. VR0 already has the resulting range. */
7730 return;
7733 if (vr1->type == VR_VARYING)
7735 set_value_range_to_varying (vr0);
7736 return;
7739 saved = *vr0;
7740 union_ranges (&vr0->type, &vr0->min, &vr0->max,
7741 vr1->type, vr1->min, vr1->max);
7742 if (vr0->type == VR_VARYING)
7744 /* Failed to find an efficient meet. Before giving up and setting
7745 the result to VARYING, see if we can at least derive a useful
7746 anti-range. FIXME, all this nonsense about distinguishing
7747 anti-ranges from ranges is necessary because of the odd
7748 semantics of range_includes_zero_p and friends. */
7749 if (((saved.type == VR_RANGE
7750 && range_includes_zero_p (saved.min, saved.max) == 0)
7751 || (saved.type == VR_ANTI_RANGE
7752 && range_includes_zero_p (saved.min, saved.max) == 1))
7753 && ((vr1->type == VR_RANGE
7754 && range_includes_zero_p (vr1->min, vr1->max) == 0)
7755 || (vr1->type == VR_ANTI_RANGE
7756 && range_includes_zero_p (vr1->min, vr1->max) == 1)))
7758 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
7760 /* Since this meet operation did not result from the meeting of
7761 two equivalent names, VR0 cannot have any equivalences. */
7762 if (vr0->equiv)
7763 bitmap_clear (vr0->equiv);
7764 return;
7767 set_value_range_to_varying (vr0);
7768 return;
7770 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
7771 vr0->equiv);
7772 if (vr0->type == VR_VARYING)
7773 return;
7775 /* The resulting set of equivalences is always the intersection of
7776 the two sets. */
7777 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
7778 bitmap_and_into (vr0->equiv, vr1->equiv);
7779 else if (vr0->equiv && !vr1->equiv)
7780 bitmap_clear (vr0->equiv);
7783 static void
7784 vrp_meet (value_range_t *vr0, value_range_t *vr1)
7786 if (dump_file && (dump_flags & TDF_DETAILS))
7788 fprintf (dump_file, "Meeting\n ");
7789 dump_value_range (dump_file, vr0);
7790 fprintf (dump_file, "\nand\n ");
7791 dump_value_range (dump_file, vr1);
7792 fprintf (dump_file, "\n");
7794 vrp_meet_1 (vr0, vr1);
7795 if (dump_file && (dump_flags & TDF_DETAILS))
7797 fprintf (dump_file, "to\n ");
7798 dump_value_range (dump_file, vr0);
7799 fprintf (dump_file, "\n");
7804 /* Visit all arguments for PHI node PHI that flow through executable
7805 edges. If a valid value range can be derived from all the incoming
7806 value ranges, set a new range for the LHS of PHI. */
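/* An illustrative sketch (the SSA names, blocks and ranges are made
   up):

     # i_1 = PHI <0(2), i_7(3)>

   With both incoming edges executable and i_7 known to be in [1, 10],
   the constant 0 contributes the range [0, 0] and the meet of [0, 0]
   and [1, 10] gives i_1 the range [0, 10].  */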
7808 static enum ssa_prop_result
7809 vrp_visit_phi_node (gimple phi)
7811 size_t i;
7812 tree lhs = PHI_RESULT (phi);
7813 value_range_t *lhs_vr = get_value_range (lhs);
7814 value_range_t vr_result = VR_INITIALIZER;
7815 bool first = true;
7816 int edges, old_edges;
7817 struct loop *l;
7819 if (dump_file && (dump_flags & TDF_DETAILS))
7821 fprintf (dump_file, "\nVisiting PHI node: ");
7822 print_gimple_stmt (dump_file, phi, 0, dump_flags);
7825 edges = 0;
7826 for (i = 0; i < gimple_phi_num_args (phi); i++)
7828 edge e = gimple_phi_arg_edge (phi, i);
7830 if (dump_file && (dump_flags & TDF_DETAILS))
7832 fprintf (dump_file,
7833 "\n Argument #%d (%d -> %d %sexecutable)\n",
7834 (int) i, e->src->index, e->dest->index,
7835 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
7838 if (e->flags & EDGE_EXECUTABLE)
7840 tree arg = PHI_ARG_DEF (phi, i);
7841 value_range_t vr_arg;
7843 ++edges;
7845 if (TREE_CODE (arg) == SSA_NAME)
7847 vr_arg = *(get_value_range (arg));
7849 else
7851 if (is_overflow_infinity (arg))
7853 arg = copy_node (arg);
7854 TREE_OVERFLOW (arg) = 0;
7857 vr_arg.type = VR_RANGE;
7858 vr_arg.min = arg;
7859 vr_arg.max = arg;
7860 vr_arg.equiv = NULL;
7863 if (dump_file && (dump_flags & TDF_DETAILS))
7865 fprintf (dump_file, "\t");
7866 print_generic_expr (dump_file, arg, dump_flags);
7867 fprintf (dump_file, "\n\tValue: ");
7868 dump_value_range (dump_file, &vr_arg);
7869 fprintf (dump_file, "\n");
7872 if (first)
7873 copy_value_range (&vr_result, &vr_arg);
7874 else
7875 vrp_meet (&vr_result, &vr_arg);
7876 first = false;
7878 if (vr_result.type == VR_VARYING)
7879 break;
7883 if (vr_result.type == VR_VARYING)
7884 goto varying;
7885 else if (vr_result.type == VR_UNDEFINED)
7886 goto update_range;
7888 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
7889 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
7891 /* To prevent infinite iterations in the algorithm, derive ranges
7892 when the new value is slightly bigger or smaller than the
7893 previous one. We don't do this if we have seen a new executable
7894 edge; this helps us avoid an overflow infinity for conditionals
7895 which are not in a loop. If the old value-range was VR_UNDEFINED
7896 use the updated range and iterate one more time. */
7897 if (edges > 0
7898 && gimple_phi_num_args (phi) > 1
7899 && edges == old_edges
7900 && lhs_vr->type != VR_UNDEFINED)
7902 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
7903 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
7905 /* For non-VR_RANGE results or for pointers fall back to varying if
7906 the range changed. */
7907 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
7908 || POINTER_TYPE_P (TREE_TYPE (lhs)))
7909 && (cmp_min != 0 || cmp_max != 0))
7910 goto varying;
7912 /* If the new minimum is smaller or larger than the previous
7913 one, go all the way to -INF. In the first case, to avoid
7914 iterating millions of times to reach -INF, and in the
7915 other case to avoid infinite bouncing between different
7916 minimums. */
7917 if (cmp_min > 0 || cmp_min < 0)
7919 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
7920 || !vrp_var_may_overflow (lhs, phi))
7921 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
7922 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
7923 vr_result.min =
7924 negative_overflow_infinity (TREE_TYPE (vr_result.min));
7927 /* Similarly, if the new maximum is smaller or larger than
7928 the previous one, go all the way to +INF. */
7929 if (cmp_max < 0 || cmp_max > 0)
7931 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
7932 || !vrp_var_may_overflow (lhs, phi))
7933 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
7934 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
7935 vr_result.max =
7936 positive_overflow_infinity (TREE_TYPE (vr_result.max));
7939 /* If we dropped either bound to +-INF and this is a loop
7940 PHI node, SCEV may know more about its value-range. */
7941 if ((cmp_min > 0 || cmp_min < 0
7942 || cmp_max < 0 || cmp_max > 0)
7943 && current_loops
7944 && (l = loop_containing_stmt (phi))
7945 && l->header == gimple_bb (phi))
7946 adjust_range_with_scev (&vr_result, l, phi, lhs);
7948 /* If we will end up with a (-INF, +INF) range, set it to
7949 VARYING. Same if the previous max value was invalid for
7950 the type and we end up with vr_result.min > vr_result.max. */
7951 if ((vrp_val_is_max (vr_result.max)
7952 && vrp_val_is_min (vr_result.min))
7953 || compare_values (vr_result.min,
7954 vr_result.max) > 0)
7955 goto varying;
7958 /* If the new range is different than the previous value, keep
7959 iterating. */
7960 update_range:
7961 if (update_value_range (lhs, &vr_result))
7963 if (dump_file && (dump_flags & TDF_DETAILS))
7965 fprintf (dump_file, "Found new range for ");
7966 print_generic_expr (dump_file, lhs, 0);
7967 fprintf (dump_file, ": ");
7968 dump_value_range (dump_file, &vr_result);
7969 fprintf (dump_file, "\n\n");
7972 return SSA_PROP_INTERESTING;
7975 /* Nothing changed, don't add outgoing edges. */
7976 return SSA_PROP_NOT_INTERESTING;
7978 /* No match found. Set the LHS to VARYING. */
7979 varying:
7980 set_value_range_to_varying (lhs_vr);
7981 return SSA_PROP_VARYING;
7984 /* Simplify boolean operations if the source is known
7985 to be already a boolean. */
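/* An illustrative sketch (made-up SSA names, assuming A_1 and B_2 both
   have the boolean range [0, 1]):

     x_3 = A_1 != 0;    becomes   x_3 = A_1;
     x_4 = A_1 != B_2;  becomes   x_4 = A_1 ^ B_2;
     x_5 = A_1 == 0;    becomes   x_5 = A_1 ^ 1;

   with a conversion inserted when the type of the LHS differs from the
   type of A_1.  */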
7986 static bool
7987 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
7989 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7990 tree lhs, op0, op1;
7991 bool need_conversion;
7993 /* We handle only !=/== case here. */
7994 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
7996 op0 = gimple_assign_rhs1 (stmt);
7997 if (!op_with_boolean_value_range_p (op0))
7998 return false;
8000 op1 = gimple_assign_rhs2 (stmt);
8001 if (!op_with_boolean_value_range_p (op1))
8002 return false;
8004 /* Reduce number of cases to handle to NE_EXPR. As there is no
8005 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
8006 if (rhs_code == EQ_EXPR)
8008 if (TREE_CODE (op1) == INTEGER_CST)
8009 op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node);
8010 else
8011 return false;
8014 lhs = gimple_assign_lhs (stmt);
8015 need_conversion
8016 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
8018 /* Make sure not to sign-extend a 1-bit 1 when converting the result. */
8019 if (need_conversion
8020 && !TYPE_UNSIGNED (TREE_TYPE (op0))
8021 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
8022 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
8023 return false;
8025 /* For A != 0 we can substitute A itself. */
8026 if (integer_zerop (op1))
8027 gimple_assign_set_rhs_with_ops (gsi,
8028 need_conversion
8029 ? NOP_EXPR : TREE_CODE (op0),
8030 op0, NULL_TREE);
8031 /* For A != B we substitute A ^ B. Either with conversion. */
8032 else if (need_conversion)
8034 tree tem = make_ssa_name (TREE_TYPE (op0), NULL);
8035 gimple newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1);
8036 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
8037 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
8039 /* Or without. */
8040 else
8041 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
8042 update_stmt (gsi_stmt (*gsi));
8044 return true;
8047 /* Simplify a division or modulo operator to a right shift or
8048 bitwise and if the first operand is unsigned or is greater
8049 than zero and the second operand is an exact power of two. */
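/* An illustrative sketch (made-up names, assuming x_1 is known to be
   non-negative, e.g. in [0, 1000]):

     y_2 = x_1 / 8;   becomes   y_2 = x_1 >> 3;
     z_3 = x_1 % 8;   becomes   z_3 = x_1 & 7;

   Neither rewrite is valid for a possibly negative x_1, which is why
   the sign or range of the first operand is checked.  */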
8051 static bool
8052 simplify_div_or_mod_using_ranges (gimple stmt)
8054 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8055 tree val = NULL;
8056 tree op0 = gimple_assign_rhs1 (stmt);
8057 tree op1 = gimple_assign_rhs2 (stmt);
8058 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
8060 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
8062 val = integer_one_node;
8064 else
8066 bool sop = false;
8068 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
8070 if (val
8071 && sop
8072 && integer_onep (val)
8073 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
8075 location_t location;
8077 if (!gimple_has_location (stmt))
8078 location = input_location;
8079 else
8080 location = gimple_location (stmt);
8081 warning_at (location, OPT_Wstrict_overflow,
8082 "assuming signed overflow does not occur when "
8083 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
8087 if (val && integer_onep (val))
8089 tree t;
8091 if (rhs_code == TRUNC_DIV_EXPR)
8093 t = build_int_cst (integer_type_node, tree_log2 (op1));
8094 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
8095 gimple_assign_set_rhs1 (stmt, op0);
8096 gimple_assign_set_rhs2 (stmt, t);
8098 else
8100 t = build_int_cst (TREE_TYPE (op1), 1);
8101 t = int_const_binop (MINUS_EXPR, op1, t);
8102 t = fold_convert (TREE_TYPE (op0), t);
8104 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
8105 gimple_assign_set_rhs1 (stmt, op0);
8106 gimple_assign_set_rhs2 (stmt, t);
8109 update_stmt (stmt);
8110 return true;
8113 return false;
8116 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
8117 ABS_EXPR. If the operand is <= 0, then simplify the
8118 ABS_EXPR into a NEGATE_EXPR. */
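/* An illustrative sketch (made-up names and ranges): if x_1 is known
   to be in [0, 100], y_2 = ABS_EXPR <x_1> becomes y_2 = x_1; if x_1 is
   known to be in [-100, 0], it becomes y_2 = -x_1.  */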
8120 static bool
8121 simplify_abs_using_ranges (gimple stmt)
8123 tree val = NULL;
8124 tree op = gimple_assign_rhs1 (stmt);
8125 tree type = TREE_TYPE (op);
8126 value_range_t *vr = get_value_range (op);
8128 if (TYPE_UNSIGNED (type))
8130 val = integer_zero_node;
8132 else if (vr)
8134 bool sop = false;
8136 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
8137 if (!val)
8139 sop = false;
8140 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
8141 &sop);
8143 if (val)
8145 if (integer_zerop (val))
8146 val = integer_one_node;
8147 else if (integer_onep (val))
8148 val = integer_zero_node;
8152 if (val
8153 && (integer_onep (val) || integer_zerop (val)))
8155 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
8157 location_t location;
8159 if (!gimple_has_location (stmt))
8160 location = input_location;
8161 else
8162 location = gimple_location (stmt);
8163 warning_at (location, OPT_Wstrict_overflow,
8164 "assuming signed overflow does not occur when "
8165 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
8168 gimple_assign_set_rhs1 (stmt, op);
8169 if (integer_onep (val))
8170 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
8171 else
8172 gimple_assign_set_rhs_code (stmt, SSA_NAME);
8173 update_stmt (stmt);
8174 return true;
8178 return false;
8181 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
8182 If all the bits that are being cleared by & are already
8183 known to be zero from VR, or all the bits that are being
8184 set by | are already known to be one from VR, the bit
8185 operation is redundant. */
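/* An illustrative sketch (made-up names and ranges): if x_1 is known
   to be in [0, 255], then y_2 = x_1 & 0xff clears no bits and becomes
   y_2 = x_1; likewise, if bit 0 of x_1 is known to be set,
   z_3 = x_1 | 1 sets no new bits and becomes z_3 = x_1.  */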
8187 static bool
8188 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
8190 tree op0 = gimple_assign_rhs1 (stmt);
8191 tree op1 = gimple_assign_rhs2 (stmt);
8192 tree op = NULL_TREE;
8193 value_range_t vr0 = VR_INITIALIZER;
8194 value_range_t vr1 = VR_INITIALIZER;
8195 double_int may_be_nonzero0, may_be_nonzero1;
8196 double_int must_be_nonzero0, must_be_nonzero1;
8197 double_int mask;
8199 if (TREE_CODE (op0) == SSA_NAME)
8200 vr0 = *(get_value_range (op0));
8201 else if (is_gimple_min_invariant (op0))
8202 set_value_range_to_value (&vr0, op0, NULL);
8203 else
8204 return false;
8206 if (TREE_CODE (op1) == SSA_NAME)
8207 vr1 = *(get_value_range (op1));
8208 else if (is_gimple_min_invariant (op1))
8209 set_value_range_to_value (&vr1, op1, NULL);
8210 else
8211 return false;
8213 if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
8214 return false;
8215 if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
8216 return false;
8218 switch (gimple_assign_rhs_code (stmt))
8220 case BIT_AND_EXPR:
8221 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
8222 if (double_int_zero_p (mask))
8224 op = op0;
8225 break;
8227 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
8228 if (double_int_zero_p (mask))
8230 op = op1;
8231 break;
8233 break;
8234 case BIT_IOR_EXPR:
8235 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
8236 if (double_int_zero_p (mask))
8238 op = op1;
8239 break;
8241 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
8242 if (double_int_zero_p (mask))
8244 op = op0;
8245 break;
8247 break;
8248 default:
8249 gcc_unreachable ();
8252 if (op == NULL_TREE)
8253 return false;
8255 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
8256 update_stmt (gsi_stmt (*gsi));
8257 return true;
8260 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
8261 a known value range VR.
8263 If there is one and only one value which will satisfy the
8264 conditional, then return that value. Else return NULL. */
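/* An illustrative sketch (made-up names and ranges): if OP0 is x_1
   with range [0, 10], the condition x_1 < 1 is satisfied only by the
   value 0, so 0 is returned; for x_1 > 9 the only satisfying value is
   10.  */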
8266 static tree
8267 test_for_singularity (enum tree_code cond_code, tree op0,
8268 tree op1, value_range_t *vr)
8270 tree min = NULL;
8271 tree max = NULL;
8273 /* Extract minimum/maximum values which satisfy the
8274 conditional as it was written. */
8275 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
8277 /* This should not be negative infinity; there is no overflow
8278 here. */
8279 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
8281 max = op1;
8282 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
8284 tree one = build_int_cst (TREE_TYPE (op0), 1);
8285 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
8286 if (EXPR_P (max))
8287 TREE_NO_WARNING (max) = 1;
8290 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
8292 /* This should not be positive infinity; there is no overflow
8293 here. */
8294 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
8296 min = op1;
8297 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
8299 tree one = build_int_cst (TREE_TYPE (op0), 1);
8300 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
8301 if (EXPR_P (min))
8302 TREE_NO_WARNING (min) = 1;
8306 /* Now refine the minimum and maximum values using any
8307 value range information we have for op0. */
8308 if (min && max)
8310 if (compare_values (vr->min, min) == 1)
8311 min = vr->min;
8312 if (compare_values (vr->max, max) == -1)
8313 max = vr->max;
8315 /* If the new min/max values have converged to a single value,
8316 then there is only one value which can satisfy the condition,
8317 return that value. */
8318 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
8319 return min;
8321 return NULL;
8324 /* Simplify a conditional using a relational operator to an equality
8325 test if the range information indicates only one value can satisfy
8326 the original conditional. */
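/* An illustrative sketch (made-up names and ranges): with x_1 known to
   be in [0, 10],

     if (x_1 < 1)   is rewritten to   if (x_1 == 0)
     if (x_1 <= 9)  is rewritten to   if (x_1 != 10)

   the second form coming from testing the inverted condition.  */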
8328 static bool
8329 simplify_cond_using_ranges (gimple stmt)
8331 tree op0 = gimple_cond_lhs (stmt);
8332 tree op1 = gimple_cond_rhs (stmt);
8333 enum tree_code cond_code = gimple_cond_code (stmt);
8335 if (cond_code != NE_EXPR
8336 && cond_code != EQ_EXPR
8337 && TREE_CODE (op0) == SSA_NAME
8338 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
8339 && is_gimple_min_invariant (op1))
8341 value_range_t *vr = get_value_range (op0);
8343 /* If we have range information for OP0, then we might be
8344 able to simplify this conditional. */
8345 if (vr->type == VR_RANGE)
8347 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
8349 if (new_tree)
8351 if (dump_file)
8353 fprintf (dump_file, "Simplified relational ");
8354 print_gimple_stmt (dump_file, stmt, 0, 0);
8355 fprintf (dump_file, " into ");
8358 gimple_cond_set_code (stmt, EQ_EXPR);
8359 gimple_cond_set_lhs (stmt, op0);
8360 gimple_cond_set_rhs (stmt, new_tree);
8362 update_stmt (stmt);
8364 if (dump_file)
8366 print_gimple_stmt (dump_file, stmt, 0, 0);
8367 fprintf (dump_file, "\n");
8370 return true;
8373 /* Try again after inverting the condition. We only deal
8374 with integral types here, so no need to worry about
8375 issues with inverting FP comparisons. */
8376 cond_code = invert_tree_comparison (cond_code, false);
8377 new_tree = test_for_singularity (cond_code, op0, op1, vr);
8379 if (new_tree)
8381 if (dump_file)
8383 fprintf (dump_file, "Simplified relational ");
8384 print_gimple_stmt (dump_file, stmt, 0, 0);
8385 fprintf (dump_file, " into ");
8388 gimple_cond_set_code (stmt, NE_EXPR);
8389 gimple_cond_set_lhs (stmt, op0);
8390 gimple_cond_set_rhs (stmt, new_tree);
8392 update_stmt (stmt);
8394 if (dump_file)
8396 print_gimple_stmt (dump_file, stmt, 0, 0);
8397 fprintf (dump_file, "\n");
8400 return true;
8405 return false;
8408 /* Simplify a switch statement using the value range of the switch
8409 argument. */
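/* An illustrative sketch (made-up ranges): if the switch index is
   known to be in [3, 5], case labels that cannot match are dropped
   from the label vector, their now unreachable outgoing edges are
   queued for removal, and the default label is kept only if some value
   in [3, 5] can still reach it.  */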
8411 static bool
8412 simplify_switch_using_ranges (gimple stmt)
8414 tree op = gimple_switch_index (stmt);
8415 value_range_t *vr;
8416 bool take_default;
8417 edge e;
8418 edge_iterator ei;
8419 size_t i = 0, j = 0, n, n2;
8420 tree vec2;
8421 switch_update su;
8422 size_t k = 1, l = 0;
8424 if (TREE_CODE (op) == SSA_NAME)
8426 vr = get_value_range (op);
8428 /* We can only handle integer ranges. */
8429 if ((vr->type != VR_RANGE
8430 && vr->type != VR_ANTI_RANGE)
8431 || symbolic_range_p (vr))
8432 return false;
8434 /* Find case label for min/max of the value range. */
8435 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
8437 else if (TREE_CODE (op) == INTEGER_CST)
8439 take_default = !find_case_label_index (stmt, 1, op, &i);
8440 if (take_default)
8442 i = 1;
8443 j = 0;
8445 else
8447 j = i;
8450 else
8451 return false;
8453 n = gimple_switch_num_labels (stmt);
8455 /* Bail out if all edges are taken anyway. */
8456 if (i == 1
8457 && j == n - 1
8458 && take_default)
8459 return false;
8461 /* Build a new vector of taken case labels. */
8462 vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
8463 n2 = 0;
8465 /* Add the default edge, if necessary. */
8466 if (take_default)
8467 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
8469 for (; i <= j; ++i, ++n2)
8470 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
8472 for (; k <= l; ++k, ++n2)
8473 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
8475 /* Mark needed edges. */
8476 for (i = 0; i < n2; ++i)
8478 e = find_edge (gimple_bb (stmt),
8479 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
8480 e->aux = (void *)-1;
8483 /* Queue not needed edges for later removal. */
8484 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
8486 if (e->aux == (void *)-1)
8488 e->aux = NULL;
8489 continue;
8492 if (dump_file && (dump_flags & TDF_DETAILS))
8494 fprintf (dump_file, "removing unreachable case label\n");
8496 VEC_safe_push (edge, heap, to_remove_edges, e);
8497 e->flags &= ~EDGE_EXECUTABLE;
8500 /* And queue an update for the stmt. */
8501 su.stmt = stmt;
8502 su.vec = vec2;
8503 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
8504 return false;
8507 /* Simplify an integral conversion from an SSA name in STMT. */
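/* An illustrative sketch (made-up names and ranges): for

     _2 = (short int) i_1;
     _3 = (long int) _2;

   with i_1 an int known to be in [0, 100], dropping the intermediate
   narrowing cannot change the final value, so the last statement is
   rewritten to _3 = (long int) i_1.  */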
8509 static bool
8510 simplify_conversion_using_ranges (gimple stmt)
8512 tree innerop, middleop, finaltype;
8513 gimple def_stmt;
8514 value_range_t *innervr;
8515 bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p;
8516 unsigned inner_prec, middle_prec, final_prec;
8517 double_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
8519 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
8520 if (!INTEGRAL_TYPE_P (finaltype))
8521 return false;
8522 middleop = gimple_assign_rhs1 (stmt);
8523 def_stmt = SSA_NAME_DEF_STMT (middleop);
8524 if (!is_gimple_assign (def_stmt)
8525 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
8526 return false;
8527 innerop = gimple_assign_rhs1 (def_stmt);
8528 if (TREE_CODE (innerop) != SSA_NAME)
8529 return false;
8531 /* Get the value-range of the inner operand. */
8532 innervr = get_value_range (innerop);
8533 if (innervr->type != VR_RANGE
8534 || TREE_CODE (innervr->min) != INTEGER_CST
8535 || TREE_CODE (innervr->max) != INTEGER_CST)
8536 return false;
8538 /* Simulate the conversion chain to check if the result is equal if
8539 the middle conversion is removed. */
8540 innermin = tree_to_double_int (innervr->min);
8541 innermax = tree_to_double_int (innervr->max);
8543 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
8544 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
8545 final_prec = TYPE_PRECISION (finaltype);
8547 /* If the first conversion is not injective, the second must not
8548 be widening. */
8549 if (double_int_cmp (double_int_sub (innermax, innermin),
8550 double_int_mask (middle_prec), true) > 0
8551 && middle_prec < final_prec)
8552 return false;
8553 /* We also want a medium value so that we can track the effect that
8554 narrowing conversions with sign change have. */
8555 inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
8556 if (inner_unsigned_p)
8557 innermed = double_int_rshift (double_int_mask (inner_prec),
8558 1, inner_prec, false);
8559 else
8560 innermed = double_int_zero;
8561 if (double_int_cmp (innermin, innermed, inner_unsigned_p) >= 0
8562 || double_int_cmp (innermed, innermax, inner_unsigned_p) >= 0)
8563 innermed = innermin;
8565 middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
8566 middlemin = double_int_ext (innermin, middle_prec, middle_unsigned_p);
8567 middlemed = double_int_ext (innermed, middle_prec, middle_unsigned_p);
8568 middlemax = double_int_ext (innermax, middle_prec, middle_unsigned_p);
8570 /* Require that the final conversion applied to both the original
8571 and the intermediate range produces the same result. */
8572 final_unsigned_p = TYPE_UNSIGNED (finaltype);
8573 if (!double_int_equal_p (double_int_ext (middlemin,
8574 final_prec, final_unsigned_p),
8575 double_int_ext (innermin,
8576 final_prec, final_unsigned_p))
8577 || !double_int_equal_p (double_int_ext (middlemed,
8578 final_prec, final_unsigned_p),
8579 double_int_ext (innermed,
8580 final_prec, final_unsigned_p))
8581 || !double_int_equal_p (double_int_ext (middlemax,
8582 final_prec, final_unsigned_p),
8583 double_int_ext (innermax,
8584 final_prec, final_unsigned_p)))
8585 return false;
8587 gimple_assign_set_rhs1 (stmt, innerop);
8588 update_stmt (stmt);
8589 return true;
8592 /* Return whether the value range *VR fits in an integer type specified
8593 by PRECISION and UNSIGNED_P. */
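/* An illustrative sketch (made-up ranges): the range [0, 200] fits in
   an unsigned 8-bit type but not in a signed 8-bit type, while
   [0, 100] fits in either.  */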
8595 static bool
8596 range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
8598 tree src_type;
8599 unsigned src_precision;
8600 double_int tem;
8602 /* We can only handle integral and pointer types. */
8603 src_type = TREE_TYPE (vr->min);
8604 if (!INTEGRAL_TYPE_P (src_type)
8605 && !POINTER_TYPE_P (src_type))
8606 return false;
8608 /* An extension is always fine, so is an identity transform. */
8609 src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
8610 if (src_precision < precision
8611 || (src_precision == precision
8612 && TYPE_UNSIGNED (src_type) == unsigned_p))
8613 return true;
8615 /* Now we can only handle ranges with constant bounds. */
8616 if (vr->type != VR_RANGE
8617 || TREE_CODE (vr->min) != INTEGER_CST
8618 || TREE_CODE (vr->max) != INTEGER_CST)
8619 return false;
8621 /* For precision-preserving sign-changes the MSB of the double-int
8622 has to be clear. */
8623 if (src_precision == precision
8624 && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0)
8625 return false;
8627 /* Then we can perform the conversion on both ends and compare
8628 the result for equality. */
8629 tem = double_int_ext (tree_to_double_int (vr->min), precision, unsigned_p);
8630 if (!double_int_equal_p (tree_to_double_int (vr->min), tem))
8631 return false;
8632 tem = double_int_ext (tree_to_double_int (vr->max), precision, unsigned_p);
8633 if (!double_int_equal_p (tree_to_double_int (vr->max), tem))
8634 return false;
8636 return true;
8639 /* Simplify a conversion from integral SSA name to float in STMT. */
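/* An illustrative sketch (made-up names and ranges): for

     _2 = (double) u_1;

   with u_1 an unsigned int known to be in [0, 1000], the range fits in
   a signed int, so when the target lacks an unsigned-to-float
   instruction the conversion can go through a signed temporary,

     tem_3 = (int) u_1;
     _2 = (double) tem_3;

   inserted right before the float conversion.  */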
8641 static bool
8642 simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
8644 tree rhs1 = gimple_assign_rhs1 (stmt);
8645 value_range_t *vr = get_value_range (rhs1);
8646 enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
8647 enum machine_mode mode;
8648 tree tem;
8649 gimple conv;
8651 /* We can only handle constant ranges. */
8652 if (vr->type != VR_RANGE
8653 || TREE_CODE (vr->min) != INTEGER_CST
8654 || TREE_CODE (vr->max) != INTEGER_CST)
8655 return false;
8657 /* First check if we can use a signed type in place of an unsigned. */
8658 if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
8659 && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
8660 != CODE_FOR_nothing)
8661 && range_fits_type_p (vr, GET_MODE_PRECISION
8662 (TYPE_MODE (TREE_TYPE (rhs1))), 0))
8663 mode = TYPE_MODE (TREE_TYPE (rhs1));
8664 /* If we can do the conversion in the current input mode do nothing. */
8665 else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
8666 TYPE_UNSIGNED (TREE_TYPE (rhs1))))
8667 return false;
8668 /* Otherwise search for a mode we can use, starting from the narrowest
8669 integer mode available. */
8670 else
8672 mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
8675 /* If we cannot do a signed conversion to float from mode
8676 or if the value-range does not fit in the signed type
8677 try with a wider mode. */
8678 if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
8679 && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0))
8680 break;
8682 mode = GET_MODE_WIDER_MODE (mode);
8683 /* But do not widen the input. Instead leave that to the
8684 optabs expansion code. */
8685 if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
8686 return false;
8688 while (mode != VOIDmode);
8689 if (mode == VOIDmode)
8690 return false;
8693 /* It works, insert a truncation or sign-change before the
8694 float conversion. */
8695 tem = make_ssa_name (build_nonstandard_integer_type
8696 (GET_MODE_PRECISION (mode), 0), NULL);
8697 conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
8698 gsi_insert_before (gsi, conv, GSI_SAME_STMT);
8699 gimple_assign_set_rhs1 (stmt, tem);
8700 update_stmt (stmt);
8702 return true;
8705 /* Simplify STMT using ranges if possible. */
8707 static bool
8708 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
8710 gimple stmt = gsi_stmt (*gsi);
8711 if (is_gimple_assign (stmt))
8713 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8714 tree rhs1 = gimple_assign_rhs1 (stmt);
8716 switch (rhs_code)
8718 case EQ_EXPR:
8719 case NE_EXPR:
8720 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
8721 if the RHS is zero or one, and the LHS is known to be a boolean
8722 value. */
8723 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
8724 return simplify_truth_ops_using_ranges (gsi, stmt);
8725 break;
8727 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
8728 and BIT_AND_EXPR respectively if the first operand is greater
8729 than zero and the second operand is an exact power of two. */
8730 case TRUNC_DIV_EXPR:
8731 case TRUNC_MOD_EXPR:
8732 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
8733 && integer_pow2p (gimple_assign_rhs2 (stmt)))
8734 return simplify_div_or_mod_using_ranges (stmt);
8735 break;
8737 /* Transform ABS (X) into X or -X as appropriate. */
8738 case ABS_EXPR:
8739 if (TREE_CODE (rhs1) == SSA_NAME
8740 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
8741 return simplify_abs_using_ranges (stmt);
8742 break;
8744 case BIT_AND_EXPR:
8745 case BIT_IOR_EXPR:
8746 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
8747 if all the bits being cleared are already cleared or
8748 all the bits being set are already set. */
8749 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
8750 return simplify_bit_ops_using_ranges (gsi, stmt);
8751 break;
8753 CASE_CONVERT:
8754 if (TREE_CODE (rhs1) == SSA_NAME
8755 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
8756 return simplify_conversion_using_ranges (stmt);
8757 break;
8759 case FLOAT_EXPR:
8760 if (TREE_CODE (rhs1) == SSA_NAME
8761 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
8762 return simplify_float_conversion_using_ranges (gsi, stmt);
8763 break;
8765 default:
8766 break;
8769 else if (gimple_code (stmt) == GIMPLE_COND)
8770 return simplify_cond_using_ranges (stmt);
8771 else if (gimple_code (stmt) == GIMPLE_SWITCH)
8772 return simplify_switch_using_ranges (stmt);
8774 return false;
8777 /* If the statement pointed to by SI has a predicate whose value can be
8778 computed using the value range information computed by VRP, compute
8779 its value and return true. Otherwise, return false. */
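/* An illustrative sketch (made-up names and ranges): with x_1 known to
   be in [1, 10], the predicate in

     if (x_1 > 0)

   evaluates to true for every value in the range, so the condition is
   replaced by an always-true one.  */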
8781 static bool
8782 fold_predicate_in (gimple_stmt_iterator *si)
8784 bool assignment_p = false;
8785 tree val;
8786 gimple stmt = gsi_stmt (*si);
8788 if (is_gimple_assign (stmt)
8789 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
8791 assignment_p = true;
8792 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
8793 gimple_assign_rhs1 (stmt),
8794 gimple_assign_rhs2 (stmt),
8795 stmt);
8797 else if (gimple_code (stmt) == GIMPLE_COND)
8798 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
8799 gimple_cond_lhs (stmt),
8800 gimple_cond_rhs (stmt),
8801 stmt);
8802 else
8803 return false;
8805 if (val)
8807 if (assignment_p)
8808 val = fold_convert (gimple_expr_type (stmt), val);
8810 if (dump_file)
8812 fprintf (dump_file, "Folding predicate ");
8813 print_gimple_expr (dump_file, stmt, 0, 0);
8814 fprintf (dump_file, " to ");
8815 print_generic_expr (dump_file, val, 0);
8816 fprintf (dump_file, "\n");
8819 if (is_gimple_assign (stmt))
8820 gimple_assign_set_rhs_from_tree (si, val);
8821 else
8823 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
8824 if (integer_zerop (val))
8825 gimple_cond_make_false (stmt);
8826 else if (integer_onep (val))
8827 gimple_cond_make_true (stmt);
8828 else
8829 gcc_unreachable ();
8832 return true;
8835 return false;
8838 /* Callback for substitute_and_fold folding the stmt at *SI. */
8840 static bool
8841 vrp_fold_stmt (gimple_stmt_iterator *si)
8843 if (fold_predicate_in (si))
8844 return true;
8846 return simplify_stmt_using_ranges (si);
8849 /* Stack of dest,src equivalency pairs that need to be restored after
8850 each attempt to thread a block's incoming edge to an outgoing edge.
8852 A NULL entry is used to mark the end of pairs which need to be
8853 restored. */
8854 static VEC(tree,heap) *equiv_stack;
8856 /* A trivial wrapper so that we can present the generic jump threading
8857 code with a simple API for simplifying statements. STMT is the
8858 statement we want to simplify, WITHIN_STMT provides the location
8859 for any overflow warnings. */
8861 static tree
8862 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
8864 /* We only use VRP information to simplify conditionals. This is
8865 overly conservative, but it's unclear if doing more would be
8866 worth the compile time cost. */
8867 if (gimple_code (stmt) != GIMPLE_COND)
8868 return NULL;
8870 return vrp_evaluate_conditional (gimple_cond_code (stmt),
8871 gimple_cond_lhs (stmt),
8872 gimple_cond_rhs (stmt), within_stmt);
8875 /* Blocks which have more than one predecessor and more than
8876 one successor present jump threading opportunities, i.e.,
8877 when the block is reached from a specific predecessor, we
8878 may be able to determine which of the outgoing edges will
8879 be traversed. When this optimization applies, we are able
8880 to avoid conditionals at runtime and we may expose secondary
8881 optimization opportunities.
8883 This routine is effectively a driver for the generic jump
8884 threading code. It basically just presents the generic code
8885 with edges that may be suitable for jump threading.
8887 Unlike DOM, we do not iterate VRP if jump threading was successful.
8888 While iterating may expose new opportunities for VRP, it is expected
8889 those opportunities would be very limited and the compile time cost
8890 to expose those opportunities would be significant.
8892 As jump threading opportunities are discovered, they are registered
8893 for later realization. */
8895 static void
8896 identify_jump_threads (void)
8898 basic_block bb;
8899 gimple dummy;
8900 int i;
8901 edge e;
8903 /* Ugh. When substituting values earlier in this pass we can
8904 wipe the dominance information. So rebuild the dominator
8905 information as we need it within the jump threading code. */
8906 calculate_dominance_info (CDI_DOMINATORS);
8908 /* We do not allow VRP information to be used for jump threading
8909 across a back edge in the CFG. Otherwise it becomes too
8910 difficult to avoid eliminating loop exit tests. Of course
8911 EDGE_DFS_BACK is not accurate at this time so we have to
8912 recompute it. */
8913 mark_dfs_back_edges ();
8915 /* Do not thread across edges we are about to remove. Just marking
8916 them as EDGE_DFS_BACK will do. */
8917 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
8918 e->flags |= EDGE_DFS_BACK;
8920 /* Allocate our unwinder stack to unwind any temporary equivalences
8921 that might be recorded. */
8922 equiv_stack = VEC_alloc (tree, heap, 20);
8924 /* To avoid lots of silly node creation, we create a single
8925 conditional and just modify it in-place when attempting to
8926 thread jumps. */
8927 dummy = gimple_build_cond (EQ_EXPR,
8928 integer_zero_node, integer_zero_node,
8929 NULL, NULL);
8931 /* Walk through all the blocks finding those which present a
8932 potential jump threading opportunity. We could set this up
8933 as a dominator walker and record data during the walk, but
8934 I doubt it's worth the effort for the classes of jump
8935 threading opportunities we are trying to identify at this
8936 point in compilation. */
8937 FOR_EACH_BB (bb)
8939 gimple last;
8941 /* If the generic jump threading code does not find this block
8942 interesting, then there is nothing to do. */
8943 if (! potentially_threadable_block (bb))
8944 continue;
8946 /* We only care about blocks ending in a COND_EXPR. While there
8947 may be some value in handling SWITCH_EXPR here, I doubt it's
8948 terribly important. */
8949 last = gsi_stmt (gsi_last_bb (bb));
8951 /* We're basically looking for a switch or any kind of conditional with
8952 integral or pointer type arguments. Note the type of the second
8953 argument will be the same as the first argument, so no need to
8954 check it explicitly. */
8955 if (gimple_code (last) == GIMPLE_SWITCH
8956 || (gimple_code (last) == GIMPLE_COND
8957 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
8958 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
8959 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
8960 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
8961 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
8963 edge_iterator ei;
8965 /* We've got a block with multiple predecessors and multiple
8966 successors which also ends in a suitable conditional or
8967 switch statement. For each predecessor, see if we can thread
8968 it to a specific successor. */
8969 FOR_EACH_EDGE (e, ei, bb->preds)
8971 /* Do not thread across back edges or abnormal edges
8972 in the CFG. */
8973 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
8974 continue;
8976 thread_across_edge (dummy, e, true, &equiv_stack,
8977 simplify_stmt_for_jump_threading);
8982 /* We do not actually update the CFG or SSA graphs at this point as
8983 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
8984 handle ASSERT_EXPRs gracefully. */
8987 /* We identified all the jump threading opportunities earlier, but could
8988 not transform the CFG at that time. This routine transforms the
8989 CFG and arranges for the dominator tree to be rebuilt if necessary.
8991 Note the SSA graph update will occur during the normal TODO
8992 processing by the pass manager. */
8993 static void
8994 finalize_jump_threads (void)
8996 thread_through_all_blocks (false);
8997 VEC_free (tree, heap, equiv_stack);
9001 /* Traverse all the blocks folding conditionals with known ranges. */
9003 static void
9004 vrp_finalize (void)
9006 size_t i;
9008 values_propagated = true;
9010 if (dump_file)
9012 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
9013 dump_all_value_ranges (dump_file);
9014 fprintf (dump_file, "\n");
9017 substitute_and_fold (op_with_constant_singleton_value_range,
9018 vrp_fold_stmt, false);
9020 if (warn_array_bounds)
9021 check_all_array_refs ();
9023 /* We must identify jump threading opportunities before we release
9024 the datastructures built by VRP. */
9025 identify_jump_threads ();
9027 /* Free allocated memory. */
9028 for (i = 0; i < num_vr_values; i++)
9029 if (vr_value[i])
9031 BITMAP_FREE (vr_value[i]->equiv);
9032 free (vr_value[i]);
9035 free (vr_value);
9036 free (vr_phi_edge_counts);
9038 /* So that we can distinguish between VRP data being available
9039 and not available. */
9040 vr_value = NULL;
9041 vr_phi_edge_counts = NULL;
9045 /* Main entry point to VRP (Value Range Propagation). This pass is
9046 loosely based on J. R. C. Patterson, ``Accurate Static Branch
9047 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
9048 Programming Language Design and Implementation, pp. 67-78, 1995.
9049 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
9051 This is essentially an SSA-CCP pass modified to deal with ranges
9052 instead of constants.
9054 While propagating ranges, we may find that two or more SSA name
9055 have equivalent, though distinct ranges. For instance,
9057 1 x_9 = p_3->a;
9058 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
9059 3 if (p_4 == q_2)
9060 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
9061 5 endif
9062 6 if (q_2)
9064 In the code above, pointer p_5 has range [q_2, q_2], but from the
9065 code we can also determine that p_5 cannot be NULL and, if q_2 had
9066 a non-varying range, p_5's range should also be compatible with it.
9068 These equivalences are created by two expressions: ASSERT_EXPR and
9069 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
9070 result of another assertion, then we can use the fact that p_5 and
9071 p_4 are equivalent when evaluating p_5's range.
9073 Together with value ranges, we also propagate these equivalences
9074 between names so that we can take advantage of information from
9075 multiple ranges when doing final replacement. Note that this
9076 equivalency relation is transitive but not symmetric.
9078 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
9079 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
9080 in contexts where that assertion does not hold (e.g., in line 6).
9082 TODO, the main difference between this pass and Patterson's is that
9083 we do not propagate edge probabilities. We only compute whether
9084 edges can be taken or not. That is, instead of having a spectrum
9085 of jump probabilities between 0 and 1, we only deal with 0, 1 and
9086 DON'T KNOW. In the future, it may be worthwhile to propagate
9087 probabilities to aid branch prediction. */
9089 static unsigned int
9090 execute_vrp (void)
9092 int i;
9093 edge e;
9094 switch_update *su;
9096 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
9097 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
9098 scev_initialize ();
9100 insert_range_assertions ();
9102 to_remove_edges = VEC_alloc (edge, heap, 10);
9103 to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
9104 threadedge_initialize_values ();
9106 vrp_initialize ();
9107 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
9108 vrp_finalize ();
9110 free_numbers_of_iterations_estimates ();
9112 /* ASSERT_EXPRs must be removed before finalizing jump threads
9113 as finalizing jump threads calls the CFG cleanup code which
9114 does not properly handle ASSERT_EXPRs. */
9115 remove_range_assertions ();
9117 /* If we exposed any new variables, go ahead and put them into
9118 SSA form now, before we handle jump threading. This simplifies
9119 interactions between rewriting of _DECL nodes into SSA form
9120 and rewriting SSA_NAME nodes into SSA form after block
9121 duplication and CFG manipulation. */
9122 update_ssa (TODO_update_ssa);
9124 finalize_jump_threads ();
9126 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
9127 CFG in a broken state and requires a cfg_cleanup run. */
9128 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
9129 remove_edge (e);
9130 /* Update SWITCH_EXPR case label vector. */
9131 FOR_EACH_VEC_ELT (switch_update, to_update_switch_stmts, i, su)
9133 size_t j;
9134 size_t n = TREE_VEC_LENGTH (su->vec);
9135 tree label;
9136 gimple_switch_set_num_labels (su->stmt, n);
9137 for (j = 0; j < n; j++)
9138 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
9139 /* As we may have replaced the default label with a regular one
9140 make sure to make it a real default label again. This ensures
9141 optimal expansion. */
9142 label = gimple_switch_default_label (su->stmt);
9143 CASE_LOW (label) = NULL_TREE;
9144 CASE_HIGH (label) = NULL_TREE;
9147 if (VEC_length (edge, to_remove_edges) > 0)
9148 free_dominance_info (CDI_DOMINATORS);
9150 VEC_free (edge, heap, to_remove_edges);
9151 VEC_free (switch_update, heap, to_update_switch_stmts);
9152 threadedge_finalize_values ();
9154 scev_finalize ();
9155 loop_optimizer_finalize ();
9156 return 0;
9159 static bool
9160 gate_vrp (void)
9162 return flag_tree_vrp != 0;
9165 struct gimple_opt_pass pass_vrp =
9168 GIMPLE_PASS,
9169 "vrp", /* name */
9170 gate_vrp, /* gate */
9171 execute_vrp, /* execute */
9172 NULL, /* sub */
9173 NULL, /* next */
9174 0, /* static_pass_number */
9175 TV_TREE_VRP, /* tv_id */
9176 PROP_ssa, /* properties_required */
9177 0, /* properties_provided */
9178 0, /* properties_destroyed */
9179 0, /* todo_flags_start */
9180 TODO_cleanup_cfg
9181 | TODO_update_ssa
9182 | TODO_verify_ssa
9183 | TODO_verify_flow
9184 | TODO_ggc_collect /* todo_flags_finish */