1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2013 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "ggc.h"
26 #include "flags.h"
27 #include "tree.h"
28 #include "basic-block.h"
29 #include "tree-ssa.h"
30 #include "tree-pass.h"
31 #include "tree-dump.h"
32 #include "gimple-pretty-print.h"
33 #include "diagnostic-core.h"
34 #include "intl.h"
35 #include "cfgloop.h"
36 #include "tree-scalar-evolution.h"
37 #include "tree-ssa-propagate.h"
38 #include "tree-chrec.h"
39 #include "tree-ssa-threadupdate.h"
40 #include "expr.h"
41 #include "optabs.h"
45 /* Range of values that can be associated with an SSA_NAME after VRP
46 has executed. */
47 struct value_range_d
49 /* Lattice value represented by this range. */
50 enum value_range_type type;
52 /* Minimum and maximum values represented by this range. These
53 values should be interpreted as follows:
55 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
56 be NULL.
58 - If TYPE == VR_RANGE then MIN holds the minimum value and
59 MAX holds the maximum value of the range [MIN, MAX].
61 - If TYPE == VR_ANTI_RANGE the variable is known to NOT
62 take any values in the range [MIN, MAX]. */
63 tree min;
64 tree max;
66 /* Set of SSA names whose value ranges are equivalent to this one.
67 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
68 bitmap equiv;
71 typedef struct value_range_d value_range_t;
73 #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
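/* For example, [1, 10] is a VR_RANGE covering the integers 1 through 10,
   while ~[0, 0] is a VR_ANTI_RANGE meaning "any value except 0", the usual
   encoding for a pointer known to be non-NULL.  */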
75 /* Set of SSA names found live during the RPO traversal of the function
76 for still active basic-blocks. */
77 static sbitmap *live;
79 /* Return true if the SSA name NAME is live on the edge E. */
81 static bool
82 live_on_edge (edge e, tree name)
84 return (live[e->dest->index]
85 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
88 /* Local functions. */
89 static int compare_values (tree val1, tree val2);
90 static int compare_values_warnv (tree val1, tree val2, bool *);
91 static void vrp_meet (value_range_t *, value_range_t *);
92 static void vrp_intersect_ranges (value_range_t *, value_range_t *);
93 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
94 tree, tree, bool, bool *,
95 bool *);
97 /* Location information for ASSERT_EXPRs. Each instance of this
98 structure describes an ASSERT_EXPR for an SSA name. Since a single
99 SSA name may have more than one assertion associated with it, these
100 locations are kept in a linked list attached to the corresponding
101 SSA name. */
102 struct assert_locus_d
104 /* Basic block where the assertion would be inserted. */
105 basic_block bb;
107 /* Some assertions need to be inserted on an edge (e.g., assertions
108 generated by COND_EXPRs). In those cases, BB will be NULL. */
109 edge e;
111 /* Pointer to the statement that generated this assertion. */
112 gimple_stmt_iterator si;
114 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
115 enum tree_code comp_code;
117 /* Value being compared against. */
118 tree val;
120 /* Expression to compare. */
121 tree expr;
123 /* Next node in the linked list. */
124 struct assert_locus_d *next;
127 typedef struct assert_locus_d *assert_locus_t;
129 /* If bit I is present, it means that SSA name N_i has a list of
130 assertions that should be inserted in the IL. */
131 static bitmap need_assert_for;
133 /* Array of lists of locations where to insert assertions. ASSERTS_FOR[I]
134 holds a list of ASSERT_LOCUS_T nodes that describe where
135 ASSERT_EXPRs for SSA name N_I should be inserted. */
136 static assert_locus_t *asserts_for;
138 /* Value range array. After propagation, VR_VALUE[I] holds the range
139 of values that SSA name N_I may take. */
140 static unsigned num_vr_values;
141 static value_range_t **vr_value;
142 static bool values_propagated;
144 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
145 number of executable edges we saw the last time we visited the
146 node. */
147 static int *vr_phi_edge_counts;
149 typedef struct {
150 gimple stmt;
151 tree vec;
152 } switch_update;
154 static vec<edge> to_remove_edges;
155 static vec<switch_update> to_update_switch_stmts;
158 /* Return the maximum value for TYPE. */
160 static inline tree
161 vrp_val_max (const_tree type)
163 if (!INTEGRAL_TYPE_P (type))
164 return NULL_TREE;
166 return TYPE_MAX_VALUE (type);
169 /* Return the minimum value for TYPE. */
171 static inline tree
172 vrp_val_min (const_tree type)
174 if (!INTEGRAL_TYPE_P (type))
175 return NULL_TREE;
177 return TYPE_MIN_VALUE (type);
180 /* Return whether VAL is equal to the maximum value of its type. This
181 will be true for a positive overflow infinity. We can't do a
182 simple equality comparison with TYPE_MAX_VALUE because C typedefs
183 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
184 to the integer constant with the same value in the type. */
186 static inline bool
187 vrp_val_is_max (const_tree val)
189 tree type_max = vrp_val_max (TREE_TYPE (val));
190 return (val == type_max
191 || (type_max != NULL_TREE
192 && operand_equal_p (val, type_max, 0)));
195 /* Return whether VAL is equal to the minimum value of its type. This
196 will be true for a negative overflow infinity. */
198 static inline bool
199 vrp_val_is_min (const_tree val)
201 tree type_min = vrp_val_min (TREE_TYPE (val));
202 return (val == type_min
203 || (type_min != NULL_TREE
204 && operand_equal_p (val, type_min, 0)));
208 /* Return whether TYPE should use an overflow infinity distinct from
209 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
210 represent a signed overflow during VRP computations. An infinity
211 is distinct from a half-range, which will go from some number to
212 TYPE_{MIN,MAX}_VALUE. */
214 static inline bool
215 needs_overflow_infinity (const_tree type)
217 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
220 /* Return whether TYPE can support our overflow infinity
221 representation: we use the TREE_OVERFLOW flag, which only exists
222 for constants. If TYPE doesn't support this, we don't optimize
223 cases which would require signed overflow--we drop them to
224 VARYING. */
226 static inline bool
227 supports_overflow_infinity (const_tree type)
229 tree min = vrp_val_min (type), max = vrp_val_max (type);
230 #ifdef ENABLE_CHECKING
231 gcc_assert (needs_overflow_infinity (type));
232 #endif
233 return (min != NULL_TREE
234 && CONSTANT_CLASS_P (min)
235 && max != NULL_TREE
236 && CONSTANT_CLASS_P (max));
239 /* VAL is the maximum or minimum value of a type. Return a
240 corresponding overflow infinity. */
242 static inline tree
243 make_overflow_infinity (tree val)
245 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
246 val = copy_node (val);
247 TREE_OVERFLOW (val) = 1;
248 return val;
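/* An overflow infinity is thus just a copy of TYPE_MIN_VALUE or
   TYPE_MAX_VALUE with TREE_OVERFLOW set: it still compares equal to the
   type's extreme value but can be distinguished from an ordinary constant.  */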
251 /* Return a negative overflow infinity for TYPE. */
253 static inline tree
254 negative_overflow_infinity (tree type)
256 gcc_checking_assert (supports_overflow_infinity (type));
257 return make_overflow_infinity (vrp_val_min (type));
260 /* Return a positive overflow infinity for TYPE. */
262 static inline tree
263 positive_overflow_infinity (tree type)
265 gcc_checking_assert (supports_overflow_infinity (type));
266 return make_overflow_infinity (vrp_val_max (type));
269 /* Return whether VAL is a negative overflow infinity. */
271 static inline bool
272 is_negative_overflow_infinity (const_tree val)
274 return (needs_overflow_infinity (TREE_TYPE (val))
275 && CONSTANT_CLASS_P (val)
276 && TREE_OVERFLOW (val)
277 && vrp_val_is_min (val));
280 /* Return whether VAL is a positive overflow infinity. */
282 static inline bool
283 is_positive_overflow_infinity (const_tree val)
285 return (needs_overflow_infinity (TREE_TYPE (val))
286 && CONSTANT_CLASS_P (val)
287 && TREE_OVERFLOW (val)
288 && vrp_val_is_max (val));
291 /* Return whether VAL is a positive or negative overflow infinity. */
293 static inline bool
294 is_overflow_infinity (const_tree val)
296 return (needs_overflow_infinity (TREE_TYPE (val))
297 && CONSTANT_CLASS_P (val)
298 && TREE_OVERFLOW (val)
299 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
302 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
304 static inline bool
305 stmt_overflow_infinity (gimple stmt)
307 if (is_gimple_assign (stmt)
308 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
309 GIMPLE_SINGLE_RHS)
310 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
311 return false;
314 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
315 the same value with TREE_OVERFLOW clear. This can be used to avoid
316 confusing a regular value with an overflow value. */
318 static inline tree
319 avoid_overflow_infinity (tree val)
321 if (!is_overflow_infinity (val))
322 return val;
324 if (vrp_val_is_max (val))
325 return vrp_val_max (TREE_TYPE (val));
326 else
328 gcc_checking_assert (vrp_val_is_min (val));
329 return vrp_val_min (TREE_TYPE (val));
334 /* Return true if ARG is marked with the nonnull attribute in the
335 current function signature. */
337 static bool
338 nonnull_arg_p (const_tree arg)
340 tree t, attrs, fntype;
341 unsigned HOST_WIDE_INT arg_num;
343 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
345 /* The static chain decl is always non null. */
346 if (arg == cfun->static_chain_decl)
347 return true;
349 fntype = TREE_TYPE (current_function_decl);
350 for (attrs = TYPE_ATTRIBUTES (fntype); attrs; attrs = TREE_CHAIN (attrs))
352 attrs = lookup_attribute ("nonnull", attrs);
354 /* If "nonnull" wasn't specified, we know nothing about the argument. */
355 if (attrs == NULL_TREE)
356 return false;
358 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
359 if (TREE_VALUE (attrs) == NULL_TREE)
360 return true;
362 /* Get the position number for ARG in the function signature. */
363 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
365 t = DECL_CHAIN (t), arg_num++)
367 if (t == arg)
368 break;
371 gcc_assert (t == arg);
373 /* Now see if ARG_NUM is mentioned in the nonnull list. */
374 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
376 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
377 return true;
381 return false;
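/* For instance, when compiling a function declared as
     void f (char *a, char *b) __attribute__ ((nonnull (2)));
   nonnull_arg_p returns true for B's PARM_DECL and false for A's, while a
   bare __attribute__ ((nonnull)) covers both pointer arguments.  */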
385 /* Set value range VR to VR_UNDEFINED. */
387 static inline void
388 set_value_range_to_undefined (value_range_t *vr)
390 vr->type = VR_UNDEFINED;
391 vr->min = vr->max = NULL_TREE;
392 if (vr->equiv)
393 bitmap_clear (vr->equiv);
397 /* Set value range VR to VR_VARYING. */
399 static inline void
400 set_value_range_to_varying (value_range_t *vr)
402 vr->type = VR_VARYING;
403 vr->min = vr->max = NULL_TREE;
404 if (vr->equiv)
405 bitmap_clear (vr->equiv);
409 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
411 static void
412 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
413 tree max, bitmap equiv)
415 #if defined ENABLE_CHECKING
416 /* Check the validity of the range. */
417 if (t == VR_RANGE || t == VR_ANTI_RANGE)
419 int cmp;
421 gcc_assert (min && max);
423 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
424 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
426 cmp = compare_values (min, max);
427 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
429 if (needs_overflow_infinity (TREE_TYPE (min)))
430 gcc_assert (!is_overflow_infinity (min)
431 || !is_overflow_infinity (max));
434 if (t == VR_UNDEFINED || t == VR_VARYING)
435 gcc_assert (min == NULL_TREE && max == NULL_TREE);
437 if (t == VR_UNDEFINED || t == VR_VARYING)
438 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
439 #endif
441 vr->type = t;
442 vr->min = min;
443 vr->max = max;
445 /* Since updating the equivalence set involves deep copying the
446 bitmaps, only do it if absolutely necessary. */
447 if (vr->equiv == NULL
448 && equiv != NULL)
449 vr->equiv = BITMAP_ALLOC (NULL);
451 if (equiv != vr->equiv)
453 if (equiv && !bitmap_empty_p (equiv))
454 bitmap_copy (vr->equiv, equiv);
455 else
456 bitmap_clear (vr->equiv);
461 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
462 This means adjusting T, MIN and MAX representing the case of a
463 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
464 as anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
465 In corner cases where MAX+1 or MIN-1 wraps this will fall back
466 to varying.
467 This routine exists to ease canonicalization in the case where we
468 extract ranges from var + CST op limit. */
470 static void
471 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
472 tree min, tree max, bitmap equiv)
474 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
475 if (t == VR_UNDEFINED)
477 set_value_range_to_undefined (vr);
478 return;
480 else if (t == VR_VARYING)
482 set_value_range_to_varying (vr);
483 return;
486 /* Nothing to canonicalize for symbolic ranges. */
487 if (TREE_CODE (min) != INTEGER_CST
488 || TREE_CODE (max) != INTEGER_CST)
490 set_value_range (vr, t, min, max, equiv);
491 return;
494 /* MIN and MAX are in the wrong order; to swap them we also need to
495 flip the VR type and adjust the bounds by one.  */
496 if (tree_int_cst_lt (max, min))
498 tree one, tmp;
500 /* For one bit precision if max < min, then the swapped
501 range covers all values, so for VR_RANGE it is varying and
502 for VR_ANTI_RANGE empty range, so drop to varying as well. */
503 if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
505 set_value_range_to_varying (vr);
506 return;
509 one = build_int_cst (TREE_TYPE (min), 1);
510 tmp = int_const_binop (PLUS_EXPR, max, one);
511 max = int_const_binop (MINUS_EXPR, min, one);
512 min = tmp;
514 /* There's one corner case, if we had [C+1, C] before we now have
515 that again. But this represents an empty value range, so drop
516 to varying in this case. */
517 if (tree_int_cst_lt (max, min))
519 set_value_range_to_varying (vr);
520 return;
523 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
526 /* Anti-ranges that can be represented as ranges should be so. */
527 if (t == VR_ANTI_RANGE)
529 bool is_min = vrp_val_is_min (min);
530 bool is_max = vrp_val_is_max (max);
532 if (is_min && is_max)
534 /* We cannot deal with empty ranges, drop to varying.
535 ??? This could be VR_UNDEFINED instead. */
536 set_value_range_to_varying (vr);
537 return;
539 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
540 && (is_min || is_max))
542 /* Non-empty boolean ranges can always be represented
543 as a singleton range. */
544 if (is_min)
545 min = max = vrp_val_max (TREE_TYPE (min));
546 else
547 min = max = vrp_val_min (TREE_TYPE (min));
548 t = VR_RANGE;
550 else if (is_min
551 /* As a special exception preserve non-null ranges. */
552 && !(TYPE_UNSIGNED (TREE_TYPE (min))
553 && integer_zerop (max)))
555 tree one = build_int_cst (TREE_TYPE (max), 1);
556 min = int_const_binop (PLUS_EXPR, max, one);
557 max = vrp_val_max (TREE_TYPE (max));
558 t = VR_RANGE;
560 else if (is_max)
562 tree one = build_int_cst (TREE_TYPE (min), 1);
563 max = int_const_binop (MINUS_EXPR, min, one);
564 min = vrp_val_min (TREE_TYPE (min));
565 t = VR_RANGE;
569 /* Drop [-INF(OVF), +INF(OVF)] to varying. */
570 if (needs_overflow_infinity (TREE_TYPE (min))
571 && is_overflow_infinity (min)
572 && is_overflow_infinity (max))
574 set_value_range_to_varying (vr);
575 return;
578 set_value_range (vr, t, min, max, equiv);
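/* For example, for an 8-bit unsigned type set_and_canonicalize_value_range
   turns the wrapping range [250, 5] into the anti-range ~[6, 249], while the
   anti-range ~[0, 255], which would exclude every value, drops to VR_VARYING.  */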
581 /* Copy value range FROM into value range TO. */
583 static inline void
584 copy_value_range (value_range_t *to, value_range_t *from)
586 set_value_range (to, from->type, from->min, from->max, from->equiv);
589 /* Set value range VR to a single value. This function is only called
590 with values we get from statements, and exists to clear the
591 TREE_OVERFLOW flag so that we don't think we have an overflow
592 infinity when we shouldn't. */
594 static inline void
595 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
597 gcc_assert (is_gimple_min_invariant (val));
598 val = avoid_overflow_infinity (val);
599 set_value_range (vr, VR_RANGE, val, val, equiv);
602 /* Set value range VR to a non-negative range of type TYPE.
603 OVERFLOW_INFINITY indicates whether to use an overflow infinity
604 rather than TYPE_MAX_VALUE; this should be true if we determine
605 that the range is nonnegative based on the assumption that signed
606 overflow does not occur. */
608 static inline void
609 set_value_range_to_nonnegative (value_range_t *vr, tree type,
610 bool overflow_infinity)
612 tree zero;
614 if (overflow_infinity && !supports_overflow_infinity (type))
616 set_value_range_to_varying (vr);
617 return;
620 zero = build_int_cst (type, 0);
621 set_value_range (vr, VR_RANGE, zero,
622 (overflow_infinity
623 ? positive_overflow_infinity (type)
624 : TYPE_MAX_VALUE (type)),
625 vr->equiv);
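/* set_value_range_to_nonnegative thus produces [0, INT_MAX] for a 32-bit
   signed int, or [0, +INF(OVF)] when OVERFLOW_INFINITY is true.  */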
628 /* Set value range VR to a non-NULL range of type TYPE. */
630 static inline void
631 set_value_range_to_nonnull (value_range_t *vr, tree type)
633 tree zero = build_int_cst (type, 0);
634 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
638 /* Set value range VR to a NULL range of type TYPE. */
640 static inline void
641 set_value_range_to_null (value_range_t *vr, tree type)
643 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
647 /* Set value range VR to a range of a truthvalue of type TYPE. */
649 static inline void
650 set_value_range_to_truthvalue (value_range_t *vr, tree type)
652 if (TYPE_PRECISION (type) == 1)
653 set_value_range_to_varying (vr);
654 else
655 set_value_range (vr, VR_RANGE,
656 build_int_cst (type, 0), build_int_cst (type, 1),
657 vr->equiv);
661 /* If abs (min) < abs (max), set VR to [-max, max]; otherwise, i.e. if
662 abs (min) >= abs (max), set VR to [-min, min]. */
664 static void
665 abs_extent_range (value_range_t *vr, tree min, tree max)
667 int cmp;
669 gcc_assert (TREE_CODE (min) == INTEGER_CST);
670 gcc_assert (TREE_CODE (max) == INTEGER_CST);
671 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
672 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
673 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
674 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
675 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
677 set_value_range_to_varying (vr);
678 return;
680 cmp = compare_values (min, max);
681 if (cmp == -1)
682 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
683 else if (cmp == 0 || cmp == 1)
685 max = min;
686 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
688 else
690 set_value_range_to_varying (vr);
691 return;
693 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
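/* For instance, abs_extent_range on [-3, 7] produces [-7, 7], and on
   [-9, 4] it produces [-9, 9].  */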
697 /* Return value range information for VAR.
699 If we have no value ranges recorded (i.e., VRP is not running), then
700 return NULL. Otherwise create an empty range if none existed for VAR. */
702 static value_range_t *
703 get_value_range (const_tree var)
705 static const struct value_range_d vr_const_varying
706 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
707 value_range_t *vr;
708 tree sym;
709 unsigned ver = SSA_NAME_VERSION (var);
711 /* If we have no recorded ranges, then return NULL. */
712 if (! vr_value)
713 return NULL;
715 /* If we query the range for a new SSA name return an unmodifiable VARYING.
716 We should get here at most from the substitute-and-fold stage which
717 will never try to change values. */
718 if (ver >= num_vr_values)
719 return CONST_CAST (value_range_t *, &vr_const_varying);
721 vr = vr_value[ver];
722 if (vr)
723 return vr;
725 /* After propagation finished do not allocate new value-ranges. */
726 if (values_propagated)
727 return CONST_CAST (value_range_t *, &vr_const_varying);
729 /* Create a default value range. */
730 vr_value[ver] = vr = XCNEW (value_range_t);
732 /* Defer allocating the equivalence set. */
733 vr->equiv = NULL;
735 /* If VAR is a default definition of a parameter, the variable can
736 take any value in VAR's type. */
737 if (SSA_NAME_IS_DEFAULT_DEF (var))
739 sym = SSA_NAME_VAR (var);
740 if (TREE_CODE (sym) == PARM_DECL)
742 /* Try to use the "nonnull" attribute to create ~[0, 0]
743 anti-ranges for pointers. Note that this is only valid with
744 default definitions of PARM_DECLs. */
745 if (POINTER_TYPE_P (TREE_TYPE (sym))
746 && nonnull_arg_p (sym))
747 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
748 else
749 set_value_range_to_varying (vr);
751 else if (TREE_CODE (sym) == RESULT_DECL
752 && DECL_BY_REFERENCE (sym))
753 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
756 return vr;
759 /* Return true if VAL1 and VAL2 are equal values for VRP purposes. */
761 static inline bool
762 vrp_operand_equal_p (const_tree val1, const_tree val2)
764 if (val1 == val2)
765 return true;
766 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
767 return false;
768 if (is_overflow_infinity (val1))
769 return is_overflow_infinity (val2);
770 return true;
773 /* Return true if the bitmaps B1 and B2 are equal. */
775 static inline bool
776 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
778 return (b1 == b2
779 || ((!b1 || bitmap_empty_p (b1))
780 && (!b2 || bitmap_empty_p (b2)))
781 || (b1 && b2
782 && bitmap_equal_p (b1, b2)));
785 /* Update the value range and equivalence set for variable VAR to
786 NEW_VR. Return true if NEW_VR is different from VAR's previous
787 value.
789 NOTE: This function assumes that NEW_VR is a temporary value range
790 object created for the sole purpose of updating VAR's range. The
791 storage used by the equivalence set from NEW_VR will be freed by
792 this function. Do not call update_value_range when NEW_VR
793 is the range object associated with another SSA name. */
795 static inline bool
796 update_value_range (const_tree var, value_range_t *new_vr)
798 value_range_t *old_vr;
799 bool is_new;
801 /* Update the value range, if necessary. */
802 old_vr = get_value_range (var);
803 is_new = old_vr->type != new_vr->type
804 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
805 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
806 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
808 if (is_new)
810 /* Do not allow transitions up the lattice. The following
811 is slightly more awkward than just new_vr->type < old_vr->type
812 because VR_RANGE and VR_ANTI_RANGE need to be considered
813 the same. We may not have is_new when transitioning to
814 UNDEFINED or from VARYING. */
815 if (new_vr->type == VR_UNDEFINED
816 || old_vr->type == VR_VARYING)
817 set_value_range_to_varying (old_vr);
818 else
819 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
820 new_vr->equiv);
823 BITMAP_FREE (new_vr->equiv);
825 return is_new;
829 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
830 point where equivalence processing can be turned on/off. */
832 static void
833 add_equivalence (bitmap *equiv, const_tree var)
835 unsigned ver = SSA_NAME_VERSION (var);
836 value_range_t *vr = vr_value[ver];
838 if (*equiv == NULL)
839 *equiv = BITMAP_ALLOC (NULL);
840 bitmap_set_bit (*equiv, ver);
841 if (vr && vr->equiv)
842 bitmap_ior_into (*equiv, vr->equiv);
846 /* Return true if VR is ~[0, 0]. */
848 static inline bool
849 range_is_nonnull (value_range_t *vr)
851 return vr->type == VR_ANTI_RANGE
852 && integer_zerop (vr->min)
853 && integer_zerop (vr->max);
857 /* Return true if VR is [0, 0]. */
859 static inline bool
860 range_is_null (value_range_t *vr)
862 return vr->type == VR_RANGE
863 && integer_zerop (vr->min)
864 && integer_zerop (vr->max);
867 /* Return true if max and min of VR are INTEGER_CST. It's not necessarily
868 a singleton. */
870 static inline bool
871 range_int_cst_p (value_range_t *vr)
873 return (vr->type == VR_RANGE
874 && TREE_CODE (vr->max) == INTEGER_CST
875 && TREE_CODE (vr->min) == INTEGER_CST);
878 /* Return true if VR is an INTEGER_CST singleton. */
880 static inline bool
881 range_int_cst_singleton_p (value_range_t *vr)
883 return (range_int_cst_p (vr)
884 && !TREE_OVERFLOW (vr->min)
885 && !TREE_OVERFLOW (vr->max)
886 && tree_int_cst_equal (vr->min, vr->max));
889 /* Return true if value range VR involves at least one symbol. */
891 static inline bool
892 symbolic_range_p (value_range_t *vr)
894 return (!is_gimple_min_invariant (vr->min)
895 || !is_gimple_min_invariant (vr->max));
898 /* Return true if value range VR uses an overflow infinity. */
900 static inline bool
901 overflow_infinity_range_p (value_range_t *vr)
903 return (vr->type == VR_RANGE
904 && (is_overflow_infinity (vr->min)
905 || is_overflow_infinity (vr->max)));
908 /* Return false if we cannot make a valid comparison based on VR;
909 this will be the case if it uses an overflow infinity and overflow
910 is not undefined (i.e., -fno-strict-overflow is in effect).
911 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
912 uses an overflow infinity. */
914 static bool
915 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
917 gcc_assert (vr->type == VR_RANGE);
918 if (is_overflow_infinity (vr->min))
920 *strict_overflow_p = true;
921 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
922 return false;
924 if (is_overflow_infinity (vr->max))
926 *strict_overflow_p = true;
927 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
928 return false;
930 return true;
934 /* Return true if the result of assignment STMT is known to be non-negative.
935 If the return value is based on the assumption that signed overflow is
936 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
937 *STRICT_OVERFLOW_P.*/
939 static bool
940 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
942 enum tree_code code = gimple_assign_rhs_code (stmt);
943 switch (get_gimple_rhs_class (code))
945 case GIMPLE_UNARY_RHS:
946 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
947 gimple_expr_type (stmt),
948 gimple_assign_rhs1 (stmt),
949 strict_overflow_p);
950 case GIMPLE_BINARY_RHS:
951 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
952 gimple_expr_type (stmt),
953 gimple_assign_rhs1 (stmt),
954 gimple_assign_rhs2 (stmt),
955 strict_overflow_p);
956 case GIMPLE_TERNARY_RHS:
957 return false;
958 case GIMPLE_SINGLE_RHS:
959 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
960 strict_overflow_p);
961 case GIMPLE_INVALID_RHS:
962 gcc_unreachable ();
963 default:
964 gcc_unreachable ();
968 /* Return true if the return value of call STMT is known to be non-negative.
969 If the return value is based on the assumption that signed overflow is
970 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
971 *STRICT_OVERFLOW_P.*/
973 static bool
974 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
976 tree arg0 = gimple_call_num_args (stmt) > 0 ?
977 gimple_call_arg (stmt, 0) : NULL_TREE;
978 tree arg1 = gimple_call_num_args (stmt) > 1 ?
979 gimple_call_arg (stmt, 1) : NULL_TREE;
981 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
982 gimple_call_fndecl (stmt),
983 arg0,
984 arg1,
985 strict_overflow_p);
988 /* Return true if STMT is known to compute a non-negative value.
989 If the return value is based on the assumption that signed overflow is
990 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
991 *STRICT_OVERFLOW_P.*/
993 static bool
994 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
996 switch (gimple_code (stmt))
998 case GIMPLE_ASSIGN:
999 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
1000 case GIMPLE_CALL:
1001 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
1002 default:
1003 gcc_unreachable ();
1007 /* Return true if the result of assignment STMT is known to be non-zero.
1008 If the return value is based on the assumption that signed overflow is
1009 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1010 *STRICT_OVERFLOW_P.*/
1012 static bool
1013 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
1015 enum tree_code code = gimple_assign_rhs_code (stmt);
1016 switch (get_gimple_rhs_class (code))
1018 case GIMPLE_UNARY_RHS:
1019 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1020 gimple_expr_type (stmt),
1021 gimple_assign_rhs1 (stmt),
1022 strict_overflow_p);
1023 case GIMPLE_BINARY_RHS:
1024 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1025 gimple_expr_type (stmt),
1026 gimple_assign_rhs1 (stmt),
1027 gimple_assign_rhs2 (stmt),
1028 strict_overflow_p);
1029 case GIMPLE_TERNARY_RHS:
1030 return false;
1031 case GIMPLE_SINGLE_RHS:
1032 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
1033 strict_overflow_p);
1034 case GIMPLE_INVALID_RHS:
1035 gcc_unreachable ();
1036 default:
1037 gcc_unreachable ();
1041 /* Return true if STMT is known to compute a non-zero value.
1042 If the return value is based on the assumption that signed overflow is
1043 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1044 *STRICT_OVERFLOW_P.*/
1046 static bool
1047 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
1049 switch (gimple_code (stmt))
1051 case GIMPLE_ASSIGN:
1052 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
1053 case GIMPLE_CALL:
1055 tree fndecl = gimple_call_fndecl (stmt);
1056 if (!fndecl) return false;
1057 if (flag_delete_null_pointer_checks && !flag_check_new
1058 && DECL_IS_OPERATOR_NEW (fndecl)
1059 && !TREE_NOTHROW (fndecl))
1060 return true;
1061 if (flag_delete_null_pointer_checks &&
1062 lookup_attribute ("returns_nonnull",
1063 TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
1064 return true;
1065 return gimple_alloca_call_p (stmt);
1067 default:
1068 gcc_unreachable ();
1072 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1073 obtained so far. */
1075 static bool
1076 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
1078 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1079 return true;
1081 /* If we have an expression of the form &X->a, then the expression
1082 is nonnull if X is nonnull. */
1083 if (is_gimple_assign (stmt)
1084 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1086 tree expr = gimple_assign_rhs1 (stmt);
1087 tree base = get_base_address (TREE_OPERAND (expr, 0));
1089 if (base != NULL_TREE
1090 && TREE_CODE (base) == MEM_REF
1091 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1093 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
1094 if (range_is_nonnull (vr))
1095 return true;
1099 return false;
1102 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1103 a gimple invariant, or SSA_NAME +- CST. */
1105 static bool
1106 valid_value_p (tree expr)
1108 if (TREE_CODE (expr) == SSA_NAME)
1109 return true;
1111 if (TREE_CODE (expr) == PLUS_EXPR
1112 || TREE_CODE (expr) == MINUS_EXPR)
1113 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1114 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1116 return is_gimple_min_invariant (expr);
1119 /* Return
1120 1 if VAL < VAL2
1121 0 if !(VAL < VAL2)
1122 -2 if those are incomparable. */
1123 static inline int
1124 operand_less_p (tree val, tree val2)
1126 /* LT is folded faster than GE and others. Inline the common case. */
1127 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1129 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1130 return INT_CST_LT_UNSIGNED (val, val2);
1131 else
1133 if (INT_CST_LT (val, val2))
1134 return 1;
1137 else
1139 tree tcmp;
1141 fold_defer_overflow_warnings ();
1143 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1145 fold_undefer_and_ignore_overflow_warnings ();
1147 if (!tcmp
1148 || TREE_CODE (tcmp) != INTEGER_CST)
1149 return -2;
1151 if (!integer_zerop (tcmp))
1152 return 1;
1155 /* val >= val2, not considering overflow infinity. */
1156 if (is_negative_overflow_infinity (val))
1157 return is_negative_overflow_infinity (val2) ? 0 : 1;
1158 else if (is_positive_overflow_infinity (val2))
1159 return is_positive_overflow_infinity (val) ? 0 : 1;
1161 return 0;
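/* So operand_less_p on the constants 1 and 2 returns 1, on 2 and 1 it
   returns 0, and on two unrelated symbolic values it returns -2.  */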
1164 /* Compare two values VAL1 and VAL2. Return
1166 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1167 -1 if VAL1 < VAL2,
1168 0 if VAL1 == VAL2,
1169 +1 if VAL1 > VAL2, and
1170 +2 if VAL1 != VAL2
1172 This is similar to tree_int_cst_compare but supports pointer values
1173 and values that cannot be compared at compile time.
1175 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1176 true if the return value is only valid if we assume that signed
1177 overflow is undefined. */
1179 static int
1180 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1182 if (val1 == val2)
1183 return 0;
1185 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1186 both integers. */
1187 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1188 == POINTER_TYPE_P (TREE_TYPE (val2)));
1189 /* Convert the two values into the same type. This is needed because
1190 sizetype causes sign extension even for unsigned types. */
1191 val2 = fold_convert (TREE_TYPE (val1), val2);
1192 STRIP_USELESS_TYPE_CONVERSION (val2);
1194 if ((TREE_CODE (val1) == SSA_NAME
1195 || TREE_CODE (val1) == PLUS_EXPR
1196 || TREE_CODE (val1) == MINUS_EXPR)
1197 && (TREE_CODE (val2) == SSA_NAME
1198 || TREE_CODE (val2) == PLUS_EXPR
1199 || TREE_CODE (val2) == MINUS_EXPR))
1201 tree n1, c1, n2, c2;
1202 enum tree_code code1, code2;
1204 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1205 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1206 same name, return -2. */
1207 if (TREE_CODE (val1) == SSA_NAME)
1209 code1 = SSA_NAME;
1210 n1 = val1;
1211 c1 = NULL_TREE;
1213 else
1215 code1 = TREE_CODE (val1);
1216 n1 = TREE_OPERAND (val1, 0);
1217 c1 = TREE_OPERAND (val1, 1);
1218 if (tree_int_cst_sgn (c1) == -1)
1220 if (is_negative_overflow_infinity (c1))
1221 return -2;
1222 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1223 if (!c1)
1224 return -2;
1225 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1229 if (TREE_CODE (val2) == SSA_NAME)
1231 code2 = SSA_NAME;
1232 n2 = val2;
1233 c2 = NULL_TREE;
1235 else
1237 code2 = TREE_CODE (val2);
1238 n2 = TREE_OPERAND (val2, 0);
1239 c2 = TREE_OPERAND (val2, 1);
1240 if (tree_int_cst_sgn (c2) == -1)
1242 if (is_negative_overflow_infinity (c2))
1243 return -2;
1244 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1245 if (!c2)
1246 return -2;
1247 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1251 /* Both values must use the same name. */
1252 if (n1 != n2)
1253 return -2;
1255 if (code1 == SSA_NAME
1256 && code2 == SSA_NAME)
1257 /* NAME == NAME */
1258 return 0;
1260 /* If overflow is defined we cannot simplify more. */
1261 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1262 return -2;
1264 if (strict_overflow_p != NULL
1265 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1266 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1267 *strict_overflow_p = true;
1269 if (code1 == SSA_NAME)
1271 if (code2 == PLUS_EXPR)
1272 /* NAME < NAME + CST */
1273 return -1;
1274 else if (code2 == MINUS_EXPR)
1275 /* NAME > NAME - CST */
1276 return 1;
1278 else if (code1 == PLUS_EXPR)
1280 if (code2 == SSA_NAME)
1281 /* NAME + CST > NAME */
1282 return 1;
1283 else if (code2 == PLUS_EXPR)
1284 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1285 return compare_values_warnv (c1, c2, strict_overflow_p);
1286 else if (code2 == MINUS_EXPR)
1287 /* NAME + CST1 > NAME - CST2 */
1288 return 1;
1290 else if (code1 == MINUS_EXPR)
1292 if (code2 == SSA_NAME)
1293 /* NAME - CST < NAME */
1294 return -1;
1295 else if (code2 == PLUS_EXPR)
1296 /* NAME - CST1 < NAME + CST2 */
1297 return -1;
1298 else if (code2 == MINUS_EXPR)
1299 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1300 C1 and C2 are swapped in the call to compare_values. */
1301 return compare_values_warnv (c2, c1, strict_overflow_p);
1304 gcc_unreachable ();
1307 /* We cannot compare non-constants. */
1308 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1309 return -2;
1311 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1313 /* We cannot compare overflowed values, except for overflow
1314 infinities. */
1315 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1317 if (strict_overflow_p != NULL)
1318 *strict_overflow_p = true;
1319 if (is_negative_overflow_infinity (val1))
1320 return is_negative_overflow_infinity (val2) ? 0 : -1;
1321 else if (is_negative_overflow_infinity (val2))
1322 return 1;
1323 else if (is_positive_overflow_infinity (val1))
1324 return is_positive_overflow_infinity (val2) ? 0 : 1;
1325 else if (is_positive_overflow_infinity (val2))
1326 return -1;
1327 return -2;
1330 return tree_int_cst_compare (val1, val2);
1332 else
1334 tree t;
1336 /* First see if VAL1 and VAL2 are not the same. */
1337 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1338 return 0;
1340 /* If VAL1 is a lower address than VAL2, return -1. */
1341 if (operand_less_p (val1, val2) == 1)
1342 return -1;
1344 /* If VAL1 is a higher address than VAL2, return +1. */
1345 if (operand_less_p (val2, val1) == 1)
1346 return 1;
1348 /* If VAL1 is different than VAL2, return +2.
1349 For integer constants we either have already returned -1 or 1
1350 or they are equivalent. We still might succeed in proving
1351 something about non-trivial operands. */
1352 if (TREE_CODE (val1) != INTEGER_CST
1353 || TREE_CODE (val2) != INTEGER_CST)
1355 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1356 if (t && integer_onep (t))
1357 return 2;
1360 return -2;
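/* For example, compare_values_warnv reduces a comparison of i_1 + 4 against
   i_1 + 7 to comparing the constants 4 and 7, giving -1, provided signed
   overflow is undefined for i_1's type; with -fwrapv it returns -2 instead.  */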
1364 /* Compare values like compare_values_warnv, but treat comparisons of
1365 nonconstants which rely on undefined overflow as incomparable. */
1367 static int
1368 compare_values (tree val1, tree val2)
1370 bool sop;
1371 int ret;
1373 sop = false;
1374 ret = compare_values_warnv (val1, val2, &sop);
1375 if (sop
1376 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1377 ret = -2;
1378 return ret;
1382 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1383 0 if VAL is not inside [MIN, MAX],
1384 -2 if we cannot tell either way.
1386 Benchmark compile/20001226-1.c compilation time after changing this
1387 function. */
1389 static inline int
1390 value_inside_range (tree val, tree min, tree max)
1392 int cmp1, cmp2;
1394 cmp1 = operand_less_p (val, min);
1395 if (cmp1 == -2)
1396 return -2;
1397 if (cmp1 == 1)
1398 return 0;
1400 cmp2 = operand_less_p (max, val);
1401 if (cmp2 == -2)
1402 return -2;
1404 return !cmp2;
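/* E.g. value_inside_range (5, 1, 10) returns 1 and
   value_inside_range (0, 1, 10) returns 0.  */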
1408 /* Return true if value ranges VR0 and VR1 have a non-empty
1409 intersection.
1411 Benchmark compile/20001226-1.c compilation time after changing this
1412 function.  */
1415 static inline bool
1416 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1418 /* The value ranges do not intersect if the maximum of the first range is
1419 less than the minimum of the second range or vice versa.
1420 When those relations are unknown, we can't do any better. */
1421 if (operand_less_p (vr0->max, vr1->min) != 0)
1422 return false;
1423 if (operand_less_p (vr1->max, vr0->min) != 0)
1424 return false;
1425 return true;
1429 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1430 include the value zero, -2 if we cannot tell. */
1432 static inline int
1433 range_includes_zero_p (tree min, tree max)
1435 tree zero = build_int_cst (TREE_TYPE (min), 0);
1436 return value_inside_range (zero, min, max);
1439 /* Return true if *VR is known to only contain nonnegative values. */
1441 static inline bool
1442 value_range_nonnegative_p (value_range_t *vr)
1444 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1445 which would return a useful value should be encoded as a
1446 VR_RANGE. */
1447 if (vr->type == VR_RANGE)
1449 int result = compare_values (vr->min, integer_zero_node);
1450 return (result == 0 || result == 1);
1453 return false;
1456 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1457 false otherwise or if no value range information is available. */
1459 bool
1460 ssa_name_nonnegative_p (const_tree t)
1462 value_range_t *vr = get_value_range (t);
1464 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
1465 && TYPE_UNSIGNED (TREE_TYPE (t)))
1466 return true;
1468 if (!vr)
1469 return false;
1471 return value_range_nonnegative_p (vr);
1474 /* If *VR has a value range that is a single constant value return that,
1475 otherwise return NULL_TREE. */
1477 static tree
1478 value_range_constant_singleton (value_range_t *vr)
1480 if (vr->type == VR_RANGE
1481 && operand_equal_p (vr->min, vr->max, 0)
1482 && is_gimple_min_invariant (vr->min))
1483 return vr->min;
1485 return NULL_TREE;
1488 /* If OP has a value range with a single constant value return that,
1489 otherwise return NULL_TREE. This returns OP itself if OP is a
1490 constant. */
1492 static tree
1493 op_with_constant_singleton_value_range (tree op)
1495 if (is_gimple_min_invariant (op))
1496 return op;
1498 if (TREE_CODE (op) != SSA_NAME)
1499 return NULL_TREE;
1501 return value_range_constant_singleton (get_value_range (op));
1504 /* Return true if op is in a boolean [0, 1] value-range. */
1506 static bool
1507 op_with_boolean_value_range_p (tree op)
1509 value_range_t *vr;
1511 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1512 return true;
1514 if (integer_zerop (op)
1515 || integer_onep (op))
1516 return true;
1518 if (TREE_CODE (op) != SSA_NAME)
1519 return false;
1521 vr = get_value_range (op);
1522 return (vr->type == VR_RANGE
1523 && integer_zerop (vr->min)
1524 && integer_onep (vr->max));
1527 /* Extract value range information from an ASSERT_EXPR EXPR and store
1528 it in *VR_P. */
1530 static void
1531 extract_range_from_assert (value_range_t *vr_p, tree expr)
1533 tree var, cond, limit, min, max, type;
1534 value_range_t *limit_vr;
1535 enum tree_code cond_code;
1537 var = ASSERT_EXPR_VAR (expr);
1538 cond = ASSERT_EXPR_COND (expr);
1540 gcc_assert (COMPARISON_CLASS_P (cond));
1542 /* Find VAR in the ASSERT_EXPR conditional. */
1543 if (var == TREE_OPERAND (cond, 0)
1544 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1545 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1547 /* If the predicate is of the form VAR COMP LIMIT, then we just
1548 take LIMIT from the RHS and use the same comparison code. */
1549 cond_code = TREE_CODE (cond);
1550 limit = TREE_OPERAND (cond, 1);
1551 cond = TREE_OPERAND (cond, 0);
1553 else
1555 /* If the predicate is of the form LIMIT COMP VAR, then we need
1556 to flip around the comparison code to create the proper range
1557 for VAR. */
1558 cond_code = swap_tree_comparison (TREE_CODE (cond));
1559 limit = TREE_OPERAND (cond, 0);
1560 cond = TREE_OPERAND (cond, 1);
1563 limit = avoid_overflow_infinity (limit);
1565 type = TREE_TYPE (var);
1566 gcc_assert (limit != var);
1568 /* For pointer arithmetic, we only keep track of pointer equality
1569 and inequality. */
1570 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1572 set_value_range_to_varying (vr_p);
1573 return;
1576 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1577 try to use LIMIT's range to avoid creating symbolic ranges
1578 unnecessarily. */
1579 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1581 /* LIMIT's range is only interesting if it has any useful information. */
1582 if (limit_vr
1583 && (limit_vr->type == VR_UNDEFINED
1584 || limit_vr->type == VR_VARYING
1585 || symbolic_range_p (limit_vr)))
1586 limit_vr = NULL;
1588 /* Initially, the new range has the same set of equivalences of
1589 VAR's range. This will be revised before returning the final
1590 value. Since assertions may be chained via mutually exclusive
1591 predicates, we will need to trim the set of equivalences before
1592 we are done. */
1593 gcc_assert (vr_p->equiv == NULL);
1594 add_equivalence (&vr_p->equiv, var);
1596 /* Extract a new range based on the asserted comparison for VAR and
1597 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1598 will only use it for equality comparisons (EQ_EXPR). For any
1599 other kind of assertion, we cannot derive a range from LIMIT's
1600 anti-range that can be used to describe the new range. For
1601 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1602 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1603 no single range for x_2 that could describe LE_EXPR, so we might
1604 as well build the range [b_4, +INF] for it.
1605 One special case we handle is extracting a range from a
1606 range test encoded as (unsigned)var + CST <= limit. */
1607 if (TREE_CODE (cond) == NOP_EXPR
1608 || TREE_CODE (cond) == PLUS_EXPR)
1610 if (TREE_CODE (cond) == PLUS_EXPR)
1612 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1613 TREE_OPERAND (cond, 1));
1614 max = int_const_binop (PLUS_EXPR, limit, min);
1615 cond = TREE_OPERAND (cond, 0);
1617 else
1619 min = build_int_cst (TREE_TYPE (var), 0);
1620 max = limit;
1623 /* Make sure to not set TREE_OVERFLOW on the final type
1624 conversion. We are willingly interpreting large positive
1625 unsigned values as negative signed values here. */
1626 min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
1627 0, false);
1628 max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
1629 0, false);
1631 /* We can transform a max, min range to an anti-range or
1632 vice-versa. Use set_and_canonicalize_value_range which does
1633 this for us. */
1634 if (cond_code == LE_EXPR)
1635 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1636 min, max, vr_p->equiv);
1637 else if (cond_code == GT_EXPR)
1638 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1639 min, max, vr_p->equiv);
1640 else
1641 gcc_unreachable ();
1643 else if (cond_code == EQ_EXPR)
1645 enum value_range_type range_type;
1647 if (limit_vr)
1649 range_type = limit_vr->type;
1650 min = limit_vr->min;
1651 max = limit_vr->max;
1653 else
1655 range_type = VR_RANGE;
1656 min = limit;
1657 max = limit;
1660 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1662 /* When asserting the equality VAR == LIMIT and LIMIT is another
1663 SSA name, the new range will also inherit the equivalence set
1664 from LIMIT. */
1665 if (TREE_CODE (limit) == SSA_NAME)
1666 add_equivalence (&vr_p->equiv, limit);
1668 else if (cond_code == NE_EXPR)
1670 /* As described above, when LIMIT's range is an anti-range and
1671 this assertion is an inequality (NE_EXPR), then we cannot
1672 derive anything from the anti-range. For instance, if
1673 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1674 not imply that VAR's range is [0, 0]. So, in the case of
1675 anti-ranges, we just assert the inequality using LIMIT and
1676 not its anti-range.
1678 If LIMIT_VR is a range, we can only use it to build a new
1679 anti-range if LIMIT_VR is a single-valued range. For
1680 instance, if LIMIT_VR is [0, 1], the predicate
1681 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1682 Rather, it means that for value 0 VAR should be ~[0, 0]
1683 and for value 1, VAR should be ~[1, 1]. We cannot
1684 represent these ranges.
1686 The only situation in which we can build a valid
1687 anti-range is when LIMIT_VR is a single-valued range
1688 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1689 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1690 if (limit_vr
1691 && limit_vr->type == VR_RANGE
1692 && compare_values (limit_vr->min, limit_vr->max) == 0)
1694 min = limit_vr->min;
1695 max = limit_vr->max;
1697 else
1699 /* In any other case, we cannot use LIMIT's range to build a
1700 valid anti-range. */
1701 min = max = limit;
1704 /* If MIN and MAX cover the whole range for their type, then
1705 just use the original LIMIT. */
1706 if (INTEGRAL_TYPE_P (type)
1707 && vrp_val_is_min (min)
1708 && vrp_val_is_max (max))
1709 min = max = limit;
1711 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1712 min, max, vr_p->equiv);
1714 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1716 min = TYPE_MIN_VALUE (type);
1718 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1719 max = limit;
1720 else
1722 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1723 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1724 LT_EXPR. */
1725 max = limit_vr->max;
1728 /* If the maximum value forces us to be out of bounds, simply punt.
1729 It would be pointless to try and do anything more since this
1730 all should be optimized away above us. */
1731 if ((cond_code == LT_EXPR
1732 && compare_values (max, min) == 0)
1733 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1734 set_value_range_to_varying (vr_p);
1735 else
1737 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1738 if (cond_code == LT_EXPR)
1740 if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1741 && !TYPE_UNSIGNED (TREE_TYPE (max)))
1742 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1743 build_int_cst (TREE_TYPE (max), -1));
1744 else
1745 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1746 build_int_cst (TREE_TYPE (max), 1));
1747 if (EXPR_P (max))
1748 TREE_NO_WARNING (max) = 1;
1751 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1754 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1756 max = TYPE_MAX_VALUE (type);
1758 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1759 min = limit;
1760 else
1762 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1763 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1764 GT_EXPR. */
1765 min = limit_vr->min;
1768 /* If the minimum value forces us to be out of bounds, simply punt.
1769 It would be pointless to try and do anything more since this
1770 all should be optimized away above us. */
1771 if ((cond_code == GT_EXPR
1772 && compare_values (min, max) == 0)
1773 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1774 set_value_range_to_varying (vr_p);
1775 else
1777 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1778 if (cond_code == GT_EXPR)
1780 if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1781 && !TYPE_UNSIGNED (TREE_TYPE (min)))
1782 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1783 build_int_cst (TREE_TYPE (min), -1));
1784 else
1785 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1786 build_int_cst (TREE_TYPE (min), 1));
1787 if (EXPR_P (min))
1788 TREE_NO_WARNING (min) = 1;
1791 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1794 else
1795 gcc_unreachable ();
1797 /* Finally intersect the new range with what we already know about var. */
1798 vrp_intersect_ranges (vr_p, get_value_range (var));
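/* As a concrete example of the above: for ASSERT_EXPR <x_2, x_2 > 10> with a
   32-bit signed X_2 whose previous range was VARYING, extract_range_from_assert
   computes the range [11, INT_MAX] for the new SSA name.  */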
1802 /* Extract range information from SSA name VAR and store it in VR. If
1803 VAR has an interesting range, use it. Otherwise, create the
1804 range [VAR, VAR] and return it. This is useful in situations where
1805 we may have conditionals testing values of VARYING names. For
1806 instance,
1808 x_3 = y_5;
1809 if (x_3 > y_5)
1812 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1813 always false. */
1815 static void
1816 extract_range_from_ssa_name (value_range_t *vr, tree var)
1818 value_range_t *var_vr = get_value_range (var);
1820 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1821 copy_value_range (vr, var_vr);
1822 else
1823 set_value_range (vr, VR_RANGE, var, var, NULL);
1825 add_equivalence (&vr->equiv, var);
1829 /* Wrapper around int_const_binop. If the operation overflows and we
1830 are not using wrapping arithmetic, then adjust the result to be
1831 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1832 NULL_TREE if we need to use an overflow infinity representation but
1833 the type does not support it. */
1835 static tree
1836 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1838 tree res;
1840 res = int_const_binop (code, val1, val2);
1842 /* If we are using unsigned arithmetic, operate symbolically
1843 on -INF and +INF as int_const_binop only handles signed overflow. */
1844 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1846 int checkz = compare_values (res, val1);
1847 bool overflow = false;
1849 /* Ensure that res = val1 [+*] val2 >= val1
1850 or that res = val1 - val2 <= val1. */
1851 if ((code == PLUS_EXPR
1852 && !(checkz == 1 || checkz == 0))
1853 || (code == MINUS_EXPR
1854 && !(checkz == 0 || checkz == -1)))
1856 overflow = true;
1858 /* Checking for multiplication overflow is done by dividing the
1859 output of the multiplication by the first input of the
1860 multiplication. If the result of that division operation is
1861 not equal to the second input of the multiplication, then the
1862 multiplication overflowed. */
1863 else if (code == MULT_EXPR && !integer_zerop (val1))
1865 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1866 res,
1867 val1);
1868 int check = compare_values (tmp, val2);
1870 if (check != 0)
1871 overflow = true;
1874 if (overflow)
1876 res = copy_node (res);
1877 TREE_OVERFLOW (res) = 1;
1881 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1882 /* If the signed operation wraps then int_const_binop has done
1883 everything we want. */
1885 else if ((TREE_OVERFLOW (res)
1886 && !TREE_OVERFLOW (val1)
1887 && !TREE_OVERFLOW (val2))
1888 || is_overflow_infinity (val1)
1889 || is_overflow_infinity (val2))
1891 /* If the operation overflowed but neither VAL1 nor VAL2 are
1892 overflown, return -INF or +INF depending on the operation
1893 and the combination of signs of the operands. */
1894 int sgn1 = tree_int_cst_sgn (val1);
1895 int sgn2 = tree_int_cst_sgn (val2);
1897 if (needs_overflow_infinity (TREE_TYPE (res))
1898 && !supports_overflow_infinity (TREE_TYPE (res)))
1899 return NULL_TREE;
1901 /* We have to punt on adding infinities of different signs,
1902 since we can't tell what the sign of the result should be.
1903 Likewise for subtracting infinities of the same sign. */
1904 if (((code == PLUS_EXPR && sgn1 != sgn2)
1905 || (code == MINUS_EXPR && sgn1 == sgn2))
1906 && is_overflow_infinity (val1)
1907 && is_overflow_infinity (val2))
1908 return NULL_TREE;
1910 /* Don't try to handle division or shifting of infinities. */
1911 if ((code == TRUNC_DIV_EXPR
1912 || code == FLOOR_DIV_EXPR
1913 || code == CEIL_DIV_EXPR
1914 || code == EXACT_DIV_EXPR
1915 || code == ROUND_DIV_EXPR
1916 || code == RSHIFT_EXPR)
1917 && (is_overflow_infinity (val1)
1918 || is_overflow_infinity (val2)))
1919 return NULL_TREE;
1921 /* Notice that we only need to handle the restricted set of
1922 operations handled by extract_range_from_binary_expr.
1923 Among them, only multiplication, addition and subtraction
1924 can yield overflow without overflown operands because we
1925 are working with integral types only... except in the
1926 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1927 for division too. */
1929 /* For multiplication, the sign of the overflow is given
1930 by the comparison of the signs of the operands. */
1931 if ((code == MULT_EXPR && sgn1 == sgn2)
1932 /* For addition, the operands must be of the same sign
1933 to yield an overflow. Its sign is therefore that
1934 of one of the operands, for example the first. For
1935 infinite operands X + -INF is negative, not positive. */
1936 || (code == PLUS_EXPR
1937 && (sgn1 >= 0
1938 ? !is_negative_overflow_infinity (val2)
1939 : is_positive_overflow_infinity (val2)))
1940 /* For subtraction, non-infinite operands must be of
1941 different signs to yield an overflow. Its sign is
1942 therefore that of the first operand or the opposite of
1943 that of the second operand. A first operand of 0 counts
1944 as positive here, for the corner case 0 - (-INF), which
1945 overflows, but must yield +INF. For infinite operands 0
1946 - INF is negative, not positive. */
1947 || (code == MINUS_EXPR
1948 && (sgn1 >= 0
1949 ? !is_positive_overflow_infinity (val2)
1950 : is_negative_overflow_infinity (val2)))
1951 /* We only get in here with positive shift count, so the
1952 overflow direction is the same as the sign of val1.
1953 Actually rshift does not overflow at all, but we only
1954 handle the case of shifting overflowed -INF and +INF. */
1955 || (code == RSHIFT_EXPR
1956 && sgn1 >= 0)
1957 /* For division, the only case is -INF / -1 = +INF. */
1958 || code == TRUNC_DIV_EXPR
1959 || code == FLOOR_DIV_EXPR
1960 || code == CEIL_DIV_EXPR
1961 || code == EXACT_DIV_EXPR
1962 || code == ROUND_DIV_EXPR)
1963 return (needs_overflow_infinity (TREE_TYPE (res))
1964 ? positive_overflow_infinity (TREE_TYPE (res))
1965 : TYPE_MAX_VALUE (TREE_TYPE (res)));
1966 else
1967 return (needs_overflow_infinity (TREE_TYPE (res))
1968 ? negative_overflow_infinity (TREE_TYPE (res))
1969 : TYPE_MIN_VALUE (TREE_TYPE (res)));
1972 return res;
1976 /* For range VR compute two double_int bitmasks.  If a bit in the
1977 *MAY_BE_NONZERO bitmask is unset, that bit is 0 for all numbers in
1978 the range, otherwise it might be 0 or 1.  If a bit in the
1979 *MUST_BE_NONZERO bitmask is set, that bit is 1 for all numbers in
1980 the range, otherwise it might be 0 or 1.  */
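/* For example, for VR = [4, 7] (binary 100 through 111) this computes
   *MAY_BE_NONZERO = 0b111 and *MUST_BE_NONZERO = 0b100: bit 2 is set in
   every value of the range, bits 0 and 1 vary, and all higher bits are
   zero throughout.  */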
1982 static bool
1983 zero_nonzero_bits_from_vr (value_range_t *vr,
1984 double_int *may_be_nonzero,
1985 double_int *must_be_nonzero)
1987 *may_be_nonzero = double_int_minus_one;
1988 *must_be_nonzero = double_int_zero;
1989 if (!range_int_cst_p (vr)
1990 || TREE_OVERFLOW (vr->min)
1991 || TREE_OVERFLOW (vr->max))
1992 return false;
1994 if (range_int_cst_singleton_p (vr))
1996 *may_be_nonzero = tree_to_double_int (vr->min);
1997 *must_be_nonzero = *may_be_nonzero;
1999 else if (tree_int_cst_sgn (vr->min) >= 0
2000 || tree_int_cst_sgn (vr->max) < 0)
2002 double_int dmin = tree_to_double_int (vr->min);
2003 double_int dmax = tree_to_double_int (vr->max);
2004 double_int xor_mask = dmin ^ dmax;
2005 *may_be_nonzero = dmin | dmax;
2006 *must_be_nonzero = dmin & dmax;
2007 if (xor_mask.high != 0)
2009 unsigned HOST_WIDE_INT mask
2010 = ((unsigned HOST_WIDE_INT) 1
2011 << floor_log2 (xor_mask.high)) - 1;
2012 may_be_nonzero->low = ALL_ONES;
2013 may_be_nonzero->high |= mask;
2014 must_be_nonzero->low = 0;
2015 must_be_nonzero->high &= ~mask;
2017 else if (xor_mask.low != 0)
2019 unsigned HOST_WIDE_INT mask
2020 = ((unsigned HOST_WIDE_INT) 1
2021 << floor_log2 (xor_mask.low)) - 1;
2022 may_be_nonzero->low |= mask;
2023 must_be_nonzero->low &= ~mask;
2027 return true;
2030 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
2031 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
2032 false otherwise. If *AR can be represented with a single range
2033 *VR1 will be VR_UNDEFINED. */
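/* For instance, for a 32-bit signed type the anti-range ~[0, 5] splits
   into *VR0 = [INT_MIN, -1] and *VR1 = [6, INT_MAX], while ~[INT_MIN, 5]
   is representable as the single range *VR0 = [6, INT_MAX] and *VR1 is
   left VR_UNDEFINED.  */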
2035 static bool
2036 ranges_from_anti_range (value_range_t *ar,
2037 value_range_t *vr0, value_range_t *vr1)
2039 tree type = TREE_TYPE (ar->min);
2041 vr0->type = VR_UNDEFINED;
2042 vr1->type = VR_UNDEFINED;
2044 if (ar->type != VR_ANTI_RANGE
2045 || TREE_CODE (ar->min) != INTEGER_CST
2046 || TREE_CODE (ar->max) != INTEGER_CST
2047 || !vrp_val_min (type)
2048 || !vrp_val_max (type))
2049 return false;
2051 if (!vrp_val_is_min (ar->min))
2053 vr0->type = VR_RANGE;
2054 vr0->min = vrp_val_min (type);
2055 vr0->max
2056 = double_int_to_tree (type,
2057 tree_to_double_int (ar->min) - double_int_one);
2059 if (!vrp_val_is_max (ar->max))
2061 vr1->type = VR_RANGE;
2062 vr1->min
2063 = double_int_to_tree (type,
2064 tree_to_double_int (ar->max) + double_int_one);
2065 vr1->max = vrp_val_max (type);
2067 if (vr0->type == VR_UNDEFINED)
2069 *vr0 = *vr1;
2070 vr1->type = VR_UNDEFINED;
2073 return vr0->type != VR_UNDEFINED;
2076 /* Helper to extract a value-range *VR for a multiplicative operation
2077 *VR0 CODE *VR1. */
2079 static void
2080 extract_range_from_multiplicative_op_1 (value_range_t *vr,
2081 enum tree_code code,
2082 value_range_t *vr0, value_range_t *vr1)
2084 enum value_range_type type;
2085 tree val[4];
2086 size_t i;
2087 tree min, max;
2088 bool sop;
2089 int cmp;
2091 /* Multiplications, divisions and shifts are a bit tricky to handle,
2092 depending on the mix of signs we have in the two ranges, we
2093 need to operate on different values to get the minimum and
2094 maximum values for the new range. One approach is to figure
2095 out all the variations of range combinations and do the
2096 operations.
2098 However, this involves several calls to compare_values and it
2099 is pretty convoluted. It's simpler to do the 4 operations
2100 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
2101 MAX1) and then figure the smallest and largest values to form
2102 the new range. */
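/* For example, [2, 3] * [-5, 4] gives the cross products -10, 8, -15
   and 12, so the resulting range is [-15, 12].  */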
2103 gcc_assert (code == MULT_EXPR
2104 || code == TRUNC_DIV_EXPR
2105 || code == FLOOR_DIV_EXPR
2106 || code == CEIL_DIV_EXPR
2107 || code == EXACT_DIV_EXPR
2108 || code == ROUND_DIV_EXPR
2109 || code == RSHIFT_EXPR
2110 || code == LSHIFT_EXPR);
2111 gcc_assert ((vr0->type == VR_RANGE
2112 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2113 && vr0->type == vr1->type);
2115 type = vr0->type;
2117 /* Compute the 4 cross operations. */
2118 sop = false;
2119 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2120 if (val[0] == NULL_TREE)
2121 sop = true;
2123 if (vr1->max == vr1->min)
2124 val[1] = NULL_TREE;
2125 else
2127 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2128 if (val[1] == NULL_TREE)
2129 sop = true;
2132 if (vr0->max == vr0->min)
2133 val[2] = NULL_TREE;
2134 else
2136 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2137 if (val[2] == NULL_TREE)
2138 sop = true;
2141 if (vr0->min == vr0->max || vr1->min == vr1->max)
2142 val[3] = NULL_TREE;
2143 else
2145 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2146 if (val[3] == NULL_TREE)
2147 sop = true;
2150 if (sop)
2152 set_value_range_to_varying (vr);
2153 return;
2156 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2157 of VAL[i]. */
2158 min = val[0];
2159 max = val[0];
2160 for (i = 1; i < 4; i++)
2162 if (!is_gimple_min_invariant (min)
2163 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2164 || !is_gimple_min_invariant (max)
2165 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2166 break;
2168 if (val[i])
2170 if (!is_gimple_min_invariant (val[i])
2171 || (TREE_OVERFLOW (val[i])
2172 && !is_overflow_infinity (val[i])))
2174 /* If we found an overflowed value, set MIN and MAX
2175 to it so that we set the resulting range to
2176 VARYING. */
2177 min = max = val[i];
2178 break;
2181 if (compare_values (val[i], min) == -1)
2182 min = val[i];
2184 if (compare_values (val[i], max) == 1)
2185 max = val[i];
2189 /* If either MIN or MAX overflowed, then set the resulting range to
2190 VARYING. But we do accept an overflow infinity
2191 representation. */
2192 if (min == NULL_TREE
2193 || !is_gimple_min_invariant (min)
2194 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2195 || max == NULL_TREE
2196 || !is_gimple_min_invariant (max)
2197 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2199 set_value_range_to_varying (vr);
2200 return;
2203 /* We punt if:
2204 1) [-INF, +INF]
2205 2) [-INF, +-INF(OVF)]
2206 3) [+-INF(OVF), +INF]
2207 4) [+-INF(OVF), +-INF(OVF)]
2208 We learn nothing when we have INF and INF(OVF) on both sides.
2209 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2210 overflow. */
2211 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2212 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2214 set_value_range_to_varying (vr);
2215 return;
2218 cmp = compare_values (min, max);
2219 if (cmp == -2 || cmp == 1)
2221 /* If the new range has its limits swapped around (MIN > MAX),
2222 then the operation caused one of them to wrap around, mark
2223 the new range VARYING. */
2224 set_value_range_to_varying (vr);
2226 else
2227 set_value_range (vr, type, min, max, NULL);
2230 /* Some quadruple precision helpers. */
2231 static int
2232 quad_int_cmp (double_int l0, double_int h0,
2233 double_int l1, double_int h1, bool uns)
2235 int c = h0.cmp (h1, uns);
2236 if (c != 0) return c;
2237 return l0.ucmp (l1);
2240 static void
2241 quad_int_pair_sort (double_int *l0, double_int *h0,
2242 double_int *l1, double_int *h1, bool uns)
2244 if (quad_int_cmp (*l0, *h0, *l1, *h1, uns) > 0)
2246 double_int tmp;
2247 tmp = *l0; *l0 = *l1; *l1 = tmp;
2248 tmp = *h0; *h0 = *h1; *h1 = tmp;
2252 /* Extract range information from a binary operation CODE based on
2253 the ranges of each of its operands, *VR0 and *VR1 with resulting
2254 type EXPR_TYPE. The resulting range is stored in *VR. */
2256 static void
2257 extract_range_from_binary_expr_1 (value_range_t *vr,
2258 enum tree_code code, tree expr_type,
2259 value_range_t *vr0_, value_range_t *vr1_)
2261 value_range_t vr0 = *vr0_, vr1 = *vr1_;
2262 value_range_t vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
2263 enum value_range_type type;
2264 tree min = NULL_TREE, max = NULL_TREE;
2265 int cmp;
2267 if (!INTEGRAL_TYPE_P (expr_type)
2268 && !POINTER_TYPE_P (expr_type))
2270 set_value_range_to_varying (vr);
2271 return;
2274 /* Not all binary expressions can be applied to ranges in a
2275 meaningful way. Handle only arithmetic operations. */
2276 if (code != PLUS_EXPR
2277 && code != MINUS_EXPR
2278 && code != POINTER_PLUS_EXPR
2279 && code != MULT_EXPR
2280 && code != TRUNC_DIV_EXPR
2281 && code != FLOOR_DIV_EXPR
2282 && code != CEIL_DIV_EXPR
2283 && code != EXACT_DIV_EXPR
2284 && code != ROUND_DIV_EXPR
2285 && code != TRUNC_MOD_EXPR
2286 && code != RSHIFT_EXPR
2287 && code != LSHIFT_EXPR
2288 && code != MIN_EXPR
2289 && code != MAX_EXPR
2290 && code != BIT_AND_EXPR
2291 && code != BIT_IOR_EXPR
2292 && code != BIT_XOR_EXPR)
2294 set_value_range_to_varying (vr);
2295 return;
2298 /* If both ranges are UNDEFINED, so is the result. */
2299 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
2301 set_value_range_to_undefined (vr);
2302 return;
2304 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2305 code. At some point we may want to special-case operations that
2306 have UNDEFINED result for all or some value-ranges of the not UNDEFINED
2307 operand. */
2308 else if (vr0.type == VR_UNDEFINED)
2309 set_value_range_to_varying (&vr0);
2310 else if (vr1.type == VR_UNDEFINED)
2311 set_value_range_to_varying (&vr1);
2313 /* Now canonicalize anti-ranges to ranges when they are not symbolic
2314 and express ~[] op X as ([]' op X) U ([]'' op X). */
2315 if (vr0.type == VR_ANTI_RANGE
2316 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2318 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
2319 if (vrtem1.type != VR_UNDEFINED)
2321 value_range_t vrres = VR_INITIALIZER;
2322 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2323 &vrtem1, vr1_);
2324 vrp_meet (vr, &vrres);
2326 return;
2328 /* Likewise for X op ~[]. */
2329 if (vr1.type == VR_ANTI_RANGE
2330 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
2332 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
2333 if (vrtem1.type != VR_UNDEFINED)
2335 value_range_t vrres = VR_INITIALIZER;
2336 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2337 vr0_, &vrtem1);
2338 vrp_meet (vr, &vrres);
2340 return;
2343 /* The type of the resulting value range defaults to VR0.TYPE. */
2344 type = vr0.type;
2346 /* Refuse to operate on VARYING ranges, ranges of different kinds
2347 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2348 because we may be able to derive a useful range even if one of
2349 the operands is VR_VARYING or symbolic range. Similarly for
2350 divisions. TODO, we may be able to derive anti-ranges in
2351 some cases. */
2352 if (code != BIT_AND_EXPR
2353 && code != BIT_IOR_EXPR
2354 && code != TRUNC_DIV_EXPR
2355 && code != FLOOR_DIV_EXPR
2356 && code != CEIL_DIV_EXPR
2357 && code != EXACT_DIV_EXPR
2358 && code != ROUND_DIV_EXPR
2359 && code != TRUNC_MOD_EXPR
2360 && code != MIN_EXPR
2361 && code != MAX_EXPR
2362 && (vr0.type == VR_VARYING
2363 || vr1.type == VR_VARYING
2364 || vr0.type != vr1.type
2365 || symbolic_range_p (&vr0)
2366 || symbolic_range_p (&vr1)))
2368 set_value_range_to_varying (vr);
2369 return;
2372 /* Now evaluate the expression to determine the new range. */
2373 if (POINTER_TYPE_P (expr_type))
2375 if (code == MIN_EXPR || code == MAX_EXPR)
2377 /* For MIN/MAX expressions with pointers, we only care about
2378 nullness, if both are non null, then the result is nonnull.
2379 If both are null, then the result is null. Otherwise they
2380 are varying. */
2381 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2382 set_value_range_to_nonnull (vr, expr_type);
2383 else if (range_is_null (&vr0) && range_is_null (&vr1))
2384 set_value_range_to_null (vr, expr_type);
2385 else
2386 set_value_range_to_varying (vr);
2388 else if (code == POINTER_PLUS_EXPR)
2390 /* For pointer types, we are really only interested in asserting
2391 whether the expression evaluates to non-NULL. */
2392 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2393 set_value_range_to_nonnull (vr, expr_type);
2394 else if (range_is_null (&vr0) && range_is_null (&vr1))
2395 set_value_range_to_null (vr, expr_type);
2396 else
2397 set_value_range_to_varying (vr);
2399 else if (code == BIT_AND_EXPR)
2401 /* For pointer types, we are really only interested in asserting
2402 whether the expression evaluates to non-NULL. */
2403 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2404 set_value_range_to_nonnull (vr, expr_type);
2405 else if (range_is_null (&vr0) || range_is_null (&vr1))
2406 set_value_range_to_null (vr, expr_type);
2407 else
2408 set_value_range_to_varying (vr);
2410 else
2411 set_value_range_to_varying (vr);
2413 return;
2416 /* For integer ranges, apply the operation to each end of the
2417 range and see what we end up with. */
2418 if (code == PLUS_EXPR || code == MINUS_EXPR)
2420 /* If we have a PLUS_EXPR with two VR_RANGE integer constant
2421 ranges compute the precise range for such case if possible. */
2422 if (range_int_cst_p (&vr0)
2423 && range_int_cst_p (&vr1)
2424 /* We need as many bits as the possibly unsigned inputs. */
2425 && TYPE_PRECISION (expr_type) <= HOST_BITS_PER_DOUBLE_INT)
2427 double_int min0 = tree_to_double_int (vr0.min);
2428 double_int max0 = tree_to_double_int (vr0.max);
2429 double_int min1 = tree_to_double_int (vr1.min);
2430 double_int max1 = tree_to_double_int (vr1.max);
2431 bool uns = TYPE_UNSIGNED (expr_type);
2432 double_int type_min
2433 = double_int::min_value (TYPE_PRECISION (expr_type), uns);
2434 double_int type_max
2435 = double_int::max_value (TYPE_PRECISION (expr_type), uns);
2436 double_int dmin, dmax;
2437 int min_ovf = 0;
2438 int max_ovf = 0;
2440 if (code == PLUS_EXPR)
2442 dmin = min0 + min1;
2443 dmax = max0 + max1;
2445 /* Check for overflow in double_int. */
2446 if (min1.cmp (double_int_zero, uns) != dmin.cmp (min0, uns))
2447 min_ovf = min0.cmp (dmin, uns);
2448 if (max1.cmp (double_int_zero, uns) != dmax.cmp (max0, uns))
2449 max_ovf = max0.cmp (dmax, uns);
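/* E.g. a positive MIN1 must make DMIN compare greater than MIN0; if the
   comparison comes out the other way the double_int addition itself
   wrapped, and MIN0.cmp (DMIN) records the direction of that overflow
   in MIN_OVF (and likewise for DMAX and MAX_OVF).  */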
2451 else /* if (code == MINUS_EXPR) */
2453 dmin = min0 - max1;
2454 dmax = max0 - min1;
2456 if (double_int_zero.cmp (max1, uns) != dmin.cmp (min0, uns))
2457 min_ovf = min0.cmp (max1, uns);
2458 if (double_int_zero.cmp (min1, uns) != dmax.cmp (max0, uns))
2459 max_ovf = max0.cmp (min1, uns);
2462 /* For non-wrapping arithmetic look at possibly smaller
2463 value-ranges of the type. */
2464 if (!TYPE_OVERFLOW_WRAPS (expr_type))
2466 if (vrp_val_min (expr_type))
2467 type_min = tree_to_double_int (vrp_val_min (expr_type));
2468 if (vrp_val_max (expr_type))
2469 type_max = tree_to_double_int (vrp_val_max (expr_type));
2472 /* Check for type overflow. */
2473 if (min_ovf == 0)
2475 if (dmin.cmp (type_min, uns) == -1)
2476 min_ovf = -1;
2477 else if (dmin.cmp (type_max, uns) == 1)
2478 min_ovf = 1;
2480 if (max_ovf == 0)
2482 if (dmax.cmp (type_min, uns) == -1)
2483 max_ovf = -1;
2484 else if (dmax.cmp (type_max, uns) == 1)
2485 max_ovf = 1;
2488 if (TYPE_OVERFLOW_WRAPS (expr_type))
2490 /* If overflow wraps, truncate the values and adjust the
2491 range kind and bounds appropriately. */
2492 double_int tmin
2493 = dmin.ext (TYPE_PRECISION (expr_type), uns);
2494 double_int tmax
2495 = dmax.ext (TYPE_PRECISION (expr_type), uns);
2496 if (min_ovf == max_ovf)
2498 /* No overflow or both overflow or underflow. The
2499 range kind stays VR_RANGE. */
2500 min = double_int_to_tree (expr_type, tmin);
2501 max = double_int_to_tree (expr_type, tmax);
2503 else if (min_ovf == -1
2504 && max_ovf == 1)
2506 /* Underflow and overflow, drop to VR_VARYING. */
2507 set_value_range_to_varying (vr);
2508 return;
2510 else
2512 /* Min underflow or max overflow. The range kind
2513 changes to VR_ANTI_RANGE. */
2514 bool covers = false;
2515 double_int tem = tmin;
2516 gcc_assert ((min_ovf == -1 && max_ovf == 0)
2517 || (max_ovf == 1 && min_ovf == 0));
2518 type = VR_ANTI_RANGE;
2519 tmin = tmax + double_int_one;
2520 if (tmin.cmp (tmax, uns) < 0)
2521 covers = true;
2522 tmax = tem + double_int_minus_one;
2523 if (tmax.cmp (tem, uns) > 0)
2524 covers = true;
2525 /* If the anti-range would cover nothing, drop to varying.
2526 Likewise if the anti-range bounds are outside of the
2527 type's values. */
2528 if (covers || tmin.cmp (tmax, uns) > 0)
2530 set_value_range_to_varying (vr);
2531 return;
2533 min = double_int_to_tree (expr_type, tmin);
2534 max = double_int_to_tree (expr_type, tmax);
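/* For example, in an 8-bit unsigned type [100, 250] + [10, 10] gives
   DMIN = 110 (no overflow) and DMAX = 260 (overflow, truncating to 4),
   so the result becomes ~[5, 109], i.e. [110, 255] union [0, 4].  */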
2537 else
2539 /* If overflow does not wrap, saturate to the type's min/max
2540 value. */
2541 if (min_ovf == -1)
2543 if (needs_overflow_infinity (expr_type)
2544 && supports_overflow_infinity (expr_type))
2545 min = negative_overflow_infinity (expr_type);
2546 else
2547 min = double_int_to_tree (expr_type, type_min);
2549 else if (min_ovf == 1)
2551 if (needs_overflow_infinity (expr_type)
2552 && supports_overflow_infinity (expr_type))
2553 min = positive_overflow_infinity (expr_type);
2554 else
2555 min = double_int_to_tree (expr_type, type_max);
2557 else
2558 min = double_int_to_tree (expr_type, dmin);
2560 if (max_ovf == -1)
2562 if (needs_overflow_infinity (expr_type)
2563 && supports_overflow_infinity (expr_type))
2564 max = negative_overflow_infinity (expr_type);
2565 else
2566 max = double_int_to_tree (expr_type, type_min);
2568 else if (max_ovf == 1)
2570 if (needs_overflow_infinity (expr_type)
2571 && supports_overflow_infinity (expr_type))
2572 max = positive_overflow_infinity (expr_type);
2573 else
2574 max = double_int_to_tree (expr_type, type_max);
2576 else
2577 max = double_int_to_tree (expr_type, dmax);
2579 if (needs_overflow_infinity (expr_type)
2580 && supports_overflow_infinity (expr_type))
2582 if (is_negative_overflow_infinity (vr0.min)
2583 || (code == PLUS_EXPR
2584 ? is_negative_overflow_infinity (vr1.min)
2585 : is_positive_overflow_infinity (vr1.max)))
2586 min = negative_overflow_infinity (expr_type);
2587 if (is_positive_overflow_infinity (vr0.max)
2588 || (code == PLUS_EXPR
2589 ? is_positive_overflow_infinity (vr1.max)
2590 : is_negative_overflow_infinity (vr1.min)))
2591 max = positive_overflow_infinity (expr_type);
2594 else
2596 /* For other cases, for example if we have a PLUS_EXPR with two
2597 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
2598 to compute a precise range for such a case.
2599 ??? General even mixed range kind operations can be expressed
2600 by for example transforming ~[3, 5] + [1, 2] to range-only
2601 operations and a union primitive:
2602 [-INF, 2] + [1, 2] U [6, +INF] + [1, 2]
2603 [-INF+1, 4] U [7, +INF(OVF)]
2604 though usually the union is not exactly representable with
2605 a single range or anti-range as the above is
2606 [-INF+1, +INF(OVF)] intersected with ~[5, 6]
2607 but one could use a scheme similar to equivalences for this. */
2608 set_value_range_to_varying (vr);
2609 return;
2612 else if (code == MIN_EXPR
2613 || code == MAX_EXPR)
2615 if (vr0.type == VR_RANGE
2616 && !symbolic_range_p (&vr0))
2618 type = VR_RANGE;
2619 if (vr1.type == VR_RANGE
2620 && !symbolic_range_p (&vr1))
2622 /* For operations that make the resulting range directly
2623 proportional to the original ranges, apply the operation to
2624 the same end of each range. */
2625 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2626 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2628 else if (code == MIN_EXPR)
2630 min = vrp_val_min (expr_type);
2631 max = vr0.max;
2633 else if (code == MAX_EXPR)
2635 min = vr0.min;
2636 max = vrp_val_max (expr_type);
2639 else if (vr1.type == VR_RANGE
2640 && !symbolic_range_p (&vr1))
2642 type = VR_RANGE;
2643 if (code == MIN_EXPR)
2645 min = vrp_val_min (expr_type);
2646 max = vr1.max;
2648 else if (code == MAX_EXPR)
2650 min = vr1.min;
2651 max = vrp_val_max (expr_type);
2654 else
2656 set_value_range_to_varying (vr);
2657 return;
2660 else if (code == MULT_EXPR)
2662 /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
2663 drop to varying. */
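/* For example, with a wrapping 32-bit unsigned type the range
   [0xfffffffd, 0xffffffff] is canonicalized below to [-3, -1], so
   multiplying it by itself yields [1, 9] rather than varying.  */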
2664 if (range_int_cst_p (&vr0)
2665 && range_int_cst_p (&vr1)
2666 && TYPE_OVERFLOW_WRAPS (expr_type))
2668 double_int min0, max0, min1, max1, sizem1, size;
2669 double_int prod0l, prod0h, prod1l, prod1h,
2670 prod2l, prod2h, prod3l, prod3h;
2671 bool uns0, uns1, uns;
2673 sizem1 = double_int::max_value (TYPE_PRECISION (expr_type), true);
2674 size = sizem1 + double_int_one;
2676 min0 = tree_to_double_int (vr0.min);
2677 max0 = tree_to_double_int (vr0.max);
2678 min1 = tree_to_double_int (vr1.min);
2679 max1 = tree_to_double_int (vr1.max);
2681 uns0 = TYPE_UNSIGNED (expr_type);
2682 uns1 = uns0;
2684 /* Canonicalize the intervals. */
2685 if (TYPE_UNSIGNED (expr_type))
2687 double_int min2 = size - min0;
2688 if (!min2.is_zero () && min2.cmp (max0, true) < 0)
2690 min0 = -min2;
2691 max0 -= size;
2692 uns0 = false;
2695 min2 = size - min1;
2696 if (!min2.is_zero () && min2.cmp (max1, true) < 0)
2698 min1 = -min2;
2699 max1 -= size;
2700 uns1 = false;
2703 uns = uns0 & uns1;
2705 bool overflow;
2706 prod0l = min0.wide_mul_with_sign (min1, true, &prod0h, &overflow);
2707 if (!uns0 && min0.is_negative ())
2708 prod0h -= min1;
2709 if (!uns1 && min1.is_negative ())
2710 prod0h -= min0;
2712 prod1l = min0.wide_mul_with_sign (max1, true, &prod1h, &overflow);
2713 if (!uns0 && min0.is_negative ())
2714 prod1h -= max1;
2715 if (!uns1 && max1.is_negative ())
2716 prod1h -= min0;
2718 prod2l = max0.wide_mul_with_sign (min1, true, &prod2h, &overflow);
2719 if (!uns0 && max0.is_negative ())
2720 prod2h -= min1;
2721 if (!uns1 && min1.is_negative ())
2722 prod2h -= max0;
2724 prod3l = max0.wide_mul_with_sign (max1, true, &prod3h, &overflow);
2725 if (!uns0 && max0.is_negative ())
2726 prod3h -= max1;
2727 if (!uns1 && max1.is_negative ())
2728 prod3h -= max0;
2730 /* Sort the 4 products. */
2731 quad_int_pair_sort (&prod0l, &prod0h, &prod3l, &prod3h, uns);
2732 quad_int_pair_sort (&prod1l, &prod1h, &prod2l, &prod2h, uns);
2733 quad_int_pair_sort (&prod0l, &prod0h, &prod1l, &prod1h, uns);
2734 quad_int_pair_sort (&prod2l, &prod2h, &prod3l, &prod3h, uns);
2736 /* Max - min. */
2737 if (prod0l.is_zero ())
2739 prod1l = double_int_zero;
2740 prod1h = -prod0h;
2742 else
2744 prod1l = -prod0l;
2745 prod1h = ~prod0h;
2747 prod2l = prod3l + prod1l;
2748 prod2h = prod3h + prod1h;
2749 if (prod2l.ult (prod3l))
2750 prod2h += double_int_one; /* carry */
2752 if (!prod2h.is_zero ()
2753 || prod2l.cmp (sizem1, true) >= 0)
2755 /* The range covers all values. */
2756 set_value_range_to_varying (vr);
2757 return;
2760 /* The following should handle the wrapping and selecting
2761 VR_ANTI_RANGE for us. */
2762 min = double_int_to_tree (expr_type, prod0l);
2763 max = double_int_to_tree (expr_type, prod3l);
2764 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
2765 return;
2768 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2769 drop to VR_VARYING. It would take more effort to compute a
2770 precise range for such a case. For example, if we have
2771 op0 == 65536 and op1 == 65536 with their ranges both being
2772 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2773 we cannot claim that the product is in ~[0,0]. Note that we
2774 are guaranteed to have vr0.type == vr1.type at this
2775 point. */
2776 if (vr0.type == VR_ANTI_RANGE
2777 && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2779 set_value_range_to_varying (vr);
2780 return;
2783 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2784 return;
2786 else if (code == RSHIFT_EXPR
2787 || code == LSHIFT_EXPR)
2789 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2790 then drop to VR_VARYING. Outside of this range we get undefined
2791 behavior from the shift operation. We cannot even trust
2792 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2793 shifts, and the operation at the tree level may be widened. */
2794 if (range_int_cst_p (&vr1)
2795 && compare_tree_int (vr1.min, 0) >= 0
2796 && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
2798 if (code == RSHIFT_EXPR)
2800 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2801 return;
2803 /* We can map lshifts by constants to MULT_EXPR handling. */
2804 else if (code == LSHIFT_EXPR
2805 && range_int_cst_singleton_p (&vr1))
2807 bool saved_flag_wrapv;
2808 value_range_t vr1p = VR_INITIALIZER;
2809 vr1p.type = VR_RANGE;
2810 vr1p.min
2811 = double_int_to_tree (expr_type,
2812 double_int_one
2813 .llshift (TREE_INT_CST_LOW (vr1.min),
2814 TYPE_PRECISION (expr_type)));
2815 vr1p.max = vr1p.min;
2816 /* We have to use a wrapping multiply though as signed overflow
2817 on lshifts is implementation defined in C89. */
2818 saved_flag_wrapv = flag_wrapv;
2819 flag_wrapv = 1;
2820 extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
2821 &vr0, &vr1p);
2822 flag_wrapv = saved_flag_wrapv;
2823 return;
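/* For instance, for a 32-bit type [1, 10] << 3 is evaluated as
   [1, 10] * [8, 8], giving [8, 80].  */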
2825 else if (code == LSHIFT_EXPR
2826 && range_int_cst_p (&vr0))
2828 int prec = TYPE_PRECISION (expr_type);
2829 int overflow_pos = prec;
2830 int bound_shift;
2831 double_int bound, complement, low_bound, high_bound;
2832 bool uns = TYPE_UNSIGNED (expr_type);
2833 bool in_bounds = false;
2835 if (!uns)
2836 overflow_pos -= 1;
2838 bound_shift = overflow_pos - TREE_INT_CST_LOW (vr1.max);
2839 /* If bound_shift == HOST_BITS_PER_DOUBLE_INT, the llshift can
2840 overflow. However, for that to happen, vr1.max needs to be
2841 zero, which means vr1 is a singleton range of zero, which
2842 means it should be handled by the previous LSHIFT_EXPR
2843 if-clause. */
2844 bound = double_int_one.llshift (bound_shift, prec);
2845 complement = ~(bound - double_int_one);
2847 if (uns)
2849 low_bound = bound.zext (prec);
2850 high_bound = complement.zext (prec);
2851 if (tree_to_double_int (vr0.max).ult (low_bound))
2853 /* [5, 6] << [1, 2] == [10, 24]. */
2854 /* We're shifting out only zeroes, the value increases
2855 monotonically. */
2856 in_bounds = true;
2858 else if (high_bound.ult (tree_to_double_int (vr0.min)))
2860 /* [0xffffff00, 0xffffffff] << [1, 2]
2861 == [0xfffffc00, 0xfffffffe]. */
2862 /* We're shifting out only ones, the value decreases
2863 monotonically. */
2864 in_bounds = true;
2867 else
2869 /* [-1, 1] << [1, 2] == [-4, 4]. */
2870 low_bound = complement.sext (prec);
2871 high_bound = bound;
2872 if (tree_to_double_int (vr0.max).slt (high_bound)
2873 && low_bound.slt (tree_to_double_int (vr0.min)))
2875 /* For non-negative numbers, we're shifting out only
2876 zeroes, the value increases monotonically.
2877 For negative numbers, we're shifting out only ones, the
2878 value decreases monotonically. */
2879 in_bounds = true;
2883 if (in_bounds)
2885 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2886 return;
2890 set_value_range_to_varying (vr);
2891 return;
2893 else if (code == TRUNC_DIV_EXPR
2894 || code == FLOOR_DIV_EXPR
2895 || code == CEIL_DIV_EXPR
2896 || code == EXACT_DIV_EXPR
2897 || code == ROUND_DIV_EXPR)
2899 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2901 /* For division, if op1 has VR_RANGE but op0 does not, something
2902 can be deduced just from that range. Say [min, max] / [4, max]
2903 gives [min / 4, max / 4] range. */
2904 if (vr1.type == VR_RANGE
2905 && !symbolic_range_p (&vr1)
2906 && range_includes_zero_p (vr1.min, vr1.max) == 0)
2908 vr0.type = type = VR_RANGE;
2909 vr0.min = vrp_val_min (expr_type);
2910 vr0.max = vrp_val_max (expr_type);
2912 else
2914 set_value_range_to_varying (vr);
2915 return;
2919 /* For divisions, if flag_non_call_exceptions is true, we must
2920 not eliminate a division by zero. */
2921 if (cfun->can_throw_non_call_exceptions
2922 && (vr1.type != VR_RANGE
2923 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2925 set_value_range_to_varying (vr);
2926 return;
2929 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2930 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2931 include 0. */
2932 if (vr0.type == VR_RANGE
2933 && (vr1.type != VR_RANGE
2934 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2936 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2937 int cmp;
2939 min = NULL_TREE;
2940 max = NULL_TREE;
2941 if (TYPE_UNSIGNED (expr_type)
2942 || value_range_nonnegative_p (&vr1))
2944 /* For unsigned division or when divisor is known
2945 to be non-negative, the range has to cover
2946 all numbers from 0 to max for positive max
2947 and all numbers from min to 0 for negative min. */
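/* E.g. an unsigned [4, 20] divided by an unknown (non-zero) divisor
   can only shrink towards zero, so the result range is [0, 20].  */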
2948 cmp = compare_values (vr0.max, zero);
2949 if (cmp == -1)
2950 max = zero;
2951 else if (cmp == 0 || cmp == 1)
2952 max = vr0.max;
2953 else
2954 type = VR_VARYING;
2955 cmp = compare_values (vr0.min, zero);
2956 if (cmp == 1)
2957 min = zero;
2958 else if (cmp == 0 || cmp == -1)
2959 min = vr0.min;
2960 else
2961 type = VR_VARYING;
2963 else
2965 /* Otherwise the range is -max .. max or min .. -min
2966 depending on which bound is bigger in absolute value,
2967 as the division can change the sign. */
2968 abs_extent_range (vr, vr0.min, vr0.max);
2969 return;
2971 if (type == VR_VARYING)
2973 set_value_range_to_varying (vr);
2974 return;
2977 else
2979 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2980 return;
2983 else if (code == TRUNC_MOD_EXPR)
2985 if (vr1.type != VR_RANGE
2986 || range_includes_zero_p (vr1.min, vr1.max) != 0
2987 || vrp_val_is_min (vr1.min))
2989 set_value_range_to_varying (vr);
2990 return;
2992 type = VR_RANGE;
2993 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */
2994 max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min);
2995 if (tree_int_cst_lt (max, vr1.max))
2996 max = vr1.max;
2997 max = int_const_binop (MINUS_EXPR, max, integer_one_node);
2998 /* If the dividend is non-negative the modulus will be
2999 non-negative as well. */
3000 if (TYPE_UNSIGNED (expr_type)
3001 || value_range_nonnegative_p (&vr0))
3002 min = build_int_cst (TREE_TYPE (max), 0);
3003 else
3004 min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max);
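/* For example, X % [3, 7] yields [-6, 6] for a signed X of unknown
   sign, and [0, 6] when X is known to be non-negative.  */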
3006 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
3008 bool int_cst_range0, int_cst_range1;
3009 double_int may_be_nonzero0, may_be_nonzero1;
3010 double_int must_be_nonzero0, must_be_nonzero1;
3012 int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
3013 &must_be_nonzero0);
3014 int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
3015 &must_be_nonzero1);
3017 type = VR_RANGE;
3018 if (code == BIT_AND_EXPR)
3020 double_int dmax;
3021 min = double_int_to_tree (expr_type,
3022 must_be_nonzero0 & must_be_nonzero1);
3023 dmax = may_be_nonzero0 & may_be_nonzero1;
3024 /* If both input ranges contain only negative values we can
3025 truncate the result range maximum to the minimum of the
3026 input range maxima. */
3027 if (int_cst_range0 && int_cst_range1
3028 && tree_int_cst_sgn (vr0.max) < 0
3029 && tree_int_cst_sgn (vr1.max) < 0)
3031 dmax = dmax.min (tree_to_double_int (vr0.max),
3032 TYPE_UNSIGNED (expr_type));
3033 dmax = dmax.min (tree_to_double_int (vr1.max),
3034 TYPE_UNSIGNED (expr_type));
3036 /* If either input range contains only non-negative values
3037 we can truncate the result range maximum to the respective
3038 maximum of the input range. */
3039 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
3040 dmax = dmax.min (tree_to_double_int (vr0.max),
3041 TYPE_UNSIGNED (expr_type));
3042 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
3043 dmax = dmax.min (tree_to_double_int (vr1.max),
3044 TYPE_UNSIGNED (expr_type));
3045 max = double_int_to_tree (expr_type, dmax);
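/* For instance, for [8, 11] & [12, 15] the must-be-nonzero masks are
   0b1000 and 0b1100 and the may-be-nonzero masks are 0b1011 and 0b1111,
   so the result range is [8, 11].  */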
3047 else if (code == BIT_IOR_EXPR)
3049 double_int dmin;
3050 max = double_int_to_tree (expr_type,
3051 may_be_nonzero0 | may_be_nonzero1);
3052 dmin = must_be_nonzero0 | must_be_nonzero1;
3053 /* If the input ranges contain only non-negative values we can
3054 truncate the minimum of the result range to the maximum
3055 of the input range minima. */
3056 if (int_cst_range0 && int_cst_range1
3057 && tree_int_cst_sgn (vr0.min) >= 0
3058 && tree_int_cst_sgn (vr1.min) >= 0)
3060 dmin = dmin.max (tree_to_double_int (vr0.min),
3061 TYPE_UNSIGNED (expr_type));
3062 dmin = dmin.max (tree_to_double_int (vr1.min),
3063 TYPE_UNSIGNED (expr_type));
3065 /* If either input range contains only negative values
3066 we can truncate the minimum of the result range to the
3067 respective input range minimum. */
3068 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
3069 dmin = dmin.max (tree_to_double_int (vr0.min),
3070 TYPE_UNSIGNED (expr_type));
3071 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
3072 dmin = dmin.max (tree_to_double_int (vr1.min),
3073 TYPE_UNSIGNED (expr_type));
3074 min = double_int_to_tree (expr_type, dmin);
3076 else if (code == BIT_XOR_EXPR)
3078 double_int result_zero_bits, result_one_bits;
3079 result_zero_bits = (must_be_nonzero0 & must_be_nonzero1)
3080 | ~(may_be_nonzero0 | may_be_nonzero1);
3081 result_one_bits = must_be_nonzero0.and_not (may_be_nonzero1)
3082 | must_be_nonzero1.and_not (may_be_nonzero0);
3083 max = double_int_to_tree (expr_type, ~result_zero_bits);
3084 min = double_int_to_tree (expr_type, result_one_bits);
3085 /* If the range has all positive or all negative values the
3086 result is better than VARYING. */
3087 if (tree_int_cst_sgn (min) < 0
3088 || tree_int_cst_sgn (max) >= 0)
3090 else
3091 max = min = NULL_TREE;
3094 else
3095 gcc_unreachable ();
3097 /* If either MIN or MAX overflowed, then set the resulting range to
3098 VARYING. But we do accept an overflow infinity
3099 representation. */
3100 if (min == NULL_TREE
3101 || !is_gimple_min_invariant (min)
3102 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
3103 || max == NULL_TREE
3104 || !is_gimple_min_invariant (max)
3105 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
3107 set_value_range_to_varying (vr);
3108 return;
3111 /* We punt if:
3112 1) [-INF, +INF]
3113 2) [-INF, +-INF(OVF)]
3114 3) [+-INF(OVF), +INF]
3115 4) [+-INF(OVF), +-INF(OVF)]
3116 We learn nothing when we have INF and INF(OVF) on both sides.
3117 Note that we do accept [-INF, -INF] and [+INF, +INF] without
3118 overflow. */
3119 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
3120 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
3122 set_value_range_to_varying (vr);
3123 return;
3126 cmp = compare_values (min, max);
3127 if (cmp == -2 || cmp == 1)
3129 /* If the new range has its limits swapped around (MIN > MAX),
3130 then the operation caused one of them to wrap around, mark
3131 the new range VARYING. */
3132 set_value_range_to_varying (vr);
3134 else
3135 set_value_range (vr, type, min, max, NULL);
3138 /* Extract range information from a binary expression OP0 CODE OP1 based on
3139 the ranges of each of its operands with resulting type EXPR_TYPE.
3140 The resulting range is stored in *VR. */
3142 static void
3143 extract_range_from_binary_expr (value_range_t *vr,
3144 enum tree_code code,
3145 tree expr_type, tree op0, tree op1)
3147 value_range_t vr0 = VR_INITIALIZER;
3148 value_range_t vr1 = VR_INITIALIZER;
3150 /* Get value ranges for each operand. For constant operands, create
3151 a new value range with the operand to simplify processing. */
3152 if (TREE_CODE (op0) == SSA_NAME)
3153 vr0 = *(get_value_range (op0));
3154 else if (is_gimple_min_invariant (op0))
3155 set_value_range_to_value (&vr0, op0, NULL);
3156 else
3157 set_value_range_to_varying (&vr0);
3159 if (TREE_CODE (op1) == SSA_NAME)
3160 vr1 = *(get_value_range (op1));
3161 else if (is_gimple_min_invariant (op1))
3162 set_value_range_to_value (&vr1, op1, NULL);
3163 else
3164 set_value_range_to_varying (&vr1);
3166 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
3169 /* Extract range information from a unary operation CODE based on
3170 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
3171 The resulting range is stored in *VR. */
3173 static void
3174 extract_range_from_unary_expr_1 (value_range_t *vr,
3175 enum tree_code code, tree type,
3176 value_range_t *vr0_, tree op0_type)
3178 value_range_t vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
3180 /* VRP only operates on integral and pointer types. */
3181 if (!(INTEGRAL_TYPE_P (op0_type)
3182 || POINTER_TYPE_P (op0_type))
3183 || !(INTEGRAL_TYPE_P (type)
3184 || POINTER_TYPE_P (type)))
3186 set_value_range_to_varying (vr);
3187 return;
3190 /* If VR0 is UNDEFINED, so is the result. */
3191 if (vr0.type == VR_UNDEFINED)
3193 set_value_range_to_undefined (vr);
3194 return;
3197 /* Handle operations that we express in terms of others. */
3198 if (code == PAREN_EXPR)
3200 /* PAREN_EXPR is a simple copy. */
3201 copy_value_range (vr, &vr0);
3202 return;
3204 else if (code == NEGATE_EXPR)
3206 /* -X is simply 0 - X, so re-use existing code that also handles
3207 anti-ranges fine. */
3208 value_range_t zero = VR_INITIALIZER;
3209 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3210 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3211 return;
3213 else if (code == BIT_NOT_EXPR)
3215 /* ~X is simply -1 - X, so re-use existing code that also handles
3216 anti-ranges fine. */
3217 value_range_t minusone = VR_INITIALIZER;
3218 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3219 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3220 type, &minusone, &vr0);
3221 return;
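/* E.g. for VR0 = [3, 5] this computes -1 - [3, 5] = [-6, -4].  */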
3224 /* Now canonicalize anti-ranges to ranges when they are not symbolic
3225 and express op ~[] as (op []') U (op []''). */
3226 if (vr0.type == VR_ANTI_RANGE
3227 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
3229 extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
3230 if (vrtem1.type != VR_UNDEFINED)
3232 value_range_t vrres = VR_INITIALIZER;
3233 extract_range_from_unary_expr_1 (&vrres, code, type,
3234 &vrtem1, op0_type);
3235 vrp_meet (vr, &vrres);
3237 return;
3240 if (CONVERT_EXPR_CODE_P (code))
3242 tree inner_type = op0_type;
3243 tree outer_type = type;
3245 /* If the expression evaluates to a pointer, we are only interested in
3246 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
3247 if (POINTER_TYPE_P (type))
3249 if (range_is_nonnull (&vr0))
3250 set_value_range_to_nonnull (vr, type);
3251 else if (range_is_null (&vr0))
3252 set_value_range_to_null (vr, type);
3253 else
3254 set_value_range_to_varying (vr);
3255 return;
3258 /* If VR0 is varying and we increase the type precision, assume
3259 a full range for the following transformation. */
3260 if (vr0.type == VR_VARYING
3261 && INTEGRAL_TYPE_P (inner_type)
3262 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
3264 vr0.type = VR_RANGE;
3265 vr0.min = TYPE_MIN_VALUE (inner_type);
3266 vr0.max = TYPE_MAX_VALUE (inner_type);
3269 /* If VR0 is a constant range or anti-range and the conversion is
3270 not truncating we can convert the min and max values and
3271 canonicalize the resulting range. Otherwise we can do the
3272 conversion if the size of the range is less than what the
3273 precision of the target type can represent and the range is
3274 not an anti-range. */
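/* For example, narrowing the 32-bit range [0x1f0, 0x205] (a span of
   0x15 values) to an 8-bit unsigned type converts the bounds to 240
   and 5; set_and_canonicalize_value_range then turns the wrapped
   [240, 5] into ~[6, 239].  */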
3275 if ((vr0.type == VR_RANGE
3276 || vr0.type == VR_ANTI_RANGE)
3277 && TREE_CODE (vr0.min) == INTEGER_CST
3278 && TREE_CODE (vr0.max) == INTEGER_CST
3279 && (!is_overflow_infinity (vr0.min)
3280 || (vr0.type == VR_RANGE
3281 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3282 && needs_overflow_infinity (outer_type)
3283 && supports_overflow_infinity (outer_type)))
3284 && (!is_overflow_infinity (vr0.max)
3285 || (vr0.type == VR_RANGE
3286 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3287 && needs_overflow_infinity (outer_type)
3288 && supports_overflow_infinity (outer_type)))
3289 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
3290 || (vr0.type == VR_RANGE
3291 && integer_zerop (int_const_binop (RSHIFT_EXPR,
3292 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
3293 size_int (TYPE_PRECISION (outer_type)))))))
3295 tree new_min, new_max;
3296 if (is_overflow_infinity (vr0.min))
3297 new_min = negative_overflow_infinity (outer_type);
3298 else
3299 new_min = force_fit_type_double (outer_type,
3300 tree_to_double_int (vr0.min),
3301 0, false);
3302 if (is_overflow_infinity (vr0.max))
3303 new_max = positive_overflow_infinity (outer_type);
3304 else
3305 new_max = force_fit_type_double (outer_type,
3306 tree_to_double_int (vr0.max),
3307 0, false);
3308 set_and_canonicalize_value_range (vr, vr0.type,
3309 new_min, new_max, NULL);
3310 return;
3313 set_value_range_to_varying (vr);
3314 return;
3316 else if (code == ABS_EXPR)
3318 tree min, max;
3319 int cmp;
3321 /* Pass through vr0 in the easy cases. */
3322 if (TYPE_UNSIGNED (type)
3323 || value_range_nonnegative_p (&vr0))
3325 copy_value_range (vr, &vr0);
3326 return;
3329 /* For the remaining varying or symbolic ranges we can't do anything
3330 useful. */
3331 if (vr0.type == VR_VARYING
3332 || symbolic_range_p (&vr0))
3334 set_value_range_to_varying (vr);
3335 return;
3338 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3339 useful range. */
3340 if (!TYPE_OVERFLOW_UNDEFINED (type)
3341 && ((vr0.type == VR_RANGE
3342 && vrp_val_is_min (vr0.min))
3343 || (vr0.type == VR_ANTI_RANGE
3344 && !vrp_val_is_min (vr0.min))))
3346 set_value_range_to_varying (vr);
3347 return;
3350 /* ABS_EXPR may flip the range around, if the original range
3351 included negative values. */
3352 if (is_overflow_infinity (vr0.min))
3353 min = positive_overflow_infinity (type);
3354 else if (!vrp_val_is_min (vr0.min))
3355 min = fold_unary_to_constant (code, type, vr0.min);
3356 else if (!needs_overflow_infinity (type))
3357 min = TYPE_MAX_VALUE (type);
3358 else if (supports_overflow_infinity (type))
3359 min = positive_overflow_infinity (type);
3360 else
3362 set_value_range_to_varying (vr);
3363 return;
3366 if (is_overflow_infinity (vr0.max))
3367 max = positive_overflow_infinity (type);
3368 else if (!vrp_val_is_min (vr0.max))
3369 max = fold_unary_to_constant (code, type, vr0.max);
3370 else if (!needs_overflow_infinity (type))
3371 max = TYPE_MAX_VALUE (type);
3372 else if (supports_overflow_infinity (type)
3373 /* We shouldn't generate [+INF, +INF] as set_value_range
3374 doesn't like this and ICEs. */
3375 && !is_positive_overflow_infinity (min))
3376 max = positive_overflow_infinity (type);
3377 else
3379 set_value_range_to_varying (vr);
3380 return;
3383 cmp = compare_values (min, max);
3385 /* If a VR_ANTI_RANGE contains zero, then we have
3386 ~[-INF, min(MIN, MAX)]. */
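/* E.g. for X in ~[-5, 10], |X| is at least 6, giving ~[-INF, 5]
   (or ~[-INF + 1, 5] when the type wraps).  */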
3387 if (vr0.type == VR_ANTI_RANGE)
3389 if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3391 /* Take the lower of the two values. */
3392 if (cmp != 1)
3393 max = min;
3395 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3396 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3397 flag_wrapv is set and the original anti-range doesn't include
3398 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3399 if (TYPE_OVERFLOW_WRAPS (type))
3401 tree type_min_value = TYPE_MIN_VALUE (type);
3403 min = (vr0.min != type_min_value
3404 ? int_const_binop (PLUS_EXPR, type_min_value,
3405 integer_one_node)
3406 : type_min_value);
3408 else
3410 if (overflow_infinity_range_p (&vr0))
3411 min = negative_overflow_infinity (type);
3412 else
3413 min = TYPE_MIN_VALUE (type);
3416 else
3418 /* All else has failed, so create the range [0, INF], even for
3419 flag_wrapv since TYPE_MIN_VALUE is in the original
3420 anti-range. */
3421 vr0.type = VR_RANGE;
3422 min = build_int_cst (type, 0);
3423 if (needs_overflow_infinity (type))
3425 if (supports_overflow_infinity (type))
3426 max = positive_overflow_infinity (type);
3427 else
3429 set_value_range_to_varying (vr);
3430 return;
3433 else
3434 max = TYPE_MAX_VALUE (type);
3438 /* If the range contains zero then we know that the minimum value in the
3439 range will be zero. */
3440 else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3442 if (cmp == 1)
3443 max = min;
3444 min = build_int_cst (type, 0);
3446 else
3448 /* If the range was reversed, swap MIN and MAX. */
3449 if (cmp == 1)
3451 tree t = min;
3452 min = max;
3453 max = t;
3457 cmp = compare_values (min, max);
3458 if (cmp == -2 || cmp == 1)
3460 /* If the new range has its limits swapped around (MIN > MAX),
3461 then the operation caused one of them to wrap around, mark
3462 the new range VARYING. */
3463 set_value_range_to_varying (vr);
3465 else
3466 set_value_range (vr, vr0.type, min, max, NULL);
3467 return;
3470 /* For unhandled operations fall back to varying. */
3471 set_value_range_to_varying (vr);
3472 return;
3476 /* Extract range information from a unary expression CODE OP0 based on
3477 the range of its operand with resulting type TYPE.
3478 The resulting range is stored in *VR. */
3480 static void
3481 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
3482 tree type, tree op0)
3484 value_range_t vr0 = VR_INITIALIZER;
3486 /* Get value ranges for the operand. For constant operands, create
3487 a new value range with the operand to simplify processing. */
3488 if (TREE_CODE (op0) == SSA_NAME)
3489 vr0 = *(get_value_range (op0));
3490 else if (is_gimple_min_invariant (op0))
3491 set_value_range_to_value (&vr0, op0, NULL);
3492 else
3493 set_value_range_to_varying (&vr0);
3495 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3499 /* Extract range information from a conditional expression STMT based on
3500 the ranges of each of its operands and the expression code. */
3502 static void
3503 extract_range_from_cond_expr (value_range_t *vr, gimple stmt)
3505 tree op0, op1;
3506 value_range_t vr0 = VR_INITIALIZER;
3507 value_range_t vr1 = VR_INITIALIZER;
3509 /* Get value ranges for each operand. For constant operands, create
3510 a new value range with the operand to simplify processing. */
3511 op0 = gimple_assign_rhs2 (stmt);
3512 if (TREE_CODE (op0) == SSA_NAME)
3513 vr0 = *(get_value_range (op0));
3514 else if (is_gimple_min_invariant (op0))
3515 set_value_range_to_value (&vr0, op0, NULL);
3516 else
3517 set_value_range_to_varying (&vr0);
3519 op1 = gimple_assign_rhs3 (stmt);
3520 if (TREE_CODE (op1) == SSA_NAME)
3521 vr1 = *(get_value_range (op1));
3522 else if (is_gimple_min_invariant (op1))
3523 set_value_range_to_value (&vr1, op1, NULL);
3524 else
3525 set_value_range_to_varying (&vr1);
3527 /* The resulting value range is the union of the operand ranges. */
3528 copy_value_range (vr, &vr0);
3529 vrp_meet (vr, &vr1);
3533 /* Extract range information from a comparison expression EXPR based
3534 on the range of its operand and the expression code. */
3536 static void
3537 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3538 tree type, tree op0, tree op1)
3540 bool sop = false;
3541 tree val;
3543 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3544 NULL);
3546 /* A disadvantage of using a special infinity as an overflow
3547 representation is that we lose the ability to record overflow
3548 when we don't have an infinity. So we have to ignore a result
3549 which relies on overflow. */
3551 if (val && !is_overflow_infinity (val) && !sop)
3553 /* Since this expression was found on the RHS of an assignment,
3554 its type may be different from _Bool. Convert VAL to EXPR's
3555 type. */
3556 val = fold_convert (type, val);
3557 if (is_gimple_min_invariant (val))
3558 set_value_range_to_value (vr, val, vr->equiv);
3559 else
3560 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3562 else
3563 /* The result of a comparison is always true or false. */
3564 set_value_range_to_truthvalue (vr, type);
3567 /* Try to derive a nonnegative or nonzero range out of STMT relying
3568 primarily on generic routines in fold in conjunction with range data.
3569 Store the result in *VR. */
3571 static void
3572 extract_range_basic (value_range_t *vr, gimple stmt)
3574 bool sop = false;
3575 tree type = gimple_expr_type (stmt);
3577 if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
3579 tree fndecl = gimple_call_fndecl (stmt), arg;
3580 int mini, maxi, zerov = 0, prec;
3582 switch (DECL_FUNCTION_CODE (fndecl))
3584 case BUILT_IN_CONSTANT_P:
3585 /* If the call is __builtin_constant_p and the argument is a
3586 function parameter resolve it to false. This avoids bogus
3587 array bound warnings.
3588 ??? We could do this as early as inlining is finished. */
3589 arg = gimple_call_arg (stmt, 0);
3590 if (TREE_CODE (arg) == SSA_NAME
3591 && SSA_NAME_IS_DEFAULT_DEF (arg)
3592 && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL)
3594 set_value_range_to_null (vr, type);
3595 return;
3597 break;
3598 /* Both __builtin_ffs* and __builtin_popcount return
3599 [0, prec]. */
3600 CASE_INT_FN (BUILT_IN_FFS):
3601 CASE_INT_FN (BUILT_IN_POPCOUNT):
3602 arg = gimple_call_arg (stmt, 0);
3603 prec = TYPE_PRECISION (TREE_TYPE (arg));
3604 mini = 0;
3605 maxi = prec;
3606 if (TREE_CODE (arg) == SSA_NAME)
3608 value_range_t *vr0 = get_value_range (arg);
3609 /* If arg is non-zero, then ffs or popcount
3610 are non-zero. */
3611 if (((vr0->type == VR_RANGE
3612 && integer_nonzerop (vr0->min))
3613 || (vr0->type == VR_ANTI_RANGE
3614 && integer_zerop (vr0->min)))
3615 && !TREE_OVERFLOW (vr0->min))
3616 mini = 1;
3617 /* If some high bits are known to be zero,
3618 we can decrease the maximum. */
3619 if (vr0->type == VR_RANGE
3620 && TREE_CODE (vr0->max) == INTEGER_CST
3621 && !TREE_OVERFLOW (vr0->max))
3622 maxi = tree_floor_log2 (vr0->max) + 1;
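/* For instance, if ARG is known to be in [1, 1000] for a 32-bit type,
   MINI becomes 1 and MAXI becomes floor_log2 (1000) + 1 == 10, so the
   result range is narrowed from [0, 32] to [1, 10].  */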
3624 goto bitop_builtin;
3625 /* __builtin_parity* returns [0, 1]. */
3626 CASE_INT_FN (BUILT_IN_PARITY):
3627 mini = 0;
3628 maxi = 1;
3629 goto bitop_builtin;
3630 /* __builtin_c[lt]z* return [0, prec-1], except for
3631 when the argument is 0, but that is undefined behavior.
3632 On many targets where the CLZ RTL or optab value is defined
3633 for 0 the value is prec, so include that in the range
3634 by default. */
3635 CASE_INT_FN (BUILT_IN_CLZ):
3636 arg = gimple_call_arg (stmt, 0);
3637 prec = TYPE_PRECISION (TREE_TYPE (arg));
3638 mini = 0;
3639 maxi = prec;
3640 if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
3641 != CODE_FOR_nothing
3642 && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3643 zerov)
3644 /* Handle only the single common value. */
3645 && zerov != prec)
3646 /* Magic value to give up, unless vr0 proves
3647 arg is non-zero. */
3648 mini = -2;
3649 if (TREE_CODE (arg) == SSA_NAME)
3651 value_range_t *vr0 = get_value_range (arg);
3652 /* From clz of VR_RANGE minimum we can compute
3653 result maximum. */
3654 if (vr0->type == VR_RANGE
3655 && TREE_CODE (vr0->min) == INTEGER_CST
3656 && !TREE_OVERFLOW (vr0->min))
3658 maxi = prec - 1 - tree_floor_log2 (vr0->min);
3659 if (maxi != prec)
3660 mini = 0;
3662 else if (vr0->type == VR_ANTI_RANGE
3663 && integer_zerop (vr0->min)
3664 && !TREE_OVERFLOW (vr0->min))
3666 maxi = prec - 1;
3667 mini = 0;
3669 if (mini == -2)
3670 break;
3671 /* From clz of VR_RANGE maximum we can compute
3672 result minimum. */
3673 if (vr0->type == VR_RANGE
3674 && TREE_CODE (vr0->max) == INTEGER_CST
3675 && !TREE_OVERFLOW (vr0->max))
3677 mini = prec - 1 - tree_floor_log2 (vr0->max);
3678 if (mini == prec)
3679 break;
3682 if (mini == -2)
3683 break;
3684 goto bitop_builtin;
3685 /* __builtin_ctz* return [0, prec-1], except for
3686 when the argument is 0, but that is undefined behavior.
3687 If there is a ctz optab for this mode and
3688 CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
3689 otherwise just assume 0 won't be seen. */
3690 CASE_INT_FN (BUILT_IN_CTZ):
3691 arg = gimple_call_arg (stmt, 0);
3692 prec = TYPE_PRECISION (TREE_TYPE (arg));
3693 mini = 0;
3694 maxi = prec - 1;
3695 if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
3696 != CODE_FOR_nothing
3697 && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3698 zerov))
3700 /* Handle only the two common values. */
3701 if (zerov == -1)
3702 mini = -1;
3703 else if (zerov == prec)
3704 maxi = prec;
3705 else
3706 /* Magic value to give up, unless vr0 proves
3707 arg is non-zero. */
3708 mini = -2;
3710 if (TREE_CODE (arg) == SSA_NAME)
3712 value_range_t *vr0 = get_value_range (arg);
3713 /* If arg is non-zero, then use [0, prec - 1]. */
3714 if (((vr0->type == VR_RANGE
3715 && integer_nonzerop (vr0->min))
3716 || (vr0->type == VR_ANTI_RANGE
3717 && integer_zerop (vr0->min)))
3718 && !TREE_OVERFLOW (vr0->min))
3720 mini = 0;
3721 maxi = prec - 1;
3723 /* If some high bits are known to be zero,
3724 we can decrease the result maximum. */
3725 if (vr0->type == VR_RANGE
3726 && TREE_CODE (vr0->max) == INTEGER_CST
3727 && !TREE_OVERFLOW (vr0->max))
3729 maxi = tree_floor_log2 (vr0->max);
3730 /* For vr0 [0, 0] give up. */
3731 if (maxi == -1)
3732 break;
3735 if (mini == -2)
3736 break;
3737 goto bitop_builtin;
3738 /* __builtin_clrsb* returns [0, prec-1]. */
3739 CASE_INT_FN (BUILT_IN_CLRSB):
3740 arg = gimple_call_arg (stmt, 0);
3741 prec = TYPE_PRECISION (TREE_TYPE (arg));
3742 mini = 0;
3743 maxi = prec - 1;
3744 goto bitop_builtin;
3745 bitop_builtin:
3746 set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
3747 build_int_cst (type, maxi), NULL);
3748 return;
3749 default:
3750 break;
3753 if (INTEGRAL_TYPE_P (type)
3754 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3755 set_value_range_to_nonnegative (vr, type,
3756 sop || stmt_overflow_infinity (stmt));
3757 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3758 && !sop)
3759 set_value_range_to_nonnull (vr, type);
3760 else
3761 set_value_range_to_varying (vr);
3765 /* Try to compute a useful range out of assignment STMT and store it
3766 in *VR. */
3768 static void
3769 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3771 enum tree_code code = gimple_assign_rhs_code (stmt);
3773 if (code == ASSERT_EXPR)
3774 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3775 else if (code == SSA_NAME)
3776 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3777 else if (TREE_CODE_CLASS (code) == tcc_binary)
3778 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3779 gimple_expr_type (stmt),
3780 gimple_assign_rhs1 (stmt),
3781 gimple_assign_rhs2 (stmt));
3782 else if (TREE_CODE_CLASS (code) == tcc_unary)
3783 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3784 gimple_expr_type (stmt),
3785 gimple_assign_rhs1 (stmt));
3786 else if (code == COND_EXPR)
3787 extract_range_from_cond_expr (vr, stmt);
3788 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3789 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3790 gimple_expr_type (stmt),
3791 gimple_assign_rhs1 (stmt),
3792 gimple_assign_rhs2 (stmt));
3793 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3794 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3795 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3796 else
3797 set_value_range_to_varying (vr);
3799 if (vr->type == VR_VARYING)
3800 extract_range_basic (vr, stmt);
3803 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3804 would be profitable to adjust VR using scalar evolution information
3805 for VAR. If so, update VR with the new limits. */
3807 static void
3808 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3809 gimple stmt, tree var)
3811 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3812 enum ev_direction dir;
3814 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3815 better opportunities than a regular range, but I'm not sure. */
3816 if (vr->type == VR_ANTI_RANGE)
3817 return;
3819 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3821 /* Like in PR19590, scev can return a constant function. */
3822 if (is_gimple_min_invariant (chrec))
3824 set_value_range_to_value (vr, chrec, vr->equiv);
3825 return;
3828 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3829 return;
3831 init = initial_condition_in_loop_num (chrec, loop->num);
3832 tem = op_with_constant_singleton_value_range (init);
3833 if (tem)
3834 init = tem;
3835 step = evolution_part_in_loop_num (chrec, loop->num);
3836 tem = op_with_constant_singleton_value_range (step);
3837 if (tem)
3838 step = tem;
3840 /* If STEP is symbolic, we can't know whether INIT will be the
3841 minimum or maximum value in the range. Also, unless INIT is
3842 a simple expression, compare_values and possibly other functions
3843 in tree-vrp won't be able to handle it. */
3844 if (step == NULL_TREE
3845 || !is_gimple_min_invariant (step)
3846 || !valid_value_p (init))
3847 return;
3849 dir = scev_direction (chrec);
3850 if (/* Do not adjust ranges if we do not know whether the iv increases
3851 or decreases, ... */
3852 dir == EV_DIR_UNKNOWN
3853 /* ... or if it may wrap. */
3854 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3855 true))
3856 return;
3858 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3859 negative_overflow_infinity and positive_overflow_infinity,
3860 because we have concluded that the loop probably does not
3861 wrap. */
3863 type = TREE_TYPE (var);
3864 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3865 tmin = lower_bound_in_type (type, type);
3866 else
3867 tmin = TYPE_MIN_VALUE (type);
3868 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3869 tmax = upper_bound_in_type (type, type);
3870 else
3871 tmax = TYPE_MAX_VALUE (type);
3873 /* Try to use estimated number of iterations for the loop to constrain the
3874 final value in the evolution. */
3875 if (TREE_CODE (step) == INTEGER_CST
3876 && is_gimple_val (init)
3877 && (TREE_CODE (init) != SSA_NAME
3878 || get_value_range (init)->type == VR_RANGE))
3880 double_int nit;
3882 /* We are only entering here for loop header PHI nodes, so the
3883 number of latch executions is the correct thing to use. */
3884 if (max_loop_iterations (loop, &nit))
3886 value_range_t maxvr = VR_INITIALIZER;
3887 double_int dtmp;
3888 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
3889 bool overflow = false;
3891 dtmp = tree_to_double_int (step)
3892 .mul_with_sign (nit, unsigned_p, &overflow);
3893 /* If the multiplication overflowed we can't do a meaningful
3894 adjustment. Likewise if the result doesn't fit in the type
3895 of the induction variable. For a signed type we have to
3896 check whether the result has the expected sign, which is that
3897 of the step, since the number of iterations is unsigned. */
3898 if (!overflow
3899 && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
3900 && (unsigned_p
3901 || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
3903 tem = double_int_to_tree (TREE_TYPE (init), dtmp);
3904 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
3905 TREE_TYPE (init), init, tem);
3906 /* Likewise if the addition did. */
3907 if (maxvr.type == VR_RANGE)
3909 tmin = maxvr.min;
3910 tmax = maxvr.max;
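/* For instance, if INIT is 0, STEP is 4 and the loop runs at most 10
   latch iterations, DTMP is 40 and MAXVR becomes [40, 40]; combined
   with the direction handling below this bounds the IV to [0, 40]. */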
3916 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3918 min = tmin;
3919 max = tmax;
3921 /* For VARYING or UNDEFINED ranges, just about anything we get
3922 from scalar evolutions should be better. */
3924 if (dir == EV_DIR_DECREASES)
3925 max = init;
3926 else
3927 min = init;
3929 /* If we would create an invalid range, then just assume we
3930 know absolutely nothing. This may be over-conservative,
3931 but it's clearly safe, and should happen only in unreachable
3932 parts of code, or for invalid programs. */
3933 if (compare_values (min, max) == 1)
3934 return;
3936 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3938 else if (vr->type == VR_RANGE)
3940 min = vr->min;
3941 max = vr->max;
3943 if (dir == EV_DIR_DECREASES)
3945 /* INIT is the maximum value. If INIT is lower than VR->MAX
3946 but no smaller than VR->MIN, set VR->MAX to INIT. */
3947 if (compare_values (init, max) == -1)
3948 max = init;
3950 /* According to the loop information, the variable does not
3951 overflow. If we think it does, probably because of an
3952 overflow due to arithmetic on a different INF value,
3953 reset now. */
3954 if (is_negative_overflow_infinity (min)
3955 || compare_values (min, tmin) == -1)
3956 min = tmin;
3959 else
3961 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3962 if (compare_values (init, min) == 1)
3963 min = init;
3965 if (is_positive_overflow_infinity (max)
3966 || compare_values (tmax, max) == -1)
3967 max = tmax;
3970 /* If we just created an invalid range with the minimum
3971 greater than the maximum, we fail conservatively.
3972 This should happen only in unreachable
3973 parts of code, or for invalid programs. */
3974 if (compare_values (min, max) == 1)
3975 return;
3977 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3981 /* Return true if VAR may overflow at STMT. This checks any available
3982 loop information to see if we can determine that VAR does not
3983 overflow. */
3985 static bool
3986 vrp_var_may_overflow (tree var, gimple stmt)
3988 struct loop *l;
3989 tree chrec, init, step;
3991 if (current_loops == NULL)
3992 return true;
3994 l = loop_containing_stmt (stmt);
3995 if (l == NULL
3996 || !loop_outer (l))
3997 return true;
3999 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
4000 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
4001 return true;
4003 init = initial_condition_in_loop_num (chrec, l->num);
4004 step = evolution_part_in_loop_num (chrec, l->num);
4006 if (step == NULL_TREE
4007 || !is_gimple_min_invariant (step)
4008 || !valid_value_p (init))
4009 return true;
4011 /* If we get here, we know something useful about VAR based on the
4012 loop information. If it wraps, it may overflow. */
4014 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
4015 true))
4016 return true;
4018 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
4020 print_generic_expr (dump_file, var, 0);
4021 fprintf (dump_file, ": loop information indicates it does not overflow\n");
4024 return false;
4028 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
4030 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
4031 all the values in the ranges.
4033 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
4035 - Return NULL_TREE if it is not always possible to determine the
4036 value of the comparison.
4038 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
4039 overflow infinity was used in the test. */
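/* For instance, [1, 5] LT_EXPR [10, 20] yields boolean_true_node,
   [1, 5] LT_EXPR [3, 20] yields NULL_TREE because the ranges overlap,
   and ~[3, 3] EQ_EXPR [3, 3] yields boolean_false_node. */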
4042 static tree
4043 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
4044 bool *strict_overflow_p)
4046 /* VARYING or UNDEFINED ranges cannot be compared. */
4047 if (vr0->type == VR_VARYING
4048 || vr0->type == VR_UNDEFINED
4049 || vr1->type == VR_VARYING
4050 || vr1->type == VR_UNDEFINED)
4051 return NULL_TREE;
4053 /* Anti-ranges need to be handled separately. */
4054 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
4056 /* If both are anti-ranges, then we cannot compute any
4057 comparison. */
4058 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
4059 return NULL_TREE;
4061 /* These comparisons are never statically computable. */
4062 if (comp == GT_EXPR
4063 || comp == GE_EXPR
4064 || comp == LT_EXPR
4065 || comp == LE_EXPR)
4066 return NULL_TREE;
4068 /* Equality can be computed only between a range and an
4069 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
4070 if (vr0->type == VR_RANGE)
4072 /* To simplify processing, make VR0 the anti-range. */
4073 value_range_t *tmp = vr0;
4074 vr0 = vr1;
4075 vr1 = tmp;
4078 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
4080 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
4081 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
4082 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4084 return NULL_TREE;
4087 if (!usable_range_p (vr0, strict_overflow_p)
4088 || !usable_range_p (vr1, strict_overflow_p))
4089 return NULL_TREE;
4091 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
4092 operands around and change the comparison code. */
4093 if (comp == GT_EXPR || comp == GE_EXPR)
4095 value_range_t *tmp;
4096 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
4097 tmp = vr0;
4098 vr0 = vr1;
4099 vr1 = tmp;
4102 if (comp == EQ_EXPR)
4104 /* Equality may only be computed if both ranges represent
4105 exactly one value. */
4106 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
4107 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
4109 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
4110 strict_overflow_p);
4111 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
4112 strict_overflow_p);
4113 if (cmp_min == 0 && cmp_max == 0)
4114 return boolean_true_node;
4115 else if (cmp_min != -2 && cmp_max != -2)
4116 return boolean_false_node;
4118 /* If [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] do not overlap, then V0 != V1. */
4119 else if (compare_values_warnv (vr0->min, vr1->max,
4120 strict_overflow_p) == 1
4121 || compare_values_warnv (vr1->min, vr0->max,
4122 strict_overflow_p) == 1)
4123 return boolean_false_node;
4125 return NULL_TREE;
4127 else if (comp == NE_EXPR)
4129 int cmp1, cmp2;
4131 /* If VR0 is completely to the left or completely to the right
4132 of VR1, they are always different. Notice that we need to
4133 make sure that both comparisons yield similar results to
4134 avoid comparing values that cannot be compared at
4135 compile-time. */
4136 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4137 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4138 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
4139 return boolean_true_node;
4141 /* If VR0 and VR1 represent a single value and are identical,
4142 return false. */
4143 else if (compare_values_warnv (vr0->min, vr0->max,
4144 strict_overflow_p) == 0
4145 && compare_values_warnv (vr1->min, vr1->max,
4146 strict_overflow_p) == 0
4147 && compare_values_warnv (vr0->min, vr1->min,
4148 strict_overflow_p) == 0
4149 && compare_values_warnv (vr0->max, vr1->max,
4150 strict_overflow_p) == 0)
4151 return boolean_false_node;
4153 /* Otherwise, they may or may not be different. */
4154 else
4155 return NULL_TREE;
4157 else if (comp == LT_EXPR || comp == LE_EXPR)
4159 int tst;
4161 /* If VR0 is to the left of VR1, return true. */
4162 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4163 if ((comp == LT_EXPR && tst == -1)
4164 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4166 if (overflow_infinity_range_p (vr0)
4167 || overflow_infinity_range_p (vr1))
4168 *strict_overflow_p = true;
4169 return boolean_true_node;
4172 /* If VR0 is to the right of VR1, return false. */
4173 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4174 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4175 || (comp == LE_EXPR && tst == 1))
4177 if (overflow_infinity_range_p (vr0)
4178 || overflow_infinity_range_p (vr1))
4179 *strict_overflow_p = true;
4180 return boolean_false_node;
4183 /* Otherwise, we don't know. */
4184 return NULL_TREE;
4187 gcc_unreachable ();
4191 /* Given a value range VR, a value VAL and a comparison code COMP, return
4192 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
4193 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
4194 always returns false. Return NULL_TREE if it is not always
4195 possible to determine the value of the comparison. Also set
4196 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
4197 infinity was used in the test. */
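/* For instance, [10, 20] GT_EXPR 5 yields boolean_true_node,
   ~[0, 0] NE_EXPR 0 yields boolean_true_node, and [0, 10] LT_EXPR 5
   yields NULL_TREE. */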
4199 static tree
4200 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
4201 bool *strict_overflow_p)
4203 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4204 return NULL_TREE;
4206 /* Anti-ranges need to be handled separately. */
4207 if (vr->type == VR_ANTI_RANGE)
4209 /* For anti-ranges, the only predicates that we can compute at
4210 compile time are equality and inequality. */
4211 if (comp == GT_EXPR
4212 || comp == GE_EXPR
4213 || comp == LT_EXPR
4214 || comp == LE_EXPR)
4215 return NULL_TREE;
4217 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
4218 if (value_inside_range (val, vr->min, vr->max) == 1)
4219 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4221 return NULL_TREE;
4224 if (!usable_range_p (vr, strict_overflow_p))
4225 return NULL_TREE;
4227 if (comp == EQ_EXPR)
4229 /* EQ_EXPR may only be computed if VR represents exactly
4230 one value. */
4231 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
4233 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
4234 if (cmp == 0)
4235 return boolean_true_node;
4236 else if (cmp == -1 || cmp == 1 || cmp == 2)
4237 return boolean_false_node;
4239 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
4240 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
4241 return boolean_false_node;
4243 return NULL_TREE;
4245 else if (comp == NE_EXPR)
4247 /* If VAL is not inside VR, then they are always different. */
4248 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
4249 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
4250 return boolean_true_node;
4252 /* If VR represents exactly one value equal to VAL, then return
4253 false. */
4254 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
4255 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
4256 return boolean_false_node;
4258 /* Otherwise, they may or may not be different. */
4259 return NULL_TREE;
4261 else if (comp == LT_EXPR || comp == LE_EXPR)
4263 int tst;
4265 /* If VR is to the left of VAL, return true. */
4266 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4267 if ((comp == LT_EXPR && tst == -1)
4268 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4270 if (overflow_infinity_range_p (vr))
4271 *strict_overflow_p = true;
4272 return boolean_true_node;
4275 /* If VR is to the right of VAL, return false. */
4276 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4277 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4278 || (comp == LE_EXPR && tst == 1))
4280 if (overflow_infinity_range_p (vr))
4281 *strict_overflow_p = true;
4282 return boolean_false_node;
4285 /* Otherwise, we don't know. */
4286 return NULL_TREE;
4288 else if (comp == GT_EXPR || comp == GE_EXPR)
4290 int tst;
4292 /* If VR is to the right of VAL, return true. */
4293 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4294 if ((comp == GT_EXPR && tst == 1)
4295 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
4297 if (overflow_infinity_range_p (vr))
4298 *strict_overflow_p = true;
4299 return boolean_true_node;
4302 /* If VR is to the left of VAL, return false. */
4303 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4304 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
4305 || (comp == GE_EXPR && tst == -1))
4307 if (overflow_infinity_range_p (vr))
4308 *strict_overflow_p = true;
4309 return boolean_false_node;
4312 /* Otherwise, we don't know. */
4313 return NULL_TREE;
4316 gcc_unreachable ();
4320 /* Debugging dumps. */
4322 void dump_value_range (FILE *, value_range_t *);
4323 void debug_value_range (value_range_t *);
4324 void dump_all_value_ranges (FILE *);
4325 void debug_all_value_ranges (void);
4326 void dump_vr_equiv (FILE *, bitmap);
4327 void debug_vr_equiv (bitmap);
4330 /* Dump value range VR to FILE. */
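/* For instance, a signed range may be printed as [-INF, 10], an
   anti-range as ~[0, 0], and an overflowed bound as +INF(OVF). */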
4332 void
4333 dump_value_range (FILE *file, value_range_t *vr)
4335 if (vr == NULL)
4336 fprintf (file, "[]");
4337 else if (vr->type == VR_UNDEFINED)
4338 fprintf (file, "UNDEFINED");
4339 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4341 tree type = TREE_TYPE (vr->min);
4343 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
4345 if (is_negative_overflow_infinity (vr->min))
4346 fprintf (file, "-INF(OVF)");
4347 else if (INTEGRAL_TYPE_P (type)
4348 && !TYPE_UNSIGNED (type)
4349 && vrp_val_is_min (vr->min))
4350 fprintf (file, "-INF");
4351 else
4352 print_generic_expr (file, vr->min, 0);
4354 fprintf (file, ", ");
4356 if (is_positive_overflow_infinity (vr->max))
4357 fprintf (file, "+INF(OVF)");
4358 else if (INTEGRAL_TYPE_P (type)
4359 && vrp_val_is_max (vr->max))
4360 fprintf (file, "+INF");
4361 else
4362 print_generic_expr (file, vr->max, 0);
4364 fprintf (file, "]");
4366 if (vr->equiv)
4368 bitmap_iterator bi;
4369 unsigned i, c = 0;
4371 fprintf (file, " EQUIVALENCES: { ");
4373 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4375 print_generic_expr (file, ssa_name (i), 0);
4376 fprintf (file, " ");
4377 c++;
4380 fprintf (file, "} (%u elements)", c);
4383 else if (vr->type == VR_VARYING)
4384 fprintf (file, "VARYING");
4385 else
4386 fprintf (file, "INVALID RANGE");
4390 /* Dump value range VR to stderr. */
4392 DEBUG_FUNCTION void
4393 debug_value_range (value_range_t *vr)
4395 dump_value_range (stderr, vr);
4396 fprintf (stderr, "\n");
4400 /* Dump value ranges of all SSA_NAMEs to FILE. */
4402 void
4403 dump_all_value_ranges (FILE *file)
4405 size_t i;
4407 for (i = 0; i < num_vr_values; i++)
4409 if (vr_value[i])
4411 print_generic_expr (file, ssa_name (i), 0);
4412 fprintf (file, ": ");
4413 dump_value_range (file, vr_value[i]);
4414 fprintf (file, "\n");
4418 fprintf (file, "\n");
4422 /* Dump all value ranges to stderr. */
4424 DEBUG_FUNCTION void
4425 debug_all_value_ranges (void)
4427 dump_all_value_ranges (stderr);
4431 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4432 create a new SSA name N and return the assertion assignment
4433 'V = ASSERT_EXPR <V, V OP W>'. */
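/* For instance, given the condition 'x_3 > 10', this builds
     x_4 = ASSERT_EXPR <x_3, x_3 > 10>
   where x_4 is a fresh name registered via create_new_def_for so that
   dominated uses of x_3 can later be rewritten to use x_4. */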
4435 static gimple
4436 build_assert_expr_for (tree cond, tree v)
4438 tree a;
4439 gimple assertion;
4441 gcc_assert (TREE_CODE (v) == SSA_NAME
4442 && COMPARISON_CLASS_P (cond));
4444 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
4445 assertion = gimple_build_assign (NULL_TREE, a);
4447 /* The new ASSERT_EXPR creates a new SSA name that replaces the
4448 operand of the ASSERT_EXPR. Create it so the new name and the old one
4449 are registered in the replacement table so that we can fix the SSA web
4450 after adding all the ASSERT_EXPRs. */
4451 create_new_def_for (v, assertion, NULL);
4453 return assertion;
4457 /* Return true if STMT is a GIMPLE_COND whose comparison involves
4458 floating point values. */
4460 static inline bool
4461 fp_predicate (gimple stmt)
4463 GIMPLE_CHECK (stmt, GIMPLE_COND);
4465 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4469 /* If OP can be inferred to be non-zero after STMT executes, return true. */
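/* This covers pointer dereferences and calls to functions declared
   with the nonnull attribute. For instance, after '*p_1 = 0', or after
   a call to 'void f (void *) __attribute__ ((nonnull (1)))' with p_1
   as its first argument, p_1 must be non-NULL, provided
   -fdelete-null-pointer-checks is in effect. */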
4471 static bool
4472 infer_nonnull_range (gimple stmt, tree op)
4474 /* We can only assume that a pointer dereference will yield
4475 non-NULL if -fdelete-null-pointer-checks is enabled. */
4476 if (!flag_delete_null_pointer_checks
4477 || !POINTER_TYPE_P (TREE_TYPE (op))
4478 || gimple_code (stmt) == GIMPLE_ASM)
4479 return false;
4481 unsigned num_uses, num_loads, num_stores;
4483 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
4484 if (num_loads + num_stores > 0)
4485 return true;
4487 if (is_gimple_call (stmt) && !gimple_call_internal_p (stmt))
4489 tree fntype = gimple_call_fntype (stmt);
4490 tree attrs = TYPE_ATTRIBUTES (fntype);
4491 for (; attrs; attrs = TREE_CHAIN (attrs))
4493 attrs = lookup_attribute ("nonnull", attrs);
4495 /* If "nonnull" wasn't specified, we know nothing about
4496 the argument. */
4497 if (attrs == NULL_TREE)
4498 return false;
4500 /* If "nonnull" applies to all the arguments, then OP
4501 is non-null. */
4502 if (TREE_VALUE (attrs) == NULL_TREE)
4503 return true;
4505 /* Now see if op appears in the nonnull list. */
4506 for (tree t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
4508 int idx = TREE_INT_CST_LOW (TREE_VALUE (t)) - 1;
4509 tree arg = gimple_call_arg (stmt, idx);
4510 if (op == arg)
4511 return true;
4516 return false;
4519 /* If the range of values taken by OP can be inferred after STMT executes,
4520 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4521 describes the inferred range. Return true if a range could be
4522 inferred. */
4524 static bool
4525 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
4527 *val_p = NULL_TREE;
4528 *comp_code_p = ERROR_MARK;
4530 /* Do not attempt to infer anything in names that flow through
4531 abnormal edges. */
4532 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4533 return false;
4535 /* Similarly, don't infer anything from statements that may throw
4536 exceptions. ??? Relax this requirement? */
4537 if (stmt_could_throw_p (stmt))
4538 return false;
4540 /* If STMT is the last statement of a basic block with no
4541 successors, there is no point inferring anything about any of its
4542 operands. We would not be able to find a proper insertion point
4543 for the assertion, anyway. */
4544 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4545 return false;
4547 if (infer_nonnull_range (stmt, op))
4549 *val_p = build_int_cst (TREE_TYPE (op), 0);
4550 *comp_code_p = NE_EXPR;
4551 return true;
4554 return false;
4558 void dump_asserts_for (FILE *, tree);
4559 void debug_asserts_for (tree);
4560 void dump_all_asserts (FILE *);
4561 void debug_all_asserts (void);
4563 /* Dump all the registered assertions for NAME to FILE. */
4565 void
4566 dump_asserts_for (FILE *file, tree name)
4568 assert_locus_t loc;
4570 fprintf (file, "Assertions to be inserted for ");
4571 print_generic_expr (file, name, 0);
4572 fprintf (file, "\n");
4574 loc = asserts_for[SSA_NAME_VERSION (name)];
4575 while (loc)
4577 fprintf (file, "\t");
4578 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4579 fprintf (file, "\n\tBB #%d", loc->bb->index);
4580 if (loc->e)
4582 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4583 loc->e->dest->index);
4584 dump_edge_info (file, loc->e, dump_flags, 0);
4586 fprintf (file, "\n\tPREDICATE: ");
4587 print_generic_expr (file, name, 0);
4588 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
4589 print_generic_expr (file, loc->val, 0);
4590 fprintf (file, "\n\n");
4591 loc = loc->next;
4594 fprintf (file, "\n");
4598 /* Dump all the registered assertions for NAME to stderr. */
4600 DEBUG_FUNCTION void
4601 debug_asserts_for (tree name)
4603 dump_asserts_for (stderr, name);
4607 /* Dump all the registered assertions for all the names to FILE. */
4609 void
4610 dump_all_asserts (FILE *file)
4612 unsigned i;
4613 bitmap_iterator bi;
4615 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4616 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4617 dump_asserts_for (file, ssa_name (i));
4618 fprintf (file, "\n");
4622 /* Dump all the registered assertions for all the names to stderr. */
4624 DEBUG_FUNCTION void
4625 debug_all_asserts (void)
4627 dump_all_asserts (stderr);
4631 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4632 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4633 E->DEST, then register this location as a possible insertion point
4634 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4636 BB, E and SI provide the exact insertion point for the new
4637 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4638 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4639 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4640 must not be NULL. */
4642 static void
4643 register_new_assert_for (tree name, tree expr,
4644 enum tree_code comp_code,
4645 tree val,
4646 basic_block bb,
4647 edge e,
4648 gimple_stmt_iterator si)
4650 assert_locus_t n, loc, last_loc;
4651 basic_block dest_bb;
4653 gcc_checking_assert (bb == NULL || e == NULL);
4655 if (e == NULL)
4656 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4657 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4659 /* Never build an assert comparing against an integer constant with
4660 TREE_OVERFLOW set. This confuses our undefined overflow warning
4661 machinery. */
4662 if (TREE_CODE (val) == INTEGER_CST
4663 && TREE_OVERFLOW (val))
4664 val = build_int_cst_wide (TREE_TYPE (val),
4665 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
4667 /* The new assertion A will be inserted at BB or E. We need to
4668 determine if the new location is dominated by a previously
4669 registered location for A. If we are doing an edge insertion,
4670 assume that A will be inserted at E->DEST. Note that this is not
4671 necessarily true.
4673 If E is a critical edge, it will be split. But even if E is
4674 split, the new block will dominate the same set of blocks that
4675 E->DEST dominates.
4677 The reverse, however, is not true: blocks dominated by E->DEST
4678 will not be dominated by the new block created to split E. So,
4679 if the insertion location is on a critical edge, we will not use
4680 the new location to move another assertion previously registered
4681 at a block dominated by E->DEST. */
4682 dest_bb = (bb) ? bb : e->dest;
4684 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4685 VAL at a block dominating DEST_BB, then we don't need to insert a new
4686 one. Similarly, if the same assertion already exists at a block
4687 dominated by DEST_BB and the new location is not on a critical
4688 edge, then update the existing location for the assertion (i.e.,
4689 move the assertion up in the dominance tree).
4691 Note, this is implemented as a simple linked list because there
4692 should not be more than a handful of assertions registered per
4693 name. If this becomes a performance problem, a table hashed by
4694 COMP_CODE and VAL could be implemented. */
4695 loc = asserts_for[SSA_NAME_VERSION (name)];
4696 last_loc = loc;
4697 while (loc)
4699 if (loc->comp_code == comp_code
4700 && (loc->val == val
4701 || operand_equal_p (loc->val, val, 0))
4702 && (loc->expr == expr
4703 || operand_equal_p (loc->expr, expr, 0)))
4705 /* If E is not a critical edge and DEST_BB
4706 dominates the existing location for the assertion, move
4707 the assertion up in the dominance tree by updating its
4708 location information. */
4709 if ((e == NULL || !EDGE_CRITICAL_P (e))
4710 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4712 loc->bb = dest_bb;
4713 loc->e = e;
4714 loc->si = si;
4715 return;
4719 /* Update the last node of the list and move to the next one. */
4720 last_loc = loc;
4721 loc = loc->next;
4724 /* If we didn't find an assertion already registered for
4725 NAME COMP_CODE VAL, add a new one at the end of the list of
4726 assertions associated with NAME. */
4727 n = XNEW (struct assert_locus_d);
4728 n->bb = dest_bb;
4729 n->e = e;
4730 n->si = si;
4731 n->comp_code = comp_code;
4732 n->val = val;
4733 n->expr = expr;
4734 n->next = NULL;
4736 if (last_loc)
4737 last_loc->next = n;
4738 else
4739 asserts_for[SSA_NAME_VERSION (name)] = n;
4741 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4744 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4745 Extract a suitable test code and value and store them into *CODE_P and
4746 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4748 If no extraction was possible, return FALSE, otherwise return TRUE.
4750 If INVERT is true, then we invert the result stored into *CODE_P. */
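/* For instance, for the predicate '10 < x_2' with NAME == x_2 we store
   GT_EXPR and 10, since the comparison is swapped so that NAME becomes
   the first operand; with INVERT true this becomes LE_EXPR. */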
4752 static bool
4753 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4754 tree cond_op0, tree cond_op1,
4755 bool invert, enum tree_code *code_p,
4756 tree *val_p)
4758 enum tree_code comp_code;
4759 tree val;
4761 /* We have a comparison of the form NAME COMP VAL
4762 or VAL COMP NAME. */
4763 if (name == cond_op1)
4765 /* If the predicate is of the form VAL COMP NAME, flip
4766 COMP around because we need to register NAME as the
4767 first operand in the predicate. */
4768 comp_code = swap_tree_comparison (cond_code);
4769 val = cond_op0;
4771 else
4773 /* The comparison is of the form NAME COMP VAL, so the
4774 comparison code remains unchanged. */
4775 comp_code = cond_code;
4776 val = cond_op1;
4779 /* Invert the comparison code as necessary. */
4780 if (invert)
4781 comp_code = invert_tree_comparison (comp_code, 0);
4783 /* VRP does not handle float types. */
4784 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4785 return false;
4787 /* Do not register always-false predicates.
4788 FIXME: this works around a limitation in fold() when dealing with
4789 enumerations. Given 'enum { N1, N2 } x;', fold will not
4790 fold 'if (x > N2)' to 'if (0)'. */
4791 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4792 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4794 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4795 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4797 if (comp_code == GT_EXPR
4798 && (!max
4799 || compare_values (val, max) == 0))
4800 return false;
4802 if (comp_code == LT_EXPR
4803 && (!min
4804 || compare_values (val, min) == 0))
4805 return false;
4807 *code_p = comp_code;
4808 *val_p = val;
4809 return true;
4812 /* Find the smallest RES such that RES > VAL && (RES & MASK) == RES, if any
4813 (otherwise return VAL). VAL and MASK must be zero-extended for
4814 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
4815 (to transform signed values into unsigned) and at the end xor
4816 SGNBIT back. */
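/* For instance, with VAL == 5 (101b), MASK == 12 (1100b) and
   SGNBIT == 0, the result is 8, the smallest value greater than 5
   whose set bits all lie within MASK. */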
4818 static double_int
4819 masked_increment (double_int val, double_int mask, double_int sgnbit,
4820 unsigned int prec)
4822 double_int bit = double_int_one, res;
4823 unsigned int i;
4825 val ^= sgnbit;
4826 for (i = 0; i < prec; i++, bit += bit)
4828 res = mask;
4829 if ((res & bit).is_zero ())
4830 continue;
4831 res = bit - double_int_one;
4832 res = (val + bit).and_not (res);
4833 res &= mask;
4834 if (res.ugt (val))
4835 return res ^ sgnbit;
4837 return val ^ sgnbit;
4840 /* Try to register an edge assertion for SSA name NAME on edge E for
4841 the condition COND contributing to the conditional jump pointed to by BSI.
4842 Invert the condition COND if INVERT is true.
4843 Return true if an assertion for NAME could be registered. */
4845 static bool
4846 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4847 enum tree_code cond_code,
4848 tree cond_op0, tree cond_op1, bool invert)
4850 tree val;
4851 enum tree_code comp_code;
4852 bool retval = false;
4854 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4855 cond_op0,
4856 cond_op1,
4857 invert, &comp_code, &val))
4858 return false;
4860 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4861 reachable from E. */
4862 if (live_on_edge (e, name)
4863 && !has_single_use (name))
4865 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4866 retval = true;
4869 /* In the case of NAME <= CST and NAME being defined as
4870 NAME = (unsigned) NAME2 + CST2, we can assert NAME2 >= -CST2
4871 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4872 This catches range and anti-range tests. */
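/* For instance, given
     n_5 = (unsigned int) y_4;
     x_6 = n_5 + 10;
     if (x_6 <= 30)
   we register asserts encoding n_5 + 10 <= 30 and
   (unsigned int) y_4 + 10 <= 30 on the true edge, from which a range
   of [-10, 20] can be derived for y_4. */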
4873 if ((comp_code == LE_EXPR
4874 || comp_code == GT_EXPR)
4875 && TREE_CODE (val) == INTEGER_CST
4876 && TYPE_UNSIGNED (TREE_TYPE (val)))
4878 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4879 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4881 /* Extract CST2 from the (optional) addition. */
4882 if (is_gimple_assign (def_stmt)
4883 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4885 name2 = gimple_assign_rhs1 (def_stmt);
4886 cst2 = gimple_assign_rhs2 (def_stmt);
4887 if (TREE_CODE (name2) == SSA_NAME
4888 && TREE_CODE (cst2) == INTEGER_CST)
4889 def_stmt = SSA_NAME_DEF_STMT (name2);
4892 /* Extract NAME2 from the (optional) sign-changing cast. */
4893 if (gimple_assign_cast_p (def_stmt))
4895 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4896 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4897 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4898 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4899 name3 = gimple_assign_rhs1 (def_stmt);
4902 /* If name3 is used later, create an ASSERT_EXPR for it. */
4903 if (name3 != NULL_TREE
4904 && TREE_CODE (name3) == SSA_NAME
4905 && (cst2 == NULL_TREE
4906 || TREE_CODE (cst2) == INTEGER_CST)
4907 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4908 && live_on_edge (e, name3)
4909 && !has_single_use (name3))
4911 tree tmp;
4913 /* Build an expression for the range test. */
4914 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4915 if (cst2 != NULL_TREE)
4916 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4918 if (dump_file)
4920 fprintf (dump_file, "Adding assert for ");
4921 print_generic_expr (dump_file, name3, 0);
4922 fprintf (dump_file, " from ");
4923 print_generic_expr (dump_file, tmp, 0);
4924 fprintf (dump_file, "\n");
4927 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4929 retval = true;
4932 /* If name2 is used later, create an ASSERT_EXPR for it. */
4933 if (name2 != NULL_TREE
4934 && TREE_CODE (name2) == SSA_NAME
4935 && TREE_CODE (cst2) == INTEGER_CST
4936 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4937 && live_on_edge (e, name2)
4938 && !has_single_use (name2))
4940 tree tmp;
4942 /* Build an expression for the range test. */
4943 tmp = name2;
4944 if (TREE_TYPE (name) != TREE_TYPE (name2))
4945 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4946 if (cst2 != NULL_TREE)
4947 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4949 if (dump_file)
4951 fprintf (dump_file, "Adding assert for ");
4952 print_generic_expr (dump_file, name2, 0);
4953 fprintf (dump_file, " from ");
4954 print_generic_expr (dump_file, tmp, 0);
4955 fprintf (dump_file, "\n");
4958 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4960 retval = true;
4964 /* In the case of post-in/decrement tests like if (i++) ... and uses
4965 of the in/decremented value on the edge, the extra name we want to
4966 assert for is not on the def chain of the name compared. Instead
4967 it is in the set of use stmts. */
4968 if ((comp_code == NE_EXPR
4969 || comp_code == EQ_EXPR)
4970 && TREE_CODE (val) == INTEGER_CST)
4972 imm_use_iterator ui;
4973 gimple use_stmt;
4974 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
4976 /* Only consider use stmts that are in the predecessor block. */
4977 if (gimple_bb (use_stmt) != e->src)
4978 continue;
4980 if (!is_gimple_assign (use_stmt))
4981 continue;
4983 enum tree_code code = gimple_assign_rhs_code (use_stmt);
4984 if (code != PLUS_EXPR
4985 && code != MINUS_EXPR)
4986 continue;
4988 tree cst = gimple_assign_rhs2 (use_stmt);
4989 if (TREE_CODE (cst) != INTEGER_CST)
4990 continue;
4992 tree name2 = gimple_assign_lhs (use_stmt);
4993 if (live_on_edge (e, name2))
4995 cst = int_const_binop (code, val, cst);
4996 register_new_assert_for (name2, name2, comp_code, cst,
4997 NULL, e, bsi);
4998 retval = true;
5003 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
5004 && TREE_CODE (val) == INTEGER_CST)
5006 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5007 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
5008 tree val2 = NULL_TREE;
5009 double_int mask = double_int_zero;
5010 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
5011 unsigned int nprec = prec;
5012 enum tree_code rhs_code = ERROR_MARK;
5014 if (is_gimple_assign (def_stmt))
5015 rhs_code = gimple_assign_rhs_code (def_stmt);
5017 /* Add asserts for NAME cmp CST and NAME being defined
5018 as NAME = (int) NAME2. */
5019 if (!TYPE_UNSIGNED (TREE_TYPE (val))
5020 && (comp_code == LE_EXPR || comp_code == LT_EXPR
5021 || comp_code == GT_EXPR || comp_code == GE_EXPR)
5022 && gimple_assign_cast_p (def_stmt))
5024 name2 = gimple_assign_rhs1 (def_stmt);
5025 if (CONVERT_EXPR_CODE_P (rhs_code)
5026 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5027 && TYPE_UNSIGNED (TREE_TYPE (name2))
5028 && prec == TYPE_PRECISION (TREE_TYPE (name2))
5029 && (comp_code == LE_EXPR || comp_code == GT_EXPR
5030 || !tree_int_cst_equal (val,
5031 TYPE_MIN_VALUE (TREE_TYPE (val))))
5032 && live_on_edge (e, name2)
5033 && !has_single_use (name2))
5035 tree tmp, cst;
5036 enum tree_code new_comp_code = comp_code;
5038 cst = fold_convert (TREE_TYPE (name2),
5039 TYPE_MIN_VALUE (TREE_TYPE (val)));
5040 /* Build an expression for the range test. */
5041 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
5042 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
5043 fold_convert (TREE_TYPE (name2), val));
5044 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5046 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
5047 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
5048 build_int_cst (TREE_TYPE (name2), 1));
5051 if (dump_file)
5053 fprintf (dump_file, "Adding assert for ");
5054 print_generic_expr (dump_file, name2, 0);
5055 fprintf (dump_file, " from ");
5056 print_generic_expr (dump_file, tmp, 0);
5057 fprintf (dump_file, "\n");
5060 register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
5061 e, bsi);
5063 retval = true;
5067 /* Add asserts for NAME cmp CST and NAME being defined as
5068 NAME = NAME2 >> CST2.
5070 Extract CST2 from the right shift. */
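/* For instance, from 'x_5 = y_4 >> 4; if (x_5 == 3)' we register an
   assert encoding (unsigned) y_4 - 48 <= 15, i.e. y_4 viewed as
   unsigned lies in [48, 63]. */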
5071 if (rhs_code == RSHIFT_EXPR)
5073 name2 = gimple_assign_rhs1 (def_stmt);
5074 cst2 = gimple_assign_rhs2 (def_stmt);
5075 if (TREE_CODE (name2) == SSA_NAME
5076 && host_integerp (cst2, 1)
5077 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5078 && IN_RANGE (tree_low_cst (cst2, 1), 1, prec - 1)
5079 && prec <= HOST_BITS_PER_DOUBLE_INT
5080 && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
5081 && live_on_edge (e, name2)
5082 && !has_single_use (name2))
5084 mask = double_int::mask (tree_low_cst (cst2, 1));
5085 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
5088 if (val2 != NULL_TREE
5089 && TREE_CODE (val2) == INTEGER_CST
5090 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
5091 TREE_TYPE (val),
5092 val2, cst2), val))
5094 enum tree_code new_comp_code = comp_code;
5095 tree tmp, new_val;
5097 tmp = name2;
5098 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
5100 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5102 tree type = build_nonstandard_integer_type (prec, 1);
5103 tmp = build1 (NOP_EXPR, type, name2);
5104 val2 = fold_convert (type, val2);
5106 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
5107 new_val = double_int_to_tree (TREE_TYPE (tmp), mask);
5108 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
5110 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5112 double_int minval
5113 = double_int::min_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
5114 new_val = val2;
5115 if (minval == tree_to_double_int (new_val))
5116 new_val = NULL_TREE;
5118 else
5120 double_int maxval
5121 = double_int::max_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
5122 mask |= tree_to_double_int (val2);
5123 if (mask == maxval)
5124 new_val = NULL_TREE;
5125 else
5126 new_val = double_int_to_tree (TREE_TYPE (val2), mask);
5129 if (new_val)
5131 if (dump_file)
5133 fprintf (dump_file, "Adding assert for ");
5134 print_generic_expr (dump_file, name2, 0);
5135 fprintf (dump_file, " from ");
5136 print_generic_expr (dump_file, tmp, 0);
5137 fprintf (dump_file, "\n");
5140 register_new_assert_for (name2, tmp, new_comp_code, new_val,
5141 NULL, e, bsi);
5142 retval = true;
5146 /* Add asserts for NAME cmp CST and NAME being defined as
5147 NAME = NAME2 & CST2.
5149 Extract CST2 from the and.
5151 Also handle
5152 NAME = (unsigned) NAME2;
5153 casts where NAME's type is unsigned and has smaller precision
5154 than NAME2's type as if it was NAME = NAME2 & MASK. */
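/* For instance, from 'x_5 = y_4 & 0xf0; if (x_5 == 0x40)' the
   EQ_EXPR case below computes minv = 0x40 and maxv = 0x40 | ~0xf0,
   and an assert is registered restricting y_4, viewed as unsigned,
   to that interval. */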
5155 names[0] = NULL_TREE;
5156 names[1] = NULL_TREE;
5157 cst2 = NULL_TREE;
5158 if (rhs_code == BIT_AND_EXPR
5159 || (CONVERT_EXPR_CODE_P (rhs_code)
5160 && TREE_CODE (TREE_TYPE (val)) == INTEGER_TYPE
5161 && TYPE_UNSIGNED (TREE_TYPE (val))
5162 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5163 > prec
5164 && !retval))
5166 name2 = gimple_assign_rhs1 (def_stmt);
5167 if (rhs_code == BIT_AND_EXPR)
5168 cst2 = gimple_assign_rhs2 (def_stmt);
5169 else
5171 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
5172 nprec = TYPE_PRECISION (TREE_TYPE (name2));
5174 if (TREE_CODE (name2) == SSA_NAME
5175 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5176 && TREE_CODE (cst2) == INTEGER_CST
5177 && !integer_zerop (cst2)
5178 && nprec <= HOST_BITS_PER_DOUBLE_INT
5179 && (nprec > 1
5180 || TYPE_UNSIGNED (TREE_TYPE (val))))
5182 gimple def_stmt2 = SSA_NAME_DEF_STMT (name2);
5183 if (gimple_assign_cast_p (def_stmt2))
5185 names[1] = gimple_assign_rhs1 (def_stmt2);
5186 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
5187 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
5188 || (TYPE_PRECISION (TREE_TYPE (name2))
5189 != TYPE_PRECISION (TREE_TYPE (names[1])))
5190 || !live_on_edge (e, names[1])
5191 || has_single_use (names[1]))
5192 names[1] = NULL_TREE;
5194 if (live_on_edge (e, name2)
5195 && !has_single_use (name2))
5196 names[0] = name2;
5199 if (names[0] || names[1])
5201 double_int minv, maxv = double_int_zero, valv, cst2v;
5202 double_int tem, sgnbit;
5203 bool valid_p = false, valn = false, cst2n = false;
5204 enum tree_code ccode = comp_code;
5206 valv = tree_to_double_int (val).zext (nprec);
5207 cst2v = tree_to_double_int (cst2).zext (nprec);
5208 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5210 valn = valv.sext (nprec).is_negative ();
5211 cst2n = cst2v.sext (nprec).is_negative ();
5213 /* If CST2 doesn't have the most significant bit set
5214 but VAL is negative, we have a comparison like
5215 if ((x & 0x123) > -4) (always true). Just give up. */
5216 if (!cst2n && valn)
5217 ccode = ERROR_MARK;
5218 if (cst2n)
5219 sgnbit = double_int_one.llshift (nprec - 1, nprec).zext (nprec);
5220 else
5221 sgnbit = double_int_zero;
5222 minv = valv & cst2v;
5223 switch (ccode)
5225 case EQ_EXPR:
5226 /* Minimum unsigned value for equality is VAL & CST2
5227 (should be equal to VAL, otherwise we probably should
5228 have folded the comparison into false) and
5229 maximum unsigned value is VAL | ~CST2. */
5230 maxv = valv | ~cst2v;
5231 maxv = maxv.zext (nprec);
5232 valid_p = true;
5233 break;
5234 case NE_EXPR:
5235 tem = valv | ~cst2v;
5236 tem = tem.zext (nprec);
5237 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
5238 if (valv.is_zero ())
5240 cst2n = false;
5241 sgnbit = double_int_zero;
5242 goto gt_expr;
5244 /* If (VAL | ~CST2) is all ones, handle it as
5245 (X & CST2) < VAL. */
5246 if (tem == double_int::mask (nprec))
5248 cst2n = false;
5249 valn = false;
5250 sgnbit = double_int_zero;
5251 goto lt_expr;
5253 if (!cst2n
5254 && cst2v.sext (nprec).is_negative ())
5255 sgnbit
5256 = double_int_one.llshift (nprec - 1, nprec).zext (nprec);
5257 if (!sgnbit.is_zero ())
5259 if (valv == sgnbit)
5261 cst2n = true;
5262 valn = true;
5263 goto gt_expr;
5265 if (tem == double_int::mask (nprec - 1))
5267 cst2n = true;
5268 goto lt_expr;
5270 if (!cst2n)
5271 sgnbit = double_int_zero;
5273 break;
5274 case GE_EXPR:
5275 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5276 is VAL and maximum unsigned value is ~0. For signed
5277 comparison, if CST2 doesn't have most significant bit
5278 set, handle it similarly. If CST2 has MSB set,
5279 the minimum is the same, and maximum is ~0U/2. */
5280 if (minv != valv)
5282 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5283 VAL. */
5284 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5285 if (minv == valv)
5286 break;
5288 maxv = double_int::mask (nprec - (cst2n ? 1 : 0));
5289 valid_p = true;
5290 break;
5291 case GT_EXPR:
5292 gt_expr:
5293 /* Find out smallest MINV where MINV > VAL
5294 && (MINV & CST2) == MINV, if any. If VAL is signed and
5295 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
5296 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5297 if (minv == valv)
5298 break;
5299 maxv = double_int::mask (nprec - (cst2n ? 1 : 0));
5300 valid_p = true;
5301 break;
5302 case LE_EXPR:
5303 /* Minimum unsigned value for <= is 0 and maximum
5304 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5305 Otherwise, find smallest VAL2 where VAL2 > VAL
5306 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5307 as maximum.
5308 For signed comparison, if CST2 doesn't have most
5309 significant bit set, handle it similarly. If CST2 has
5310 MSB set, the maximum is the same and minimum is INT_MIN. */
5311 if (minv == valv)
5312 maxv = valv;
5313 else
5315 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5316 if (maxv == valv)
5317 break;
5318 maxv -= double_int_one;
5320 maxv |= ~cst2v;
5321 maxv = maxv.zext (nprec);
5322 minv = sgnbit;
5323 valid_p = true;
5324 break;
5325 case LT_EXPR:
5326 lt_expr:
5327 /* Minimum unsigned value for < is 0 and maximum
5328 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5329 Otherwise, find smallest VAL2 where VAL2 > VAL
5330 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5331 as maximum.
5332 For signed comparison, if CST2 doesn't have most
5333 significant bit set, handle it similarly. If CST2 has
5334 MSB set, the maximum is the same and minimum is INT_MIN. */
5335 if (minv == valv)
5337 if (valv == sgnbit)
5338 break;
5339 maxv = valv;
5341 else
5343 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5344 if (maxv == valv)
5345 break;
5347 maxv -= double_int_one;
5348 maxv |= ~cst2v;
5349 maxv = maxv.zext (nprec);
5350 minv = sgnbit;
5351 valid_p = true;
5352 break;
5353 default:
5354 break;
5356 if (valid_p
5357 && (maxv - minv).zext (nprec) != double_int::mask (nprec))
5359 tree tmp, new_val, type;
5360 int i;
5362 for (i = 0; i < 2; i++)
5363 if (names[i])
5365 double_int maxv2 = maxv;
5366 tmp = names[i];
5367 type = TREE_TYPE (names[i]);
5368 if (!TYPE_UNSIGNED (type))
5370 type = build_nonstandard_integer_type (nprec, 1);
5371 tmp = build1 (NOP_EXPR, type, names[i]);
5373 if (!minv.is_zero ())
5375 tmp = build2 (PLUS_EXPR, type, tmp,
5376 double_int_to_tree (type, -minv));
5377 maxv2 = maxv - minv;
5379 new_val = double_int_to_tree (type, maxv2);
5381 if (dump_file)
5383 fprintf (dump_file, "Adding assert for ");
5384 print_generic_expr (dump_file, names[i], 0);
5385 fprintf (dump_file, " from ");
5386 print_generic_expr (dump_file, tmp, 0);
5387 fprintf (dump_file, "\n");
5390 register_new_assert_for (names[i], tmp, LE_EXPR,
5391 new_val, NULL, e, bsi);
5392 retval = true;
5398 return retval;
5401 /* OP is an operand of a truth value expression which is known to have
5402 a particular value. Register any asserts for OP and for any
5403 operands in OP's defining statement.
5405 If CODE is EQ_EXPR, then we want to register OP is zero (false),
5406 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
5408 static bool
5409 register_edge_assert_for_1 (tree op, enum tree_code code,
5410 edge e, gimple_stmt_iterator bsi)
5412 bool retval = false;
5413 gimple op_def;
5414 tree val;
5415 enum tree_code rhs_code;
5417 /* We only care about SSA_NAMEs. */
5418 if (TREE_CODE (op) != SSA_NAME)
5419 return false;
5421 /* We know that OP will have a zero or nonzero value. If OP is used
5422 more than once, go ahead and register an assert for OP.
5424 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
5425 it will always be set for OP (because OP is used in a COND_EXPR in
5426 the subgraph). */
5427 if (!has_single_use (op))
5429 val = build_int_cst (TREE_TYPE (op), 0);
5430 register_new_assert_for (op, op, code, val, NULL, e, bsi);
5431 retval = true;
5434 /* Now look at how OP is set. If it's set from a comparison,
5435 a truth operation or some bit operations, then we may be able
5436 to register information about the operands of that assignment. */
5437 op_def = SSA_NAME_DEF_STMT (op);
5438 if (gimple_code (op_def) != GIMPLE_ASSIGN)
5439 return retval;
5441 rhs_code = gimple_assign_rhs_code (op_def);
5443 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5445 bool invert = (code == EQ_EXPR ? true : false);
5446 tree op0 = gimple_assign_rhs1 (op_def);
5447 tree op1 = gimple_assign_rhs2 (op_def);
5449 if (TREE_CODE (op0) == SSA_NAME)
5450 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
5451 invert);
5452 if (TREE_CODE (op1) == SSA_NAME)
5453 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
5454 invert);
5456 else if ((code == NE_EXPR
5457 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5458 || (code == EQ_EXPR
5459 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5461 /* Recurse on each operand. */
5462 tree op0 = gimple_assign_rhs1 (op_def);
5463 tree op1 = gimple_assign_rhs2 (op_def);
5464 if (TREE_CODE (op0) == SSA_NAME
5465 && has_single_use (op0))
5466 retval |= register_edge_assert_for_1 (op0, code, e, bsi);
5467 if (TREE_CODE (op1) == SSA_NAME
5468 && has_single_use (op1))
5469 retval |= register_edge_assert_for_1 (op1, code, e, bsi);
5471 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5472 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5474 /* Recurse, flipping CODE. */
5475 code = invert_tree_comparison (code, false);
5476 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5477 code, e, bsi);
5479 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5481 /* Recurse through the copy. */
5482 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5483 code, e, bsi);
5485 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5487 /* Recurse through the type conversion. */
5488 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5489 code, e, bsi);
5492 return retval;
5495 /* Try to register an edge assertion for SSA name NAME on edge E for
5496 the condition COND contributing to the conditional jump pointed to by SI.
5497 Return true if an assertion for NAME could be registered. */
5499 static bool
5500 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
5501 enum tree_code cond_code, tree cond_op0,
5502 tree cond_op1)
5504 tree val;
5505 enum tree_code comp_code;
5506 bool retval = false;
5507 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
5509 /* Do not attempt to infer anything in names that flow through
5510 abnormal edges. */
5511 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
5512 return false;
5514 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5515 cond_op0, cond_op1,
5516 is_else_edge,
5517 &comp_code, &val))
5518 return false;
5520 /* Register ASSERT_EXPRs for name. */
5521 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
5522 cond_op1, is_else_edge);
5525 /* If COND is effectively an equality test of an SSA_NAME against
5526 the value zero or one, then we may be able to assert values
5527 for SSA_NAMEs which flow into COND. */
5529 /* In the case of NAME == 1 or NAME != 0, for a BIT_AND_EXPR defining
5530 statement of NAME we can assert that both operands of the BIT_AND_EXPR
5531 have a nonzero value. */
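/* For instance, for 'x_3 = a_1 & b_2; if (x_3 != 0)' we register
   a_1 != 0 and b_2 != 0 on the true edge. */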
5532 if (((comp_code == EQ_EXPR && integer_onep (val))
5533 || (comp_code == NE_EXPR && integer_zerop (val))))
5535 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5537 if (is_gimple_assign (def_stmt)
5538 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
5540 tree op0 = gimple_assign_rhs1 (def_stmt);
5541 tree op1 = gimple_assign_rhs2 (def_stmt);
5542 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
5543 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
5547 /* In the case of NAME == 0 or NAME != 1, for a BIT_IOR_EXPR defining
5548 statement of NAME we can assert that both operands of the BIT_IOR_EXPR
5549 have a zero value. */
5550 if (((comp_code == EQ_EXPR && integer_zerop (val))
5551 || (comp_code == NE_EXPR && integer_onep (val))))
5553 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5555 /* For a BIT_IOR_EXPR, both operands are necessarily zero only
5556 if NAME == 0, or if the type precision is one. */
5557 if (is_gimple_assign (def_stmt)
5558 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
5559 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
5560 || comp_code == EQ_EXPR)))
5562 tree op0 = gimple_assign_rhs1 (def_stmt);
5563 tree op1 = gimple_assign_rhs2 (def_stmt);
5564 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
5565 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
5569 return retval;
5573 /* Determine whether the outgoing edges of BB should receive an
5574 ASSERT_EXPR for each of the operands of BB's LAST statement.
5575 The last statement of BB must be a COND_EXPR.
5577 If any of the sub-graphs rooted at BB have an interesting use of
5578 the predicate operands, an assert location node is added to the
5579 list of assertions for the corresponding operands. */
5581 static bool
5582 find_conditional_asserts (basic_block bb, gimple last)
5584 bool need_assert;
5585 gimple_stmt_iterator bsi;
5586 tree op;
5587 edge_iterator ei;
5588 edge e;
5589 ssa_op_iter iter;
5591 need_assert = false;
5592 bsi = gsi_for_stmt (last);
5594 /* Look for uses of the operands in each of the sub-graphs
5595 rooted at BB. We need to check each of the outgoing edges
5596 separately, so that we know what kind of ASSERT_EXPR to
5597 insert. */
5598 FOR_EACH_EDGE (e, ei, bb->succs)
5600 if (e->dest == bb)
5601 continue;
5603 /* Register the necessary assertions for each operand in the
5604 conditional predicate. */
5605 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
5607 need_assert |= register_edge_assert_for (op, e, bsi,
5608 gimple_cond_code (last),
5609 gimple_cond_lhs (last),
5610 gimple_cond_rhs (last));
5614 return need_assert;
5617 struct case_info
5619 tree expr;
5620 basic_block bb;
5623 /* Compare two case labels sorting first by the destination bb index
5624 and then by the case value. */
5626 static int
5627 compare_case_labels (const void *p1, const void *p2)
5629 const struct case_info *ci1 = (const struct case_info *) p1;
5630 const struct case_info *ci2 = (const struct case_info *) p2;
5631 int idx1 = ci1->bb->index;
5632 int idx2 = ci2->bb->index;
5634 if (idx1 < idx2)
5635 return -1;
5636 else if (idx1 == idx2)
5638 /* Make sure the default label is first in a group. */
5639 if (!CASE_LOW (ci1->expr))
5640 return -1;
5641 else if (!CASE_LOW (ci2->expr))
5642 return 1;
5643 else
5644 return tree_int_cst_compare (CASE_LOW (ci1->expr),
5645 CASE_LOW (ci2->expr));
5647 else
5648 return 1;
5651 /* Determine whether the outgoing edges of BB should receive an
5652 ASSERT_EXPR for each of the operands of BB's LAST statement.
5653 The last statement of BB must be a SWITCH_EXPR.
5655 If any of the sub-graphs rooted at BB have an interesting use of
5656 the predicate operands, an assert location node is added to the
5657 list of assertions for the corresponding operands. */
5659 static bool
5660 find_switch_asserts (basic_block bb, gimple last)
5662 bool need_assert;
5663 gimple_stmt_iterator bsi;
5664 tree op;
5665 edge e;
5666 struct case_info *ci;
5667 size_t n = gimple_switch_num_labels (last);
5668 #if GCC_VERSION >= 4000
5669 unsigned int idx;
5670 #else
5671 /* Work around GCC 3.4 bug (PR 37086). */
5672 volatile unsigned int idx;
5673 #endif
5675 need_assert = false;
5676 bsi = gsi_for_stmt (last);
5677 op = gimple_switch_index (last);
5678 if (TREE_CODE (op) != SSA_NAME)
5679 return false;
5681 /* Build a vector of case labels sorted by destination label. */
5682 ci = XNEWVEC (struct case_info, n);
5683 for (idx = 0; idx < n; ++idx)
5685 ci[idx].expr = gimple_switch_label (last, idx);
5686 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
5688 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
5690 for (idx = 0; idx < n; ++idx)
5692 tree min, max;
5693 tree cl = ci[idx].expr;
5694 basic_block cbb = ci[idx].bb;
5696 min = CASE_LOW (cl);
5697 max = CASE_HIGH (cl);
5699 /* If there are multiple case labels with the same destination,
5700 we need to combine them into a single value range for the edge. */
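/* For instance, adjacent 'case 1: case 2: case 3:' labels that fall
   through to the same block are combined into the single range [1, 3],
   for which 'op >= 1' and 'op <= 3' are registered below. */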
5701 if (idx + 1 < n && cbb == ci[idx + 1].bb)
5703 /* Skip labels until the last of the group. */
5704 do {
5705 ++idx;
5706 } while (idx < n && cbb == ci[idx].bb);
5707 --idx;
5709 /* Pick up the maximum of the case label range. */
5710 if (CASE_HIGH (ci[idx].expr))
5711 max = CASE_HIGH (ci[idx].expr);
5712 else
5713 max = CASE_LOW (ci[idx].expr);
5716 /* Nothing to do if the range includes the default label until we
5717 can register anti-ranges. */
5718 if (min == NULL_TREE)
5719 continue;
5721 /* Find the edge to register the assert expr on. */
5722 e = find_edge (bb, cbb);
5724 /* Register the necessary assertions for the operand in the
5725 SWITCH_EXPR. */
5726 need_assert |= register_edge_assert_for (op, e, bsi,
5727 max ? GE_EXPR : EQ_EXPR,
5729 fold_convert (TREE_TYPE (op),
5730 min));
5731 if (max)
5733 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
5735 fold_convert (TREE_TYPE (op),
5736 max));
5740 XDELETEVEC (ci);
5741 return need_assert;
5745 /* Traverse all the statements in block BB looking for statements that
5746 may generate useful assertions for the SSA names in their operand.
5747 If a statement produces a useful assertion A for name N_i, then the
5748 list of assertions already generated for N_i is scanned to
5749 determine if A is actually needed.
5751 If N_i already had the assertion A at a location dominating the
5752 current location, then nothing needs to be done. Otherwise, the
5753 new location for A is recorded instead.
5755 1- For every statement S in BB, all the variables used by S are
5756 added to bitmap FOUND_IN_SUBGRAPH.
5758 2- If statement S uses an operand N in a way that exposes a known
5759 value range for N, then if N was not already generated by an
5760 ASSERT_EXPR, create a new assert location for N. For instance,
5761 if N is a pointer and the statement dereferences it, we can
5762 assume that N is not NULL.
5764 3- COND_EXPRs are a special case of #2. We can derive range
5765 information from the predicate but need to insert different
5766 ASSERT_EXPRs for each of the sub-graphs rooted at the
5767 conditional block. If the last statement of BB is a conditional
5768 expression of the form 'X op Y', then
5770 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
5772 b) If the conditional is the only entry point to the sub-graph
5773 corresponding to the THEN_CLAUSE, recurse into it. On
5774 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
5775 an ASSERT_EXPR is added for the corresponding variable.
5777 c) Repeat step (b) on the ELSE_CLAUSE.
5779 d) Mark X and Y in FOUND_IN_SUBGRAPH.
5781 For instance,
5783 if (a == 9)
5784 b = a;
5785 else
5786 b = c + 1;
5788 In this case, an assertion on the THEN clause is useful to
5789 determine that 'a' is always 9 on that edge. However, an assertion
5790 on the ELSE clause would be unnecessary.
5792 4- If BB does not end in a conditional expression, then we recurse
5793 into BB's dominator children.
5795 At the end of the recursive traversal, every SSA name will have a
5796 list of locations where ASSERT_EXPRs should be added. When a new
5797 location for name N is found, it is registered by calling
5798 register_new_assert_for. That function keeps track of all the
5799 registered assertions to prevent adding unnecessary assertions.
5800 For instance, if a pointer P_4 is dereferenced more than once in a
5801 dominator tree, only the location dominating all the dereferences of
5802 P_4 will receive an ASSERT_EXPR.
5804 If this function returns true, then it means that there are names
5805 for which we need to generate ASSERT_EXPRs. Those assertions are
5806 inserted by process_assert_insertions. */
5808 static bool
5809 find_assert_locations_1 (basic_block bb, sbitmap live)
5811 gimple_stmt_iterator si;
5812 gimple last;
5813 bool need_assert;
5815 need_assert = false;
5816 last = last_stmt (bb);
5818 /* If BB's last statement is a conditional statement involving integer
5819 operands, determine if we need to add ASSERT_EXPRs. */
5820 if (last
5821 && gimple_code (last) == GIMPLE_COND
5822 && !fp_predicate (last)
5823 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
5824 need_assert |= find_conditional_asserts (bb, last);
5826 /* If BB's last statement is a switch statement involving integer
5827 operands, determine if we need to add ASSERT_EXPRs. */
5828 if (last
5829 && gimple_code (last) == GIMPLE_SWITCH
5830 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
5831 need_assert |= find_switch_asserts (bb, last);
5833 /* Traverse all the statements in BB marking used names and looking
5834 for statements that may infer assertions for their used operands. */
5835 for (si = gsi_last_bb (bb); !gsi_end_p (si); gsi_prev (&si))
5837 gimple stmt;
5838 tree op;
5839 ssa_op_iter i;
5841 stmt = gsi_stmt (si);
5843 if (is_gimple_debug (stmt))
5844 continue;
5846 /* See if we can derive an assertion for any of STMT's operands. */
5847 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5849 tree value;
5850 enum tree_code comp_code;
5852 /* If op is not live beyond this stmt, do not bother to insert
5853 asserts for it. */
5854 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
5855 continue;
5857 /* If OP is used in such a way that we can infer a value
5858 range for it, and we don't find a previous assertion for
5859 it, create a new assertion location node for OP. */
5860 if (infer_value_range (stmt, op, &comp_code, &value))
5862 /* If we are able to infer a nonzero value range for OP,
5863 then walk backwards through the use-def chain to see if OP
5864 was set via a typecast.
5866 If so, then we can also infer a nonzero value range
5867 for the operand of the NOP_EXPR. */
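/* Illustrative example (made-up names): given

       q_2 = (long *) p_1;
       x_3 = *q_2;

   the dereference lets us infer q_2 != 0, and the walk below also
   registers p_1 != 0 after this statement (provided p_1 has other
   uses that could benefit from the assertion).  */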
5868 if (comp_code == NE_EXPR && integer_zerop (value))
5870 tree t = op;
5871 gimple def_stmt = SSA_NAME_DEF_STMT (t);
5873 while (is_gimple_assign (def_stmt)
5874 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
5875 && TREE_CODE
5876 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
5877 && POINTER_TYPE_P
5878 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
5880 t = gimple_assign_rhs1 (def_stmt);
5881 def_stmt = SSA_NAME_DEF_STMT (t);
5883 /* Note we want to register the assert for the
5884 operand of the NOP_EXPR after SI, not after the
5885 conversion. */
5886 if (! has_single_use (t))
5888 register_new_assert_for (t, t, comp_code, value,
5889 bb, NULL, si);
5890 need_assert = true;
5895 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
5896 need_assert = true;
5900 /* Update live. */
5901 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5902 bitmap_set_bit (live, SSA_NAME_VERSION (op));
5903 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
5904 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
5907 /* Traverse all PHI nodes in BB, updating live. */
5908 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5910 use_operand_p arg_p;
5911 ssa_op_iter i;
5912 gimple phi = gsi_stmt (si);
5913 tree res = gimple_phi_result (phi);
5915 if (virtual_operand_p (res))
5916 continue;
5918 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
5920 tree arg = USE_FROM_PTR (arg_p);
5921 if (TREE_CODE (arg) == SSA_NAME)
5922 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
5925 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
5928 return need_assert;
5931 /* Do an RPO walk over the function computing SSA name liveness
5932 on-the-fly and deciding on assert expressions to insert.
5933 Returns true if there are assert expressions to be inserted. */
5935 static bool
5936 find_assert_locations (void)
5938 int *rpo = XNEWVEC (int, last_basic_block);
5939 int *bb_rpo = XNEWVEC (int, last_basic_block);
5940 int *last_rpo = XCNEWVEC (int, last_basic_block);
5941 int rpo_cnt, i;
5942 bool need_asserts;
5944 live = XCNEWVEC (sbitmap, last_basic_block);
5945 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
5946 for (i = 0; i < rpo_cnt; ++i)
5947 bb_rpo[rpo[i]] = i;
5949 need_asserts = false;
5950 for (i = rpo_cnt - 1; i >= 0; --i)
5952 basic_block bb = BASIC_BLOCK (rpo[i]);
5953 edge e;
5954 edge_iterator ei;
5956 if (!live[rpo[i]])
5958 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
5959 bitmap_clear (live[rpo[i]]);
5962 /* Process BB and update the live information with uses in
5963 this block. */
5964 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
5966 /* Merge liveness into the predecessor blocks and free it. */
5967 if (!bitmap_empty_p (live[rpo[i]]))
5969 int pred_rpo = i;
5970 FOR_EACH_EDGE (e, ei, bb->preds)
5972 int pred = e->src->index;
5973 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
5974 continue;
5976 if (!live[pred])
5978 live[pred] = sbitmap_alloc (num_ssa_names);
5979 bitmap_clear (live[pred]);
5981 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
5983 if (bb_rpo[pred] < pred_rpo)
5984 pred_rpo = bb_rpo[pred];
5987 /* Record the RPO number of the last visited block that needs
5988 live information from this block. */
5989 last_rpo[rpo[i]] = pred_rpo;
5991 else
5993 sbitmap_free (live[rpo[i]]);
5994 live[rpo[i]] = NULL;
5997 /* We can free all successors' live bitmaps if all their
5998 predecessors have been visited already. */
5999 FOR_EACH_EDGE (e, ei, bb->succs)
6000 if (last_rpo[e->dest->index] == i
6001 && live[e->dest->index])
6003 sbitmap_free (live[e->dest->index]);
6004 live[e->dest->index] = NULL;
6008 XDELETEVEC (rpo);
6009 XDELETEVEC (bb_rpo);
6010 XDELETEVEC (last_rpo);
6011 for (i = 0; i < last_basic_block; ++i)
6012 if (live[i])
6013 sbitmap_free (live[i]);
6014 XDELETEVEC (live);
6016 return need_asserts;
6019 /* Create an ASSERT_EXPR for NAME and insert it in the location
6020 indicated by LOC. Return true if we made any edge insertions. */
6022 static bool
6023 process_assert_insertions_for (tree name, assert_locus_t loc)
6025 /* Build the comparison expression NAME_i COMP_CODE VAL. */
6026 gimple stmt;
6027 tree cond;
6028 gimple assert_stmt;
6029 edge_iterator ei;
6030 edge e;
6032 /* If we have X <=> X do not insert an assert expr for that. */
6033 if (loc->expr == loc->val)
6034 return false;
6036 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
6037 assert_stmt = build_assert_expr_for (cond, name);
6038 if (loc->e)
6040 /* We have been asked to insert the assertion on an edge. This
6041 is used only by COND_EXPR and SWITCH_EXPR assertions. */
6042 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
6043 || (gimple_code (gsi_stmt (loc->si))
6044 == GIMPLE_SWITCH));
6046 gsi_insert_on_edge (loc->e, assert_stmt);
6047 return true;
6050 /* Otherwise, we can insert right after LOC->SI, provided the
6051 statement is not the last statement in the block.  */
6052 stmt = gsi_stmt (loc->si);
6053 if (!stmt_ends_bb_p (stmt))
6055 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
6056 return false;
6059 /* If STMT must be the last statement in BB, we can only insert new
6060 assertions on the non-abnormal edge out of BB. Note that since
6061 STMT is not control flow, there may only be one non-abnormal edge
6062 out of BB. */
6063 FOR_EACH_EDGE (e, ei, loc->bb->succs)
6064 if (!(e->flags & EDGE_ABNORMAL))
6066 gsi_insert_on_edge (e, assert_stmt);
6067 return true;
6070 gcc_unreachable ();
6074 /* Process all the insertions registered for every name N_i registered
6075 in NEED_ASSERT_FOR. The list of assertions to be inserted are
6076 found in ASSERTS_FOR[i]. */
6078 static void
6079 process_assert_insertions (void)
6081 unsigned i;
6082 bitmap_iterator bi;
6083 bool update_edges_p = false;
6084 int num_asserts = 0;
6086 if (dump_file && (dump_flags & TDF_DETAILS))
6087 dump_all_asserts (dump_file);
6089 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
6091 assert_locus_t loc = asserts_for[i];
6092 gcc_assert (loc);
6094 while (loc)
6096 assert_locus_t next = loc->next;
6097 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
6098 free (loc);
6099 loc = next;
6100 num_asserts++;
6104 if (update_edges_p)
6105 gsi_commit_edge_inserts ();
6107 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
6108 num_asserts);
6112 /* Traverse the flowgraph looking for conditional jumps to insert range
6113 expressions. These range expressions are meant to provide information
6114 to optimizations that need to reason in terms of value ranges. They
6115 will not be expanded into RTL. For instance, given:
6117 x = ...
6118 y = ...
6119 if (x < y)
6120 y = x - 2;
6121 else
6122 x = y + 3;
6124 this pass will transform the code into:
6126 x = ...
6127 y = ...
6128 if (x < y)
6130 x = ASSERT_EXPR <x, x < y>
6131 y = x - 2
6133 else
6135 y = ASSERT_EXPR <y, x <= y>
6136 x = y + 3
6139 The idea is that once copy and constant propagation have run, other
6140 optimizations will be able to determine what ranges of values can 'x'
6141 take in different paths of the code, simply by checking the reaching
6142 definition of 'x'. */
6144 static void
6145 insert_range_assertions (void)
6147 need_assert_for = BITMAP_ALLOC (NULL);
6148 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
6150 calculate_dominance_info (CDI_DOMINATORS);
6152 if (find_assert_locations ())
6154 process_assert_insertions ();
6155 update_ssa (TODO_update_ssa_no_phi);
6158 if (dump_file && (dump_flags & TDF_DETAILS))
6160 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
6161 dump_function_to_file (current_function_decl, dump_file, dump_flags);
6164 free (asserts_for);
6165 BITMAP_FREE (need_assert_for);
6168 /* Check one ARRAY_REF in REF, located at LOCATION.  Ignore flexible
6169 arrays and "struct" hacks.  If VRP can determine that the array
6170 subscript is a constant, check whether it is outside the valid
6171 range.  If the array subscript is a RANGE, warn if it does not
6172 overlap with the valid range.
6173 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
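/* Illustrative example (assuming a declaration like 'int a[10]', so the
   valid subscripts are [0, 9]): accesses such as a[10] or a[-1] are
   diagnosed, while &a[10] is accepted when IGNORE_OFF_BY_ONE is true
   because taking the address one past the end is valid; &a[11] is
   still diagnosed.  */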
6175 static void
6176 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
6178 value_range_t* vr = NULL;
6179 tree low_sub, up_sub;
6180 tree low_bound, up_bound, up_bound_p1;
6181 tree base;
6183 if (TREE_NO_WARNING (ref))
6184 return;
6186 low_sub = up_sub = TREE_OPERAND (ref, 1);
6187 up_bound = array_ref_up_bound (ref);
6189 /* Cannot check flexible arrays. */
6190 if (!up_bound
6191 || TREE_CODE (up_bound) != INTEGER_CST)
6192 return;
6194 /* Accesses to trailing arrays via pointers may access storage
6195 beyond the type's array bounds. */
6196 base = get_base_address (ref);
6197 if (base && TREE_CODE (base) == MEM_REF)
6199 tree cref, next = NULL_TREE;
6201 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
6202 return;
6204 cref = TREE_OPERAND (ref, 0);
6205 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
6206 for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
6207 next && TREE_CODE (next) != FIELD_DECL;
6208 next = DECL_CHAIN (next))
6211 /* If this is the last field in a struct type or a field in a
6212 union type do not warn. */
6213 if (!next)
6214 return;
6217 low_bound = array_ref_low_bound (ref);
6218 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);
6220 if (TREE_CODE (low_sub) == SSA_NAME)
6222 vr = get_value_range (low_sub);
6223 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
6225 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
6226 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
6230 if (vr && vr->type == VR_ANTI_RANGE)
6232 if (TREE_CODE (up_sub) == INTEGER_CST
6233 && tree_int_cst_lt (up_bound, up_sub)
6234 && TREE_CODE (low_sub) == INTEGER_CST
6235 && tree_int_cst_lt (low_sub, low_bound))
6237 warning_at (location, OPT_Warray_bounds,
6238 "array subscript is outside array bounds");
6239 TREE_NO_WARNING (ref) = 1;
6242 else if (TREE_CODE (up_sub) == INTEGER_CST
6243 && (ignore_off_by_one
6244 ? (tree_int_cst_lt (up_bound, up_sub)
6245 && !tree_int_cst_equal (up_bound_p1, up_sub))
6246 : (tree_int_cst_lt (up_bound, up_sub)
6247 || tree_int_cst_equal (up_bound_p1, up_sub))))
6249 if (dump_file && (dump_flags & TDF_DETAILS))
6251 fprintf (dump_file, "Array bound warning for ");
6252 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6253 fprintf (dump_file, "\n");
6255 warning_at (location, OPT_Warray_bounds,
6256 "array subscript is above array bounds");
6257 TREE_NO_WARNING (ref) = 1;
6259 else if (TREE_CODE (low_sub) == INTEGER_CST
6260 && tree_int_cst_lt (low_sub, low_bound))
6262 if (dump_file && (dump_flags & TDF_DETAILS))
6264 fprintf (dump_file, "Array bound warning for ");
6265 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6266 fprintf (dump_file, "\n");
6268 warning_at (location, OPT_Warray_bounds,
6269 "array subscript is below array bounds");
6270 TREE_NO_WARNING (ref) = 1;
6274 /* Search the expression T, located at LOCATION, to see whether it
6275 computes the address of an ARRAY_REF, and call check_array_ref on it. */
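/* Illustrative example (made-up names): for

       p_1 = &a[i_2];
       foo (p_1);

   the use of p_1 as a call argument is traced back through its defining
   statement to the ADDR_EXPR, and the contained ARRAY_REF a[i_2] is
   checked with the off-by-one exception enabled.  */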
6277 static void
6278 search_for_addr_array (tree t, location_t location)
6280 while (TREE_CODE (t) == SSA_NAME)
6282 gimple g = SSA_NAME_DEF_STMT (t);
6284 if (gimple_code (g) != GIMPLE_ASSIGN)
6285 return;
6287 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
6288 != GIMPLE_SINGLE_RHS)
6289 return;
6291 t = gimple_assign_rhs1 (g);
6295 /* We are only interested in addresses of ARRAY_REF's. */
6296 if (TREE_CODE (t) != ADDR_EXPR)
6297 return;
6299 /* Check each ARRAY_REF in the reference chain. */
6302 if (TREE_CODE (t) == ARRAY_REF)
6303 check_array_ref (location, t, true /*ignore_off_by_one*/);
6305 t = TREE_OPERAND (t, 0);
6307 while (handled_component_p (t));
6309 if (TREE_CODE (t) == MEM_REF
6310 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
6311 && !TREE_NO_WARNING (t))
6313 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
6314 tree low_bound, up_bound, el_sz;
6315 double_int idx;
6316 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
6317 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
6318 || !TYPE_DOMAIN (TREE_TYPE (tem)))
6319 return;
6321 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6322 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6323 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
6324 if (!low_bound
6325 || TREE_CODE (low_bound) != INTEGER_CST
6326 || !up_bound
6327 || TREE_CODE (up_bound) != INTEGER_CST
6328 || !el_sz
6329 || TREE_CODE (el_sz) != INTEGER_CST)
6330 return;
6332 idx = mem_ref_offset (t);
6333 idx = idx.sdiv (tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
6334 if (idx.slt (double_int_zero))
6336 if (dump_file && (dump_flags & TDF_DETAILS))
6338 fprintf (dump_file, "Array bound warning for ");
6339 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6340 fprintf (dump_file, "\n");
6342 warning_at (location, OPT_Warray_bounds,
6343 "array subscript is below array bounds");
6344 TREE_NO_WARNING (t) = 1;
6346 else if (idx.sgt (tree_to_double_int (up_bound)
6347 - tree_to_double_int (low_bound)
6348 + double_int_one))
6350 if (dump_file && (dump_flags & TDF_DETAILS))
6352 fprintf (dump_file, "Array bound warning for ");
6353 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6354 fprintf (dump_file, "\n");
6356 warning_at (location, OPT_Warray_bounds,
6357 "array subscript is above array bounds");
6358 TREE_NO_WARNING (t) = 1;
6363 /* walk_tree() callback that checks if *TP is
6364 an ARRAY_REF inside an ADDR_EXPR (in which case an array
6365 subscript one past the valid range is allowed). Call
6366 check_array_ref for each ARRAY_REF found. The location is
6367 passed in DATA. */
6369 static tree
6370 check_array_bounds (tree *tp, int *walk_subtree, void *data)
6372 tree t = *tp;
6373 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6374 location_t location;
6376 if (EXPR_HAS_LOCATION (t))
6377 location = EXPR_LOCATION (t);
6378 else
6380 location_t *locp = (location_t *) wi->info;
6381 location = *locp;
6384 *walk_subtree = TRUE;
6386 if (TREE_CODE (t) == ARRAY_REF)
6387 check_array_ref (location, t, false /*ignore_off_by_one*/);
6389 if (TREE_CODE (t) == MEM_REF
6390 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
6391 search_for_addr_array (TREE_OPERAND (t, 0), location);
6393 if (TREE_CODE (t) == ADDR_EXPR)
6394 *walk_subtree = FALSE;
6396 return NULL_TREE;
6399 /* Walk over all statements of all reachable BBs and call check_array_bounds
6400 on them. */
6402 static void
6403 check_all_array_refs (void)
6405 basic_block bb;
6406 gimple_stmt_iterator si;
6408 FOR_EACH_BB (bb)
6410 edge_iterator ei;
6411 edge e;
6412 bool executable = false;
6414 /* Skip blocks that were found to be unreachable. */
6415 FOR_EACH_EDGE (e, ei, bb->preds)
6416 executable |= !!(e->flags & EDGE_EXECUTABLE);
6417 if (!executable)
6418 continue;
6420 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6422 gimple stmt = gsi_stmt (si);
6423 struct walk_stmt_info wi;
6424 if (!gimple_has_location (stmt))
6425 continue;
6427 if (is_gimple_call (stmt))
6429 size_t i;
6430 size_t n = gimple_call_num_args (stmt);
6431 for (i = 0; i < n; i++)
6433 tree arg = gimple_call_arg (stmt, i);
6434 search_for_addr_array (arg, gimple_location (stmt));
6437 else
6439 memset (&wi, 0, sizeof (wi));
6440 wi.info = CONST_CAST (void *, (const void *)
6441 gimple_location_ptr (stmt));
6443 walk_gimple_op (gsi_stmt (si),
6444 check_array_bounds,
6445 &wi);
6451 /* Convert range assertion expressions into the implied copies and
6452 copy propagate away the copies. Doing the trivial copy propagation
6453 here avoids the need to run the full copy propagation pass after
6454 VRP.
6456 FIXME, this will eventually lead to copy propagation removing the
6457 names that had useful range information attached to them. For
6458 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
6459 then N_i will have the range [4, +INF].
6461 However, by converting the assertion into the implied copy
6462 operation N_i = N_j, we will then copy-propagate N_j into the uses
6463 of N_i and lose the range information. We may want to hold on to
6464 ASSERT_EXPRs a little while longer as the ranges could be used in
6465 things like jump threading.
6467 The problem with keeping ASSERT_EXPRs around is that passes after
6468 VRP need to handle them appropriately.
6470 Another approach would be to make the range information a first
6471 class property of the SSA_NAME so that it can be queried from
6472 any pass. This is made somewhat more complex by the need for
6473 multiple ranges to be associated with one SSA_NAME. */
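/* Illustrative example (made-up names): the sequence

       x_4 = ASSERT_EXPR <x_3, x_3 > 3>;
       y_5 = x_4 + 1;

   becomes simply y_5 = x_3 + 1 once every use of x_4 is replaced by
   x_3 and the now dead assertion is removed.  */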
6475 static void
6476 remove_range_assertions (void)
6478 basic_block bb;
6479 gimple_stmt_iterator si;
6481 /* Note that the BSI iterator bump happens at the bottom of the
6482 loop and no bump is necessary if we're removing the statement
6483 referenced by the current BSI. */
6484 FOR_EACH_BB (bb)
6485 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
6487 gimple stmt = gsi_stmt (si);
6488 gimple use_stmt;
6490 if (is_gimple_assign (stmt)
6491 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
6493 tree rhs = gimple_assign_rhs1 (stmt);
6494 tree var;
6495 tree cond = fold (ASSERT_EXPR_COND (rhs));
6496 use_operand_p use_p;
6497 imm_use_iterator iter;
6499 gcc_assert (cond != boolean_false_node);
6501 /* Propagate the RHS into every use of the LHS. */
6502 var = ASSERT_EXPR_VAR (rhs);
6503 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
6504 gimple_assign_lhs (stmt))
6505 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
6507 SET_USE (use_p, var);
6508 gcc_assert (TREE_CODE (var) == SSA_NAME);
6511 /* And finally, remove the copy, it is not needed. */
6512 gsi_remove (&si, true);
6513 release_defs (stmt);
6515 else
6516 gsi_next (&si);
6521 /* Return true if STMT is interesting for VRP. */
6523 static bool
6524 stmt_interesting_for_vrp (gimple stmt)
6526 if (gimple_code (stmt) == GIMPLE_PHI)
6528 tree res = gimple_phi_result (stmt);
6529 return (!virtual_operand_p (res)
6530 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
6531 || POINTER_TYPE_P (TREE_TYPE (res))));
6533 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6535 tree lhs = gimple_get_lhs (stmt);
6537 /* In general, assignments with virtual operands are not useful
6538 for deriving ranges, with the obvious exception of calls to
6539 builtin functions. */
6540 if (lhs && TREE_CODE (lhs) == SSA_NAME
6541 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6542 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6543 && (is_gimple_call (stmt)
6544 || !gimple_vuse (stmt)))
6545 return true;
6547 else if (gimple_code (stmt) == GIMPLE_COND
6548 || gimple_code (stmt) == GIMPLE_SWITCH)
6549 return true;
6551 return false;
6555 /* Initialize local data structures for VRP. */
6557 static void
6558 vrp_initialize (void)
6560 basic_block bb;
6562 values_propagated = false;
6563 num_vr_values = num_ssa_names;
6564 vr_value = XCNEWVEC (value_range_t *, num_vr_values);
6565 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
6567 FOR_EACH_BB (bb)
6569 gimple_stmt_iterator si;
6571 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
6573 gimple phi = gsi_stmt (si);
6574 if (!stmt_interesting_for_vrp (phi))
6576 tree lhs = PHI_RESULT (phi);
6577 set_value_range_to_varying (get_value_range (lhs));
6578 prop_set_simulate_again (phi, false);
6580 else
6581 prop_set_simulate_again (phi, true);
6584 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6586 gimple stmt = gsi_stmt (si);
6588 /* If the statement is a control statement, then we must
6589 simulate it at least once.  Failing to do so means that
6590 its outgoing edges will never get added. */
6591 if (stmt_ends_bb_p (stmt))
6592 prop_set_simulate_again (stmt, true);
6593 else if (!stmt_interesting_for_vrp (stmt))
6595 ssa_op_iter i;
6596 tree def;
6597 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
6598 set_value_range_to_varying (get_value_range (def));
6599 prop_set_simulate_again (stmt, false);
6601 else
6602 prop_set_simulate_again (stmt, true);
6607 /* Return the singleton value of the value-range for NAME, or NAME itself. */
6609 static inline tree
6610 vrp_valueize (tree name)
6612 if (TREE_CODE (name) == SSA_NAME)
6614 value_range_t *vr = get_value_range (name);
6615 if (vr->type == VR_RANGE
6616 && (vr->min == vr->max
6617 || operand_equal_p (vr->min, vr->max, 0)))
6618 return vr->min;
6620 return name;
6623 /* Visit assignment STMT. If it produces an interesting range, record
6624 the SSA name in *OUTPUT_P. */
6626 static enum ssa_prop_result
6627 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
6629 tree def, lhs;
6630 ssa_op_iter iter;
6631 enum gimple_code code = gimple_code (stmt);
6632 lhs = gimple_get_lhs (stmt);
6634 /* We only keep track of ranges in integral and pointer types. */
6635 if (TREE_CODE (lhs) == SSA_NAME
6636 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6637 /* It is valid to have NULL MIN/MAX values on a type. See
6638 build_range_type. */
6639 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
6640 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
6641 || POINTER_TYPE_P (TREE_TYPE (lhs))))
6643 value_range_t new_vr = VR_INITIALIZER;
6645 /* Try folding the statement to a constant first. */
6646 tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
6647 if (tem && !is_overflow_infinity (tem))
6648 set_value_range (&new_vr, VR_RANGE, tem, tem, NULL);
6649 /* Then dispatch to value-range extracting functions. */
6650 else if (code == GIMPLE_CALL)
6651 extract_range_basic (&new_vr, stmt);
6652 else
6653 extract_range_from_assignment (&new_vr, stmt);
6655 if (update_value_range (lhs, &new_vr))
6657 *output_p = lhs;
6659 if (dump_file && (dump_flags & TDF_DETAILS))
6661 fprintf (dump_file, "Found new range for ");
6662 print_generic_expr (dump_file, lhs, 0);
6663 fprintf (dump_file, ": ");
6664 dump_value_range (dump_file, &new_vr);
6665 fprintf (dump_file, "\n\n");
6668 if (new_vr.type == VR_VARYING)
6669 return SSA_PROP_VARYING;
6671 return SSA_PROP_INTERESTING;
6674 return SSA_PROP_NOT_INTERESTING;
6677 /* Every other statement produces no useful ranges. */
6678 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6679 set_value_range_to_varying (get_value_range (def));
6681 return SSA_PROP_VARYING;
6684 /* Helper that gets the value range of the SSA_NAME with version I,
6685 or, if that range is varying or undefined, a symbolic range
6686 containing only the SSA_NAME itself. */
6688 static inline value_range_t
6689 get_vr_for_comparison (int i)
6691 value_range_t vr = *get_value_range (ssa_name (i));
6693 /* If name N_i does not have a valid range, use N_i as its own
6694 range. This allows us to compare against names that may
6695 have N_i in their ranges. */
6696 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
6698 vr.type = VR_RANGE;
6699 vr.min = ssa_name (i);
6700 vr.max = ssa_name (i);
6703 return vr;
6706 /* Compare all the value ranges for names equivalent to VAR with VAL
6707 using comparison code COMP. Return the same value returned by
6708 compare_range_with_value, including the setting of
6709 *STRICT_OVERFLOW_P. */
6711 static tree
6712 compare_name_with_value (enum tree_code comp, tree var, tree val,
6713 bool *strict_overflow_p)
6715 bitmap_iterator bi;
6716 unsigned i;
6717 bitmap e;
6718 tree retval, t;
6719 int used_strict_overflow;
6720 bool sop;
6721 value_range_t equiv_vr;
6723 /* Get the set of equivalences for VAR. */
6724 e = get_value_range (var)->equiv;
6726 /* Start at -1. Set it to 0 if we do a comparison without relying
6727 on overflow, or 1 if all comparisons rely on overflow. */
6728 used_strict_overflow = -1;
6730 /* Compare vars' value range with val. */
6731 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
6732 sop = false;
6733 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
6734 if (retval)
6735 used_strict_overflow = sop ? 1 : 0;
6737 /* If the equiv set is empty we have done all work we need to do. */
6738 if (e == NULL)
6740 if (retval
6741 && used_strict_overflow > 0)
6742 *strict_overflow_p = true;
6743 return retval;
6746 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
6748 equiv_vr = get_vr_for_comparison (i);
6749 sop = false;
6750 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
6751 if (t)
6753 /* If we get different answers from different members
6754 of the equivalence set this check must be in a dead
6755 code region. Folding it to a trap representation
6756 would be correct here. For now just return don't-know. */
6757 if (retval != NULL
6758 && t != retval)
6760 retval = NULL_TREE;
6761 break;
6763 retval = t;
6765 if (!sop)
6766 used_strict_overflow = 0;
6767 else if (used_strict_overflow < 0)
6768 used_strict_overflow = 1;
6772 if (retval
6773 && used_strict_overflow > 0)
6774 *strict_overflow_p = true;
6776 return retval;
6780 /* Given a comparison code COMP and names N1 and N2, compare all the
6781 ranges equivalent to N1 against all the ranges equivalent to N2
6782 to determine the value of N1 COMP N2. Return the same value
6783 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
6784 whether we relied on an overflow infinity in the comparison. */
6787 static tree
6788 compare_names (enum tree_code comp, tree n1, tree n2,
6789 bool *strict_overflow_p)
6791 tree t, retval;
6792 bitmap e1, e2;
6793 bitmap_iterator bi1, bi2;
6794 unsigned i1, i2;
6795 int used_strict_overflow;
6796 static bitmap_obstack *s_obstack = NULL;
6797 static bitmap s_e1 = NULL, s_e2 = NULL;
6799 /* Compare the ranges of every name equivalent to N1 against the
6800 ranges of every name equivalent to N2. */
6801 e1 = get_value_range (n1)->equiv;
6802 e2 = get_value_range (n2)->equiv;
6804 /* Use the fake bitmaps if e1 or e2 are not available. */
6805 if (s_obstack == NULL)
6807 s_obstack = XNEW (bitmap_obstack);
6808 bitmap_obstack_initialize (s_obstack);
6809 s_e1 = BITMAP_ALLOC (s_obstack);
6810 s_e2 = BITMAP_ALLOC (s_obstack);
6812 if (e1 == NULL)
6813 e1 = s_e1;
6814 if (e2 == NULL)
6815 e2 = s_e2;
6817 /* Add N1 and N2 to their own set of equivalences to avoid
6818 duplicating the body of the loop just to check N1 and N2
6819 ranges. */
6820 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
6821 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
6823 /* If the equivalence sets have a common intersection, then the two
6824 names can be compared without checking their ranges. */
6825 if (bitmap_intersect_p (e1, e2))
6827 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6828 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6830 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
6831 ? boolean_true_node
6832 : boolean_false_node;
6835 /* Start at -1. Set it to 0 if we do a comparison without relying
6836 on overflow, or 1 if all comparisons rely on overflow. */
6837 used_strict_overflow = -1;
6839 /* Otherwise, compare all the equivalent ranges.  N1 and N2 were
6840 already added to their own equivalence sets above, so the loops
6841 below also cover the ranges of N1 and N2 themselves. */
6842 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
6844 value_range_t vr1 = get_vr_for_comparison (i1);
6846 t = retval = NULL_TREE;
6847 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
6849 bool sop = false;
6851 value_range_t vr2 = get_vr_for_comparison (i2);
6853 t = compare_ranges (comp, &vr1, &vr2, &sop);
6854 if (t)
6856 /* If we get different answers from different members
6857 of the equivalence set this check must be in a dead
6858 code region. Folding it to a trap representation
6859 would be correct here. For now just return don't-know. */
6860 if (retval != NULL
6861 && t != retval)
6863 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6864 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6865 return NULL_TREE;
6867 retval = t;
6869 if (!sop)
6870 used_strict_overflow = 0;
6871 else if (used_strict_overflow < 0)
6872 used_strict_overflow = 1;
6876 if (retval)
6878 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6879 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6880 if (used_strict_overflow > 0)
6881 *strict_overflow_p = true;
6882 return retval;
6886 /* None of the equivalent ranges are useful in computing this
6887 comparison. */
6888 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6889 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6890 return NULL_TREE;
6893 /* Helper function for vrp_evaluate_conditional_warnv. */
6895 static tree
6896 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
6897 tree op0, tree op1,
6898 bool * strict_overflow_p)
6900 value_range_t *vr0, *vr1;
6902 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
6903 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
6905 if (vr0 && vr1)
6906 return compare_ranges (code, vr0, vr1, strict_overflow_p);
6907 else if (vr0 && vr1 == NULL)
6908 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
6909 else if (vr0 == NULL && vr1)
6910 return (compare_range_with_value
6911 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
6912 return NULL;
6915 /* Helper function for vrp_evaluate_conditional_warnv. */
6917 static tree
6918 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
6919 tree op1, bool use_equiv_p,
6920 bool *strict_overflow_p, bool *only_ranges)
6922 tree ret;
6923 if (only_ranges)
6924 *only_ranges = true;
6926 /* We only deal with integral and pointer types. */
6927 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
6928 && !POINTER_TYPE_P (TREE_TYPE (op0)))
6929 return NULL_TREE;
6931 if (use_equiv_p)
6933 if (only_ranges
6934 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
6935 (code, op0, op1, strict_overflow_p)))
6936 return ret;
6937 *only_ranges = false;
6938 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
6939 return compare_names (code, op0, op1, strict_overflow_p);
6940 else if (TREE_CODE (op0) == SSA_NAME)
6941 return compare_name_with_value (code, op0, op1, strict_overflow_p);
6942 else if (TREE_CODE (op1) == SSA_NAME)
6943 return (compare_name_with_value
6944 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
6946 else
6947 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
6948 strict_overflow_p);
6949 return NULL_TREE;
6952 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
6953 information. Return NULL if the conditional cannot be evaluated.
6954 The ranges of all the names equivalent with the operands in COND
6955 will be used when trying to compute the value. If the result is
6956 based on undefined signed overflow, issue a warning if
6957 appropriate. */
6959 static tree
6960 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
6962 bool sop;
6963 tree ret;
6964 bool only_ranges;
6966 /* Some passes and foldings leak constants with overflow flag set
6967 into the IL. Avoid doing wrong things with these and bail out. */
6968 if ((TREE_CODE (op0) == INTEGER_CST
6969 && TREE_OVERFLOW (op0))
6970 || (TREE_CODE (op1) == INTEGER_CST
6971 && TREE_OVERFLOW (op1)))
6972 return NULL_TREE;
6974 sop = false;
6975 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
6976 &only_ranges);
6978 if (ret && sop)
6980 enum warn_strict_overflow_code wc;
6981 const char* warnmsg;
6983 if (is_gimple_min_invariant (ret))
6985 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
6986 warnmsg = G_("assuming signed overflow does not occur when "
6987 "simplifying conditional to constant");
6989 else
6991 wc = WARN_STRICT_OVERFLOW_COMPARISON;
6992 warnmsg = G_("assuming signed overflow does not occur when "
6993 "simplifying conditional");
6996 if (issue_strict_overflow_warning (wc))
6998 location_t location;
7000 if (!gimple_has_location (stmt))
7001 location = input_location;
7002 else
7003 location = gimple_location (stmt);
7004 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
7008 if (warn_type_limits
7009 && ret && only_ranges
7010 && TREE_CODE_CLASS (code) == tcc_comparison
7011 && TREE_CODE (op0) == SSA_NAME)
7013 /* If the comparison is being folded and the operand on the LHS
7014 is being compared against a constant value that is outside of
7015 the natural range of OP0's type, then the predicate will
7016 always fold regardless of the value of OP0. If -Wtype-limits
7017 was specified, emit a warning. */
7018 tree type = TREE_TYPE (op0);
7019 value_range_t *vr0 = get_value_range (op0);
7021 if (vr0->type != VR_VARYING
7022 && INTEGRAL_TYPE_P (type)
7023 && vrp_val_is_min (vr0->min)
7024 && vrp_val_is_max (vr0->max)
7025 && is_gimple_min_invariant (op1))
7027 location_t location;
7029 if (!gimple_has_location (stmt))
7030 location = input_location;
7031 else
7032 location = gimple_location (stmt);
7034 warning_at (location, OPT_Wtype_limits,
7035 integer_zerop (ret)
7036 ? G_("comparison always false "
7037 "due to limited range of data type")
7038 : G_("comparison always true "
7039 "due to limited range of data type"));
7043 return ret;
7047 /* Visit conditional statement STMT. If we can determine which edge
7048 will be taken out of STMT's basic block, record it in
7049 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
7050 SSA_PROP_VARYING. */
7052 static enum ssa_prop_result
7053 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
7055 tree val;
7056 bool sop;
7058 *taken_edge_p = NULL;
7060 if (dump_file && (dump_flags & TDF_DETAILS))
7062 tree use;
7063 ssa_op_iter i;
7065 fprintf (dump_file, "\nVisiting conditional with predicate: ");
7066 print_gimple_stmt (dump_file, stmt, 0, 0);
7067 fprintf (dump_file, "\nWith known ranges\n");
7069 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
7071 fprintf (dump_file, "\t");
7072 print_generic_expr (dump_file, use, 0);
7073 fprintf (dump_file, ": ");
7074 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
7077 fprintf (dump_file, "\n");
7080 /* Compute the value of the predicate COND by checking the known
7081 ranges of each of its operands.
7083 Note that we cannot evaluate all the equivalent ranges here
7084 because those ranges may not yet be final and with the current
7085 propagation strategy, we cannot determine when the value ranges
7086 of the names in the equivalence set have changed.
7088 For instance, given the following code fragment
7090 i_5 = PHI <8, i_13>
7092 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
7093 if (i_14 == 1)
7096 Assume that on the first visit to i_14, i_5 has the temporary
7097 range [8, 8] because the second argument to the PHI function is
7098 not yet executable. We derive the range ~[0, 0] for i_14 and the
7099 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
7100 the first time, since i_14 is equivalent to the range [8, 8], we
7101 determine that the predicate is always false.
7103 On the next round of propagation, i_13 is determined to be
7104 VARYING, which causes i_5 to drop down to VARYING. So, another
7105 visit to i_14 is scheduled. In this second visit, we compute the
7106 exact same range and equivalence set for i_14, namely ~[0, 0] and
7107 { i_5 }. But we did not have the previous range for i_5
7108 registered, so vrp_visit_assignment thinks that the range for
7109 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
7110 is not visited again, which stops propagation from visiting
7111 statements in the THEN clause of that if().
7113 To properly fix this we would need to keep the previous range
7114 value for the names in the equivalence set. This way we would've
7115 discovered that from one visit to the other i_5 changed from
7116 range [8, 8] to VR_VARYING.
7118 However, fixing this apparent limitation may not be worth the
7119 additional checking. Testing on several code bases (GCC, DLV,
7120 MICO, TRAMP3D and SPEC2000) showed that doing this results in
7121 4 more predicates folded in SPEC. */
7122 sop = false;
7124 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
7125 gimple_cond_lhs (stmt),
7126 gimple_cond_rhs (stmt),
7127 false, &sop, NULL);
7128 if (val)
7130 if (!sop)
7131 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
7132 else
7134 if (dump_file && (dump_flags & TDF_DETAILS))
7135 fprintf (dump_file,
7136 "\nIgnoring predicate evaluation because "
7137 "it assumes that signed overflow is undefined");
7138 val = NULL_TREE;
7142 if (dump_file && (dump_flags & TDF_DETAILS))
7144 fprintf (dump_file, "\nPredicate evaluates to: ");
7145 if (val == NULL_TREE)
7146 fprintf (dump_file, "DON'T KNOW\n");
7147 else
7148 print_generic_stmt (dump_file, val, 0);
7151 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
7154 /* Search the case label vector of switch STMT for the index *IDX of the
7155 CASE_LABEL that includes the value VAL. The search is restricted to the
7156 range [START_IDX, n - 1] where n is the number of case labels.
7158 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
7159 returned.
7161 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
7162 it is placed in IDX and false is returned.
7164 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
7165 returned. */
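/* Illustrative example (assuming a switch whose labels, in order, have
   CASE_LOW values 1, 5 and 9 and no CASE_HIGH): searching for VAL == 5
   stores the index of that label in *IDX and returns true; searching for
   VAL == 6 stores the index of the label for 9 and returns false;
   searching for VAL == 10 stores n and returns false.  */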
7167 static bool
7168 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
7170 size_t n = gimple_switch_num_labels (stmt);
7171 size_t low, high;
7173 /* Binary search for the case label that includes VAL, or the next
7174 larger one. At each iteration we search in [low, high - 1]. */
7176 for (low = start_idx, high = n; high != low; )
7178 tree t;
7179 int cmp;
7180 /* Note that i != high, so we never ask for n. */
7181 size_t i = (high + low) / 2;
7182 t = gimple_switch_label (stmt, i);
7184 /* Cache the result of comparing CASE_LOW and val. */
7185 cmp = tree_int_cst_compare (CASE_LOW (t), val);
7187 if (cmp == 0)
7189 /* Ranges cannot be empty. */
7190 *idx = i;
7191 return true;
7193 else if (cmp > 0)
7194 high = i;
7195 else
7197 low = i + 1;
7198 if (CASE_HIGH (t) != NULL
7199 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
7201 *idx = i;
7202 return true;
7207 *idx = high;
7208 return false;
7211 /* Search the case label vector of switch STMT for the range of CASE_LABELs
7212 that is used for values between MIN and MAX. The first index is placed in
7213 MIN_IDX and the last index in MAX_IDX. If the range of CASE_LABELs is empty
7214 then MAX_IDX < MIN_IDX.
7215 Returns true if the default label is not needed. */
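/* Illustrative example: for the labels 'case 1:', 'case 2:' and 'case 3:'
   and the range [1, 3], the indices of 'case 1' and 'case 3' are returned
   and the default label is not needed because the labels cover the range
   contiguously; for the range [1, 4] the default label is still needed
   since the value 4 reaches it.  */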
7217 static bool
7218 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
7219 size_t *max_idx)
7221 size_t i, j;
7222 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
7223 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
7225 if (i == j
7226 && min_take_default
7227 && max_take_default)
7229 /* Only the default case label reached.
7230 Return an empty range. */
7231 *min_idx = 1;
7232 *max_idx = 0;
7233 return false;
7235 else
7237 bool take_default = min_take_default || max_take_default;
7238 tree low, high;
7239 size_t k;
7241 if (max_take_default)
7242 j--;
7244 /* If the case label range is continuous, we do not need
7245 the default case label. Verify that. */
7246 high = CASE_LOW (gimple_switch_label (stmt, i));
7247 if (CASE_HIGH (gimple_switch_label (stmt, i)))
7248 high = CASE_HIGH (gimple_switch_label (stmt, i));
7249 for (k = i + 1; k <= j; ++k)
7251 low = CASE_LOW (gimple_switch_label (stmt, k));
7252 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
7254 take_default = true;
7255 break;
7257 high = low;
7258 if (CASE_HIGH (gimple_switch_label (stmt, k)))
7259 high = CASE_HIGH (gimple_switch_label (stmt, k));
7262 *min_idx = i;
7263 *max_idx = j;
7264 return !take_default;
7268 /* Search the case label vector of switch STMT for the ranges of CASE_LABELs
7269 that are used in range VR. The indices are placed in MIN_IDX1, MAX_IDX1,
7270 MIN_IDX2 and MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
7271 Returns true if the default label is not needed. */
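/* Illustrative example: for a switch with the labels 'case 1:' through
   'case 9:' and the anti-range ~[3, 5] for its index, the first label
   range covers 'case 1' to 'case 2' and the second covers 'case 6' to
   'case 9'; false is returned because values outside [1, 9] still reach
   the default label.  */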
7273 static bool
7274 find_case_label_ranges (gimple stmt, value_range_t *vr, size_t *min_idx1,
7275 size_t *max_idx1, size_t *min_idx2,
7276 size_t *max_idx2)
7278 size_t i, j, k, l;
7279 unsigned int n = gimple_switch_num_labels (stmt);
7280 bool take_default;
7281 tree case_low, case_high;
7282 tree min = vr->min, max = vr->max;
7284 gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
7286 take_default = !find_case_label_range (stmt, min, max, &i, &j);
7288 /* Set second range to empty. */
7289 *min_idx2 = 1;
7290 *max_idx2 = 0;
7292 if (vr->type == VR_RANGE)
7294 *min_idx1 = i;
7295 *max_idx1 = j;
7296 return !take_default;
7299 /* Set first range to all case labels. */
7300 *min_idx1 = 1;
7301 *max_idx1 = n - 1;
7303 if (i > j)
7304 return false;
7306 /* Make sure all the values of case labels [i , j] are contained in
7307 range [MIN, MAX]. */
7308 case_low = CASE_LOW (gimple_switch_label (stmt, i));
7309 case_high = CASE_HIGH (gimple_switch_label (stmt, j));
7310 if (tree_int_cst_compare (case_low, min) < 0)
7311 i += 1;
7312 if (case_high != NULL_TREE
7313 && tree_int_cst_compare (max, case_high) < 0)
7314 j -= 1;
7316 if (i > j)
7317 return false;
7319 /* If the range spans case labels [i, j], the corresponding anti-range spans
7320 the labels [1, i - 1] and [j + 1, n - 1]. */
7321 k = j + 1;
7322 l = n - 1;
7323 if (k > l)
7325 k = 1;
7326 l = 0;
7329 j = i - 1;
7330 i = 1;
7331 if (i > j)
7333 i = k;
7334 j = l;
7335 k = 1;
7336 l = 0;
7339 *min_idx1 = i;
7340 *max_idx1 = j;
7341 *min_idx2 = k;
7342 *max_idx2 = l;
7343 return false;
7346 /* Visit switch statement STMT. If we can determine which edge
7347 will be taken out of STMT's basic block, record it in
7348 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
7349 SSA_PROP_VARYING. */
7351 static enum ssa_prop_result
7352 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
7354 tree op, val;
7355 value_range_t *vr;
7356 size_t i = 0, j = 0, k, l;
7357 bool take_default;
7359 *taken_edge_p = NULL;
7360 op = gimple_switch_index (stmt);
7361 if (TREE_CODE (op) != SSA_NAME)
7362 return SSA_PROP_VARYING;
7364 vr = get_value_range (op);
7365 if (dump_file && (dump_flags & TDF_DETAILS))
7367 fprintf (dump_file, "\nVisiting switch expression with operand ");
7368 print_generic_expr (dump_file, op, 0);
7369 fprintf (dump_file, " with known range ");
7370 dump_value_range (dump_file, vr);
7371 fprintf (dump_file, "\n");
7374 if ((vr->type != VR_RANGE
7375 && vr->type != VR_ANTI_RANGE)
7376 || symbolic_range_p (vr))
7377 return SSA_PROP_VARYING;
7379 /* Find the single edge that is taken from the switch expression. */
7380 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
7382 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
7383 label.  */
7384 if (j < i)
7386 gcc_assert (take_default);
7387 val = gimple_switch_default_label (stmt);
7389 else
7391 /* Check if labels with index i to j and maybe the default label
7392 are all reaching the same label. */
7394 val = gimple_switch_label (stmt, i);
7395 if (take_default
7396 && CASE_LABEL (gimple_switch_default_label (stmt))
7397 != CASE_LABEL (val))
7399 if (dump_file && (dump_flags & TDF_DETAILS))
7400 fprintf (dump_file, " not a single destination for this "
7401 "range\n");
7402 return SSA_PROP_VARYING;
7404 for (++i; i <= j; ++i)
7406 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
7408 if (dump_file && (dump_flags & TDF_DETAILS))
7409 fprintf (dump_file, " not a single destination for this "
7410 "range\n");
7411 return SSA_PROP_VARYING;
7414 for (; k <= l; ++k)
7416 if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
7418 if (dump_file && (dump_flags & TDF_DETAILS))
7419 fprintf (dump_file, " not a single destination for this "
7420 "range\n");
7421 return SSA_PROP_VARYING;
7426 *taken_edge_p = find_edge (gimple_bb (stmt),
7427 label_to_block (CASE_LABEL (val)));
7429 if (dump_file && (dump_flags & TDF_DETAILS))
7431 fprintf (dump_file, " will take edge to ");
7432 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
7435 return SSA_PROP_INTERESTING;
7439 /* Evaluate statement STMT. If the statement produces a useful range,
7440 return SSA_PROP_INTERESTING and record the SSA name with the
7441 interesting range into *OUTPUT_P.
7443 If STMT is a conditional branch and we can determine its truth
7444 value, the taken edge is recorded in *TAKEN_EDGE_P.
7446 If STMT produces a varying value, return SSA_PROP_VARYING. */
7448 static enum ssa_prop_result
7449 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
7451 tree def;
7452 ssa_op_iter iter;
7454 if (dump_file && (dump_flags & TDF_DETAILS))
7456 fprintf (dump_file, "\nVisiting statement:\n");
7457 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
7458 fprintf (dump_file, "\n");
7461 if (!stmt_interesting_for_vrp (stmt))
7462 gcc_assert (stmt_ends_bb_p (stmt));
7463 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7464 return vrp_visit_assignment_or_call (stmt, output_p);
7465 else if (gimple_code (stmt) == GIMPLE_COND)
7466 return vrp_visit_cond_stmt (stmt, taken_edge_p);
7467 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7468 return vrp_visit_switch_stmt (stmt, taken_edge_p);
7470 /* All other statements produce nothing of interest for VRP, so mark
7471 their outputs varying and prevent further simulation. */
7472 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
7473 set_value_range_to_varying (get_value_range (def));
7475 return SSA_PROP_VARYING;
7478 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7479 { VR1TYPE, VR1MIN, VR1MAX } and store the result
7480 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
7481 possible such range. The resulting range is not canonicalized. */
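/* Illustrative examples: the union of [1, 5] and [3, 10] is the convex
   hull [1, 10]; the union of [MIN, 2] and [8, MAX], where MIN and MAX are
   the extreme values of the type, is represented as the anti-range
   ~[3, 7]; and the union of [1, 5] with the anti-range ~[1, 5] cannot be
   represented, so the result is VR_VARYING.  */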
7483 static void
7484 union_ranges (enum value_range_type *vr0type,
7485 tree *vr0min, tree *vr0max,
7486 enum value_range_type vr1type,
7487 tree vr1min, tree vr1max)
7489 bool mineq = operand_equal_p (*vr0min, vr1min, 0);
7490 bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
7492 /* [] is vr0, () is vr1 in the following classification comments. */
7493 if (mineq && maxeq)
7495 /* [( )] */
7496 if (*vr0type == vr1type)
7497 /* Nothing to do for equal ranges. */
7499 else if ((*vr0type == VR_RANGE
7500 && vr1type == VR_ANTI_RANGE)
7501 || (*vr0type == VR_ANTI_RANGE
7502 && vr1type == VR_RANGE))
7504 /* For anti-range with range union the result is varying. */
7505 goto give_up;
7507 else
7508 gcc_unreachable ();
7510 else if (operand_less_p (*vr0max, vr1min) == 1
7511 || operand_less_p (vr1max, *vr0min) == 1)
7513 /* [ ] ( ) or ( ) [ ]
7514 If the ranges have an empty intersection, the result of the union
7515 operation is the anti-range, or, if both are anti-ranges,
7516 it covers everything. */
7517 if (*vr0type == VR_ANTI_RANGE
7518 && vr1type == VR_ANTI_RANGE)
7519 goto give_up;
7520 else if (*vr0type == VR_ANTI_RANGE
7521 && vr1type == VR_RANGE)
7523 else if (*vr0type == VR_RANGE
7524 && vr1type == VR_ANTI_RANGE)
7526 *vr0type = vr1type;
7527 *vr0min = vr1min;
7528 *vr0max = vr1max;
7530 else if (*vr0type == VR_RANGE
7531 && vr1type == VR_RANGE)
7533 /* The result is the convex hull of both ranges. */
7534 if (operand_less_p (*vr0max, vr1min) == 1)
7536 /* If the result can be an anti-range, create one. */
7537 if (TREE_CODE (*vr0max) == INTEGER_CST
7538 && TREE_CODE (vr1min) == INTEGER_CST
7539 && vrp_val_is_min (*vr0min)
7540 && vrp_val_is_max (vr1max))
7542 tree min = int_const_binop (PLUS_EXPR,
7543 *vr0max, integer_one_node);
7544 tree max = int_const_binop (MINUS_EXPR,
7545 vr1min, integer_one_node);
7546 if (!operand_less_p (max, min))
7548 *vr0type = VR_ANTI_RANGE;
7549 *vr0min = min;
7550 *vr0max = max;
7552 else
7553 *vr0max = vr1max;
7555 else
7556 *vr0max = vr1max;
7558 else
7560 /* If the result can be an anti-range, create one. */
7561 if (TREE_CODE (vr1max) == INTEGER_CST
7562 && TREE_CODE (*vr0min) == INTEGER_CST
7563 && vrp_val_is_min (vr1min)
7564 && vrp_val_is_max (*vr0max))
7566 tree min = int_const_binop (PLUS_EXPR,
7567 vr1max, integer_one_node);
7568 tree max = int_const_binop (MINUS_EXPR,
7569 *vr0min, integer_one_node);
7570 if (!operand_less_p (max, min))
7572 *vr0type = VR_ANTI_RANGE;
7573 *vr0min = min;
7574 *vr0max = max;
7576 else
7577 *vr0min = vr1min;
7579 else
7580 *vr0min = vr1min;
7583 else
7584 gcc_unreachable ();
7586 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
7587 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
7589 /* [ ( ) ] or [( ) ] or [ ( )] */
7590 if (*vr0type == VR_RANGE
7591 && vr1type == VR_RANGE)
7593 else if (*vr0type == VR_ANTI_RANGE
7594 && vr1type == VR_ANTI_RANGE)
7596 *vr0type = vr1type;
7597 *vr0min = vr1min;
7598 *vr0max = vr1max;
7600 else if (*vr0type == VR_ANTI_RANGE
7601 && vr1type == VR_RANGE)
7603 /* Arbitrarily choose the right or left gap. */
7604 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
7605 *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
7606 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
7607 *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7608 else
7609 goto give_up;
7611 else if (*vr0type == VR_RANGE
7612 && vr1type == VR_ANTI_RANGE)
7613 /* The result covers everything. */
7614 goto give_up;
7615 else
7616 gcc_unreachable ();
7618 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
7619 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
7621 /* ( [ ] ) or ([ ] ) or ( [ ]) */
7622 if (*vr0type == VR_RANGE
7623 && vr1type == VR_RANGE)
7625 *vr0type = vr1type;
7626 *vr0min = vr1min;
7627 *vr0max = vr1max;
7629 else if (*vr0type == VR_ANTI_RANGE
7630 && vr1type == VR_ANTI_RANGE)
7632 else if (*vr0type == VR_RANGE
7633 && vr1type == VR_ANTI_RANGE)
7635 *vr0type = VR_ANTI_RANGE;
7636 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
7638 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
7639 *vr0min = vr1min;
7641 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
7643 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
7644 *vr0max = vr1max;
7646 else
7647 goto give_up;
7649 else if (*vr0type == VR_ANTI_RANGE
7650 && vr1type == VR_RANGE)
7651 /* The result covers everything. */
7652 goto give_up;
7653 else
7654 gcc_unreachable ();
7656 else if ((operand_less_p (vr1min, *vr0max) == 1
7657 || operand_equal_p (vr1min, *vr0max, 0))
7658 && operand_less_p (*vr0min, vr1min) == 1)
7660 /* [ ( ] ) or [ ]( ) */
7661 if (*vr0type == VR_RANGE
7662 && vr1type == VR_RANGE)
7663 *vr0max = vr1max;
7664 else if (*vr0type == VR_ANTI_RANGE
7665 && vr1type == VR_ANTI_RANGE)
7666 *vr0min = vr1min;
7667 else if (*vr0type == VR_ANTI_RANGE
7668 && vr1type == VR_RANGE)
7670 if (TREE_CODE (vr1min) == INTEGER_CST)
7671 *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
7672 else
7673 goto give_up;
7675 else if (*vr0type == VR_RANGE
7676 && vr1type == VR_ANTI_RANGE)
7678 if (TREE_CODE (*vr0max) == INTEGER_CST)
7680 *vr0type = vr1type;
7681 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
7682 *vr0max = vr1max;
7684 else
7685 goto give_up;
7687 else
7688 gcc_unreachable ();
7690 else if ((operand_less_p (*vr0min, vr1max) == 1
7691 || operand_equal_p (*vr0min, vr1max, 0))
7692 && operand_less_p (vr1min, *vr0min) == 1)
7694 /* ( [ ) ] or ( )[ ] */
7695 if (*vr0type == VR_RANGE
7696 && vr1type == VR_RANGE)
7697 *vr0min = vr1min;
7698 else if (*vr0type == VR_ANTI_RANGE
7699 && vr1type == VR_ANTI_RANGE)
7700 *vr0max = vr1max;
7701 else if (*vr0type == VR_ANTI_RANGE
7702 && vr1type == VR_RANGE)
7704 if (TREE_CODE (vr1max) == INTEGER_CST)
7705 *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7706 else
7707 goto give_up;
7709 else if (*vr0type == VR_RANGE
7710 && vr1type == VR_ANTI_RANGE)
7712 if (TREE_CODE (*vr0min) == INTEGER_CST)
7714 *vr0type = vr1type;
7715 *vr0min = vr1min;
7716 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
7718 else
7719 goto give_up;
7721 else
7722 gcc_unreachable ();
7724 else
7725 goto give_up;
7727 return;
7729 give_up:
7730 *vr0type = VR_VARYING;
7731 *vr0min = NULL_TREE;
7732 *vr0max = NULL_TREE;
7735 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7736 { VR1TYPE, VR1MIN, VR1MAX } and store the result
7737 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
7738 possible such range. The resulting range is not canonicalized. */
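/* Illustrative examples: intersecting [1, 10] with [3, 5] yields the
   inner range [3, 5]; intersecting the disjoint ranges [1, 5] and [8, 10]
   yields VR_UNDEFINED; and intersecting [1, 10] with the anti-range
   ~[1, 5] chooses the remaining gap [6, 10].  */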
7740 static void
7741 intersect_ranges (enum value_range_type *vr0type,
7742 tree *vr0min, tree *vr0max,
7743 enum value_range_type vr1type,
7744 tree vr1min, tree vr1max)
7746 bool mineq = operand_equal_p (*vr0min, vr1min, 0);
7747 bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
7749 /* [] is vr0, () is vr1 in the following classification comments. */
7750 if (mineq && maxeq)
7752 /* [( )] */
7753 if (*vr0type == vr1type)
7754 /* Nothing to do for equal ranges. */
7756 else if ((*vr0type == VR_RANGE
7757 && vr1type == VR_ANTI_RANGE)
7758 || (*vr0type == VR_ANTI_RANGE
7759 && vr1type == VR_RANGE))
7761 /* For anti-range with range intersection the result is empty. */
7762 *vr0type = VR_UNDEFINED;
7763 *vr0min = NULL_TREE;
7764 *vr0max = NULL_TREE;
7766 else
7767 gcc_unreachable ();
7769 else if (operand_less_p (*vr0max, vr1min) == 1
7770 || operand_less_p (vr1max, *vr0min) == 1)
7772 /* [ ] ( ) or ( ) [ ]
7773 If the ranges do not intersect, the result of intersecting an
7774 anti-range with a range is the range operand, and the result of
7775 intersecting two ranges is empty. */
7776 if (*vr0type == VR_RANGE
7777 && vr1type == VR_ANTI_RANGE)
7779 else if (*vr0type == VR_ANTI_RANGE
7780 && vr1type == VR_RANGE)
7782 *vr0type = vr1type;
7783 *vr0min = vr1min;
7784 *vr0max = vr1max;
7786 else if (*vr0type == VR_RANGE
7787 && vr1type == VR_RANGE)
7789 *vr0type = VR_UNDEFINED;
7790 *vr0min = NULL_TREE;
7791 *vr0max = NULL_TREE;
7793 else if (*vr0type == VR_ANTI_RANGE
7794 && vr1type == VR_ANTI_RANGE)
7796 /* If the anti-ranges are adjacent to each other merge them. */
7797 if (TREE_CODE (*vr0max) == INTEGER_CST
7798 && TREE_CODE (vr1min) == INTEGER_CST
7799 && operand_less_p (*vr0max, vr1min) == 1
7800 && integer_onep (int_const_binop (MINUS_EXPR,
7801 vr1min, *vr0max)))
7802 *vr0max = vr1max;
7803 else if (TREE_CODE (vr1max) == INTEGER_CST
7804 && TREE_CODE (*vr0min) == INTEGER_CST
7805 && operand_less_p (vr1max, *vr0min) == 1
7806 && integer_onep (int_const_binop (MINUS_EXPR,
7807 *vr0min, vr1max)))
7808 *vr0min = vr1min;
7809 /* Else arbitrarily take VR0. */
7812 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
7813 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
7815 /* [ ( ) ] or [( ) ] or [ ( )] */
7816 if (*vr0type == VR_RANGE
7817 && vr1type == VR_RANGE)
7819 /* If both are ranges the result is the inner one. */
7820 *vr0type = vr1type;
7821 *vr0min = vr1min;
7822 *vr0max = vr1max;
7824 else if (*vr0type == VR_RANGE
7825 && vr1type == VR_ANTI_RANGE)
7827 /* Choose the right gap if the left one is empty. */
7828 if (mineq)
7830 if (TREE_CODE (vr1max) == INTEGER_CST)
7831 *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7832 else
7833 *vr0min = vr1max;
7835 /* Choose the left gap if the right one is empty. */
7836 else if (maxeq)
7838 if (TREE_CODE (vr1min) == INTEGER_CST)
7839 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
7840 integer_one_node);
7841 else
7842 *vr0max = vr1min;
7844 /* Choose the anti-range if the range is effectively varying. */
7845 else if (vrp_val_is_min (*vr0min)
7846 && vrp_val_is_max (*vr0max))
7848 *vr0type = vr1type;
7849 *vr0min = vr1min;
7850 *vr0max = vr1max;
7852 /* Else choose the range. */
7854 else if (*vr0type == VR_ANTI_RANGE
7855 && vr1type == VR_ANTI_RANGE)
7856 /* If both are anti-ranges the result is the outer one. */
7858 else if (*vr0type == VR_ANTI_RANGE
7859 && vr1type == VR_RANGE)
7861 /* The intersection is empty. */
7862 *vr0type = VR_UNDEFINED;
7863 *vr0min = NULL_TREE;
7864 *vr0max = NULL_TREE;
7866 else
7867 gcc_unreachable ();
7869 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
7870 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
7872 /* ( [ ] ) or ([ ] ) or ( [ ]) */
7873 if (*vr0type == VR_RANGE
7874 && vr1type == VR_RANGE)
7875 /* Choose the inner range. */
7877 else if (*vr0type == VR_ANTI_RANGE
7878 && vr1type == VR_RANGE)
7880 /* Choose the right gap if the left is empty. */
7881 if (mineq)
7883 *vr0type = VR_RANGE;
7884 if (TREE_CODE (*vr0max) == INTEGER_CST)
7885 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
7886 integer_one_node);
7887 else
7888 *vr0min = *vr0max;
7889 *vr0max = vr1max;
7891 /* Choose the left gap if the right is empty. */
7892 else if (maxeq)
7894 *vr0type = VR_RANGE;
7895 if (TREE_CODE (*vr0min) == INTEGER_CST)
7896 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
7897 integer_one_node);
7898 else
7899 *vr0max = *vr0min;
7900 *vr0min = vr1min;
7902 /* Choose the anti-range if the range is effectively varying. */
7903 else if (vrp_val_is_min (vr1min)
7904 && vrp_val_is_max (vr1max))
7906 /* Else choose the range. */
7907 else
7909 *vr0type = vr1type;
7910 *vr0min = vr1min;
7911 *vr0max = vr1max;
7914 else if (*vr0type == VR_ANTI_RANGE
7915 && vr1type == VR_ANTI_RANGE)
7917 /* If both are anti-ranges the result is the outer one. */
7918 *vr0type = vr1type;
7919 *vr0min = vr1min;
7920 *vr0max = vr1max;
7922 else if (vr1type == VR_ANTI_RANGE
7923 && *vr0type == VR_RANGE)
7925 /* The intersection is empty. */
7926 *vr0type = VR_UNDEFINED;
7927 *vr0min = NULL_TREE;
7928 *vr0max = NULL_TREE;
7930 else
7931 gcc_unreachable ();
7933 else if ((operand_less_p (vr1min, *vr0max) == 1
7934 || operand_equal_p (vr1min, *vr0max, 0))
7935 && operand_less_p (*vr0min, vr1min) == 1)
7937 /* [ ( ] ) or [ ]( ) */
7938 if (*vr0type == VR_ANTI_RANGE
7939 && vr1type == VR_ANTI_RANGE)
7940 *vr0max = vr1max;
7941 else if (*vr0type == VR_RANGE
7942 && vr1type == VR_RANGE)
7943 *vr0min = vr1min;
7944 else if (*vr0type == VR_RANGE
7945 && vr1type == VR_ANTI_RANGE)
7947 if (TREE_CODE (vr1min) == INTEGER_CST)
7948 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
7949 integer_one_node);
7950 else
7951 *vr0max = vr1min;
7953 else if (*vr0type == VR_ANTI_RANGE
7954 && vr1type == VR_RANGE)
7956 *vr0type = VR_RANGE;
7957 if (TREE_CODE (*vr0max) == INTEGER_CST)
7958 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
7959 integer_one_node);
7960 else
7961 *vr0min = *vr0max;
7962 *vr0max = vr1max;
7964 else
7965 gcc_unreachable ();
7967 else if ((operand_less_p (*vr0min, vr1max) == 1
7968 || operand_equal_p (*vr0min, vr1max, 0))
7969 && operand_less_p (vr1min, *vr0min) == 1)
7971 /* ( [ ) ] or ( )[ ] */
7972 if (*vr0type == VR_ANTI_RANGE
7973 && vr1type == VR_ANTI_RANGE)
7974 *vr0min = vr1min;
7975 else if (*vr0type == VR_RANGE
7976 && vr1type == VR_RANGE)
7977 *vr0max = vr1max;
7978 else if (*vr0type == VR_RANGE
7979 && vr1type == VR_ANTI_RANGE)
7981 if (TREE_CODE (vr1max) == INTEGER_CST)
7982 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
7983 integer_one_node);
7984 else
7985 *vr0min = vr1max;
7987 else if (*vr0type == VR_ANTI_RANGE
7988 && vr1type == VR_RANGE)
7990 *vr0type = VR_RANGE;
7991 if (TREE_CODE (*vr0min) == INTEGER_CST)
7992 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
7993 integer_one_node);
7994 else
7995 *vr0max = *vr0min;
7996 *vr0min = vr1min;
7998 else
7999 gcc_unreachable ();
8002 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as the
8003 result for the intersection. That is always a conservatively
8004 correct estimate. */
8006 return;
8010 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
8011 in *VR0. This may not be the smallest possible such range. */
8013 static void
8014 vrp_intersect_ranges_1 (value_range_t *vr0, value_range_t *vr1)
8016 value_range_t saved;
8018 /* If either range is VR_VARYING the other one wins. */
8019 if (vr1->type == VR_VARYING)
8020 return;
8021 if (vr0->type == VR_VARYING)
8023 copy_value_range (vr0, vr1);
8024 return;
8027 /* When either range is VR_UNDEFINED the resulting range is
8028 VR_UNDEFINED, too. */
8029 if (vr0->type == VR_UNDEFINED)
8030 return;
8031 if (vr1->type == VR_UNDEFINED)
8033 set_value_range_to_undefined (vr0);
8034 return;
8037 /* Save the original vr0 so we can return it as conservative intersection
8038 result when our worker turns things to varying. */
8039 saved = *vr0;
8040 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
8041 vr1->type, vr1->min, vr1->max);
8042 /* Make sure to canonicalize the result though as the inversion of a
8043 VR_RANGE can still be a VR_RANGE. */
8044 set_and_canonicalize_value_range (vr0, vr0->type,
8045 vr0->min, vr0->max, vr0->equiv);
8046 /* If that failed, use the saved original VR0. */
8047 if (vr0->type == VR_VARYING)
8049 *vr0 = saved;
8050 return;
8052 /* If the result is VR_UNDEFINED there is no need to mess with
8053 the equivalencies. */
8054 if (vr0->type == VR_UNDEFINED)
8055 return;
8057 /* The resulting set of equivalences for range intersection is the union of
8058 the two sets. */
8059 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8060 bitmap_ior_into (vr0->equiv, vr1->equiv);
8061 else if (vr1->equiv && !vr0->equiv)
8062 bitmap_copy (vr0->equiv, vr1->equiv);
8065 static void
8066 vrp_intersect_ranges (value_range_t *vr0, value_range_t *vr1)
8068 if (dump_file && (dump_flags & TDF_DETAILS))
8070 fprintf (dump_file, "Intersecting\n ");
8071 dump_value_range (dump_file, vr0);
8072 fprintf (dump_file, "\nand\n ");
8073 dump_value_range (dump_file, vr1);
8074 fprintf (dump_file, "\n");
8076 vrp_intersect_ranges_1 (vr0, vr1);
8077 if (dump_file && (dump_flags & TDF_DETAILS))
8079 fprintf (dump_file, "to\n ");
8080 dump_value_range (dump_file, vr0);
8081 fprintf (dump_file, "\n");
8085 /* Meet operation for value ranges. Given two value ranges VR0 and
8086 VR1, store in VR0 a range that contains both VR0 and VR1. This
8087 may not be the smallest possible such range. */
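/* For example, meeting the overlapping ranges [0, 5] and [3, 10]
yields [0, 10], the smallest range containing both. */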
8089 static void
8090 vrp_meet_1 (value_range_t *vr0, value_range_t *vr1)
8092 value_range_t saved;
8094 if (vr0->type == VR_UNDEFINED)
8096 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
8097 return;
8100 if (vr1->type == VR_UNDEFINED)
8102 /* VR0 already has the resulting range. */
8103 return;
8106 if (vr0->type == VR_VARYING)
8108 /* Nothing to do. VR0 already has the resulting range. */
8109 return;
8112 if (vr1->type == VR_VARYING)
8114 set_value_range_to_varying (vr0);
8115 return;
8118 saved = *vr0;
8119 union_ranges (&vr0->type, &vr0->min, &vr0->max,
8120 vr1->type, vr1->min, vr1->max);
8121 if (vr0->type == VR_VARYING)
8123 /* Failed to find an efficient meet. Before giving up and setting
8124 the result to VARYING, see if we can at least derive a useful
8125 anti-range. FIXME, all this nonsense about distinguishing
8126 anti-ranges from ranges is necessary because of the odd
8127 semantics of range_includes_zero_p and friends. */
8128 if (((saved.type == VR_RANGE
8129 && range_includes_zero_p (saved.min, saved.max) == 0)
8130 || (saved.type == VR_ANTI_RANGE
8131 && range_includes_zero_p (saved.min, saved.max) == 1))
8132 && ((vr1->type == VR_RANGE
8133 && range_includes_zero_p (vr1->min, vr1->max) == 0)
8134 || (vr1->type == VR_ANTI_RANGE
8135 && range_includes_zero_p (vr1->min, vr1->max) == 1)))
8137 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
8139 /* Since this meet operation did not result from the meeting of
8140 two equivalent names, VR0 cannot have any equivalences. */
8141 if (vr0->equiv)
8142 bitmap_clear (vr0->equiv);
8143 return;
8146 set_value_range_to_varying (vr0);
8147 return;
8149 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
8150 vr0->equiv);
8151 if (vr0->type == VR_VARYING)
8152 return;
8154 /* The resulting set of equivalences is always the intersection of
8155 the two sets. */
8156 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8157 bitmap_and_into (vr0->equiv, vr1->equiv);
8158 else if (vr0->equiv && !vr1->equiv)
8159 bitmap_clear (vr0->equiv);
8162 static void
8163 vrp_meet (value_range_t *vr0, value_range_t *vr1)
8165 if (dump_file && (dump_flags & TDF_DETAILS))
8167 fprintf (dump_file, "Meeting\n ");
8168 dump_value_range (dump_file, vr0);
8169 fprintf (dump_file, "\nand\n ");
8170 dump_value_range (dump_file, vr1);
8171 fprintf (dump_file, "\n");
8173 vrp_meet_1 (vr0, vr1);
8174 if (dump_file && (dump_flags & TDF_DETAILS))
8176 fprintf (dump_file, "to\n ");
8177 dump_value_range (dump_file, vr0);
8178 fprintf (dump_file, "\n");
8183 /* Visit all arguments for PHI node PHI that flow through executable
8184 edges. If a valid value range can be derived from all the incoming
8185 value ranges, set a new range for the LHS of PHI. */
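/* For example, if the PHI arguments on the executable edges have the
ranges [0, 5] and [3, 10], their meet [0, 10] is the candidate range
for the PHI result, possibly widened further below to guarantee
termination of the propagation. */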
8187 static enum ssa_prop_result
8188 vrp_visit_phi_node (gimple phi)
8190 size_t i;
8191 tree lhs = PHI_RESULT (phi);
8192 value_range_t *lhs_vr = get_value_range (lhs);
8193 value_range_t vr_result = VR_INITIALIZER;
8194 bool first = true;
8195 int edges, old_edges;
8196 struct loop *l;
8198 if (dump_file && (dump_flags & TDF_DETAILS))
8200 fprintf (dump_file, "\nVisiting PHI node: ");
8201 print_gimple_stmt (dump_file, phi, 0, dump_flags);
8204 edges = 0;
8205 for (i = 0; i < gimple_phi_num_args (phi); i++)
8207 edge e = gimple_phi_arg_edge (phi, i);
8209 if (dump_file && (dump_flags & TDF_DETAILS))
8211 fprintf (dump_file,
8212 "\n Argument #%d (%d -> %d %sexecutable)\n",
8213 (int) i, e->src->index, e->dest->index,
8214 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
8217 if (e->flags & EDGE_EXECUTABLE)
8219 tree arg = PHI_ARG_DEF (phi, i);
8220 value_range_t vr_arg;
8222 ++edges;
8224 if (TREE_CODE (arg) == SSA_NAME)
8226 vr_arg = *(get_value_range (arg));
8227 /* Do not allow equivalences or symbolic ranges to leak in from
8228 backedges. That creates invalid equivalencies.
8229 See PR53465 and PR54767. */
8230 if (e->flags & EDGE_DFS_BACK
8231 && (vr_arg.type == VR_RANGE
8232 || vr_arg.type == VR_ANTI_RANGE))
8234 vr_arg.equiv = NULL;
8235 if (symbolic_range_p (&vr_arg))
8237 vr_arg.type = VR_VARYING;
8238 vr_arg.min = NULL_TREE;
8239 vr_arg.max = NULL_TREE;
8243 else
8245 if (is_overflow_infinity (arg))
8247 arg = copy_node (arg);
8248 TREE_OVERFLOW (arg) = 0;
8251 vr_arg.type = VR_RANGE;
8252 vr_arg.min = arg;
8253 vr_arg.max = arg;
8254 vr_arg.equiv = NULL;
8257 if (dump_file && (dump_flags & TDF_DETAILS))
8259 fprintf (dump_file, "\t");
8260 print_generic_expr (dump_file, arg, dump_flags);
8261 fprintf (dump_file, "\n\tValue: ");
8262 dump_value_range (dump_file, &vr_arg);
8263 fprintf (dump_file, "\n");
8266 if (first)
8267 copy_value_range (&vr_result, &vr_arg);
8268 else
8269 vrp_meet (&vr_result, &vr_arg);
8270 first = false;
8272 if (vr_result.type == VR_VARYING)
8273 break;
8277 if (vr_result.type == VR_VARYING)
8278 goto varying;
8279 else if (vr_result.type == VR_UNDEFINED)
8280 goto update_range;
8282 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
8283 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
8285 /* To prevent infinite iterations in the algorithm, derive ranges
8286 when the new value is slightly bigger or smaller than the
8287 previous one. We don't do this if we have seen a new executable
8288 edge; this helps us avoid an overflow infinity for conditionals
8289 which are not in a loop. If the old value-range was VR_UNDEFINED
8290 use the updated range and iterate one more time. */
8291 if (edges > 0
8292 && gimple_phi_num_args (phi) > 1
8293 && edges == old_edges
8294 && lhs_vr->type != VR_UNDEFINED)
8296 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
8297 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
8299 /* For non VR_RANGE or for pointers fall back to varying if
8300 the range changed. */
8301 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
8302 || POINTER_TYPE_P (TREE_TYPE (lhs)))
8303 && (cmp_min != 0 || cmp_max != 0))
8304 goto varying;
8306 /* If the new minimum is smaller or larger than the previous
8307 one, go all the way to -INF. In the first case, to avoid
8308 iterating millions of times to reach -INF, and in the
8309 other case to avoid infinite bouncing between different
8310 minimums. */
8311 if (cmp_min > 0 || cmp_min < 0)
8313 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
8314 || !vrp_var_may_overflow (lhs, phi))
8315 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
8316 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
8317 vr_result.min =
8318 negative_overflow_infinity (TREE_TYPE (vr_result.min));
8321 /* Similarly, if the new maximum is smaller or larger than
8322 the previous one, go all the way to +INF. */
8323 if (cmp_max < 0 || cmp_max > 0)
8325 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
8326 || !vrp_var_may_overflow (lhs, phi))
8327 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
8328 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
8329 vr_result.max =
8330 positive_overflow_infinity (TREE_TYPE (vr_result.max));
8333 /* If we dropped either bound to +-INF then if this is a loop
8334 PHI node SCEV may know more about its value-range. */
8335 if ((cmp_min > 0 || cmp_min < 0
8336 || cmp_max < 0 || cmp_max > 0)
8337 && current_loops
8338 && (l = loop_containing_stmt (phi))
8339 && l->header == gimple_bb (phi))
8340 adjust_range_with_scev (&vr_result, l, phi, lhs);
8342 /* If we will end up with a (-INF, +INF) range, set it to
8343 VARYING. Same if the previous max value was invalid for
8344 the type and we end up with vr_result.min > vr_result.max. */
8345 if ((vrp_val_is_max (vr_result.max)
8346 && vrp_val_is_min (vr_result.min))
8347 || compare_values (vr_result.min,
8348 vr_result.max) > 0)
8349 goto varying;
8352 /* If the new range is different from the previous one, keep
8353 iterating. */
8354 update_range:
8355 if (update_value_range (lhs, &vr_result))
8357 if (dump_file && (dump_flags & TDF_DETAILS))
8359 fprintf (dump_file, "Found new range for ");
8360 print_generic_expr (dump_file, lhs, 0);
8361 fprintf (dump_file, ": ");
8362 dump_value_range (dump_file, &vr_result);
8363 fprintf (dump_file, "\n\n");
8366 return SSA_PROP_INTERESTING;
8369 /* Nothing changed, don't add outgoing edges. */
8370 return SSA_PROP_NOT_INTERESTING;
8372 /* No match found. Set the LHS to VARYING. */
8373 varying:
8374 set_value_range_to_varying (lhs_vr);
8375 return SSA_PROP_VARYING;
8378 /* Simplify boolean operations if the source is known
8379 to be already a boolean. */
8380 static bool
8381 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
8383 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8384 tree lhs, op0, op1;
8385 bool need_conversion;
8387 /* We handle only !=/== case here. */
8388 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
8390 op0 = gimple_assign_rhs1 (stmt);
8391 if (!op_with_boolean_value_range_p (op0))
8392 return false;
8394 op1 = gimple_assign_rhs2 (stmt);
8395 if (!op_with_boolean_value_range_p (op1))
8396 return false;
8398 /* Reduce the number of cases to handle to NE_EXPR. As there is no
8399 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
8400 if (rhs_code == EQ_EXPR)
8402 if (TREE_CODE (op1) == INTEGER_CST)
8403 op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node);
8404 else
8405 return false;
8408 lhs = gimple_assign_lhs (stmt);
8409 need_conversion
8410 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
8412 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
8413 if (need_conversion
8414 && !TYPE_UNSIGNED (TREE_TYPE (op0))
8415 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
8416 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
8417 return false;
8419 /* For A != 0 we can substitute A itself. */
8420 if (integer_zerop (op1))
8421 gimple_assign_set_rhs_with_ops (gsi,
8422 need_conversion
8423 ? NOP_EXPR : TREE_CODE (op0),
8424 op0, NULL_TREE);
8425 /* For A != B we substitute A ^ B. Either with conversion. */
8426 else if (need_conversion)
8428 tree tem = make_ssa_name (TREE_TYPE (op0), NULL);
8429 gimple newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1);
8430 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
8431 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
8433 /* Or without. */
8434 else
8435 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
8436 update_stmt (gsi_stmt (*gsi));
8438 return true;
8441 /* Simplify a division or modulo operator to a right shift or
8442 bitwise and if the first operand is unsigned or is greater
8443 than zero and the second operand is an exact power of two. */
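/* For example, for unsigned x the division x / 8 becomes x >> 3 and
the modulo x % 8 becomes x & 7. */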
8445 static bool
8446 simplify_div_or_mod_using_ranges (gimple stmt)
8448 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8449 tree val = NULL;
8450 tree op0 = gimple_assign_rhs1 (stmt);
8451 tree op1 = gimple_assign_rhs2 (stmt);
8452 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
8454 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
8456 val = integer_one_node;
8458 else
8460 bool sop = false;
8462 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
8464 if (val
8465 && sop
8466 && integer_onep (val)
8467 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
8469 location_t location;
8471 if (!gimple_has_location (stmt))
8472 location = input_location;
8473 else
8474 location = gimple_location (stmt);
8475 warning_at (location, OPT_Wstrict_overflow,
8476 "assuming signed overflow does not occur when "
8477 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
8481 if (val && integer_onep (val))
8483 tree t;
8485 if (rhs_code == TRUNC_DIV_EXPR)
8487 t = build_int_cst (integer_type_node, tree_log2 (op1));
8488 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
8489 gimple_assign_set_rhs1 (stmt, op0);
8490 gimple_assign_set_rhs2 (stmt, t);
8492 else
8494 t = build_int_cst (TREE_TYPE (op1), 1);
8495 t = int_const_binop (MINUS_EXPR, op1, t);
8496 t = fold_convert (TREE_TYPE (op0), t);
8498 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
8499 gimple_assign_set_rhs1 (stmt, op0);
8500 gimple_assign_set_rhs2 (stmt, t);
8503 update_stmt (stmt);
8504 return true;
8507 return false;
8510 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
8511 ABS_EXPR. If the operand is <= 0, then simplify the
8512 ABS_EXPR into a NEGATE_EXPR. */
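/* For example, if x is known to be in [1, 10] then ABS (x) simplifies
to x, and if x is known to be in [-10, -1] it simplifies to -x. */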
8514 static bool
8515 simplify_abs_using_ranges (gimple stmt)
8517 tree val = NULL;
8518 tree op = gimple_assign_rhs1 (stmt);
8519 tree type = TREE_TYPE (op);
8520 value_range_t *vr = get_value_range (op);
8522 if (TYPE_UNSIGNED (type))
8524 val = integer_zero_node;
8526 else if (vr)
8528 bool sop = false;
8530 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
8531 if (!val)
8533 sop = false;
8534 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
8535 &sop);
8537 if (val)
8539 if (integer_zerop (val))
8540 val = integer_one_node;
8541 else if (integer_onep (val))
8542 val = integer_zero_node;
8546 if (val
8547 && (integer_onep (val) || integer_zerop (val)))
8549 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
8551 location_t location;
8553 if (!gimple_has_location (stmt))
8554 location = input_location;
8555 else
8556 location = gimple_location (stmt);
8557 warning_at (location, OPT_Wstrict_overflow,
8558 "assuming signed overflow does not occur when "
8559 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
8562 gimple_assign_set_rhs1 (stmt, op);
8563 if (integer_onep (val))
8564 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
8565 else
8566 gimple_assign_set_rhs_code (stmt, SSA_NAME);
8567 update_stmt (stmt);
8568 return true;
8572 return false;
8575 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
8576 If all the bits that are being cleared by & are already
8577 known to be zero from VR, or all the bits that are being
8578 set by | are already known to be one from VR, the bit
8579 operation is redundant. */
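/* For example, if x is known to be in [0, 3], only its two low bits
may be nonzero, so x & 3 clears only bits that are already zero and
simplifies to x. */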
8581 static bool
8582 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
8584 tree op0 = gimple_assign_rhs1 (stmt);
8585 tree op1 = gimple_assign_rhs2 (stmt);
8586 tree op = NULL_TREE;
8587 value_range_t vr0 = VR_INITIALIZER;
8588 value_range_t vr1 = VR_INITIALIZER;
8589 double_int may_be_nonzero0, may_be_nonzero1;
8590 double_int must_be_nonzero0, must_be_nonzero1;
8591 double_int mask;
8593 if (TREE_CODE (op0) == SSA_NAME)
8594 vr0 = *(get_value_range (op0));
8595 else if (is_gimple_min_invariant (op0))
8596 set_value_range_to_value (&vr0, op0, NULL);
8597 else
8598 return false;
8600 if (TREE_CODE (op1) == SSA_NAME)
8601 vr1 = *(get_value_range (op1));
8602 else if (is_gimple_min_invariant (op1))
8603 set_value_range_to_value (&vr1, op1, NULL);
8604 else
8605 return false;
8607 if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
8608 return false;
8609 if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
8610 return false;
8612 switch (gimple_assign_rhs_code (stmt))
8614 case BIT_AND_EXPR:
8615 mask = may_be_nonzero0.and_not (must_be_nonzero1);
8616 if (mask.is_zero ())
8618 op = op0;
8619 break;
8621 mask = may_be_nonzero1.and_not (must_be_nonzero0);
8622 if (mask.is_zero ())
8624 op = op1;
8625 break;
8627 break;
8628 case BIT_IOR_EXPR:
8629 mask = may_be_nonzero0.and_not (must_be_nonzero1);
8630 if (mask.is_zero ())
8632 op = op1;
8633 break;
8635 mask = may_be_nonzero1.and_not (must_be_nonzero0);
8636 if (mask.is_zero ())
8638 op = op0;
8639 break;
8641 break;
8642 default:
8643 gcc_unreachable ();
8646 if (op == NULL_TREE)
8647 return false;
8649 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
8650 update_stmt (gsi_stmt (*gsi));
8651 return true;
8654 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
8655 a known value range VR.
8657 If there is one and only one value which will satisfy the
8658 conditional, then return that value. Else return NULL. */
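/* For example, if OP0 has the range [0, 5] and the condition is
OP0 > 4, the only satisfying value is 5, which lets the caller
rewrite the test as OP0 == 5. */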
8660 static tree
8661 test_for_singularity (enum tree_code cond_code, tree op0,
8662 tree op1, value_range_t *vr)
8664 tree min = NULL;
8665 tree max = NULL;
8667 /* Extract minimum/maximum values which satisfy the
8668 conditional as it was written. */
8669 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
8671 /* This should not be negative infinity; there is no overflow
8672 here. */
8673 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
8675 max = op1;
8676 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
8678 tree one = build_int_cst (TREE_TYPE (op0), 1);
8679 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
8680 if (EXPR_P (max))
8681 TREE_NO_WARNING (max) = 1;
8684 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
8686 /* This should not be positive infinity; there is no overflow
8687 here. */
8688 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
8690 min = op1;
8691 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
8693 tree one = build_int_cst (TREE_TYPE (op0), 1);
8694 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
8695 if (EXPR_P (min))
8696 TREE_NO_WARNING (min) = 1;
8700 /* Now refine the minimum and maximum values using any
8701 value range information we have for op0. */
8702 if (min && max)
8704 if (compare_values (vr->min, min) == 1)
8705 min = vr->min;
8706 if (compare_values (vr->max, max) == -1)
8707 max = vr->max;
8709 /* If the new min/max values have converged to a single value,
8710 then there is only one value which can satisfy the condition,
8711 return that value. */
8712 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
8713 return min;
8715 return NULL;
8718 /* Return whether the value range *VR fits in an integer type specified
8719 by PRECISION and UNSIGNED_P. */
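/* For example, the range [0, 200] fits an unsigned 8-bit type but not
a signed 8-bit type, while [-5, 5] fits a signed 8-bit type but not an
unsigned one. */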
8721 static bool
8722 range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
8724 tree src_type;
8725 unsigned src_precision;
8726 double_int tem;
8728 /* We can only handle integral and pointer types. */
8729 src_type = TREE_TYPE (vr->min);
8730 if (!INTEGRAL_TYPE_P (src_type)
8731 && !POINTER_TYPE_P (src_type))
8732 return false;
8734 /* An extension is fine unless VR is signed and unsigned_p,
8735 and so is an identity transform. */
8736 src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
8737 if ((src_precision < precision
8738 && !(unsigned_p && !TYPE_UNSIGNED (src_type)))
8739 || (src_precision == precision
8740 && TYPE_UNSIGNED (src_type) == unsigned_p))
8741 return true;
8743 /* Now we can only handle ranges with constant bounds. */
8744 if (vr->type != VR_RANGE
8745 || TREE_CODE (vr->min) != INTEGER_CST
8746 || TREE_CODE (vr->max) != INTEGER_CST)
8747 return false;
8749 /* For sign changes, the MSB of the double_int has to be clear.
8750 An unsigned value with its MSB set cannot be represented by
8751 a signed double_int, while a negative value cannot be represented
8752 by an unsigned double_int. */
8753 if (TYPE_UNSIGNED (src_type) != unsigned_p
8754 && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0)
8755 return false;
8757 /* Then we can perform the conversion on both ends and compare
8758 the result for equality. */
8759 tem = tree_to_double_int (vr->min).ext (precision, unsigned_p);
8760 if (tree_to_double_int (vr->min) != tem)
8761 return false;
8762 tem = tree_to_double_int (vr->max).ext (precision, unsigned_p);
8763 if (tree_to_double_int (vr->max) != tem)
8764 return false;
8766 return true;
8769 /* Simplify a conditional using a relational operator to an equality
8770 test if the range information indicates only one value can satisfy
8771 the original conditional. */
8773 static bool
8774 simplify_cond_using_ranges (gimple stmt)
8776 tree op0 = gimple_cond_lhs (stmt);
8777 tree op1 = gimple_cond_rhs (stmt);
8778 enum tree_code cond_code = gimple_cond_code (stmt);
8780 if (cond_code != NE_EXPR
8781 && cond_code != EQ_EXPR
8782 && TREE_CODE (op0) == SSA_NAME
8783 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
8784 && is_gimple_min_invariant (op1))
8786 value_range_t *vr = get_value_range (op0);
8788 /* If we have range information for OP0, then we might be
8789 able to simplify this conditional. */
8790 if (vr->type == VR_RANGE)
8792 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
8794 if (new_tree)
8796 if (dump_file)
8798 fprintf (dump_file, "Simplified relational ");
8799 print_gimple_stmt (dump_file, stmt, 0, 0);
8800 fprintf (dump_file, " into ");
8803 gimple_cond_set_code (stmt, EQ_EXPR);
8804 gimple_cond_set_lhs (stmt, op0);
8805 gimple_cond_set_rhs (stmt, new_tree);
8807 update_stmt (stmt);
8809 if (dump_file)
8811 print_gimple_stmt (dump_file, stmt, 0, 0);
8812 fprintf (dump_file, "\n");
8815 return true;
8818 /* Try again after inverting the condition. We only deal
8819 with integral types here, so no need to worry about
8820 issues with inverting FP comparisons. */
8821 cond_code = invert_tree_comparison (cond_code, false);
8822 new_tree = test_for_singularity (cond_code, op0, op1, vr);
8824 if (new_tree)
8826 if (dump_file)
8828 fprintf (dump_file, "Simplified relational ");
8829 print_gimple_stmt (dump_file, stmt, 0, 0);
8830 fprintf (dump_file, " into ");
8833 gimple_cond_set_code (stmt, NE_EXPR);
8834 gimple_cond_set_lhs (stmt, op0);
8835 gimple_cond_set_rhs (stmt, new_tree);
8837 update_stmt (stmt);
8839 if (dump_file)
8841 print_gimple_stmt (dump_file, stmt, 0, 0);
8842 fprintf (dump_file, "\n");
8845 return true;
8850 /* If we have a comparison of an SSA_NAME (OP0) against a constant,
8851 see if OP0 was set by a type conversion where the source of
8852 the conversion is another SSA_NAME with a range that fits
8853 into the range of OP0's type.
8855 If so, the conversion is redundant as the earlier SSA_NAME can be
8856 used for the comparison directly if we just massage the constant in the
8857 comparison. */
8858 if (TREE_CODE (op0) == SSA_NAME
8859 && TREE_CODE (op1) == INTEGER_CST)
8861 gimple def_stmt = SSA_NAME_DEF_STMT (op0);
8862 tree innerop;
8864 if (!is_gimple_assign (def_stmt)
8865 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
8866 return false;
8868 innerop = gimple_assign_rhs1 (def_stmt);
8870 if (TREE_CODE (innerop) == SSA_NAME
8871 && !POINTER_TYPE_P (TREE_TYPE (innerop)))
8873 value_range_t *vr = get_value_range (innerop);
8875 if (range_int_cst_p (vr)
8876 && range_fits_type_p (vr,
8877 TYPE_PRECISION (TREE_TYPE (op0)),
8878 TYPE_UNSIGNED (TREE_TYPE (op0)))
8879 && int_fits_type_p (op1, TREE_TYPE (innerop))
8880 /* The range must not have overflowed, or if it did overflow
8881 we must not be wrapping/trapping overflow and optimizing
8882 with strict overflow semantics. */
8883 && ((!is_negative_overflow_infinity (vr->min)
8884 && !is_positive_overflow_infinity (vr->max))
8885 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
8887 /* If the range overflowed and the user has asked for warnings
8888 when strict overflow semantics were used to optimize code,
8889 issue an appropriate warning. */
8890 if ((is_negative_overflow_infinity (vr->min)
8891 || is_positive_overflow_infinity (vr->max))
8892 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
8894 location_t location;
8896 if (!gimple_has_location (stmt))
8897 location = input_location;
8898 else
8899 location = gimple_location (stmt);
8900 warning_at (location, OPT_Wstrict_overflow,
8901 "assuming signed overflow does not occur when "
8902 "simplifying conditional");
8905 tree newconst = fold_convert (TREE_TYPE (innerop), op1);
8906 gimple_cond_set_lhs (stmt, innerop);
8907 gimple_cond_set_rhs (stmt, newconst);
8908 return true;
8913 return false;
8916 /* Simplify a switch statement using the value range of the switch
8917 argument. */
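/* For example, if the switch index is known to be in [2, 4], any case
label outside that range is unreachable; the corresponding edges are
queued for removal and the case label vector is rebuilt. */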
8919 static bool
8920 simplify_switch_using_ranges (gimple stmt)
8922 tree op = gimple_switch_index (stmt);
8923 value_range_t *vr;
8924 bool take_default;
8925 edge e;
8926 edge_iterator ei;
8927 size_t i = 0, j = 0, n, n2;
8928 tree vec2;
8929 switch_update su;
8930 size_t k = 1, l = 0;
8932 if (TREE_CODE (op) == SSA_NAME)
8934 vr = get_value_range (op);
8936 /* We can only handle integer ranges. */
8937 if ((vr->type != VR_RANGE
8938 && vr->type != VR_ANTI_RANGE)
8939 || symbolic_range_p (vr))
8940 return false;
8942 /* Find case label for min/max of the value range. */
8943 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
8945 else if (TREE_CODE (op) == INTEGER_CST)
8947 take_default = !find_case_label_index (stmt, 1, op, &i);
8948 if (take_default)
8950 i = 1;
8951 j = 0;
8953 else
8955 j = i;
8958 else
8959 return false;
8961 n = gimple_switch_num_labels (stmt);
8963 /* Bail out if all edges would still be taken. */
8964 if (i == 1
8965 && j == n - 1
8966 && take_default)
8967 return false;
8969 /* Build a new vector of taken case labels. */
8970 vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
8971 n2 = 0;
8973 /* Add the default edge, if necessary. */
8974 if (take_default)
8975 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
8977 for (; i <= j; ++i, ++n2)
8978 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
8980 for (; k <= l; ++k, ++n2)
8981 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
8983 /* Mark needed edges. */
8984 for (i = 0; i < n2; ++i)
8986 e = find_edge (gimple_bb (stmt),
8987 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
8988 e->aux = (void *)-1;
8991 /* Queue not needed edges for later removal. */
8992 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
8994 if (e->aux == (void *)-1)
8996 e->aux = NULL;
8997 continue;
9000 if (dump_file && (dump_flags & TDF_DETAILS))
9002 fprintf (dump_file, "removing unreachable case label\n");
9004 to_remove_edges.safe_push (e);
9005 e->flags &= ~EDGE_EXECUTABLE;
9008 /* And queue an update for the stmt. */
9009 su.stmt = stmt;
9010 su.vec = vec2;
9011 to_update_switch_stmts.safe_push (su);
9012 return false;
9015 /* Simplify an integral conversion from an SSA name in STMT. */
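/* For example, given tem_2 = (short int) c_1; res_3 = (int) tem_2;
where the value range of c_1 is known to fit in short int, removing
the middle conversion cannot change the final result, so the statement
is rewritten to convert c_1 directly. */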
9017 static bool
9018 simplify_conversion_using_ranges (gimple stmt)
9020 tree innerop, middleop, finaltype;
9021 gimple def_stmt;
9022 value_range_t *innervr;
9023 bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p;
9024 unsigned inner_prec, middle_prec, final_prec;
9025 double_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
9027 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
9028 if (!INTEGRAL_TYPE_P (finaltype))
9029 return false;
9030 middleop = gimple_assign_rhs1 (stmt);
9031 def_stmt = SSA_NAME_DEF_STMT (middleop);
9032 if (!is_gimple_assign (def_stmt)
9033 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
9034 return false;
9035 innerop = gimple_assign_rhs1 (def_stmt);
9036 if (TREE_CODE (innerop) != SSA_NAME
9037 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
9038 return false;
9040 /* Get the value-range of the inner operand. */
9041 innervr = get_value_range (innerop);
9042 if (innervr->type != VR_RANGE
9043 || TREE_CODE (innervr->min) != INTEGER_CST
9044 || TREE_CODE (innervr->max) != INTEGER_CST)
9045 return false;
9047 /* Simulate the conversion chain to check if the result is equal if
9048 the middle conversion is removed. */
9049 innermin = tree_to_double_int (innervr->min);
9050 innermax = tree_to_double_int (innervr->max);
9052 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
9053 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
9054 final_prec = TYPE_PRECISION (finaltype);
9056 /* If the first conversion is not injective, the second must not
9057 be widening. */
9058 if ((innermax - innermin).ugt (double_int::mask (middle_prec))
9059 && middle_prec < final_prec)
9060 return false;
9061 /* We also want a value in the middle of the range so that we can
9062 track the effect that narrowing conversions with a sign change have. */
9063 inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
9064 if (inner_unsigned_p)
9065 innermed = double_int::mask (inner_prec).lrshift (1, inner_prec);
9066 else
9067 innermed = double_int_zero;
9068 if (innermin.cmp (innermed, inner_unsigned_p) >= 0
9069 || innermed.cmp (innermax, inner_unsigned_p) >= 0)
9070 innermed = innermin;
9072 middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
9073 middlemin = innermin.ext (middle_prec, middle_unsigned_p);
9074 middlemed = innermed.ext (middle_prec, middle_unsigned_p);
9075 middlemax = innermax.ext (middle_prec, middle_unsigned_p);
9077 /* Require that the final conversion applied to both the original
9078 and the intermediate range produces the same result. */
9079 final_unsigned_p = TYPE_UNSIGNED (finaltype);
9080 if (middlemin.ext (final_prec, final_unsigned_p)
9081 != innermin.ext (final_prec, final_unsigned_p)
9082 || middlemed.ext (final_prec, final_unsigned_p)
9083 != innermed.ext (final_prec, final_unsigned_p)
9084 || middlemax.ext (final_prec, final_unsigned_p)
9085 != innermax.ext (final_prec, final_unsigned_p))
9086 return false;
9088 gimple_assign_set_rhs1 (stmt, innerop);
9089 update_stmt (stmt);
9090 return true;
9093 /* Simplify a conversion from integral SSA name to float in STMT. */
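/* For example, an unsigned int value whose range is known to be
[0, 1000] also fits the corresponding signed type; in that case the
pass can insert a sign-change to the signed type and use a
signed-to-float conversion instead. */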
9095 static bool
9096 simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
9098 tree rhs1 = gimple_assign_rhs1 (stmt);
9099 value_range_t *vr = get_value_range (rhs1);
9100 enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
9101 enum machine_mode mode;
9102 tree tem;
9103 gimple conv;
9105 /* We can only handle constant ranges. */
9106 if (vr->type != VR_RANGE
9107 || TREE_CODE (vr->min) != INTEGER_CST
9108 || TREE_CODE (vr->max) != INTEGER_CST)
9109 return false;
9111 /* First check if we can use a signed type in place of an unsigned. */
9112 if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
9113 && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
9114 != CODE_FOR_nothing)
9115 && range_fits_type_p (vr, GET_MODE_PRECISION
9116 (TYPE_MODE (TREE_TYPE (rhs1))), 0))
9117 mode = TYPE_MODE (TREE_TYPE (rhs1));
9118 /* If we can do the conversion in the current input mode do nothing. */
9119 else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
9120 TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
9121 return false;
9122 /* Otherwise search for a mode we can use, starting from the narrowest
9123 integer mode available. */
9124 else
9126 mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
9129 /* If we cannot do a signed conversion to float from mode
9130 or if the value-range does not fit in the signed type
9131 try with a wider mode. */
9132 if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
9133 && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0))
9134 break;
9136 mode = GET_MODE_WIDER_MODE (mode);
9137 /* But do not widen the input. Instead leave that to the
9138 optabs expansion code. */
9139 if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
9140 return false;
9142 while (mode != VOIDmode);
9143 if (mode == VOIDmode)
9144 return false;
9147 /* It works, insert a truncation or sign-change before the
9148 float conversion. */
9149 tem = make_ssa_name (build_nonstandard_integer_type
9150 (GET_MODE_PRECISION (mode), 0), NULL);
9151 conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
9152 gsi_insert_before (gsi, conv, GSI_SAME_STMT);
9153 gimple_assign_set_rhs1 (stmt, tem);
9154 update_stmt (stmt);
9156 return true;
9159 /* Simplify STMT using ranges if possible. */
9161 static bool
9162 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
9164 gimple stmt = gsi_stmt (*gsi);
9165 if (is_gimple_assign (stmt))
9167 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9168 tree rhs1 = gimple_assign_rhs1 (stmt);
9170 switch (rhs_code)
9172 case EQ_EXPR:
9173 case NE_EXPR:
9174 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
9175 if the RHS is zero or one, and the LHS is known to be a boolean
9176 value. */
9177 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9178 return simplify_truth_ops_using_ranges (gsi, stmt);
9179 break;
9181 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
9182 and BIT_AND_EXPR respectively if the first operand is greater
9183 than zero and the second operand is an exact power of two. */
9184 case TRUNC_DIV_EXPR:
9185 case TRUNC_MOD_EXPR:
9186 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
9187 && integer_pow2p (gimple_assign_rhs2 (stmt)))
9188 return simplify_div_or_mod_using_ranges (stmt);
9189 break;
9191 /* Transform ABS (X) into X or -X as appropriate. */
9192 case ABS_EXPR:
9193 if (TREE_CODE (rhs1) == SSA_NAME
9194 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9195 return simplify_abs_using_ranges (stmt);
9196 break;
9198 case BIT_AND_EXPR:
9199 case BIT_IOR_EXPR:
9200 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
9201 if all the bits being cleared are already cleared or
9202 all the bits being set are already set. */
9203 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9204 return simplify_bit_ops_using_ranges (gsi, stmt);
9205 break;
9207 CASE_CONVERT:
9208 if (TREE_CODE (rhs1) == SSA_NAME
9209 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9210 return simplify_conversion_using_ranges (stmt);
9211 break;
9213 case FLOAT_EXPR:
9214 if (TREE_CODE (rhs1) == SSA_NAME
9215 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9216 return simplify_float_conversion_using_ranges (gsi, stmt);
9217 break;
9219 default:
9220 break;
9223 else if (gimple_code (stmt) == GIMPLE_COND)
9224 return simplify_cond_using_ranges (stmt);
9225 else if (gimple_code (stmt) == GIMPLE_SWITCH)
9226 return simplify_switch_using_ranges (stmt);
9228 return false;
9231 /* If the statement pointed to by SI has a predicate whose value can be
9232 computed using the value range information computed by VRP, compute
9233 its value and return true. Otherwise, return false. */
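/* For example, if x is known to be in [1, 10], the predicate x > 0
evaluates to true and the enclosing GIMPLE_COND is folded to a
constant-true condition. */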
9235 static bool
9236 fold_predicate_in (gimple_stmt_iterator *si)
9238 bool assignment_p = false;
9239 tree val;
9240 gimple stmt = gsi_stmt (*si);
9242 if (is_gimple_assign (stmt)
9243 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
9245 assignment_p = true;
9246 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
9247 gimple_assign_rhs1 (stmt),
9248 gimple_assign_rhs2 (stmt),
9249 stmt);
9251 else if (gimple_code (stmt) == GIMPLE_COND)
9252 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
9253 gimple_cond_lhs (stmt),
9254 gimple_cond_rhs (stmt),
9255 stmt);
9256 else
9257 return false;
9259 if (val)
9261 if (assignment_p)
9262 val = fold_convert (gimple_expr_type (stmt), val);
9264 if (dump_file)
9266 fprintf (dump_file, "Folding predicate ");
9267 print_gimple_expr (dump_file, stmt, 0, 0);
9268 fprintf (dump_file, " to ");
9269 print_generic_expr (dump_file, val, 0);
9270 fprintf (dump_file, "\n");
9273 if (is_gimple_assign (stmt))
9274 gimple_assign_set_rhs_from_tree (si, val);
9275 else
9277 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
9278 if (integer_zerop (val))
9279 gimple_cond_make_false (stmt);
9280 else if (integer_onep (val))
9281 gimple_cond_make_true (stmt);
9282 else
9283 gcc_unreachable ();
9286 return true;
9289 return false;
9292 /* Callback for substitute_and_fold folding the stmt at *SI. */
9294 static bool
9295 vrp_fold_stmt (gimple_stmt_iterator *si)
9297 if (fold_predicate_in (si))
9298 return true;
9300 return simplify_stmt_using_ranges (si);
9303 /* Stack of dest,src equivalency pairs that need to be restored after
9304 each attempt to thread a block's incoming edge to an outgoing edge.
9306 A NULL entry is used to mark the end of pairs which need to be
9307 restored. */
9308 static vec<tree> equiv_stack;
9310 /* A trivial wrapper so that we can present the generic jump threading
9311 code with a simple API for simplifying statements. STMT is the
9312 statement we want to simplify, WITHIN_STMT provides the location
9313 for any overflow warnings. */
9315 static tree
9316 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
9318 if (gimple_code (stmt) == GIMPLE_COND)
9319 return vrp_evaluate_conditional (gimple_cond_code (stmt),
9320 gimple_cond_lhs (stmt),
9321 gimple_cond_rhs (stmt), within_stmt);
9323 if (gimple_code (stmt) == GIMPLE_ASSIGN)
9325 value_range_t new_vr = VR_INITIALIZER;
9326 tree lhs = gimple_assign_lhs (stmt);
9328 if (TREE_CODE (lhs) == SSA_NAME
9329 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
9330 || POINTER_TYPE_P (TREE_TYPE (lhs))))
9332 extract_range_from_assignment (&new_vr, stmt);
9333 if (range_int_cst_singleton_p (&new_vr))
9334 return new_vr.min;
9338 return NULL_TREE;
9341 /* Blocks which have more than one predecessor and more than
9342 one successor present jump threading opportunities, i.e.,
9343 when the block is reached from a specific predecessor, we
9344 may be able to determine which of the outgoing edges will
9345 be traversed. When this optimization applies, we are able
9346 to avoid conditionals at runtime and we may expose secondary
9347 optimization opportunities.
9349 This routine is effectively a driver for the generic jump
9350 threading code. It basically just presents the generic code
9351 with edges that may be suitable for jump threading.
9353 Unlike DOM, we do not iterate VRP if jump threading was successful.
9354 While iterating may expose new opportunities for VRP, it is expected
9355 those opportunities would be very limited and the compile time cost
9356 to expose those opportunities would be significant.
9358 As jump threading opportunities are discovered, they are registered
9359 for later realization. */
9361 static void
9362 identify_jump_threads (void)
9364 basic_block bb;
9365 gimple dummy;
9366 int i;
9367 edge e;
9369 /* Ugh. When substituting values earlier in this pass we can
9370 wipe the dominance information. So rebuild the dominator
9371 information as we need it within the jump threading code. */
9372 calculate_dominance_info (CDI_DOMINATORS);
9374 /* We do not allow VRP information to be used for jump threading
9375 across a back edge in the CFG. Otherwise it becomes too
9376 difficult to avoid eliminating loop exit tests. Of course
9377 EDGE_DFS_BACK is not accurate at this time so we have to
9378 recompute it. */
9379 mark_dfs_back_edges ();
9381 /* Do not thread across edges we are about to remove. Just marking
9382 them as EDGE_DFS_BACK will do. */
9383 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
9384 e->flags |= EDGE_DFS_BACK;
9386 /* Allocate our unwinder stack to unwind any temporary equivalences
9387 that might be recorded. */
9388 equiv_stack.create (20);
9390 /* To avoid lots of silly node creation, we create a single
9391 conditional and just modify it in-place when attempting to
9392 thread jumps. */
9393 dummy = gimple_build_cond (EQ_EXPR,
9394 integer_zero_node, integer_zero_node,
9395 NULL, NULL);
9397 /* Walk through all the blocks finding those which present a
9398 potential jump threading opportunity. We could set this up
9399 as a dominator walker and record data during the walk, but
9400 I doubt it's worth the effort for the classes of jump
9401 threading opportunities we are trying to identify at this
9402 point in compilation. */
9403 FOR_EACH_BB (bb)
9405 gimple last;
9407 /* If the generic jump threading code does not find this block
9408 interesting, then there is nothing to do. */
9409 if (! potentially_threadable_block (bb))
9410 continue;
9412 /* We only care about blocks ending in a conditional or a switch
9413 statement; the detailed conditions are checked below. */
9415 last = gsi_stmt (gsi_last_bb (bb));
9417 /* We're basically looking for a switch or any kind of conditional with
9418 integral or pointer type arguments. Note the type of the second
9419 argument will be the same as the first argument, so no need to
9420 check it explicitly. */
9421 if (gimple_code (last) == GIMPLE_SWITCH
9422 || (gimple_code (last) == GIMPLE_COND
9423 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
9424 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
9425 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
9426 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
9427 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
9429 edge_iterator ei;
9431 /* We've got a block with multiple predecessors and multiple
9432 successors which also ends in a suitable conditional or
9433 switch statement. For each predecessor, see if we can thread
9434 it to a specific successor. */
9435 FOR_EACH_EDGE (e, ei, bb->preds)
9437 /* Do not thread across back edges or abnormal edges
9438 in the CFG. */
9439 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
9440 continue;
9442 thread_across_edge (dummy, e, true, &equiv_stack,
9443 simplify_stmt_for_jump_threading);
9448 /* We do not actually update the CFG or SSA graphs at this point as
9449 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
9450 handle ASSERT_EXPRs gracefully. */
9453 /* We identified all the jump threading opportunities earlier, but could
9454 not transform the CFG at that time. This routine transforms the
9455 CFG and arranges for the dominator tree to be rebuilt if necessary.
9457 Note the SSA graph update will occur during the normal TODO
9458 processing by the pass manager. */
9459 static void
9460 finalize_jump_threads (void)
9462 thread_through_all_blocks (false);
9463 equiv_stack.release ();
9467 /* Traverse all the blocks folding conditionals with known ranges. */
9469 static void
9470 vrp_finalize (void)
9472 size_t i;
9474 values_propagated = true;
9476 if (dump_file)
9478 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
9479 dump_all_value_ranges (dump_file);
9480 fprintf (dump_file, "\n");
9483 substitute_and_fold (op_with_constant_singleton_value_range,
9484 vrp_fold_stmt, false);
9486 if (warn_array_bounds)
9487 check_all_array_refs ();
9489 /* We must identify jump threading opportunities before we release
9490 the datastructures built by VRP. */
9491 identify_jump_threads ();
9493 /* Record value range information for non-pointer SSA_NAMEs. */
9494 for (i = 0; i < num_vr_values; i++)
9495 if (vr_value[i])
9497 tree name = ssa_name (i);
9499 if (!name
9500 || POINTER_TYPE_P (TREE_TYPE (name))
9501 || (vr_value[i]->type == VR_VARYING)
9502 || (vr_value[i]->type == VR_UNDEFINED))
9503 continue;
9505 if ((TREE_CODE (vr_value[i]->min) == INTEGER_CST)
9506 && (TREE_CODE (vr_value[i]->max) == INTEGER_CST))
9508 if (vr_value[i]->type == VR_RANGE)
9509 set_range_info (name,
9510 tree_to_double_int (vr_value[i]->min),
9511 tree_to_double_int (vr_value[i]->max));
9512 else if (vr_value[i]->type == VR_ANTI_RANGE)
9514 /* VR_ANTI_RANGE ~[min, max] is encoded compactly as
9515 [max + 1, min - 1] without additional attributes.
9516 When min value > max value, we know that it is
9517 VR_ANTI_RANGE; it is VR_RANGE otherwise. */
9519 /* An unsigned ~[0,0] anti-range is represented as the
9520 range [1, TYPE_MAX]. */
9521 if (TYPE_UNSIGNED (TREE_TYPE (name))
9522 && integer_zerop (vr_value[i]->min)
9523 && integer_zerop (vr_value[i]->max))
9524 set_range_info (name,
9525 double_int_one,
9526 double_int::max_value
9527 (TYPE_PRECISION (TREE_TYPE (name)), true));
9528 else
9529 set_range_info (name,
9530 tree_to_double_int (vr_value[i]->max)
9531 + double_int_one,
9532 tree_to_double_int (vr_value[i]->min)
9533 - double_int_one);
9538 /* Free allocated memory. */
9539 for (i = 0; i < num_vr_values; i++)
9540 if (vr_value[i])
9542 BITMAP_FREE (vr_value[i]->equiv);
9543 free (vr_value[i]);
9546 free (vr_value);
9547 free (vr_phi_edge_counts);
9549 /* So that we can distinguish between VRP data being available
9550 and not available. */
9551 vr_value = NULL;
9552 vr_phi_edge_counts = NULL;
9556 /* Main entry point to VRP (Value Range Propagation). This pass is
9557 loosely based on J. R. C. Patterson, ``Accurate Static Branch
9558 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
9559 Programming Language Design and Implementation, pp. 67-78, 1995.
9560 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
9562 This is essentially an SSA-CCP pass modified to deal with ranges
9563 instead of constants.
9565 While propagating ranges, we may find that two or more SSA names
9566 have equivalent, though distinct ranges. For instance,
9568 1 x_9 = p_3->a;
9569 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
9570 3 if (p_4 == q_2)
9571 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
9572 5 endif
9573 6 if (q_2)
9575 In the code above, pointer p_5 has range [q_2, q_2], but from the
9576 code we can also determine that p_5 cannot be NULL and, if q_2 had
9577 a non-varying range, p_5's range should also be compatible with it.
9579 These equivalences are created by two expressions: ASSERT_EXPR and
9580 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
9581 result of another assertion, we can use the fact that p_5 and
9582 p_4 are equivalent when evaluating p_5's range.
9584 Together with value ranges, we also propagate these equivalences
9585 between names so that we can take advantage of information from
9586 multiple ranges when doing final replacement. Note that this
9587 equivalency relation is transitive but not symmetric.
9589 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
9590 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
9591 in contexts where that assertion does not hold (e.g., in line 6).
9593 TODO, the main difference between this pass and Patterson's is that
9594 we do not propagate edge probabilities. We only compute whether
9595 edges can be taken or not. That is, instead of having a spectrum
9596 of jump probabilities between 0 and 1, we only deal with 0, 1 and
9597 DON'T KNOW. In the future, it may be worthwhile to propagate
9598 probabilities to aid branch prediction. */
9600 static unsigned int
9601 execute_vrp (void)
9603 int i;
9604 edge e;
9605 switch_update *su;
9607 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
9608 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
9609 scev_initialize ();
9611 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
9612 Inserting assertions may split edges which will invalidate
9613 EDGE_DFS_BACK. */
9614 insert_range_assertions ();
9616 to_remove_edges.create (10);
9617 to_update_switch_stmts.create (5);
9618 threadedge_initialize_values ();
9620 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
9621 mark_dfs_back_edges ();
9623 vrp_initialize ();
9624 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
9625 vrp_finalize ();
9627 free_numbers_of_iterations_estimates ();
9629 /* ASSERT_EXPRs must be removed before finalizing jump threads
9630 as finalizing jump threads calls the CFG cleanup code which
9631 does not properly handle ASSERT_EXPRs. */
9632 remove_range_assertions ();
9634 /* If we exposed any new variables, go ahead and put them into
9635 SSA form now, before we handle jump threading. This simplifies
9636 interactions between rewriting of _DECL nodes into SSA form
9637 and rewriting SSA_NAME nodes into SSA form after block
9638 duplication and CFG manipulation. */
9639 update_ssa (TODO_update_ssa);
9641 finalize_jump_threads ();
9643 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
9644 CFG in a broken state and requires a cfg_cleanup run. */
9645 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
9646 remove_edge (e);
9647 /* Update SWITCH_EXPR case label vector. */
9648 FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
9650 size_t j;
9651 size_t n = TREE_VEC_LENGTH (su->vec);
9652 tree label;
9653 gimple_switch_set_num_labels (su->stmt, n);
9654 for (j = 0; j < n; j++)
9655 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
9656 /* As we may have replaced the default label with a regular one,
9657 make sure to turn it back into a real default label again. This
9658 ensures optimal expansion. */
9659 label = gimple_switch_label (su->stmt, 0);
9660 CASE_LOW (label) = NULL_TREE;
9661 CASE_HIGH (label) = NULL_TREE;
9664 if (to_remove_edges.length () > 0)
9666 free_dominance_info (CDI_DOMINATORS);
9667 if (current_loops)
9668 loops_state_set (LOOPS_NEED_FIXUP);
9671 to_remove_edges.release ();
9672 to_update_switch_stmts.release ();
9673 threadedge_finalize_values ();
9675 scev_finalize ();
9676 loop_optimizer_finalize ();
9677 return 0;
9680 static bool
9681 gate_vrp (void)
9683 return flag_tree_vrp != 0;
9686 namespace {
9688 const pass_data pass_data_vrp =
9690 GIMPLE_PASS, /* type */
9691 "vrp", /* name */
9692 OPTGROUP_NONE, /* optinfo_flags */
9693 true, /* has_gate */
9694 true, /* has_execute */
9695 TV_TREE_VRP, /* tv_id */
9696 PROP_ssa, /* properties_required */
9697 0, /* properties_provided */
9698 0, /* properties_destroyed */
9699 0, /* todo_flags_start */
9700 ( TODO_cleanup_cfg | TODO_update_ssa
9701 | TODO_verify_ssa
9702 | TODO_verify_flow ), /* todo_flags_finish */
9705 class pass_vrp : public gimple_opt_pass
9707 public:
9708 pass_vrp (gcc::context *ctxt)
9709 : gimple_opt_pass (pass_data_vrp, ctxt)
9712 /* opt_pass methods: */
9713 opt_pass * clone () { return new pass_vrp (m_ctxt); }
9714 bool gate () { return gate_vrp (); }
9715 unsigned int execute () { return execute_vrp (); }
9717 }; // class pass_vrp
9719 } // anon namespace
9721 gimple_opt_pass *
9722 make_pass_vrp (gcc::context *ctxt)
9724 return new pass_vrp (ctxt);