1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2016 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimple-walk.h"
44 #include "tree-cfg.h"
45 #include "tree-ssa-loop-manip.h"
46 #include "tree-ssa-loop-niter.h"
47 #include "tree-ssa-loop.h"
48 #include "tree-into-ssa.h"
49 #include "tree-ssa.h"
50 #include "intl.h"
51 #include "cfgloop.h"
52 #include "tree-scalar-evolution.h"
53 #include "tree-ssa-propagate.h"
54 #include "tree-chrec.h"
55 #include "tree-ssa-threadupdate.h"
56 #include "tree-ssa-scopedtables.h"
57 #include "tree-ssa-threadedge.h"
58 #include "omp-low.h"
59 #include "target.h"
60 #include "case-cfn-macros.h"
61 #include "params.h"
63 /* Range of values that can be associated with an SSA_NAME after VRP
64 has executed. */
65 struct value_range
67 /* Lattice value represented by this range. */
68 enum value_range_type type;
70 /* Minimum and maximum values represented by this range. These
71 values should be interpreted as follows:
73 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
74 be NULL.
76 - If TYPE == VR_RANGE then MIN holds the minimum value and
77 MAX holds the maximum value of the range [MIN, MAX].
79 - If TYPE == VR_ANTI_RANGE the variable is known to NOT
80 take any values in the range [MIN, MAX]. */
81 tree min;
82 tree max;
84 /* Set of SSA names whose value ranges are equivalent to this one.
85 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
86 bitmap equiv;
89 #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
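/* Illustrative encodings of the lattice values above, assuming a
   32-bit signed int:

     [5, 10]       type = VR_RANGE, min = 5, max = 10
     ~[0, 0]       type = VR_ANTI_RANGE, min = max = 0
                   (the usual "known non-NULL" encoding for pointers)
     VR_VARYING    min = max = NULL_TREE, any value is possible
     VR_UNDEFINED  min = max = NULL_TREE, no value has been seen yet

   VR_INITIALIZER gives the last of these: an undefined range with an
   empty equivalence set.  */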
91 /* Set of SSA names found live during the RPO traversal of the function
92 for still active basic-blocks. */
93 static sbitmap *live;
95 /* Return true if the SSA name NAME is live on the edge E. */
97 static bool
98 live_on_edge (edge e, tree name)
100 return (live[e->dest->index]
101 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
104 /* Local functions. */
105 static int compare_values (tree val1, tree val2);
106 static int compare_values_warnv (tree val1, tree val2, bool *);
107 static void vrp_meet (value_range *, value_range *);
108 static void vrp_intersect_ranges (value_range *, value_range *);
109 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
110 tree, tree, bool, bool *,
111 bool *);
113 /* Location information for ASSERT_EXPRs. Each instance of this
114 structure describes an ASSERT_EXPR for an SSA name. Since a single
115 SSA name may have more than one assertion associated with it, these
116 locations are kept in a linked list attached to the corresponding
117 SSA name. */
118 struct assert_locus
120 /* Basic block where the assertion would be inserted. */
121 basic_block bb;
123 /* Some assertions need to be inserted on an edge (e.g., assertions
124 generated by COND_EXPRs). In those cases, BB will be NULL. */
125 edge e;
127 /* Pointer to the statement that generated this assertion. */
128 gimple_stmt_iterator si;
130 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
131 enum tree_code comp_code;
133 /* Value being compared against. */
134 tree val;
136 /* Expression to compare. */
137 tree expr;
139 /* Next node in the linked list. */
140 assert_locus *next;
143 /* If bit I is present, it means that SSA name N_i has a list of
144 assertions that should be inserted in the IL. */
145 static bitmap need_assert_for;
147 /* Array of location lists indicating where to insert assertions. ASSERTS_FOR[I]
148 holds a list of ASSERT_LOCUS_T nodes that describe where
149 ASSERT_EXPRs for SSA name N_I should be inserted. */
150 static assert_locus **asserts_for;
152 /* Value range array. After propagation, VR_VALUE[I] holds the range
153 of values that SSA name N_I may take. */
154 static unsigned num_vr_values;
155 static value_range **vr_value;
156 static bool values_propagated;
158 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
159 number of executable edges we saw the last time we visited the
160 node. */
161 static int *vr_phi_edge_counts;
163 struct switch_update {
164 gswitch *stmt;
165 tree vec;
168 static vec<edge> to_remove_edges;
169 static vec<switch_update> to_update_switch_stmts;
172 /* Return the maximum value for TYPE. */
174 static inline tree
175 vrp_val_max (const_tree type)
177 if (!INTEGRAL_TYPE_P (type))
178 return NULL_TREE;
180 return TYPE_MAX_VALUE (type);
183 /* Return the minimum value for TYPE. */
185 static inline tree
186 vrp_val_min (const_tree type)
188 if (!INTEGRAL_TYPE_P (type))
189 return NULL_TREE;
191 return TYPE_MIN_VALUE (type);
194 /* Return whether VAL is equal to the maximum value of its type. This
195 will be true for a positive overflow infinity. We can't do a
196 simple equality comparison with TYPE_MAX_VALUE because C typedefs
197 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
198 to the integer constant with the same value in the type. */
200 static inline bool
201 vrp_val_is_max (const_tree val)
203 tree type_max = vrp_val_max (TREE_TYPE (val));
204 return (val == type_max
205 || (type_max != NULL_TREE
206 && operand_equal_p (val, type_max, 0)));
209 /* Return whether VAL is equal to the minimum value of its type. This
210 will be true for a negative overflow infinity. */
212 static inline bool
213 vrp_val_is_min (const_tree val)
215 tree type_min = vrp_val_min (TREE_TYPE (val));
216 return (val == type_min
217 || (type_min != NULL_TREE
218 && operand_equal_p (val, type_min, 0)));
222 /* Return whether TYPE should use an overflow infinity distinct from
223 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
224 represent a signed overflow during VRP computations. An infinity
225 is distinct from a half-range, which will go from some number to
226 TYPE_{MIN,MAX}_VALUE. */
228 static inline bool
229 needs_overflow_infinity (const_tree type)
231 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
234 /* Return whether TYPE can support our overflow infinity
235 representation: we use the TREE_OVERFLOW flag, which only exists
236 for constants. If TYPE doesn't support this, we don't optimize
237 cases which would require signed overflow--we drop them to
238 VARYING. */
240 static inline bool
241 supports_overflow_infinity (const_tree type)
243 tree min = vrp_val_min (type), max = vrp_val_max (type);
244 gcc_checking_assert (needs_overflow_infinity (type));
245 return (min != NULL_TREE
246 && CONSTANT_CLASS_P (min)
247 && max != NULL_TREE
248 && CONSTANT_CLASS_P (max));
251 /* VAL is the maximum or minimum value of a type. Return a
252 corresponding overflow infinity. */
254 static inline tree
255 make_overflow_infinity (tree val)
257 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
258 val = copy_node (val);
259 TREE_OVERFLOW (val) = 1;
260 return val;
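/* For example, a positive overflow infinity for a 32-bit signed int is
   a copy of the INTEGER_CST 2147483647 with TREE_OVERFLOW set.  Such
   bounds are typically rendered as +INF(OVF) and -INF(OVF) in dumps,
   distinguishing them from the plain TYPE_MAX_VALUE/TYPE_MIN_VALUE
   bounds of a half-range.  */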
263 /* Return a negative overflow infinity for TYPE. */
265 static inline tree
266 negative_overflow_infinity (tree type)
268 gcc_checking_assert (supports_overflow_infinity (type));
269 return make_overflow_infinity (vrp_val_min (type));
272 /* Return a positive overflow infinity for TYPE. */
274 static inline tree
275 positive_overflow_infinity (tree type)
277 gcc_checking_assert (supports_overflow_infinity (type));
278 return make_overflow_infinity (vrp_val_max (type));
281 /* Return whether VAL is a negative overflow infinity. */
283 static inline bool
284 is_negative_overflow_infinity (const_tree val)
286 return (TREE_OVERFLOW_P (val)
287 && needs_overflow_infinity (TREE_TYPE (val))
288 && vrp_val_is_min (val));
291 /* Return whether VAL is a positive overflow infinity. */
293 static inline bool
294 is_positive_overflow_infinity (const_tree val)
296 return (TREE_OVERFLOW_P (val)
297 && needs_overflow_infinity (TREE_TYPE (val))
298 && vrp_val_is_max (val));
301 /* Return whether VAL is a positive or negative overflow infinity. */
303 static inline bool
304 is_overflow_infinity (const_tree val)
306 return (TREE_OVERFLOW_P (val)
307 && needs_overflow_infinity (TREE_TYPE (val))
308 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
311 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
313 static inline bool
314 stmt_overflow_infinity (gimple *stmt)
316 if (is_gimple_assign (stmt)
317 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
318 GIMPLE_SINGLE_RHS)
319 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
320 return false;
323 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
324 the same value with TREE_OVERFLOW clear. This can be used to avoid
325 confusing a regular value with an overflow value. */
327 static inline tree
328 avoid_overflow_infinity (tree val)
330 if (!is_overflow_infinity (val))
331 return val;
333 if (vrp_val_is_max (val))
334 return vrp_val_max (TREE_TYPE (val));
335 else
337 gcc_checking_assert (vrp_val_is_min (val));
338 return vrp_val_min (TREE_TYPE (val));
343 /* Set value range VR to VR_UNDEFINED. */
345 static inline void
346 set_value_range_to_undefined (value_range *vr)
348 vr->type = VR_UNDEFINED;
349 vr->min = vr->max = NULL_TREE;
350 if (vr->equiv)
351 bitmap_clear (vr->equiv);
355 /* Set value range VR to VR_VARYING. */
357 static inline void
358 set_value_range_to_varying (value_range *vr)
360 vr->type = VR_VARYING;
361 vr->min = vr->max = NULL_TREE;
362 if (vr->equiv)
363 bitmap_clear (vr->equiv);
367 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
369 static void
370 set_value_range (value_range *vr, enum value_range_type t, tree min,
371 tree max, bitmap equiv)
373 /* Check the validity of the range. */
374 if (flag_checking
375 && (t == VR_RANGE || t == VR_ANTI_RANGE))
377 int cmp;
379 gcc_assert (min && max);
381 gcc_assert ((!TREE_OVERFLOW_P (min) || is_overflow_infinity (min))
382 && (!TREE_OVERFLOW_P (max) || is_overflow_infinity (max)));
384 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
385 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
387 cmp = compare_values (min, max);
388 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
390 if (needs_overflow_infinity (TREE_TYPE (min)))
391 gcc_assert (!is_overflow_infinity (min)
392 || !is_overflow_infinity (max));
395 if (flag_checking
396 && (t == VR_UNDEFINED || t == VR_VARYING))
398 gcc_assert (min == NULL_TREE && max == NULL_TREE);
399 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
402 vr->type = t;
403 vr->min = min;
404 vr->max = max;
406 /* Since updating the equivalence set involves deep copying the
407 bitmaps, only do it if absolutely necessary. */
408 if (vr->equiv == NULL
409 && equiv != NULL)
410 vr->equiv = BITMAP_ALLOC (NULL);
412 if (equiv != vr->equiv)
414 if (equiv && !bitmap_empty_p (equiv))
415 bitmap_copy (vr->equiv, equiv);
416 else
417 bitmap_clear (vr->equiv);
422 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
423 This means adjusting T, MIN and MAX representing the case of a
424 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
425 as anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
426 In corner cases where MAX+1 or MIN-1 wraps this will fall back
427 to varying.
428 This routine exists to ease canonicalization in the case where we
429 extract ranges from var + CST op limit. */
431 static void
432 set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
433 tree min, tree max, bitmap equiv)
435 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
436 if (t == VR_UNDEFINED)
438 set_value_range_to_undefined (vr);
439 return;
441 else if (t == VR_VARYING)
443 set_value_range_to_varying (vr);
444 return;
447 /* Nothing to canonicalize for symbolic ranges. */
448 if (TREE_CODE (min) != INTEGER_CST
449 || TREE_CODE (max) != INTEGER_CST)
451 set_value_range (vr, t, min, max, equiv);
452 return;
455 /* MIN and MAX are in the wrong order; swapping them also requires
456 adjusting the VR type. */
457 if (tree_int_cst_lt (max, min))
459 tree one, tmp;
461 /* For one-bit precision, if max < min, then the swapped
462 range covers all values, so for VR_RANGE it is varying and
463 for VR_ANTI_RANGE it is the empty range, so drop to varying as well. */
464 if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
466 set_value_range_to_varying (vr);
467 return;
470 one = build_int_cst (TREE_TYPE (min), 1);
471 tmp = int_const_binop (PLUS_EXPR, max, one);
472 max = int_const_binop (MINUS_EXPR, min, one);
473 min = tmp;
475 /* There's one corner case: if we had [C+1, C] before, we now have
476 that again. But this represents an empty value range, so drop
477 to varying in this case. */
478 if (tree_int_cst_lt (max, min))
480 set_value_range_to_varying (vr);
481 return;
484 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
487 /* Anti-ranges that can be represented as ranges should be so. */
488 if (t == VR_ANTI_RANGE)
490 bool is_min = vrp_val_is_min (min);
491 bool is_max = vrp_val_is_max (max);
493 if (is_min && is_max)
495 /* We cannot deal with empty ranges, drop to varying.
496 ??? This could be VR_UNDEFINED instead. */
497 set_value_range_to_varying (vr);
498 return;
500 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
501 && (is_min || is_max))
503 /* Non-empty boolean ranges can always be represented
504 as a singleton range. */
505 if (is_min)
506 min = max = vrp_val_max (TREE_TYPE (min));
507 else
508 min = max = vrp_val_min (TREE_TYPE (min));
509 t = VR_RANGE;
511 else if (is_min
512 /* As a special exception preserve non-null ranges. */
513 && !(TYPE_UNSIGNED (TREE_TYPE (min))
514 && integer_zerop (max)))
516 tree one = build_int_cst (TREE_TYPE (max), 1);
517 min = int_const_binop (PLUS_EXPR, max, one);
518 max = vrp_val_max (TREE_TYPE (max));
519 t = VR_RANGE;
521 else if (is_max)
523 tree one = build_int_cst (TREE_TYPE (min), 1);
524 max = int_const_binop (MINUS_EXPR, min, one);
525 min = vrp_val_min (TREE_TYPE (min));
526 t = VR_RANGE;
530 /* Drop [-INF(OVF), +INF(OVF)] to varying. */
531 if (needs_overflow_infinity (TREE_TYPE (min))
532 && is_overflow_infinity (min)
533 && is_overflow_infinity (max))
535 set_value_range_to_varying (vr);
536 return;
539 set_value_range (vr, t, min, max, equiv);
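/* Two illustrative canonicalizations, assuming an unsigned 8-bit type:

     set_and_canonicalize_value_range (vr, VR_RANGE, 250, 10, NULL)
       has MAX < MIN, so the bounds are swapped and the type flipped,
       yielding the anti-range ~[11, 249];

     set_and_canonicalize_value_range (vr, VR_ANTI_RANGE, 0, 5, NULL)
       excludes a prefix of the type, so it is rewritten as the plain
       range [6, 255].  */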
542 /* Copy value range FROM into value range TO. */
544 static inline void
545 copy_value_range (value_range *to, value_range *from)
547 set_value_range (to, from->type, from->min, from->max, from->equiv);
550 /* Set value range VR to a single value. This function is only called
551 with values we get from statements, and exists to clear the
552 TREE_OVERFLOW flag so that we don't think we have an overflow
553 infinity when we shouldn't. */
555 static inline void
556 set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
558 gcc_assert (is_gimple_min_invariant (val));
559 if (TREE_OVERFLOW_P (val))
560 val = drop_tree_overflow (val);
561 set_value_range (vr, VR_RANGE, val, val, equiv);
564 /* Set value range VR to a non-negative range of type TYPE.
565 OVERFLOW_INFINITY indicates whether to use an overflow infinity
566 rather than TYPE_MAX_VALUE; this should be true if we determine
567 that the range is nonnegative based on the assumption that signed
568 overflow does not occur. */
570 static inline void
571 set_value_range_to_nonnegative (value_range *vr, tree type,
572 bool overflow_infinity)
574 tree zero;
576 if (overflow_infinity && !supports_overflow_infinity (type))
578 set_value_range_to_varying (vr);
579 return;
582 zero = build_int_cst (type, 0);
583 set_value_range (vr, VR_RANGE, zero,
584 (overflow_infinity
585 ? positive_overflow_infinity (type)
586 : TYPE_MAX_VALUE (type)),
587 vr->equiv);
590 /* Set value range VR to a non-NULL range of type TYPE. */
592 static inline void
593 set_value_range_to_nonnull (value_range *vr, tree type)
595 tree zero = build_int_cst (type, 0);
596 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
600 /* Set value range VR to a NULL range of type TYPE. */
602 static inline void
603 set_value_range_to_null (value_range *vr, tree type)
605 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
609 /* Set value range VR to a range of a truthvalue of type TYPE. */
611 static inline void
612 set_value_range_to_truthvalue (value_range *vr, tree type)
614 if (TYPE_PRECISION (type) == 1)
615 set_value_range_to_varying (vr);
616 else
617 set_value_range (vr, VR_RANGE,
618 build_int_cst (type, 0), build_int_cst (type, 1),
619 vr->equiv);
623 /* If abs (min) < abs (max), set VR to [-max, max]; if
624 abs (min) >= abs (max), set VR to [-min, min]. */
626 static void
627 abs_extent_range (value_range *vr, tree min, tree max)
629 int cmp;
631 gcc_assert (TREE_CODE (min) == INTEGER_CST);
632 gcc_assert (TREE_CODE (max) == INTEGER_CST);
633 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
634 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
635 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
636 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
637 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
639 set_value_range_to_varying (vr);
640 return;
642 cmp = compare_values (min, max);
643 if (cmp == -1)
644 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
645 else if (cmp == 0 || cmp == 1)
647 max = min;
648 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
650 else
652 set_value_range_to_varying (vr);
653 return;
655 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
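/* For instance, with a signed 32-bit type, abs_extent_range on
   MIN = -3, MAX = 7 sees |MIN| < |MAX| and produces the symmetric
   range [-7, 7], while MIN = -9, MAX = 4 produces [-9, 9].  */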
659 /* Return value range information for VAR.
661 If we have no value ranges recorded (i.e., VRP is not running), then
662 return NULL. Otherwise create an empty range if none existed for VAR. */
664 static value_range *
665 get_value_range (const_tree var)
667 static const value_range vr_const_varying
668 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
669 value_range *vr;
670 tree sym;
671 unsigned ver = SSA_NAME_VERSION (var);
673 /* If we have no recorded ranges, then return NULL. */
674 if (! vr_value)
675 return NULL;
677 /* If we query the range for a new SSA name return an unmodifiable VARYING.
678 We should get here at most from the substitute-and-fold stage which
679 will never try to change values. */
680 if (ver >= num_vr_values)
681 return CONST_CAST (value_range *, &vr_const_varying);
683 vr = vr_value[ver];
684 if (vr)
685 return vr;
687 /* After propagation finished do not allocate new value-ranges. */
688 if (values_propagated)
689 return CONST_CAST (value_range *, &vr_const_varying);
691 /* Create a default value range. */
692 vr_value[ver] = vr = XCNEW (value_range);
694 /* Defer allocating the equivalence set. */
695 vr->equiv = NULL;
697 /* If VAR is a default definition of a parameter, the variable can
698 take any value in VAR's type. */
699 if (SSA_NAME_IS_DEFAULT_DEF (var))
701 sym = SSA_NAME_VAR (var);
702 if (TREE_CODE (sym) == PARM_DECL)
704 /* Try to use the "nonnull" attribute to create ~[0, 0]
705 anti-ranges for pointers. Note that this is only valid with
706 default definitions of PARM_DECLs. */
707 if (POINTER_TYPE_P (TREE_TYPE (sym))
708 && nonnull_arg_p (sym))
709 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
710 else
711 set_value_range_to_varying (vr);
713 else if (TREE_CODE (sym) == RESULT_DECL
714 && DECL_BY_REFERENCE (sym))
715 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
718 return vr;
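/* As an illustration, given a declaration along the lines of

     extern void f (int *p) __attribute__ ((nonnull (1)));

   the default definition of p inside f should satisfy nonnull_arg_p,
   so its initial lattice value becomes the anti-range ~[0, 0] rather
   than VARYING.  */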
721 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
723 static inline bool
724 vrp_operand_equal_p (const_tree val1, const_tree val2)
726 if (val1 == val2)
727 return true;
728 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
729 return false;
730 return is_overflow_infinity (val1) == is_overflow_infinity (val2);
733 /* Return true, if the bitmaps B1 and B2 are equal. */
735 static inline bool
736 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
738 return (b1 == b2
739 || ((!b1 || bitmap_empty_p (b1))
740 && (!b2 || bitmap_empty_p (b2)))
741 || (b1 && b2
742 && bitmap_equal_p (b1, b2)));
745 /* Update the value range and equivalence set for variable VAR to
746 NEW_VR. Return true if NEW_VR is different from VAR's previous
747 value.
749 NOTE: This function assumes that NEW_VR is a temporary value range
750 object created for the sole purpose of updating VAR's range. The
751 storage used by the equivalence set from NEW_VR will be freed by
752 this function. Do not call update_value_range when NEW_VR
753 is the range object associated with another SSA name. */
755 static inline bool
756 update_value_range (const_tree var, value_range *new_vr)
758 value_range *old_vr;
759 bool is_new;
761 /* If there is a value-range on the SSA name from earlier analysis
762 factor that in. */
763 if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
765 wide_int min, max;
766 value_range_type rtype = get_range_info (var, &min, &max);
767 if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
769 value_range nr;
770 nr.type = rtype;
771 nr.min = wide_int_to_tree (TREE_TYPE (var), min);
772 nr.max = wide_int_to_tree (TREE_TYPE (var), max);
773 nr.equiv = NULL;
774 vrp_intersect_ranges (new_vr, &nr);
778 /* Update the value range, if necessary. */
779 old_vr = get_value_range (var);
780 is_new = old_vr->type != new_vr->type
781 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
782 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
783 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
785 if (is_new)
787 /* Do not allow transitions up the lattice. The following
788 is slightly more awkward than just new_vr->type < old_vr->type
789 because VR_RANGE and VR_ANTI_RANGE need to be considered
790 the same. We may not have is_new when transitioning to
791 UNDEFINED. If old_vr->type is VARYING, we shouldn't be
792 called. */
793 if (new_vr->type == VR_UNDEFINED)
795 BITMAP_FREE (new_vr->equiv);
796 set_value_range_to_varying (old_vr);
797 set_value_range_to_varying (new_vr);
798 return true;
800 else
801 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
802 new_vr->equiv);
805 BITMAP_FREE (new_vr->equiv);
807 return is_new;
811 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
812 point where equivalence processing can be turned on/off. */
814 static void
815 add_equivalence (bitmap *equiv, const_tree var)
817 unsigned ver = SSA_NAME_VERSION (var);
818 value_range *vr = vr_value[ver];
820 if (*equiv == NULL)
821 *equiv = BITMAP_ALLOC (NULL);
822 bitmap_set_bit (*equiv, ver);
823 if (vr && vr->equiv)
824 bitmap_ior_into (*equiv, vr->equiv);
828 /* Return true if VR is ~[0, 0]. */
830 static inline bool
831 range_is_nonnull (value_range *vr)
833 return vr->type == VR_ANTI_RANGE
834 && integer_zerop (vr->min)
835 && integer_zerop (vr->max);
839 /* Return true if VR is [0, 0]. */
841 static inline bool
842 range_is_null (value_range *vr)
844 return vr->type == VR_RANGE
845 && integer_zerop (vr->min)
846 && integer_zerop (vr->max);
849 /* Return true if max and min of VR are INTEGER_CST. It's not necessarily
850 a singleton. */
852 static inline bool
853 range_int_cst_p (value_range *vr)
855 return (vr->type == VR_RANGE
856 && TREE_CODE (vr->max) == INTEGER_CST
857 && TREE_CODE (vr->min) == INTEGER_CST);
860 /* Return true if VR is an INTEGER_CST singleton. */
862 static inline bool
863 range_int_cst_singleton_p (value_range *vr)
865 return (range_int_cst_p (vr)
866 && !is_overflow_infinity (vr->min)
867 && !is_overflow_infinity (vr->max)
868 && tree_int_cst_equal (vr->min, vr->max));
871 /* Return true if value range VR involves at least one symbol. */
873 static inline bool
874 symbolic_range_p (value_range *vr)
876 return (!is_gimple_min_invariant (vr->min)
877 || !is_gimple_min_invariant (vr->max));
880 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
881 otherwise. We only handle additive operations and set NEG to true if the
882 symbol is negated and INV to the invariant part, if any. */
884 static tree
885 get_single_symbol (tree t, bool *neg, tree *inv)
887 bool neg_;
888 tree inv_;
890 if (TREE_CODE (t) == PLUS_EXPR
891 || TREE_CODE (t) == POINTER_PLUS_EXPR
892 || TREE_CODE (t) == MINUS_EXPR)
894 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
896 neg_ = (TREE_CODE (t) == MINUS_EXPR);
897 inv_ = TREE_OPERAND (t, 0);
898 t = TREE_OPERAND (t, 1);
900 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
902 neg_ = false;
903 inv_ = TREE_OPERAND (t, 1);
904 t = TREE_OPERAND (t, 0);
906 else
907 return NULL_TREE;
909 else
911 neg_ = false;
912 inv_ = NULL_TREE;
915 if (TREE_CODE (t) == NEGATE_EXPR)
917 t = TREE_OPERAND (t, 0);
918 neg_ = !neg_;
921 if (TREE_CODE (t) != SSA_NAME)
922 return NULL_TREE;
924 *neg = neg_;
925 *inv = inv_;
926 return t;
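/* Illustrative results of get_single_symbol:

     x_3 + 16    returns x_3, *NEG = false, *INV = 16
     5 - y_2     returns y_2, *NEG = true,  *INV = 5
     -z_7        returns z_7, *NEG = true,  *INV = NULL_TREE
     a_1 + b_2   returns NULL_TREE (not based on a single symbol)  */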
929 /* The reverse operation: build a symbolic expression with TYPE
930 from symbol SYM, negated according to NEG, and invariant INV. */
932 static tree
933 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
935 const bool pointer_p = POINTER_TYPE_P (type);
936 tree t = sym;
938 if (neg)
939 t = build1 (NEGATE_EXPR, type, t);
941 if (integer_zerop (inv))
942 return t;
944 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
947 /* Return true if value range VR involves exactly one symbol SYM. */
949 static bool
950 symbolic_range_based_on_p (value_range *vr, const_tree sym)
952 bool neg, min_has_symbol, max_has_symbol;
953 tree inv;
955 if (is_gimple_min_invariant (vr->min))
956 min_has_symbol = false;
957 else if (get_single_symbol (vr->min, &neg, &inv) == sym)
958 min_has_symbol = true;
959 else
960 return false;
962 if (is_gimple_min_invariant (vr->max))
963 max_has_symbol = false;
964 else if (get_single_symbol (vr->max, &neg, &inv) == sym)
965 max_has_symbol = true;
966 else
967 return false;
969 return (min_has_symbol || max_has_symbol);
972 /* Return true if value range VR uses an overflow infinity. */
974 static inline bool
975 overflow_infinity_range_p (value_range *vr)
977 return (vr->type == VR_RANGE
978 && (is_overflow_infinity (vr->min)
979 || is_overflow_infinity (vr->max)));
982 /* Return false if we cannot make a valid comparison based on VR;
983 this will be the case if it uses an overflow infinity and overflow
984 is not undefined (i.e., -fno-strict-overflow is in effect).
985 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
986 uses an overflow infinity. */
988 static bool
989 usable_range_p (value_range *vr, bool *strict_overflow_p)
991 gcc_assert (vr->type == VR_RANGE);
992 if (is_overflow_infinity (vr->min))
994 *strict_overflow_p = true;
995 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
996 return false;
998 if (is_overflow_infinity (vr->max))
1000 *strict_overflow_p = true;
1001 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
1002 return false;
1004 return true;
1007 /* Return true if the result of assignment STMT is known to be non-zero.
1008 If the return value is based on the assumption that signed overflow is
1009 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1010 *STRICT_OVERFLOW_P.*/
1012 static bool
1013 gimple_assign_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
1015 enum tree_code code = gimple_assign_rhs_code (stmt);
1016 switch (get_gimple_rhs_class (code))
1018 case GIMPLE_UNARY_RHS:
1019 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1020 gimple_expr_type (stmt),
1021 gimple_assign_rhs1 (stmt),
1022 strict_overflow_p);
1023 case GIMPLE_BINARY_RHS:
1024 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1025 gimple_expr_type (stmt),
1026 gimple_assign_rhs1 (stmt),
1027 gimple_assign_rhs2 (stmt),
1028 strict_overflow_p);
1029 case GIMPLE_TERNARY_RHS:
1030 return false;
1031 case GIMPLE_SINGLE_RHS:
1032 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
1033 strict_overflow_p);
1034 case GIMPLE_INVALID_RHS:
1035 gcc_unreachable ();
1036 default:
1037 gcc_unreachable ();
1041 /* Return true if STMT is known to compute a non-zero value.
1042 If the return value is based on the assumption that signed overflow is
1043 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1044 *STRICT_OVERFLOW_P.*/
1046 static bool
1047 gimple_stmt_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
1049 switch (gimple_code (stmt))
1051 case GIMPLE_ASSIGN:
1052 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
1053 case GIMPLE_CALL:
1055 tree fndecl = gimple_call_fndecl (stmt);
1056 if (!fndecl) return false;
1057 if (flag_delete_null_pointer_checks && !flag_check_new
1058 && DECL_IS_OPERATOR_NEW (fndecl)
1059 && !TREE_NOTHROW (fndecl))
1060 return true;
1061 /* References are always non-NULL. */
1062 if (flag_delete_null_pointer_checks
1063 && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE)
1064 return true;
1065 if (flag_delete_null_pointer_checks &&
1066 lookup_attribute ("returns_nonnull",
1067 TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
1068 return true;
1069 return gimple_alloca_call_p (stmt);
1071 default:
1072 gcc_unreachable ();
1076 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1077 obtained so far. */
1079 static bool
1080 vrp_stmt_computes_nonzero (gimple *stmt, bool *strict_overflow_p)
1082 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1083 return true;
1085 /* If we have an expression of the form &X->a, then the expression
1086 is nonnull if X is nonnull. */
1087 if (is_gimple_assign (stmt)
1088 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1090 tree expr = gimple_assign_rhs1 (stmt);
1091 tree base = get_base_address (TREE_OPERAND (expr, 0));
1093 if (base != NULL_TREE
1094 && TREE_CODE (base) == MEM_REF
1095 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1097 value_range *vr = get_value_range (TREE_OPERAND (base, 0));
1098 if (range_is_nonnull (vr))
1099 return true;
1103 return false;
1106 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1107 a gimple invariant, or SSA_NAME +- CST. */
1109 static bool
1110 valid_value_p (tree expr)
1112 if (TREE_CODE (expr) == SSA_NAME)
1113 return true;
1115 if (TREE_CODE (expr) == PLUS_EXPR
1116 || TREE_CODE (expr) == MINUS_EXPR)
1117 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1118 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1120 return is_gimple_min_invariant (expr);
1123 /* Return
1124 1 if VAL < VAL2
1125 0 if !(VAL < VAL2)
1126 -2 if those are incomparable. */
1127 static inline int
1128 operand_less_p (tree val, tree val2)
1130 /* LT is folded faster than GE and others. Inline the common case. */
1131 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1132 return tree_int_cst_lt (val, val2);
1133 else
1135 tree tcmp;
1137 fold_defer_overflow_warnings ();
1139 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1141 fold_undefer_and_ignore_overflow_warnings ();
1143 if (!tcmp
1144 || TREE_CODE (tcmp) != INTEGER_CST)
1145 return -2;
1147 if (!integer_zerop (tcmp))
1148 return 1;
1151 /* val >= val2, not considering overflow infinity. */
1152 if (is_negative_overflow_infinity (val))
1153 return is_negative_overflow_infinity (val2) ? 0 : 1;
1154 else if (is_positive_overflow_infinity (val2))
1155 return is_positive_overflow_infinity (val) ? 0 : 1;
1157 return 0;
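/* So, for example, operand_less_p (3, 5) is 1 and operand_less_p (5, 3)
   is 0, while a comparison that cannot be folded to a constant, such as
   one between two unrelated symbolic operands, yields -2.  */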
1160 /* Compare two values VAL1 and VAL2. Return
1162 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1163 -1 if VAL1 < VAL2,
1164 0 if VAL1 == VAL2,
1165 +1 if VAL1 > VAL2, and
1166 +2 if VAL1 != VAL2
1168 This is similar to tree_int_cst_compare but supports pointer values
1169 and values that cannot be compared at compile time.
1171 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1172 true if the return value is only valid if we assume that signed
1173 overflow is undefined. */
1175 static int
1176 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1178 if (val1 == val2)
1179 return 0;
1181 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1182 both integers. */
1183 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1184 == POINTER_TYPE_P (TREE_TYPE (val2)));
1186 /* Convert the two values into the same type. This is needed because
1187 sizetype causes sign extension even for unsigned types. */
1188 val2 = fold_convert (TREE_TYPE (val1), val2);
1189 STRIP_USELESS_TYPE_CONVERSION (val2);
1191 const bool overflow_undefined
1192 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
1193 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
1194 tree inv1, inv2;
1195 bool neg1, neg2;
1196 tree sym1 = get_single_symbol (val1, &neg1, &inv1);
1197 tree sym2 = get_single_symbol (val2, &neg2, &inv2);
1199 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
1200 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
1201 if (sym1 && sym2)
1203 /* Both values must use the same name with the same sign. */
1204 if (sym1 != sym2 || neg1 != neg2)
1205 return -2;
1207 /* [-]NAME + CST == [-]NAME + CST. */
1208 if (inv1 == inv2)
1209 return 0;
1211 /* If overflow is defined we cannot simplify more. */
1212 if (!overflow_undefined)
1213 return -2;
1215 if (strict_overflow_p != NULL
1216 && (!inv1 || !TREE_NO_WARNING (val1))
1217 && (!inv2 || !TREE_NO_WARNING (val2)))
1218 *strict_overflow_p = true;
1220 if (!inv1)
1221 inv1 = build_int_cst (TREE_TYPE (val1), 0);
1222 if (!inv2)
1223 inv2 = build_int_cst (TREE_TYPE (val2), 0);
1225 return compare_values_warnv (inv1, inv2, strict_overflow_p);
1228 const bool cst1 = is_gimple_min_invariant (val1);
1229 const bool cst2 = is_gimple_min_invariant (val2);
1231 /* If one is of the form '[-]NAME + CST' and the other is constant, then
1232 it might be possible to say something depending on the constants. */
1233 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
1235 if (!overflow_undefined)
1236 return -2;
1238 if (strict_overflow_p != NULL
1239 && (!sym1 || !TREE_NO_WARNING (val1))
1240 && (!sym2 || !TREE_NO_WARNING (val2)))
1241 *strict_overflow_p = true;
1243 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
1244 tree cst = cst1 ? val1 : val2;
1245 tree inv = cst1 ? inv2 : inv1;
1247 /* Compute the difference between the constants. If it overflows or
1248 underflows, this means that we can trivially compare the NAME with
1249 it and, consequently, the two values with each other. */
1250 wide_int diff = wi::sub (cst, inv);
1251 if (wi::cmp (0, inv, sgn) != wi::cmp (diff, cst, sgn))
1253 const int res = wi::cmp (cst, inv, sgn);
1254 return cst1 ? res : -res;
1257 return -2;
1260 /* We cannot say anything more for non-constants. */
1261 if (!cst1 || !cst2)
1262 return -2;
1264 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1266 /* We cannot compare overflowed values, except for overflow
1267 infinities. */
1268 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1270 if (strict_overflow_p != NULL)
1271 *strict_overflow_p = true;
1272 if (is_negative_overflow_infinity (val1))
1273 return is_negative_overflow_infinity (val2) ? 0 : -1;
1274 else if (is_negative_overflow_infinity (val2))
1275 return 1;
1276 else if (is_positive_overflow_infinity (val1))
1277 return is_positive_overflow_infinity (val2) ? 0 : 1;
1278 else if (is_positive_overflow_infinity (val2))
1279 return -1;
1280 return -2;
1283 return tree_int_cst_compare (val1, val2);
1285 else
1287 tree t;
1289 /* First see if VAL1 and VAL2 are not the same. */
1290 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1291 return 0;
1293 /* If VAL1 is a lower address than VAL2, return -1. */
1294 if (operand_less_p (val1, val2) == 1)
1295 return -1;
1297 /* If VAL1 is a higher address than VAL2, return +1. */
1298 if (operand_less_p (val2, val1) == 1)
1299 return 1;
1301 /* If VAL1 is different than VAL2, return +2.
1302 For integer constants we either have already returned -1 or 1
1303 or they are equivalent. We still might succeed in proving
1304 something about non-trivial operands. */
1305 if (TREE_CODE (val1) != INTEGER_CST
1306 || TREE_CODE (val2) != INTEGER_CST)
1308 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1309 if (t && integer_onep (t))
1310 return 2;
1313 return -2;
1317 /* Compare values like compare_values_warnv, but treat comparisons of
1318 nonconstants which rely on undefined overflow as incomparable. */
1320 static int
1321 compare_values (tree val1, tree val2)
1323 bool sop;
1324 int ret;
1326 sop = false;
1327 ret = compare_values_warnv (val1, val2, &sop);
1328 if (sop
1329 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1330 ret = -2;
1331 return ret;
1335 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1336 0 if VAL is not inside [MIN, MAX],
1337 -2 if we cannot tell either way.
1339 Benchmark compile/20001226-1.c compilation time after changing this
1340 function. */
1342 static inline int
1343 value_inside_range (tree val, tree min, tree max)
1345 int cmp1, cmp2;
1347 cmp1 = operand_less_p (val, min);
1348 if (cmp1 == -2)
1349 return -2;
1350 if (cmp1 == 1)
1351 return 0;
1353 cmp2 = operand_less_p (max, val);
1354 if (cmp2 == -2)
1355 return -2;
1357 return !cmp2;
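/* For example, value_inside_range (5, 0, 10) is 1,
   value_inside_range (20, 0, 10) is 0, and a query whose bounds cannot
   be compared (e.g. a symbolic MIN) yields -2.  */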
1361 /* Return true if value ranges VR0 and VR1 have a non-empty
1362 intersection.
1364 Benchmark compile/20001226-1.c compilation time after changing this
1365 function.
1368 static inline bool
1369 value_ranges_intersect_p (value_range *vr0, value_range *vr1)
1371 /* The value ranges do not intersect if the maximum of the first range is
1372 less than the minimum of the second range or vice versa.
1373 When those relations are unknown, we can't do any better. */
1374 if (operand_less_p (vr0->max, vr1->min) != 0)
1375 return false;
1376 if (operand_less_p (vr1->max, vr0->min) != 0)
1377 return false;
1378 return true;
1382 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1383 include the value zero, -2 if we cannot tell. */
1385 static inline int
1386 range_includes_zero_p (tree min, tree max)
1388 tree zero = build_int_cst (TREE_TYPE (min), 0);
1389 return value_inside_range (zero, min, max);
1392 /* Return true if *VR is known to only contain nonnegative values. */
1394 static inline bool
1395 value_range_nonnegative_p (value_range *vr)
1397 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1398 which would return a useful value should be encoded as a
1399 VR_RANGE. */
1400 if (vr->type == VR_RANGE)
1402 int result = compare_values (vr->min, integer_zero_node);
1403 return (result == 0 || result == 1);
1406 return false;
1409 /* If *VR has a value range that is a single constant value return that,
1410 otherwise return NULL_TREE. */
1412 static tree
1413 value_range_constant_singleton (value_range *vr)
1415 if (vr->type == VR_RANGE
1416 && operand_equal_p (vr->min, vr->max, 0)
1417 && is_gimple_min_invariant (vr->min))
1418 return vr->min;
1420 return NULL_TREE;
1423 /* If OP has a value range with a single constant value return that,
1424 otherwise return NULL_TREE. This returns OP itself if OP is a
1425 constant. */
1427 static tree
1428 op_with_constant_singleton_value_range (tree op)
1430 if (is_gimple_min_invariant (op))
1431 return op;
1433 if (TREE_CODE (op) != SSA_NAME)
1434 return NULL_TREE;
1436 return value_range_constant_singleton (get_value_range (op));
1439 /* Return true if op is in a boolean [0, 1] value-range. */
1441 static bool
1442 op_with_boolean_value_range_p (tree op)
1444 value_range *vr;
1446 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1447 return true;
1449 if (integer_zerop (op)
1450 || integer_onep (op))
1451 return true;
1453 if (TREE_CODE (op) != SSA_NAME)
1454 return false;
1456 vr = get_value_range (op);
1457 return (vr->type == VR_RANGE
1458 && integer_zerop (vr->min)
1459 && integer_onep (vr->max));
1462 /* Extract value range information from an ASSERT_EXPR EXPR and store
1463 it in *VR_P. */
1465 static void
1466 extract_range_from_assert (value_range *vr_p, tree expr)
1468 tree var, cond, limit, min, max, type;
1469 value_range *limit_vr;
1470 enum tree_code cond_code;
1472 var = ASSERT_EXPR_VAR (expr);
1473 cond = ASSERT_EXPR_COND (expr);
1475 gcc_assert (COMPARISON_CLASS_P (cond));
1477 /* Find VAR in the ASSERT_EXPR conditional. */
1478 if (var == TREE_OPERAND (cond, 0)
1479 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1480 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1482 /* If the predicate is of the form VAR COMP LIMIT, then we just
1483 take LIMIT from the RHS and use the same comparison code. */
1484 cond_code = TREE_CODE (cond);
1485 limit = TREE_OPERAND (cond, 1);
1486 cond = TREE_OPERAND (cond, 0);
1488 else
1490 /* If the predicate is of the form LIMIT COMP VAR, then we need
1491 to flip around the comparison code to create the proper range
1492 for VAR. */
1493 cond_code = swap_tree_comparison (TREE_CODE (cond));
1494 limit = TREE_OPERAND (cond, 0);
1495 cond = TREE_OPERAND (cond, 1);
1498 limit = avoid_overflow_infinity (limit);
1500 type = TREE_TYPE (var);
1501 gcc_assert (limit != var);
1503 /* For pointer arithmetic, we only keep track of pointer equality
1504 and inequality. */
1505 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1507 set_value_range_to_varying (vr_p);
1508 return;
1511 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1512 try to use LIMIT's range to avoid creating symbolic ranges
1513 unnecessarily. */
1514 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1516 /* LIMIT's range is only interesting if it has any useful information. */
1517 if (! limit_vr
1518 || limit_vr->type == VR_UNDEFINED
1519 || limit_vr->type == VR_VARYING
1520 || (symbolic_range_p (limit_vr)
1521 && ! (limit_vr->type == VR_RANGE
1522 && (limit_vr->min == limit_vr->max
1523 || operand_equal_p (limit_vr->min, limit_vr->max, 0)))))
1524 limit_vr = NULL;
1526 /* Initially, the new range has the same set of equivalences of
1527 VAR's range. This will be revised before returning the final
1528 value. Since assertions may be chained via mutually exclusive
1529 predicates, we will need to trim the set of equivalences before
1530 we are done. */
1531 gcc_assert (vr_p->equiv == NULL);
1532 add_equivalence (&vr_p->equiv, var);
1534 /* Extract a new range based on the asserted comparison for VAR and
1535 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1536 will only use it for equality comparisons (EQ_EXPR). For any
1537 other kind of assertion, we cannot derive a range from LIMIT's
1538 anti-range that can be used to describe the new range. For
1539 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1540 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1541 no single range for x_2 that could describe LE_EXPR, so we might
1542 as well build the range [b_4, +INF] for it.
1543 One special case we handle is extracting a range from a
1544 range test encoded as (unsigned)var + CST <= limit. */
1545 if (TREE_CODE (cond) == NOP_EXPR
1546 || TREE_CODE (cond) == PLUS_EXPR)
1548 if (TREE_CODE (cond) == PLUS_EXPR)
1550 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1551 TREE_OPERAND (cond, 1));
1552 max = int_const_binop (PLUS_EXPR, limit, min);
1553 cond = TREE_OPERAND (cond, 0);
1555 else
1557 min = build_int_cst (TREE_TYPE (var), 0);
1558 max = limit;
1561 /* Make sure to not set TREE_OVERFLOW on the final type
1562 conversion. We are willingly interpreting large positive
1563 unsigned values as negative signed values here. */
1564 min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
1565 max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
1567 /* We can transform a max, min range to an anti-range or
1568 vice-versa. Use set_and_canonicalize_value_range which does
1569 this for us. */
1570 if (cond_code == LE_EXPR)
1571 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1572 min, max, vr_p->equiv);
1573 else if (cond_code == GT_EXPR)
1574 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1575 min, max, vr_p->equiv);
1576 else
1577 gcc_unreachable ();
1579 else if (cond_code == EQ_EXPR)
1581 enum value_range_type range_type;
1583 if (limit_vr)
1585 range_type = limit_vr->type;
1586 min = limit_vr->min;
1587 max = limit_vr->max;
1589 else
1591 range_type = VR_RANGE;
1592 min = limit;
1593 max = limit;
1596 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1598 /* When asserting the equality VAR == LIMIT and LIMIT is another
1599 SSA name, the new range will also inherit the equivalence set
1600 from LIMIT. */
1601 if (TREE_CODE (limit) == SSA_NAME)
1602 add_equivalence (&vr_p->equiv, limit);
1604 else if (cond_code == NE_EXPR)
1606 /* As described above, when LIMIT's range is an anti-range and
1607 this assertion is an inequality (NE_EXPR), then we cannot
1608 derive anything from the anti-range. For instance, if
1609 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1610 not imply that VAR's range is [0, 0]. So, in the case of
1611 anti-ranges, we just assert the inequality using LIMIT and
1612 not its anti-range.
1614 If LIMIT_VR is a range, we can only use it to build a new
1615 anti-range if LIMIT_VR is a single-valued range. For
1616 instance, if LIMIT_VR is [0, 1], the predicate
1617 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1618 Rather, it means that for value 0 VAR should be ~[0, 0]
1619 and for value 1, VAR should be ~[1, 1]. We cannot
1620 represent these ranges.
1622 The only situation in which we can build a valid
1623 anti-range is when LIMIT_VR is a single-valued range
1624 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1625 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1626 if (limit_vr
1627 && limit_vr->type == VR_RANGE
1628 && compare_values (limit_vr->min, limit_vr->max) == 0)
1630 min = limit_vr->min;
1631 max = limit_vr->max;
1633 else
1635 /* In any other case, we cannot use LIMIT's range to build a
1636 valid anti-range. */
1637 min = max = limit;
1640 /* If MIN and MAX cover the whole range for their type, then
1641 just use the original LIMIT. */
1642 if (INTEGRAL_TYPE_P (type)
1643 && vrp_val_is_min (min)
1644 && vrp_val_is_max (max))
1645 min = max = limit;
1647 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1648 min, max, vr_p->equiv);
1650 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1652 min = TYPE_MIN_VALUE (type);
1654 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1655 max = limit;
1656 else
1658 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1659 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1660 LT_EXPR. */
1661 max = limit_vr->max;
1664 /* If the maximum value forces us to be out of bounds, simply punt.
1665 It would be pointless to try and do anything more since this
1666 all should be optimized away above us. */
1667 if ((cond_code == LT_EXPR
1668 && compare_values (max, min) == 0)
1669 || is_overflow_infinity (max))
1670 set_value_range_to_varying (vr_p);
1671 else
1673 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1674 if (cond_code == LT_EXPR)
1676 if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1677 && !TYPE_UNSIGNED (TREE_TYPE (max)))
1678 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1679 build_int_cst (TREE_TYPE (max), -1));
1680 else
1681 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1682 build_int_cst (TREE_TYPE (max), 1));
1683 if (EXPR_P (max))
1684 TREE_NO_WARNING (max) = 1;
1687 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1690 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1692 max = TYPE_MAX_VALUE (type);
1694 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1695 min = limit;
1696 else
1698 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1699 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1700 GT_EXPR. */
1701 min = limit_vr->min;
1704 /* If the minimum value forces us to be out of bounds, simply punt.
1705 It would be pointless to try and do anything more since this
1706 all should be optimized away above us. */
1707 if ((cond_code == GT_EXPR
1708 && compare_values (min, max) == 0)
1709 || is_overflow_infinity (min))
1710 set_value_range_to_varying (vr_p);
1711 else
1713 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1714 if (cond_code == GT_EXPR)
1716 if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1717 && !TYPE_UNSIGNED (TREE_TYPE (min)))
1718 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1719 build_int_cst (TREE_TYPE (min), -1));
1720 else
1721 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1722 build_int_cst (TREE_TYPE (min), 1));
1723 if (EXPR_P (min))
1724 TREE_NO_WARNING (min) = 1;
1727 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1730 else
1731 gcc_unreachable ();
1733 /* Finally intersect the new range with what we already know about var. */
1734 vrp_intersect_ranges (vr_p, get_value_range (var));
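/* As a concrete illustration, for a signed 32-bit x_2 the expression

     ASSERT_EXPR <x_2, x_2 <= 10>

   takes the LE_EXPR path above and produces [INT_MIN, 10], which is
   then intersected with whatever was already known about x_2.
   Similarly, ASSERT_EXPR <p_5, p_5 != 0B> for a pointer produces the
   anti-range ~[0, 0].  */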
1738 /* Extract range information from SSA name VAR and store it in VR. If
1739 VAR has an interesting range, use it. Otherwise, create the
1740 range [VAR, VAR] and return it. This is useful in situations where
1741 we may have conditionals testing values of VARYING names. For
1742 instance,
1744 x_3 = y_5;
1745 if (x_3 > y_5)
1748 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1749 always false. */
1751 static void
1752 extract_range_from_ssa_name (value_range *vr, tree var)
1754 value_range *var_vr = get_value_range (var);
1756 if (var_vr->type != VR_VARYING)
1757 copy_value_range (vr, var_vr);
1758 else
1759 set_value_range (vr, VR_RANGE, var, var, NULL);
1761 add_equivalence (&vr->equiv, var);
1765 /* Wrapper around int_const_binop. If the operation overflows and we
1766 are not using wrapping arithmetic, then adjust the result to be
1767 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1768 NULL_TREE if we need to use an overflow infinity representation but
1769 the type does not support it. */
1771 static tree
1772 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1774 tree res;
1776 res = int_const_binop (code, val1, val2);
1778 /* If we are using unsigned arithmetic, operate symbolically
1779 on -INF and +INF as int_const_binop only handles signed overflow. */
1780 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1782 int checkz = compare_values (res, val1);
1783 bool overflow = false;
1785 /* Ensure that res = val1 [+*] val2 >= val1
1786 or that res = val1 - val2 <= val1. */
1787 if ((code == PLUS_EXPR
1788 && !(checkz == 1 || checkz == 0))
1789 || (code == MINUS_EXPR
1790 && !(checkz == 0 || checkz == -1)))
1792 overflow = true;
1794 /* Checking for multiplication overflow is done by dividing the
1795 output of the multiplication by the first input of the
1796 multiplication. If the result of that division operation is
1797 not equal to the second input of the multiplication, then the
1798 multiplication overflowed. */
1799 else if (code == MULT_EXPR && !integer_zerop (val1))
1801 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1802 res,
1803 val1);
1804 int check = compare_values (tmp, val2);
1806 if (check != 0)
1807 overflow = true;
1810 if (overflow)
1812 res = copy_node (res);
1813 TREE_OVERFLOW (res) = 1;
1817 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1818 /* If the signed operation wraps then int_const_binop has done
1819 everything we want. */
1821 /* Signed division of -1/0 overflows and by the time it gets here
1822 returns NULL_TREE. */
1823 else if (!res)
1824 return NULL_TREE;
1825 else if ((TREE_OVERFLOW (res)
1826 && !TREE_OVERFLOW (val1)
1827 && !TREE_OVERFLOW (val2))
1828 || is_overflow_infinity (val1)
1829 || is_overflow_infinity (val2))
1831 /* If the operation overflowed but neither VAL1 nor VAL2 are
1832 overflown, return -INF or +INF depending on the operation
1833 and the combination of signs of the operands. */
1834 int sgn1 = tree_int_cst_sgn (val1);
1835 int sgn2 = tree_int_cst_sgn (val2);
1837 if (needs_overflow_infinity (TREE_TYPE (res))
1838 && !supports_overflow_infinity (TREE_TYPE (res)))
1839 return NULL_TREE;
1841 /* We have to punt on adding infinities of different signs,
1842 since we can't tell what the sign of the result should be.
1843 Likewise for subtracting infinities of the same sign. */
1844 if (((code == PLUS_EXPR && sgn1 != sgn2)
1845 || (code == MINUS_EXPR && sgn1 == sgn2))
1846 && is_overflow_infinity (val1)
1847 && is_overflow_infinity (val2))
1848 return NULL_TREE;
1850 /* Don't try to handle division or shifting of infinities. */
1851 if ((code == TRUNC_DIV_EXPR
1852 || code == FLOOR_DIV_EXPR
1853 || code == CEIL_DIV_EXPR
1854 || code == EXACT_DIV_EXPR
1855 || code == ROUND_DIV_EXPR
1856 || code == RSHIFT_EXPR)
1857 && (is_overflow_infinity (val1)
1858 || is_overflow_infinity (val2)))
1859 return NULL_TREE;
1861 /* Notice that we only need to handle the restricted set of
1862 operations handled by extract_range_from_binary_expr.
1863 Among them, only multiplication, addition and subtraction
1864 can yield overflow without overflown operands because we
1865 are working with integral types only... except in the
1866 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1867 for division too. */
1869 /* For multiplication, the sign of the overflow is given
1870 by the comparison of the signs of the operands. */
1871 if ((code == MULT_EXPR && sgn1 == sgn2)
1872 /* For addition, the operands must be of the same sign
1873 to yield an overflow. Its sign is therefore that
1874 of one of the operands, for example the first. For
1875 infinite operands X + -INF is negative, not positive. */
1876 || (code == PLUS_EXPR
1877 && (sgn1 >= 0
1878 ? !is_negative_overflow_infinity (val2)
1879 : is_positive_overflow_infinity (val2)))
1880 /* For subtraction, non-infinite operands must be of
1881 different signs to yield an overflow. Its sign is
1882 therefore that of the first operand or the opposite of
1883 that of the second operand. A first operand of 0 counts
1884 as positive here, for the corner case 0 - (-INF), which
1885 overflows, but must yield +INF. For infinite operands 0
1886 - INF is negative, not positive. */
1887 || (code == MINUS_EXPR
1888 && (sgn1 >= 0
1889 ? !is_positive_overflow_infinity (val2)
1890 : is_negative_overflow_infinity (val2)))
1891 /* We only get in here with positive shift count, so the
1892 overflow direction is the same as the sign of val1.
1893 Actually rshift does not overflow at all, but we only
1894 handle the case of shifting overflowed -INF and +INF. */
1895 || (code == RSHIFT_EXPR
1896 && sgn1 >= 0)
1897 /* For division, the only case is -INF / -1 = +INF. */
1898 || code == TRUNC_DIV_EXPR
1899 || code == FLOOR_DIV_EXPR
1900 || code == CEIL_DIV_EXPR
1901 || code == EXACT_DIV_EXPR
1902 || code == ROUND_DIV_EXPR)
1903 return (needs_overflow_infinity (TREE_TYPE (res))
1904 ? positive_overflow_infinity (TREE_TYPE (res))
1905 : TYPE_MAX_VALUE (TREE_TYPE (res)));
1906 else
1907 return (needs_overflow_infinity (TREE_TYPE (res))
1908 ? negative_overflow_infinity (TREE_TYPE (res))
1909 : TYPE_MIN_VALUE (TREE_TYPE (res)));
1912 return res;
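/* For instance, in an unsigned 8-bit type vrp_int_const_binop
   (PLUS_EXPR, 200, 100) computes 44, which compares less than the
   first operand, so the overflow check above fires and the result is
   returned with TREE_OVERFLOW set.  For a signed type with undefined
   overflow, an overflowing addition of two positive values instead
   yields a positive overflow infinity (or NULL_TREE when the type
   cannot represent one).  */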
1916 /* For range VR compute two wide_int bitmasks. If a bit is unset in
1917 *MAY_BE_NONZERO, that bit is 0 for all numbers in the range;
1918 otherwise it might be 0 or 1. If a bit is set in *MUST_BE_NONZERO,
1919 that bit is 1 for all numbers in the range; otherwise it might be
1920 0 or 1. */
1922 static bool
1923 zero_nonzero_bits_from_vr (const tree expr_type,
1924 value_range *vr,
1925 wide_int *may_be_nonzero,
1926 wide_int *must_be_nonzero)
1928 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
1929 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
1930 if (!range_int_cst_p (vr)
1931 || is_overflow_infinity (vr->min)
1932 || is_overflow_infinity (vr->max))
1933 return false;
1935 if (range_int_cst_singleton_p (vr))
1937 *may_be_nonzero = vr->min;
1938 *must_be_nonzero = *may_be_nonzero;
1940 else if (tree_int_cst_sgn (vr->min) >= 0
1941 || tree_int_cst_sgn (vr->max) < 0)
1943 wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
1944 *may_be_nonzero = wi::bit_or (vr->min, vr->max);
1945 *must_be_nonzero = wi::bit_and (vr->min, vr->max);
1946 if (xor_mask != 0)
1948 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
1949 may_be_nonzero->get_precision ());
1950 *may_be_nonzero = *may_be_nonzero | mask;
1951 *must_be_nonzero = must_be_nonzero->and_not (mask);
1955 return true;
1958 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
1959 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
1960 false otherwise. If *AR can be represented with a single range
1961 *VR1 will be VR_UNDEFINED. */
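/* For example (illustration only, assuming a 32-bit signed int): the
anti-range ~[3, 5] splits into [INT_MIN, 2] and [6, INT_MAX], while
~[INT_MIN, 5] is representable by the single range [6, INT_MAX] and
*VR1 is left VR_UNDEFINED. */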
1963 static bool
1964 ranges_from_anti_range (value_range *ar,
1965 value_range *vr0, value_range *vr1)
1967 tree type = TREE_TYPE (ar->min);
1969 vr0->type = VR_UNDEFINED;
1970 vr1->type = VR_UNDEFINED;
1972 if (ar->type != VR_ANTI_RANGE
1973 || TREE_CODE (ar->min) != INTEGER_CST
1974 || TREE_CODE (ar->max) != INTEGER_CST
1975 || !vrp_val_min (type)
1976 || !vrp_val_max (type))
1977 return false;
1979 if (!vrp_val_is_min (ar->min))
1981 vr0->type = VR_RANGE;
1982 vr0->min = vrp_val_min (type);
1983 vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1));
1985 if (!vrp_val_is_max (ar->max))
1987 vr1->type = VR_RANGE;
1988 vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1));
1989 vr1->max = vrp_val_max (type);
1991 if (vr0->type == VR_UNDEFINED)
1993 *vr0 = *vr1;
1994 vr1->type = VR_UNDEFINED;
1997 return vr0->type != VR_UNDEFINED;
2000 /* Helper to extract a value-range *VR for a multiplicative operation
2001 *VR0 CODE *VR1. */
2003 static void
2004 extract_range_from_multiplicative_op_1 (value_range *vr,
2005 enum tree_code code,
2006 value_range *vr0, value_range *vr1)
2008 enum value_range_type type;
2009 tree val[4];
2010 size_t i;
2011 tree min, max;
2012 bool sop;
2013 int cmp;
2015 /* Multiplications, divisions and shifts are a bit tricky to handle,
2016 depending on the mix of signs we have in the two ranges, we
2017 need to operate on different values to get the minimum and
2018 maximum values for the new range. One approach is to figure
2019 out all the variations of range combinations and do the
2020 operations.
2022 However, this involves several calls to compare_values and it
2023 is pretty convoluted. It's simpler to do the 4 operations
2024 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
2025 and then figure the smallest and largest values to form
2026 the new range. */
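/* As a concrete illustration (not from the sources): for [2, 3] * [4, 5]
the four cross products are 8, 10, 12 and 15, so the resulting range
is [8, 15]. */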
2027 gcc_assert (code == MULT_EXPR
2028 || code == TRUNC_DIV_EXPR
2029 || code == FLOOR_DIV_EXPR
2030 || code == CEIL_DIV_EXPR
2031 || code == EXACT_DIV_EXPR
2032 || code == ROUND_DIV_EXPR
2033 || code == RSHIFT_EXPR
2034 || code == LSHIFT_EXPR);
2035 gcc_assert ((vr0->type == VR_RANGE
2036 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2037 && vr0->type == vr1->type);
2039 type = vr0->type;
2041 /* Compute the 4 cross operations. */
2042 sop = false;
2043 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2044 if (val[0] == NULL_TREE)
2045 sop = true;
2047 if (vr1->max == vr1->min)
2048 val[1] = NULL_TREE;
2049 else
2051 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2052 if (val[1] == NULL_TREE)
2053 sop = true;
2056 if (vr0->max == vr0->min)
2057 val[2] = NULL_TREE;
2058 else
2060 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2061 if (val[2] == NULL_TREE)
2062 sop = true;
2065 if (vr0->min == vr0->max || vr1->min == vr1->max)
2066 val[3] = NULL_TREE;
2067 else
2069 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2070 if (val[3] == NULL_TREE)
2071 sop = true;
2074 if (sop)
2076 set_value_range_to_varying (vr);
2077 return;
2080 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2081 of VAL[i]. */
2082 min = val[0];
2083 max = val[0];
2084 for (i = 1; i < 4; i++)
2086 if (!is_gimple_min_invariant (min)
2087 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2088 || !is_gimple_min_invariant (max)
2089 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2090 break;
2092 if (val[i])
2094 if (!is_gimple_min_invariant (val[i])
2095 || (TREE_OVERFLOW (val[i])
2096 && !is_overflow_infinity (val[i])))
2098 /* If we found an overflowed value, set MIN and MAX
2099 to it so that we set the resulting range to
2100 VARYING. */
2101 min = max = val[i];
2102 break;
2105 if (compare_values (val[i], min) == -1)
2106 min = val[i];
2108 if (compare_values (val[i], max) == 1)
2109 max = val[i];
2113 /* If either MIN or MAX overflowed, then set the resulting range to
2114 VARYING. But we do accept an overflow infinity
2115 representation. */
2116 if (min == NULL_TREE
2117 || !is_gimple_min_invariant (min)
2118 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2119 || max == NULL_TREE
2120 || !is_gimple_min_invariant (max)
2121 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2123 set_value_range_to_varying (vr);
2124 return;
2127 /* We punt if:
2128 1) [-INF, +INF]
2129 2) [-INF, +-INF(OVF)]
2130 3) [+-INF(OVF), +INF]
2131 4) [+-INF(OVF), +-INF(OVF)]
2132 We learn nothing when we have INF and INF(OVF) on both sides.
2133 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2134 overflow. */
2135 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2136 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2138 set_value_range_to_varying (vr);
2139 return;
2142 cmp = compare_values (min, max);
2143 if (cmp == -2 || cmp == 1)
2145 /* If the new range has its limits swapped around (MIN > MAX),
2146 then the operation caused one of them to wrap around, mark
2147 the new range VARYING. */
2148 set_value_range_to_varying (vr);
2150 else
2151 set_value_range (vr, type, min, max, NULL);
2154 /* Extract range information from a binary operation CODE based on
2155 the ranges of each of its operands *VR0 and *VR1 with resulting
2156 type EXPR_TYPE. The resulting range is stored in *VR. */
2158 static void
2159 extract_range_from_binary_expr_1 (value_range *vr,
2160 enum tree_code code, tree expr_type,
2161 value_range *vr0_, value_range *vr1_)
2163 value_range vr0 = *vr0_, vr1 = *vr1_;
2164 value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
2165 enum value_range_type type;
2166 tree min = NULL_TREE, max = NULL_TREE;
2167 int cmp;
2169 if (!INTEGRAL_TYPE_P (expr_type)
2170 && !POINTER_TYPE_P (expr_type))
2172 set_value_range_to_varying (vr);
2173 return;
2176 /* Not all binary expressions can be applied to ranges in a
2177 meaningful way. Handle only arithmetic operations. */
2178 if (code != PLUS_EXPR
2179 && code != MINUS_EXPR
2180 && code != POINTER_PLUS_EXPR
2181 && code != MULT_EXPR
2182 && code != TRUNC_DIV_EXPR
2183 && code != FLOOR_DIV_EXPR
2184 && code != CEIL_DIV_EXPR
2185 && code != EXACT_DIV_EXPR
2186 && code != ROUND_DIV_EXPR
2187 && code != TRUNC_MOD_EXPR
2188 && code != RSHIFT_EXPR
2189 && code != LSHIFT_EXPR
2190 && code != MIN_EXPR
2191 && code != MAX_EXPR
2192 && code != BIT_AND_EXPR
2193 && code != BIT_IOR_EXPR
2194 && code != BIT_XOR_EXPR)
2196 set_value_range_to_varying (vr);
2197 return;
2200 /* If both ranges are UNDEFINED, so is the result. */
2201 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
2203 set_value_range_to_undefined (vr);
2204 return;
2206 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2207 code. At some point we may want to special-case operations that
2208 have UNDEFINED result for all or some value-ranges of the not UNDEFINED
2209 operand. */
2210 else if (vr0.type == VR_UNDEFINED)
2211 set_value_range_to_varying (&vr0);
2212 else if (vr1.type == VR_UNDEFINED)
2213 set_value_range_to_varying (&vr1);
2215 /* Now canonicalize anti-ranges to ranges when they are not symbolic
2216 and express ~[] op X as ([]' op X) U ([]'' op X). */
2217 if (vr0.type == VR_ANTI_RANGE
2218 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2220 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
2221 if (vrtem1.type != VR_UNDEFINED)
2223 value_range vrres = VR_INITIALIZER;
2224 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2225 &vrtem1, vr1_);
2226 vrp_meet (vr, &vrres);
2228 return;
2230 /* Likewise for X op ~[]. */
2231 if (vr1.type == VR_ANTI_RANGE
2232 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
2234 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
2235 if (vrtem1.type != VR_UNDEFINED)
2237 value_range vrres = VR_INITIALIZER;
2238 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2239 vr0_, &vrtem1);
2240 vrp_meet (vr, &vrres);
2242 return;
2245 /* The type of the resulting value range defaults to VR0.TYPE. */
2246 type = vr0.type;
2248 /* Refuse to operate on VARYING ranges, ranges of different kinds
2249 and symbolic ranges. As an exception, we allow BIT_{AND,IOR}
2250 because we may be able to derive a useful range even if one of
2251 the operands is VR_VARYING or symbolic range. Similarly for
2252 divisions, MIN/MAX and PLUS/MINUS.
2254 TODO, we may be able to derive anti-ranges in some cases. */
2255 if (code != BIT_AND_EXPR
2256 && code != BIT_IOR_EXPR
2257 && code != TRUNC_DIV_EXPR
2258 && code != FLOOR_DIV_EXPR
2259 && code != CEIL_DIV_EXPR
2260 && code != EXACT_DIV_EXPR
2261 && code != ROUND_DIV_EXPR
2262 && code != TRUNC_MOD_EXPR
2263 && code != MIN_EXPR
2264 && code != MAX_EXPR
2265 && code != PLUS_EXPR
2266 && code != MINUS_EXPR
2267 && code != RSHIFT_EXPR
2268 && (vr0.type == VR_VARYING
2269 || vr1.type == VR_VARYING
2270 || vr0.type != vr1.type
2271 || symbolic_range_p (&vr0)
2272 || symbolic_range_p (&vr1)))
2274 set_value_range_to_varying (vr);
2275 return;
2278 /* Now evaluate the expression to determine the new range. */
2279 if (POINTER_TYPE_P (expr_type))
2281 if (code == MIN_EXPR || code == MAX_EXPR)
2283 /* For MIN/MAX expressions with pointers, we only care about
2284 nullness, if both are non null, then the result is nonnull.
2285 If both are null, then the result is null. Otherwise they
2286 are varying. */
2287 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2288 set_value_range_to_nonnull (vr, expr_type);
2289 else if (range_is_null (&vr0) && range_is_null (&vr1))
2290 set_value_range_to_null (vr, expr_type);
2291 else
2292 set_value_range_to_varying (vr);
2294 else if (code == POINTER_PLUS_EXPR)
2296 /* For pointer types, we are really only interested in asserting
2297 whether the expression evaluates to non-NULL. */
2298 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2299 set_value_range_to_nonnull (vr, expr_type);
2300 else if (range_is_null (&vr0) && range_is_null (&vr1))
2301 set_value_range_to_null (vr, expr_type);
2302 else
2303 set_value_range_to_varying (vr);
2305 else if (code == BIT_AND_EXPR)
2307 /* For pointer types, we are really only interested in asserting
2308 whether the expression evaluates to non-NULL. */
2309 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2310 set_value_range_to_nonnull (vr, expr_type);
2311 else if (range_is_null (&vr0) || range_is_null (&vr1))
2312 set_value_range_to_null (vr, expr_type);
2313 else
2314 set_value_range_to_varying (vr);
2316 else
2317 set_value_range_to_varying (vr);
2319 return;
2322 /* For integer ranges, apply the operation to each end of the
2323 range and see what we end up with. */
2324 if (code == PLUS_EXPR || code == MINUS_EXPR)
2326 const bool minus_p = (code == MINUS_EXPR);
2327 tree min_op0 = vr0.min;
2328 tree min_op1 = minus_p ? vr1.max : vr1.min;
2329 tree max_op0 = vr0.max;
2330 tree max_op1 = minus_p ? vr1.min : vr1.max;
2331 tree sym_min_op0 = NULL_TREE;
2332 tree sym_min_op1 = NULL_TREE;
2333 tree sym_max_op0 = NULL_TREE;
2334 tree sym_max_op1 = NULL_TREE;
2335 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
2337 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
2338 single-symbolic ranges, try to compute the precise resulting range,
2339 but only if we know that this resulting range will also be constant
2340 or single-symbolic. */
2341 if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
2342 && (TREE_CODE (min_op0) == INTEGER_CST
2343 || (sym_min_op0
2344 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
2345 && (TREE_CODE (min_op1) == INTEGER_CST
2346 || (sym_min_op1
2347 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
2348 && (!(sym_min_op0 && sym_min_op1)
2349 || (sym_min_op0 == sym_min_op1
2350 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
2351 && (TREE_CODE (max_op0) == INTEGER_CST
2352 || (sym_max_op0
2353 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
2354 && (TREE_CODE (max_op1) == INTEGER_CST
2355 || (sym_max_op1
2356 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
2357 && (!(sym_max_op0 && sym_max_op1)
2358 || (sym_max_op0 == sym_max_op1
2359 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
2361 const signop sgn = TYPE_SIGN (expr_type);
2362 const unsigned int prec = TYPE_PRECISION (expr_type);
2363 wide_int type_min, type_max, wmin, wmax;
2364 int min_ovf = 0;
2365 int max_ovf = 0;
2367 /* Get the lower and upper bounds of the type. */
2368 if (TYPE_OVERFLOW_WRAPS (expr_type))
2370 type_min = wi::min_value (prec, sgn);
2371 type_max = wi::max_value (prec, sgn);
2373 else
2375 type_min = vrp_val_min (expr_type);
2376 type_max = vrp_val_max (expr_type);
2379 /* Combine the lower bounds, if any. */
2380 if (min_op0 && min_op1)
2382 if (minus_p)
2384 wmin = wi::sub (min_op0, min_op1);
2386 /* Check for overflow. */
2387 if (wi::cmp (0, min_op1, sgn)
2388 != wi::cmp (wmin, min_op0, sgn))
2389 min_ovf = wi::cmp (min_op0, min_op1, sgn);
2391 else
2393 wmin = wi::add (min_op0, min_op1);
2395 /* Check for overflow. */
2396 if (wi::cmp (min_op1, 0, sgn)
2397 != wi::cmp (wmin, min_op0, sgn))
2398 min_ovf = wi::cmp (min_op0, wmin, sgn);
2401 else if (min_op0)
2402 wmin = min_op0;
2403 else if (min_op1)
2404 wmin = minus_p ? wi::neg (min_op1) : min_op1;
2405 else
2406 wmin = wi::shwi (0, prec);
2408 /* Combine the upper bounds, if any. */
2409 if (max_op0 && max_op1)
2411 if (minus_p)
2413 wmax = wi::sub (max_op0, max_op1);
2415 /* Check for overflow. */
2416 if (wi::cmp (0, max_op1, sgn)
2417 != wi::cmp (wmax, max_op0, sgn))
2418 max_ovf = wi::cmp (max_op0, max_op1, sgn);
2420 else
2422 wmax = wi::add (max_op0, max_op1);
2424 if (wi::cmp (max_op1, 0, sgn)
2425 != wi::cmp (wmax, max_op0, sgn))
2426 max_ovf = wi::cmp (max_op0, wmax, sgn);
2429 else if (max_op0)
2430 wmax = max_op0;
2431 else if (max_op1)
2432 wmax = minus_p ? wi::neg (max_op1) : max_op1;
2433 else
2434 wmax = wi::shwi (0, prec);
2436 /* Check for type overflow. */
2437 if (min_ovf == 0)
2439 if (wi::cmp (wmin, type_min, sgn) == -1)
2440 min_ovf = -1;
2441 else if (wi::cmp (wmin, type_max, sgn) == 1)
2442 min_ovf = 1;
2444 if (max_ovf == 0)
2446 if (wi::cmp (wmax, type_min, sgn) == -1)
2447 max_ovf = -1;
2448 else if (wi::cmp (wmax, type_max, sgn) == 1)
2449 max_ovf = 1;
2452 /* If we have overflow for the constant part and the resulting
2453 range will be symbolic, drop to VR_VARYING. */
2454 if ((min_ovf && sym_min_op0 != sym_min_op1)
2455 || (max_ovf && sym_max_op0 != sym_max_op1))
2457 set_value_range_to_varying (vr);
2458 return;
2461 if (TYPE_OVERFLOW_WRAPS (expr_type))
2463 /* If overflow wraps, truncate the values and adjust the
2464 range kind and bounds appropriately. */
2465 wide_int tmin = wide_int::from (wmin, prec, sgn);
2466 wide_int tmax = wide_int::from (wmax, prec, sgn);
2467 if (min_ovf == max_ovf)
2469 /* No overflow, or both bounds overflowed or both underflowed.
2470 The range kind stays VR_RANGE. */
2471 min = wide_int_to_tree (expr_type, tmin);
2472 max = wide_int_to_tree (expr_type, tmax);
2474 else if ((min_ovf == -1 && max_ovf == 0)
2475 || (max_ovf == 1 && min_ovf == 0))
2477 /* Min underflow or max overflow. The range kind
2478 changes to VR_ANTI_RANGE. */
2479 bool covers = false;
2480 wide_int tem = tmin;
2481 type = VR_ANTI_RANGE;
2482 tmin = tmax + 1;
2483 if (wi::cmp (tmin, tmax, sgn) < 0)
2484 covers = true;
2485 tmax = tem - 1;
2486 if (wi::cmp (tmax, tem, sgn) > 0)
2487 covers = true;
2488 /* If the anti-range would cover nothing, drop to varying.
2489 Likewise if the anti-range bounds are outside of the
2490 type's values. */
2491 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
2493 set_value_range_to_varying (vr);
2494 return;
2496 min = wide_int_to_tree (expr_type, tmin);
2497 max = wide_int_to_tree (expr_type, tmax);
2499 else
2501 /* Other underflow and/or overflow, drop to VR_VARYING. */
2502 set_value_range_to_varying (vr);
2503 return;
2506 else
2508 /* If overflow does not wrap, saturate to the type's min/max
2509 value. */
2510 if (min_ovf == -1)
2512 if (needs_overflow_infinity (expr_type)
2513 && supports_overflow_infinity (expr_type))
2514 min = negative_overflow_infinity (expr_type);
2515 else
2516 min = wide_int_to_tree (expr_type, type_min);
2518 else if (min_ovf == 1)
2520 if (needs_overflow_infinity (expr_type)
2521 && supports_overflow_infinity (expr_type))
2522 min = positive_overflow_infinity (expr_type);
2523 else
2524 min = wide_int_to_tree (expr_type, type_max);
2526 else
2527 min = wide_int_to_tree (expr_type, wmin);
2529 if (max_ovf == -1)
2531 if (needs_overflow_infinity (expr_type)
2532 && supports_overflow_infinity (expr_type))
2533 max = negative_overflow_infinity (expr_type);
2534 else
2535 max = wide_int_to_tree (expr_type, type_min);
2537 else if (max_ovf == 1)
2539 if (needs_overflow_infinity (expr_type)
2540 && supports_overflow_infinity (expr_type))
2541 max = positive_overflow_infinity (expr_type);
2542 else
2543 max = wide_int_to_tree (expr_type, type_max);
2545 else
2546 max = wide_int_to_tree (expr_type, wmax);
2549 if (needs_overflow_infinity (expr_type)
2550 && supports_overflow_infinity (expr_type))
2552 if ((min_op0 && is_negative_overflow_infinity (min_op0))
2553 || (min_op1
2554 && (minus_p
2555 ? is_positive_overflow_infinity (min_op1)
2556 : is_negative_overflow_infinity (min_op1))))
2557 min = negative_overflow_infinity (expr_type);
2558 if ((max_op0 && is_positive_overflow_infinity (max_op0))
2559 || (max_op1
2560 && (minus_p
2561 ? is_negative_overflow_infinity (max_op1)
2562 : is_positive_overflow_infinity (max_op1))))
2563 max = positive_overflow_infinity (expr_type);
2566 /* If the result lower bound is constant, we're done;
2567 otherwise, build the symbolic lower bound. */
2568 if (sym_min_op0 == sym_min_op1)
2570 else if (sym_min_op0)
2571 min = build_symbolic_expr (expr_type, sym_min_op0,
2572 neg_min_op0, min);
2573 else if (sym_min_op1)
2574 min = build_symbolic_expr (expr_type, sym_min_op1,
2575 neg_min_op1 ^ minus_p, min);
2577 /* Likewise for the upper bound. */
2578 if (sym_max_op0 == sym_max_op1)
2580 else if (sym_max_op0)
2581 max = build_symbolic_expr (expr_type, sym_max_op0,
2582 neg_max_op0, max);
2583 else if (sym_max_op1)
2584 max = build_symbolic_expr (expr_type, sym_max_op1,
2585 neg_max_op1 ^ minus_p, max);
2587 else
2589 /* For other cases, for example if we have a PLUS_EXPR with two
2590 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
2591 to compute a precise range for such a case.
2592 ??? In general, even mixed range-kind operations can be expressed
2593 by, for example, transforming ~[3, 5] + [1, 2] into range-only
2594 operations and a union primitive:
2595 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
2596 [-INF+1, 4] U [6, +INF(OVF)]
2597 though usually the union is not exactly representable with
2598 a single range or anti-range as the above is
2599 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
2600 but one could use a scheme similar to equivalences for this. */
2601 set_value_range_to_varying (vr);
2602 return;
2605 else if (code == MIN_EXPR
2606 || code == MAX_EXPR)
2608 if (vr0.type == VR_RANGE
2609 && !symbolic_range_p (&vr0))
2611 type = VR_RANGE;
2612 if (vr1.type == VR_RANGE
2613 && !symbolic_range_p (&vr1))
2615 /* For operations that make the resulting range directly
2616 proportional to the original ranges, apply the operation to
2617 the same end of each range. */
2618 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2619 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2621 else if (code == MIN_EXPR)
2623 min = vrp_val_min (expr_type);
2624 max = vr0.max;
2626 else if (code == MAX_EXPR)
2628 min = vr0.min;
2629 max = vrp_val_max (expr_type);
2632 else if (vr1.type == VR_RANGE
2633 && !symbolic_range_p (&vr1))
2635 type = VR_RANGE;
2636 if (code == MIN_EXPR)
2638 min = vrp_val_min (expr_type);
2639 max = vr1.max;
2641 else if (code == MAX_EXPR)
2643 min = vr1.min;
2644 max = vrp_val_max (expr_type);
2647 else
2649 set_value_range_to_varying (vr);
2650 return;
2653 else if (code == MULT_EXPR)
2655 /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
2656 drop to varying. This test requires 2*prec bits if both
2657 operands are signed and 2*prec + 2 bits if either is not. */
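/* For instance (illustration only): with a 32-bit unsigned type,
[-3, -1] is really [0xfffffffd, 0xffffffff]; canonicalizing the
operands and multiplying in the wider precision gives the product
range [1, 9], whose extent is far below the type size, so the
result does not drop to varying. */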
2659 signop sign = TYPE_SIGN (expr_type);
2660 unsigned int prec = TYPE_PRECISION (expr_type);
2662 if (range_int_cst_p (&vr0)
2663 && range_int_cst_p (&vr1)
2664 && TYPE_OVERFLOW_WRAPS (expr_type))
2666 typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
2667 typedef generic_wide_int
2668 <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
2669 vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
2670 vrp_int size = sizem1 + 1;
2672 /* Extend the values, using the sign of the result, to the wider
2673 vrp_int precision. From here on out, everything is just signed
2674 math no matter what the input types were. */
2675 vrp_int min0 = vrp_int_cst (vr0.min);
2676 vrp_int max0 = vrp_int_cst (vr0.max);
2677 vrp_int min1 = vrp_int_cst (vr1.min);
2678 vrp_int max1 = vrp_int_cst (vr1.max);
2679 /* Canonicalize the intervals. */
2680 if (sign == UNSIGNED)
2682 if (wi::ltu_p (size, min0 + max0))
2684 min0 -= size;
2685 max0 -= size;
2688 if (wi::ltu_p (size, min1 + max1))
2690 min1 -= size;
2691 max1 -= size;
2695 vrp_int prod0 = min0 * min1;
2696 vrp_int prod1 = min0 * max1;
2697 vrp_int prod2 = max0 * min1;
2698 vrp_int prod3 = max0 * max1;
2700 /* Sort the 4 products so that min is in prod0 and max is in
2701 prod3. */
2702 /* min0min1 > max0max1 */
2703 if (prod0 > prod3)
2704 std::swap (prod0, prod3);
2706 /* min0max1 > max0min1 */
2707 if (prod1 > prod2)
2708 std::swap (prod1, prod2);
2710 if (prod0 > prod1)
2711 std::swap (prod0, prod1);
2713 if (prod2 > prod3)
2714 std::swap (prod2, prod3);
2716 /* diff = max - min. */
2717 prod2 = prod3 - prod0;
2718 if (wi::geu_p (prod2, sizem1))
2720 /* the range covers all values. */
2721 set_value_range_to_varying (vr);
2722 return;
2725 /* The following should handle the wrapping and selecting
2726 VR_ANTI_RANGE for us. */
2727 min = wide_int_to_tree (expr_type, prod0);
2728 max = wide_int_to_tree (expr_type, prod3);
2729 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
2730 return;
2733 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2734 drop to VR_VARYING. It would take more effort to compute a
2735 precise range for such a case. For example, if we have
2736 op0 == 65536 and op1 == 65536 with their ranges both being
2737 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2738 we cannot claim that the product is in ~[0,0]. Note that we
2739 are guaranteed to have vr0.type == vr1.type at this
2740 point. */
2741 if (vr0.type == VR_ANTI_RANGE
2742 && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2744 set_value_range_to_varying (vr);
2745 return;
2748 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2749 return;
2751 else if (code == RSHIFT_EXPR
2752 || code == LSHIFT_EXPR)
2754 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2755 then drop to VR_VARYING. Outside of this range we get undefined
2756 behavior from the shift operation. We cannot even trust
2757 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2758 shifts, and the operation at the tree level may be widened. */
2759 if (range_int_cst_p (&vr1)
2760 && compare_tree_int (vr1.min, 0) >= 0
2761 && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
2763 if (code == RSHIFT_EXPR)
2765 /* Even if vr0 is VARYING or otherwise not usable, we can derive
2766 useful ranges just from the shift count. E.g.
2767 x >> 63 for signed 64-bit x is always [-1, 0]. */
2768 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2770 vr0.type = type = VR_RANGE;
2771 vr0.min = vrp_val_min (expr_type);
2772 vr0.max = vrp_val_max (expr_type);
2774 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2775 return;
2777 /* We can map lshifts by constants to MULT_EXPR handling. */
2778 else if (code == LSHIFT_EXPR
2779 && range_int_cst_singleton_p (&vr1))
2781 bool saved_flag_wrapv;
2782 value_range vr1p = VR_INITIALIZER;
2783 vr1p.type = VR_RANGE;
2784 vr1p.min = (wide_int_to_tree
2785 (expr_type,
2786 wi::set_bit_in_zero (tree_to_shwi (vr1.min),
2787 TYPE_PRECISION (expr_type))));
2788 vr1p.max = vr1p.min;
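/* For instance (illustration only), a constant shift count of 3 turns
VR1P into the singleton [8, 8], so X << 3 is handled as X * 8. */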
2789 /* We have to use a wrapping multiply though as signed overflow
2790 on lshifts is implementation defined in C89. */
2791 saved_flag_wrapv = flag_wrapv;
2792 flag_wrapv = 1;
2793 extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
2794 &vr0, &vr1p);
2795 flag_wrapv = saved_flag_wrapv;
2796 return;
2798 else if (code == LSHIFT_EXPR
2799 && range_int_cst_p (&vr0))
2801 int prec = TYPE_PRECISION (expr_type);
2802 int overflow_pos = prec;
2803 int bound_shift;
2804 wide_int low_bound, high_bound;
2805 bool uns = TYPE_UNSIGNED (expr_type);
2806 bool in_bounds = false;
2808 if (!uns)
2809 overflow_pos -= 1;
2811 bound_shift = overflow_pos - tree_to_shwi (vr1.max);
2812 /* If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
2813 overflow. However, for that to happen, vr1.max needs to be
2814 zero, which means vr1 is a singleton range of zero, which
2815 means it should be handled by the previous LSHIFT_EXPR
2816 if-clause. */
2817 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2818 wide_int complement = ~(bound - 1);
2820 if (uns)
2822 low_bound = bound;
2823 high_bound = complement;
2824 if (wi::ltu_p (vr0.max, low_bound))
2826 /* [5, 6] << [1, 2] == [10, 24]. */
2827 /* We're shifting out only zeroes, so the value increases
2828 monotonically. */
2829 in_bounds = true;
2831 else if (wi::ltu_p (high_bound, vr0.min))
2833 /* [0xffffff00, 0xffffffff] << [1, 2]
2834 == [0xfffffc00, 0xfffffffe]. */
2835 /* We're shifting out only ones, so the value decreases
2836 monotonically. */
2837 in_bounds = true;
2840 else
2842 /* [-1, 1] << [1, 2] == [-4, 4]. */
2843 low_bound = complement;
2844 high_bound = bound;
2845 if (wi::lts_p (vr0.max, high_bound)
2846 && wi::lts_p (low_bound, vr0.min))
2848 /* For non-negative numbers, we're shifting out only
2849 zeroes, so the value increases monotonically.
2850 For negative numbers, we're shifting out only ones, so the
2851 value decreases monotonically. */
2852 in_bounds = true;
2856 if (in_bounds)
2858 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2859 return;
2863 set_value_range_to_varying (vr);
2864 return;
2866 else if (code == TRUNC_DIV_EXPR
2867 || code == FLOOR_DIV_EXPR
2868 || code == CEIL_DIV_EXPR
2869 || code == EXACT_DIV_EXPR
2870 || code == ROUND_DIV_EXPR)
2872 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2874 /* For division, if op1 has VR_RANGE but op0 does not, something
2875 can be deduced just from that range. Say [min, max] / [4, max]
2876 gives [min / 4, max / 4] range. */
2877 if (vr1.type == VR_RANGE
2878 && !symbolic_range_p (&vr1)
2879 && range_includes_zero_p (vr1.min, vr1.max) == 0)
2881 vr0.type = type = VR_RANGE;
2882 vr0.min = vrp_val_min (expr_type);
2883 vr0.max = vrp_val_max (expr_type);
2885 else
2887 set_value_range_to_varying (vr);
2888 return;
2892 /* For divisions, if flag_non_call_exceptions is true, we must
2893 not eliminate a division by zero. */
2894 if (cfun->can_throw_non_call_exceptions
2895 && (vr1.type != VR_RANGE
2896 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2898 set_value_range_to_varying (vr);
2899 return;
2902 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2903 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2904 include 0. */
2905 if (vr0.type == VR_RANGE
2906 && (vr1.type != VR_RANGE
2907 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2909 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2910 int cmp;
2912 min = NULL_TREE;
2913 max = NULL_TREE;
2914 if (TYPE_UNSIGNED (expr_type)
2915 || value_range_nonnegative_p (&vr1))
2917 /* For unsigned division or when divisor is known
2918 to be non-negative, the range has to cover
2919 all numbers from 0 to max for positive max
2920 and all numbers from min to 0 for negative min. */
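/* For instance (illustration only), for [4, 20] / [2, 10] with the
divisor known to be positive this yields [4 / 10, 20] = [0, 20]. */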
2921 cmp = compare_values (vr0.max, zero);
2922 if (cmp == -1)
2924 /* When vr0.max < 0, vr1.min != 0 and value
2925 ranges for dividend and divisor are available. */
2926 if (vr1.type == VR_RANGE
2927 && !symbolic_range_p (&vr0)
2928 && !symbolic_range_p (&vr1)
2929 && compare_values (vr1.min, zero) != 0)
2930 max = int_const_binop (code, vr0.max, vr1.min);
2931 else
2932 max = zero;
2934 else if (cmp == 0 || cmp == 1)
2935 max = vr0.max;
2936 else
2937 type = VR_VARYING;
2938 cmp = compare_values (vr0.min, zero);
2939 if (cmp == 1)
2941 /* For unsigned division when value ranges for dividend
2942 and divisor are available. */
2943 if (vr1.type == VR_RANGE
2944 && !symbolic_range_p (&vr0)
2945 && !symbolic_range_p (&vr1)
2946 && compare_values (vr1.max, zero) != 0)
2947 min = int_const_binop (code, vr0.min, vr1.max);
2948 else
2949 min = zero;
2951 else if (cmp == 0 || cmp == -1)
2952 min = vr0.min;
2953 else
2954 type = VR_VARYING;
2956 else
2958 /* Otherwise the range is -max .. max or min .. -min
2959 depending on which bound is bigger in absolute value,
2960 as the division can change the sign. */
2961 abs_extent_range (vr, vr0.min, vr0.max);
2962 return;
2964 if (type == VR_VARYING)
2966 set_value_range_to_varying (vr);
2967 return;
2970 else if (!symbolic_range_p (&vr0) && !symbolic_range_p (&vr1))
2972 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2973 return;
2976 else if (code == TRUNC_MOD_EXPR)
2978 if (range_is_null (&vr1))
2980 set_value_range_to_undefined (vr);
2981 return;
2983 /* ABS (A % B) < ABS (B) and either
2984 0 <= A % B <= A or A <= A % B <= 0. */
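/* For instance (illustration only), with unsigned operands and
VR1 = [1, 16] the code below yields a result range within [0, 15]. */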
2985 type = VR_RANGE;
2986 signop sgn = TYPE_SIGN (expr_type);
2987 unsigned int prec = TYPE_PRECISION (expr_type);
2988 wide_int wmin, wmax, tmp;
2989 wide_int zero = wi::zero (prec);
2990 wide_int one = wi::one (prec);
2991 if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
2993 wmax = wi::sub (vr1.max, one);
2994 if (sgn == SIGNED)
2996 tmp = wi::sub (wi::minus_one (prec), vr1.min);
2997 wmax = wi::smax (wmax, tmp);
3000 else
3002 wmax = wi::max_value (prec, sgn);
3003 /* X % INT_MIN may be INT_MAX. */
3004 if (sgn == UNSIGNED)
3005 wmax = wmax - one;
3008 if (sgn == UNSIGNED)
3009 wmin = zero;
3010 else
3012 wmin = -wmax;
3013 if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
3015 tmp = vr0.min;
3016 if (wi::gts_p (tmp, zero))
3017 tmp = zero;
3018 wmin = wi::smax (wmin, tmp);
3022 if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
3024 tmp = vr0.max;
3025 if (sgn == SIGNED && wi::neg_p (tmp))
3026 tmp = zero;
3027 wmax = wi::min (wmax, tmp, sgn);
3030 min = wide_int_to_tree (expr_type, wmin);
3031 max = wide_int_to_tree (expr_type, wmax);
3033 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
3035 bool int_cst_range0, int_cst_range1;
3036 wide_int may_be_nonzero0, may_be_nonzero1;
3037 wide_int must_be_nonzero0, must_be_nonzero1;
3039 int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
3040 &may_be_nonzero0,
3041 &must_be_nonzero0);
3042 int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
3043 &may_be_nonzero1,
3044 &must_be_nonzero1);
3046 type = VR_RANGE;
3047 if (code == BIT_AND_EXPR)
3049 min = wide_int_to_tree (expr_type,
3050 must_be_nonzero0 & must_be_nonzero1);
3051 wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
3052 /* If both input ranges contain only negative values we can
3053 truncate the result range maximum to the minimum of the
3054 input range maxima. */
3055 if (int_cst_range0 && int_cst_range1
3056 && tree_int_cst_sgn (vr0.max) < 0
3057 && tree_int_cst_sgn (vr1.max) < 0)
3059 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
3060 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
3062 /* If either input range contains only non-negative values
3063 we can truncate the result range maximum to that input
3064 range's maximum. */
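/* E.g. (illustration only) X & M with M known to be in [0, 0xff]
always lies in [0, 0xff]. */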
3065 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
3066 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
3067 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
3068 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
3069 max = wide_int_to_tree (expr_type, wmax);
3071 else if (code == BIT_IOR_EXPR)
3073 max = wide_int_to_tree (expr_type,
3074 may_be_nonzero0 | may_be_nonzero1);
3075 wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
3076 /* If the input ranges contain only non-negative values we can
3077 truncate the minimum of the result range to the maximum
3078 of the input range minima. */
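/* For instance (illustration only), for [16, 31] | [4, 7] this
results in the range [20, 31]. */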
3079 if (int_cst_range0 && int_cst_range1
3080 && tree_int_cst_sgn (vr0.min) >= 0
3081 && tree_int_cst_sgn (vr1.min) >= 0)
3083 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
3084 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
3086 /* If either input range contains only negative values
3087 we can truncate the minimum of the result range to that
3088 input range's minimum. */
3089 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
3090 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
3091 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
3092 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
3093 min = wide_int_to_tree (expr_type, wmin);
3095 else if (code == BIT_XOR_EXPR)
3097 wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
3098 | ~(may_be_nonzero0 | may_be_nonzero1));
3099 wide_int result_one_bits
3100 = (must_be_nonzero0.and_not (may_be_nonzero1)
3101 | must_be_nonzero1.and_not (may_be_nonzero0));
3102 max = wide_int_to_tree (expr_type, ~result_zero_bits);
3103 min = wide_int_to_tree (expr_type, result_one_bits);
3104 /* If the range has all positive or all negative values the
3105 result is better than VARYING. */
3106 if (tree_int_cst_sgn (min) < 0
3107 || tree_int_cst_sgn (max) >= 0)
3109 else
3110 max = min = NULL_TREE;
3113 else
3114 gcc_unreachable ();
3116 /* If either MIN or MAX overflowed, then set the resulting range to
3117 VARYING. But we do accept an overflow infinity representation. */
3118 if (min == NULL_TREE
3119 || (TREE_OVERFLOW_P (min) && !is_overflow_infinity (min))
3120 || max == NULL_TREE
3121 || (TREE_OVERFLOW_P (max) && !is_overflow_infinity (max)))
3123 set_value_range_to_varying (vr);
3124 return;
3127 /* We punt if:
3128 1) [-INF, +INF]
3129 2) [-INF, +-INF(OVF)]
3130 3) [+-INF(OVF), +INF]
3131 4) [+-INF(OVF), +-INF(OVF)]
3132 We learn nothing when we have INF and INF(OVF) on both sides.
3133 Note that we do accept [-INF, -INF] and [+INF, +INF] without
3134 overflow. */
3135 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
3136 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
3138 set_value_range_to_varying (vr);
3139 return;
3142 cmp = compare_values (min, max);
3143 if (cmp == -2 || cmp == 1)
3145 /* If the new range has its limits swapped around (MIN > MAX),
3146 then the operation caused one of them to wrap around, mark
3147 the new range VARYING. */
3148 set_value_range_to_varying (vr);
3150 else
3151 set_value_range (vr, type, min, max, NULL);
3154 /* Extract range information from a binary expression OP0 CODE OP1 based on
3155 the ranges of each of its operands with resulting type EXPR_TYPE.
3156 The resulting range is stored in *VR. */
3158 static void
3159 extract_range_from_binary_expr (value_range *vr,
3160 enum tree_code code,
3161 tree expr_type, tree op0, tree op1)
3163 value_range vr0 = VR_INITIALIZER;
3164 value_range vr1 = VR_INITIALIZER;
3166 /* Get value ranges for each operand. For constant operands, create
3167 a new value range with the operand to simplify processing. */
3168 if (TREE_CODE (op0) == SSA_NAME)
3169 vr0 = *(get_value_range (op0));
3170 else if (is_gimple_min_invariant (op0))
3171 set_value_range_to_value (&vr0, op0, NULL);
3172 else
3173 set_value_range_to_varying (&vr0);
3175 if (TREE_CODE (op1) == SSA_NAME)
3176 vr1 = *(get_value_range (op1));
3177 else if (is_gimple_min_invariant (op1))
3178 set_value_range_to_value (&vr1, op1, NULL);
3179 else
3180 set_value_range_to_varying (&vr1);
3182 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
3184 /* Try harder for PLUS and MINUS if the range of one operand is symbolic
3185 and based on the other operand, for example if it was deduced from a
3186 symbolic comparison. When a bound of the range of the first operand
3187 is invariant, we set the corresponding bound of the new range to INF
3188 in order to avoid recursing on the range of the second operand. */
3189 if (vr->type == VR_VARYING
3190 && (code == PLUS_EXPR || code == MINUS_EXPR)
3191 && TREE_CODE (op1) == SSA_NAME
3192 && vr0.type == VR_RANGE
3193 && symbolic_range_based_on_p (&vr0, op1))
3195 const bool minus_p = (code == MINUS_EXPR);
3196 value_range n_vr1 = VR_INITIALIZER;
3198 /* Try with VR0 and [-INF, OP1]. */
3199 if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
3200 set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);
3202 /* Try with VR0 and [OP1, +INF]. */
3203 else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
3204 set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);
3206 /* Try with VR0 and [OP1, OP1]. */
3207 else
3208 set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);
3210 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
3213 if (vr->type == VR_VARYING
3214 && (code == PLUS_EXPR || code == MINUS_EXPR)
3215 && TREE_CODE (op0) == SSA_NAME
3216 && vr1.type == VR_RANGE
3217 && symbolic_range_based_on_p (&vr1, op0))
3219 const bool minus_p = (code == MINUS_EXPR);
3220 value_range n_vr0 = VR_INITIALIZER;
3222 /* Try with [-INF, OP0] and VR1. */
3223 if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
3224 set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);
3226 /* Try with [OP0, +INF] and VR1. */
3227 else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
3228 set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);
3230 /* Try with [OP0, OP0] and VR1. */
3231 else
3232 set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);
3234 extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
3238 /* Extract range information from a unary operation CODE based on
3239 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
3240 The resulting range is stored in *VR. */
3242 static void
3243 extract_range_from_unary_expr_1 (value_range *vr,
3244 enum tree_code code, tree type,
3245 value_range *vr0_, tree op0_type)
3247 value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
3249 /* VRP only operates on integral and pointer types. */
3250 if (!(INTEGRAL_TYPE_P (op0_type)
3251 || POINTER_TYPE_P (op0_type))
3252 || !(INTEGRAL_TYPE_P (type)
3253 || POINTER_TYPE_P (type)))
3255 set_value_range_to_varying (vr);
3256 return;
3259 /* If VR0 is UNDEFINED, so is the result. */
3260 if (vr0.type == VR_UNDEFINED)
3262 set_value_range_to_undefined (vr);
3263 return;
3266 /* Handle operations that we express in terms of others. */
3267 if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
3269 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */
3270 copy_value_range (vr, &vr0);
3271 return;
3273 else if (code == NEGATE_EXPR)
3275 /* -X is simply 0 - X, so re-use existing code that also handles
3276 anti-ranges fine. */
3277 value_range zero = VR_INITIALIZER;
3278 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3279 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3280 return;
3282 else if (code == BIT_NOT_EXPR)
3284 /* ~X is simply -1 - X, so re-use existing code that also handles
3285 anti-ranges fine. */
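/* For instance (illustration only), for X in [0, 3] this yields
[-1 - 3, -1 - 0] = [-4, -1]. */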
3286 value_range minusone = VR_INITIALIZER;
3287 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3288 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3289 type, &minusone, &vr0);
3290 return;
3293 /* Now canonicalize anti-ranges to ranges when they are not symbolic
3294 and express op ~[] as (op []') U (op []''). */
3295 if (vr0.type == VR_ANTI_RANGE
3296 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
3298 extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
3299 if (vrtem1.type != VR_UNDEFINED)
3301 value_range vrres = VR_INITIALIZER;
3302 extract_range_from_unary_expr_1 (&vrres, code, type,
3303 &vrtem1, op0_type);
3304 vrp_meet (vr, &vrres);
3306 return;
3309 if (CONVERT_EXPR_CODE_P (code))
3311 tree inner_type = op0_type;
3312 tree outer_type = type;
3314 /* If the expression evaluates to a pointer, we are only interested in
3315 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
3316 if (POINTER_TYPE_P (type))
3318 if (range_is_nonnull (&vr0))
3319 set_value_range_to_nonnull (vr, type);
3320 else if (range_is_null (&vr0))
3321 set_value_range_to_null (vr, type);
3322 else
3323 set_value_range_to_varying (vr);
3324 return;
3327 /* If VR0 is varying and we increase the type precision, assume
3328 a full range for the following transformation. */
3329 if (vr0.type == VR_VARYING
3330 && INTEGRAL_TYPE_P (inner_type)
3331 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
3333 vr0.type = VR_RANGE;
3334 vr0.min = TYPE_MIN_VALUE (inner_type);
3335 vr0.max = TYPE_MAX_VALUE (inner_type);
3338 /* If VR0 is a constant range or anti-range and the conversion is
3339 not truncating we can convert the min and max values and
3340 canonicalize the resulting range. Otherwise we can do the
3341 conversion if the size of the range is less than what the
3342 precision of the target type can represent and the range is
3343 not an anti-range. */
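/* For instance (illustration only), converting the int range [0, 300]
to unsigned char is rejected because the range spans more than 2^8
values, whereas [0, 200] is converted and then canonicalized. */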
3344 if ((vr0.type == VR_RANGE
3345 || vr0.type == VR_ANTI_RANGE)
3346 && TREE_CODE (vr0.min) == INTEGER_CST
3347 && TREE_CODE (vr0.max) == INTEGER_CST
3348 && (!is_overflow_infinity (vr0.min)
3349 || (vr0.type == VR_RANGE
3350 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3351 && needs_overflow_infinity (outer_type)
3352 && supports_overflow_infinity (outer_type)))
3353 && (!is_overflow_infinity (vr0.max)
3354 || (vr0.type == VR_RANGE
3355 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3356 && needs_overflow_infinity (outer_type)
3357 && supports_overflow_infinity (outer_type)))
3358 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
3359 || (vr0.type == VR_RANGE
3360 && integer_zerop (int_const_binop (RSHIFT_EXPR,
3361 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
3362 size_int (TYPE_PRECISION (outer_type)))))))
3364 tree new_min, new_max;
3365 if (is_overflow_infinity (vr0.min))
3366 new_min = negative_overflow_infinity (outer_type);
3367 else
3368 new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
3369 0, false);
3370 if (is_overflow_infinity (vr0.max))
3371 new_max = positive_overflow_infinity (outer_type);
3372 else
3373 new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
3374 0, false);
3375 set_and_canonicalize_value_range (vr, vr0.type,
3376 new_min, new_max, NULL);
3377 return;
3380 set_value_range_to_varying (vr);
3381 return;
3383 else if (code == ABS_EXPR)
3385 tree min, max;
3386 int cmp;
3388 /* Pass through vr0 in the easy cases. */
3389 if (TYPE_UNSIGNED (type)
3390 || value_range_nonnegative_p (&vr0))
3392 copy_value_range (vr, &vr0);
3393 return;
3396 /* For the remaining varying or symbolic ranges we can't do anything
3397 useful. */
3398 if (vr0.type == VR_VARYING
3399 || symbolic_range_p (&vr0))
3401 set_value_range_to_varying (vr);
3402 return;
3405 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3406 useful range. */
3407 if (!TYPE_OVERFLOW_UNDEFINED (type)
3408 && ((vr0.type == VR_RANGE
3409 && vrp_val_is_min (vr0.min))
3410 || (vr0.type == VR_ANTI_RANGE
3411 && !vrp_val_is_min (vr0.min))))
3413 set_value_range_to_varying (vr);
3414 return;
3417 /* ABS_EXPR may flip the range around, if the original range
3418 included negative values. */
3419 if (is_overflow_infinity (vr0.min))
3420 min = positive_overflow_infinity (type);
3421 else if (!vrp_val_is_min (vr0.min))
3422 min = fold_unary_to_constant (code, type, vr0.min);
3423 else if (!needs_overflow_infinity (type))
3424 min = TYPE_MAX_VALUE (type);
3425 else if (supports_overflow_infinity (type))
3426 min = positive_overflow_infinity (type);
3427 else
3429 set_value_range_to_varying (vr);
3430 return;
3433 if (is_overflow_infinity (vr0.max))
3434 max = positive_overflow_infinity (type);
3435 else if (!vrp_val_is_min (vr0.max))
3436 max = fold_unary_to_constant (code, type, vr0.max);
3437 else if (!needs_overflow_infinity (type))
3438 max = TYPE_MAX_VALUE (type);
3439 else if (supports_overflow_infinity (type)
3440 /* We shouldn't generate [+INF, +INF] as set_value_range
3441 doesn't like this and ICEs. */
3442 && !is_positive_overflow_infinity (min))
3443 max = positive_overflow_infinity (type);
3444 else
3446 set_value_range_to_varying (vr);
3447 return;
3450 cmp = compare_values (min, max);
3452 /* If a VR_ANTI_RANGE contains zero, then we have
3453 ~[-INF, min(MIN, MAX)]. */
3454 if (vr0.type == VR_ANTI_RANGE)
3456 if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3458 /* Take the lower of the two values. */
3459 if (cmp != 1)
3460 max = min;
3462 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3463 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3464 flag_wrapv is set and the original anti-range doesn't include
3465 TYPE_MIN_VALUE, remembering that -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3466 if (TYPE_OVERFLOW_WRAPS (type))
3468 tree type_min_value = TYPE_MIN_VALUE (type);
3470 min = (vr0.min != type_min_value
3471 ? int_const_binop (PLUS_EXPR, type_min_value,
3472 build_int_cst (TREE_TYPE (type_min_value), 1))
3473 : type_min_value);
3475 else
3477 if (overflow_infinity_range_p (&vr0))
3478 min = negative_overflow_infinity (type);
3479 else
3480 min = TYPE_MIN_VALUE (type);
3483 else
3485 /* All else has failed, so create the range [0, INF], even for
3486 flag_wrapv since TYPE_MIN_VALUE is in the original
3487 anti-range. */
3488 vr0.type = VR_RANGE;
3489 min = build_int_cst (type, 0);
3490 if (needs_overflow_infinity (type))
3492 if (supports_overflow_infinity (type))
3493 max = positive_overflow_infinity (type);
3494 else
3496 set_value_range_to_varying (vr);
3497 return;
3500 else
3501 max = TYPE_MAX_VALUE (type);
3505 /* If the range contains zero then we know that the minimum value in the
3506 range will be zero. */
3507 else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3509 if (cmp == 1)
3510 max = min;
3511 min = build_int_cst (type, 0);
3513 else
3515 /* If the range was reversed, swap MIN and MAX. */
3516 if (cmp == 1)
3517 std::swap (min, max);
3520 cmp = compare_values (min, max);
3521 if (cmp == -2 || cmp == 1)
3523 /* If the new range has its limits swapped around (MIN > MAX),
3524 then the operation caused one of them to wrap around, mark
3525 the new range VARYING. */
3526 set_value_range_to_varying (vr);
3528 else
3529 set_value_range (vr, vr0.type, min, max, NULL);
3530 return;
3533 /* For unhandled operations fall back to varying. */
3534 set_value_range_to_varying (vr);
3535 return;
3539 /* Extract range information from a unary expression CODE OP0 based on
3540 the range of its operand with resulting type TYPE.
3541 The resulting range is stored in *VR. */
3543 static void
3544 extract_range_from_unary_expr (value_range *vr, enum tree_code code,
3545 tree type, tree op0)
3547 value_range vr0 = VR_INITIALIZER;
3549 /* Get value ranges for the operand. For constant operands, create
3550 a new value range with the operand to simplify processing. */
3551 if (TREE_CODE (op0) == SSA_NAME)
3552 vr0 = *(get_value_range (op0));
3553 else if (is_gimple_min_invariant (op0))
3554 set_value_range_to_value (&vr0, op0, NULL);
3555 else
3556 set_value_range_to_varying (&vr0);
3558 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3562 /* Extract range information from a conditional expression STMT based on
3563 the ranges of each of its operands and the expression code. */
3565 static void
3566 extract_range_from_cond_expr (value_range *vr, gassign *stmt)
3568 tree op0, op1;
3569 value_range vr0 = VR_INITIALIZER;
3570 value_range vr1 = VR_INITIALIZER;
3572 /* Get value ranges for each operand. For constant operands, create
3573 a new value range with the operand to simplify processing. */
3574 op0 = gimple_assign_rhs2 (stmt);
3575 if (TREE_CODE (op0) == SSA_NAME)
3576 vr0 = *(get_value_range (op0));
3577 else if (is_gimple_min_invariant (op0))
3578 set_value_range_to_value (&vr0, op0, NULL);
3579 else
3580 set_value_range_to_varying (&vr0);
3582 op1 = gimple_assign_rhs3 (stmt);
3583 if (TREE_CODE (op1) == SSA_NAME)
3584 vr1 = *(get_value_range (op1));
3585 else if (is_gimple_min_invariant (op1))
3586 set_value_range_to_value (&vr1, op1, NULL);
3587 else
3588 set_value_range_to_varying (&vr1);
3590 /* The resulting value range is the union of the operand ranges. */
3591 copy_value_range (vr, &vr0);
3592 vrp_meet (vr, &vr1);
3596 /* Extract range information from a comparison expression EXPR based
3597 on the range of its operand and the expression code. */
3599 static void
3600 extract_range_from_comparison (value_range *vr, enum tree_code code,
3601 tree type, tree op0, tree op1)
3603 bool sop = false;
3604 tree val;
3606 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3607 NULL);
3609 /* A disadvantage of using a special infinity as an overflow
3610 representation is that we lose the ability to record overflow
3611 when we don't have an infinity. So we have to ignore a result
3612 which relies on overflow. */
3614 if (val && !is_overflow_infinity (val) && !sop)
3616 /* Since this expression was found on the RHS of an assignment,
3617 its type may be different from _Bool. Convert VAL to EXPR's
3618 type. */
3619 val = fold_convert (type, val);
3620 if (is_gimple_min_invariant (val))
3621 set_value_range_to_value (vr, val, vr->equiv);
3622 else
3623 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3625 else
3626 /* The result of a comparison is always true or false. */
3627 set_value_range_to_truthvalue (vr, type);
3630 /* Helper function for simplify_internal_call_using_ranges and
3631 extract_range_basic. Return true if OP0 SUBCODE OP1 for
3632 SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
3633 always overflow. Set *OVF to true if it is known to always
3634 overflow. */
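/* For example (illustration only): for an unsigned char result with
OP0 in [200, 250] and OP1 in [100, 120], a PLUS_EXPR overflows for
every value pair, so the function returns true with *OVF set. */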
3636 static bool
3637 check_for_binary_op_overflow (enum tree_code subcode, tree type,
3638 tree op0, tree op1, bool *ovf)
3640 value_range vr0 = VR_INITIALIZER;
3641 value_range vr1 = VR_INITIALIZER;
3642 if (TREE_CODE (op0) == SSA_NAME)
3643 vr0 = *get_value_range (op0);
3644 else if (TREE_CODE (op0) == INTEGER_CST)
3645 set_value_range_to_value (&vr0, op0, NULL);
3646 else
3647 set_value_range_to_varying (&vr0);
3649 if (TREE_CODE (op1) == SSA_NAME)
3650 vr1 = *get_value_range (op1);
3651 else if (TREE_CODE (op1) == INTEGER_CST)
3652 set_value_range_to_value (&vr1, op1, NULL);
3653 else
3654 set_value_range_to_varying (&vr1);
3656 if (!range_int_cst_p (&vr0)
3657 || TREE_OVERFLOW (vr0.min)
3658 || TREE_OVERFLOW (vr0.max))
3660 vr0.min = vrp_val_min (TREE_TYPE (op0));
3661 vr0.max = vrp_val_max (TREE_TYPE (op0));
3663 if (!range_int_cst_p (&vr1)
3664 || TREE_OVERFLOW (vr1.min)
3665 || TREE_OVERFLOW (vr1.max))
3667 vr1.min = vrp_val_min (TREE_TYPE (op1));
3668 vr1.max = vrp_val_max (TREE_TYPE (op1));
3670 *ovf = arith_overflowed_p (subcode, type, vr0.min,
3671 subcode == MINUS_EXPR ? vr1.max : vr1.min);
3672 if (arith_overflowed_p (subcode, type, vr0.max,
3673 subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
3674 return false;
3675 if (subcode == MULT_EXPR)
3677 if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
3678 || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
3679 return false;
3681 if (*ovf)
3683 /* So far we found that there is an overflow on the boundaries.
3684 That doesn't prove that there is an overflow for all values
3685 in between the boundaries. To prove that, compute the widest_int
3686 range of the result and check whether it is disjoint from the
3687 range of the type. */
3688 widest_int wmin, wmax;
3689 widest_int w[4];
3690 int i;
3691 w[0] = wi::to_widest (vr0.min);
3692 w[1] = wi::to_widest (vr0.max);
3693 w[2] = wi::to_widest (vr1.min);
3694 w[3] = wi::to_widest (vr1.max);
3695 for (i = 0; i < 4; i++)
3697 widest_int wt;
3698 switch (subcode)
3700 case PLUS_EXPR:
3701 wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
3702 break;
3703 case MINUS_EXPR:
3704 wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
3705 break;
3706 case MULT_EXPR:
3707 wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
3708 break;
3709 default:
3710 gcc_unreachable ();
3712 if (i == 0)
3714 wmin = wt;
3715 wmax = wt;
3717 else
3719 wmin = wi::smin (wmin, wt);
3720 wmax = wi::smax (wmax, wt);
3723 /* The result of op0 CODE op1 is known to be in range
3724 [wmin, wmax]. */
3725 widest_int wtmin = wi::to_widest (vrp_val_min (type));
3726 widest_int wtmax = wi::to_widest (vrp_val_max (type));
3727 /* If all values in [wmin, wmax] are smaller than
3728 [wtmin, wtmax] or all are larger than [wtmin, wtmax],
3729 the arithmetic operation will always overflow. */
3730 if (wmax < wtmin || wmin > wtmax)
3731 return true;
3732 return false;
3734 return true;
3737 /* Try to derive a nonnegative or nonzero range out of STMT relying
3738 primarily on generic routines in fold in conjunction with range data.
3739 Store the result in *VR. */
3741 static void
3742 extract_range_basic (value_range *vr, gimple *stmt)
3744 bool sop = false;
3745 tree type = gimple_expr_type (stmt);
3747 if (is_gimple_call (stmt))
3749 tree arg;
3750 int mini, maxi, zerov = 0, prec;
3751 enum tree_code subcode = ERROR_MARK;
3752 combined_fn cfn = gimple_call_combined_fn (stmt);
3754 switch (cfn)
3756 case CFN_BUILT_IN_CONSTANT_P:
3757 /* If the call is __builtin_constant_p and the argument is a
3758 function parameter resolve it to false. This avoids bogus
3759 array bound warnings.
3760 ??? We could do this as early as inlining is finished. */
3761 arg = gimple_call_arg (stmt, 0);
3762 if (TREE_CODE (arg) == SSA_NAME
3763 && SSA_NAME_IS_DEFAULT_DEF (arg)
3764 && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL)
3766 set_value_range_to_null (vr, type);
3767 return;
3769 break;
3770 /* Both __builtin_ffs* and __builtin_popcount return
3771 [0, prec]. */
3772 CASE_CFN_FFS:
3773 CASE_CFN_POPCOUNT:
3774 arg = gimple_call_arg (stmt, 0);
3775 prec = TYPE_PRECISION (TREE_TYPE (arg));
3776 mini = 0;
3777 maxi = prec;
3778 if (TREE_CODE (arg) == SSA_NAME)
3780 value_range *vr0 = get_value_range (arg);
3781 /* If arg is non-zero, then ffs or popcount
3782 are non-zero. */
3783 if (((vr0->type == VR_RANGE
3784 && range_includes_zero_p (vr0->min, vr0->max) == 0)
3785 || (vr0->type == VR_ANTI_RANGE
3786 && range_includes_zero_p (vr0->min, vr0->max) == 1))
3787 && !is_overflow_infinity (vr0->min)
3788 && !is_overflow_infinity (vr0->max))
3789 mini = 1;
3790 /* If some high bits are known to be zero,
3791 we can decrease the maximum. */
3792 if (vr0->type == VR_RANGE
3793 && TREE_CODE (vr0->max) == INTEGER_CST
3794 && !operand_less_p (vr0->min,
3795 build_zero_cst (TREE_TYPE (vr0->min)))
3796 && !is_overflow_infinity (vr0->max))
3797 maxi = tree_floor_log2 (vr0->max) + 1;
3799 goto bitop_builtin;
3800 /* __builtin_parity* returns [0, 1]. */
3801 CASE_CFN_PARITY:
3802 mini = 0;
3803 maxi = 1;
3804 goto bitop_builtin;
3805 /* __builtin_c[lt]z* return [0, prec-1], except
3806 when the argument is 0, but that is undefined behavior.
3807 On many targets where the CLZ RTL or optab value is defined
3808 for 0 the value is prec, so include that in the range
3809 by default. */
3810 CASE_CFN_CLZ:
3811 arg = gimple_call_arg (stmt, 0);
3812 prec = TYPE_PRECISION (TREE_TYPE (arg));
3813 mini = 0;
3814 maxi = prec;
3815 if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
3816 != CODE_FOR_nothing
3817 && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3818 zerov)
3819 /* Handle only the single common value. */
3820 && zerov != prec)
3821 /* Magic value to give up, unless vr0 proves
3822 arg is non-zero. */
3823 mini = -2;
3824 if (TREE_CODE (arg) == SSA_NAME)
3826 value_range *vr0 = get_value_range (arg);
3827 /* From clz of VR_RANGE minimum we can compute
3828 result maximum. */
3829 if (vr0->type == VR_RANGE
3830 && TREE_CODE (vr0->min) == INTEGER_CST
3831 && !is_overflow_infinity (vr0->min))
3833 maxi = prec - 1 - tree_floor_log2 (vr0->min);
3834 if (maxi != prec)
3835 mini = 0;
3837 else if (vr0->type == VR_ANTI_RANGE
3838 && integer_zerop (vr0->min)
3839 && !is_overflow_infinity (vr0->min))
3841 maxi = prec - 1;
3842 mini = 0;
3844 if (mini == -2)
3845 break;
3846 /* From clz of VR_RANGE maximum we can compute
3847 result minimum. */
3848 if (vr0->type == VR_RANGE
3849 && TREE_CODE (vr0->max) == INTEGER_CST
3850 && !is_overflow_infinity (vr0->max))
3852 mini = prec - 1 - tree_floor_log2 (vr0->max);
3853 if (mini == prec)
3854 break;
3857 if (mini == -2)
3858 break;
3859 goto bitop_builtin;
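/* As a worked example, assuming a 32-bit argument known to lie in the
   range [16, 255]: from the range minimum, MAXI = 31 - tree_floor_log2 (16)
   = 27, and from the range maximum, MINI = 31 - tree_floor_log2 (255) = 24,
   so __builtin_clz is known to be in [24, 27].  */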
3860 /* __builtin_ctz* return [0, prec-1], except for
3861 when the argument is 0, but that is undefined behavior.
3862 If there is a ctz optab for this mode and
3863 CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
3864 otherwise just assume 0 won't be seen. */
3865 CASE_CFN_CTZ:
3866 arg = gimple_call_arg (stmt, 0);
3867 prec = TYPE_PRECISION (TREE_TYPE (arg));
3868 mini = 0;
3869 maxi = prec - 1;
3870 if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
3871 != CODE_FOR_nothing
3872 && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3873 zerov))
3875 /* Handle only the two common values. */
3876 if (zerov == -1)
3877 mini = -1;
3878 else if (zerov == prec)
3879 maxi = prec;
3880 else
3881 /* Magic value to give up, unless vr0 proves
3882 arg is non-zero. */
3883 mini = -2;
3885 if (TREE_CODE (arg) == SSA_NAME)
3887 value_range *vr0 = get_value_range (arg);
3888 /* If arg is non-zero, then use [0, prec - 1]. */
3889 if (((vr0->type == VR_RANGE
3890 && integer_nonzerop (vr0->min))
3891 || (vr0->type == VR_ANTI_RANGE
3892 && integer_zerop (vr0->min)))
3893 && !is_overflow_infinity (vr0->min))
3895 mini = 0;
3896 maxi = prec - 1;
3898 /* If some high bits are known to be zero,
3899 we can decrease the result maximum. */
3900 if (vr0->type == VR_RANGE
3901 && TREE_CODE (vr0->max) == INTEGER_CST
3902 && !is_overflow_infinity (vr0->max))
3904 maxi = tree_floor_log2 (vr0->max);
3905 /* For vr0 [0, 0] give up. */
3906 if (maxi == -1)
3907 break;
3910 if (mini == -2)
3911 break;
3912 goto bitop_builtin;
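/* As a worked example, assuming a 32-bit argument known to lie in the
   range [1, 255]: the nonzero minimum gives [0, prec - 1], and the known
   zero high bits reduce the maximum to tree_floor_log2 (255) = 7, so
   __builtin_ctz is known to be in [0, 7].  */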
3913 /* __builtin_clrsb* returns [0, prec-1]. */
3914 CASE_CFN_CLRSB:
3915 arg = gimple_call_arg (stmt, 0);
3916 prec = TYPE_PRECISION (TREE_TYPE (arg));
3917 mini = 0;
3918 maxi = prec - 1;
3919 goto bitop_builtin;
3920 bitop_builtin:
3921 set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
3922 build_int_cst (type, maxi), NULL);
3923 return;
3924 case CFN_UBSAN_CHECK_ADD:
3925 subcode = PLUS_EXPR;
3926 break;
3927 case CFN_UBSAN_CHECK_SUB:
3928 subcode = MINUS_EXPR;
3929 break;
3930 case CFN_UBSAN_CHECK_MUL:
3931 subcode = MULT_EXPR;
3932 break;
3933 case CFN_GOACC_DIM_SIZE:
3934 case CFN_GOACC_DIM_POS:
3935 /* Optimizing these two internal functions helps the loop
3936 optimizer eliminate outer comparisons. Size is [1,N]
3937 and pos is [0,N-1]. */
3939 bool is_pos = cfn == CFN_GOACC_DIM_POS;
3940 int axis = get_oacc_ifn_dim_arg (stmt);
3941 int size = get_oacc_fn_dim_size (current_function_decl, axis);
3943 if (!size)
3944 /* If it's dynamic, the backend might know a hardware
3945 limitation. */
3946 size = targetm.goacc.dim_limit (axis);
3948 tree type = TREE_TYPE (gimple_call_lhs (stmt));
3949 set_value_range (vr, VR_RANGE,
3950 build_int_cst (type, is_pos ? 0 : 1),
3951 size ? build_int_cst (type, size - is_pos)
3952 : vrp_val_max (type), NULL);
3954 return;
3955 default:
3956 break;
3958 if (subcode != ERROR_MARK)
3960 bool saved_flag_wrapv = flag_wrapv;
3961 /* Pretend the arithmetic is wrapping. If there is
3962 any overflow, we'll complain, but will actually do
3963 a wrapping operation. */
3964 flag_wrapv = 1;
3965 extract_range_from_binary_expr (vr, subcode, type,
3966 gimple_call_arg (stmt, 0),
3967 gimple_call_arg (stmt, 1));
3968 flag_wrapv = saved_flag_wrapv;
3970 /* If vrp_valueize returned non-NULL for both arguments,
3971 this should already have been folded; if it was not, the
3972 folding was prevented by overflow. Avoid removing the
3973 UBSAN_CHECK_* calls in that case. */
3974 if (vr->type == VR_RANGE
3975 && (vr->min == vr->max
3976 || operand_equal_p (vr->min, vr->max, 0)))
3977 set_value_range_to_varying (vr);
3978 return;
3981 /* Handle extraction of the two results (the result of the arithmetic
3982 and a flag indicating whether the arithmetic overflowed) from the
3983 {ADD,SUB,MUL}_OVERFLOW internal functions. */
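/* For instance, given the (hypothetical) GIMPLE sequence

     _1 = ADD_OVERFLOW (x_2, y_3);
     _4 = REALPART_EXPR <_1>;
     _5 = IMAGPART_EXPR <_1>;

   the range of _4 is that of x_2 + y_3 computed with wrapping
   arithmetic, and _5 is [0, 0] or [1, 1] if the overflow check can be
   decided statically, otherwise [0, 1] for the usual integer flag type.  */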
3984 else if (is_gimple_assign (stmt)
3985 && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
3986 || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
3987 && INTEGRAL_TYPE_P (type))
3989 enum tree_code code = gimple_assign_rhs_code (stmt);
3990 tree op = gimple_assign_rhs1 (stmt);
3991 if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
3993 gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
3994 if (is_gimple_call (g) && gimple_call_internal_p (g))
3996 enum tree_code subcode = ERROR_MARK;
3997 switch (gimple_call_internal_fn (g))
3999 case IFN_ADD_OVERFLOW:
4000 subcode = PLUS_EXPR;
4001 break;
4002 case IFN_SUB_OVERFLOW:
4003 subcode = MINUS_EXPR;
4004 break;
4005 case IFN_MUL_OVERFLOW:
4006 subcode = MULT_EXPR;
4007 break;
4008 default:
4009 break;
4011 if (subcode != ERROR_MARK)
4013 tree op0 = gimple_call_arg (g, 0);
4014 tree op1 = gimple_call_arg (g, 1);
4015 if (code == IMAGPART_EXPR)
4017 bool ovf = false;
4018 if (check_for_binary_op_overflow (subcode, type,
4019 op0, op1, &ovf))
4020 set_value_range_to_value (vr,
4021 build_int_cst (type, ovf),
4022 NULL);
4023 else if (TYPE_PRECISION (type) == 1
4024 && !TYPE_UNSIGNED (type))
4025 set_value_range_to_varying (vr);
4026 else
4027 set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
4028 build_int_cst (type, 1), NULL);
4030 else if (types_compatible_p (type, TREE_TYPE (op0))
4031 && types_compatible_p (type, TREE_TYPE (op1)))
4033 bool saved_flag_wrapv = flag_wrapv;
4034 /* Pretend the arithmetic is wrapping. If there is
4035 any overflow, IMAGPART_EXPR will be set. */
4036 flag_wrapv = 1;
4037 extract_range_from_binary_expr (vr, subcode, type,
4038 op0, op1);
4039 flag_wrapv = saved_flag_wrapv;
4041 else
4043 value_range vr0 = VR_INITIALIZER;
4044 value_range vr1 = VR_INITIALIZER;
4045 bool saved_flag_wrapv = flag_wrapv;
4046 /* Pretend the arithmetic is wrapping. If there is
4047 any overflow, IMAGPART_EXPR will be set. */
4048 flag_wrapv = 1;
4049 extract_range_from_unary_expr (&vr0, NOP_EXPR,
4050 type, op0);
4051 extract_range_from_unary_expr (&vr1, NOP_EXPR,
4052 type, op1);
4053 extract_range_from_binary_expr_1 (vr, subcode, type,
4054 &vr0, &vr1);
4055 flag_wrapv = saved_flag_wrapv;
4057 return;
4062 if (INTEGRAL_TYPE_P (type)
4063 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
4064 set_value_range_to_nonnegative (vr, type,
4065 sop || stmt_overflow_infinity (stmt));
4066 else if (vrp_stmt_computes_nonzero (stmt, &sop)
4067 && !sop)
4068 set_value_range_to_nonnull (vr, type);
4069 else
4070 set_value_range_to_varying (vr);
4074 /* Try to compute a useful range out of assignment STMT and store it
4075 in *VR. */
4077 static void
4078 extract_range_from_assignment (value_range *vr, gassign *stmt)
4080 enum tree_code code = gimple_assign_rhs_code (stmt);
4082 if (code == ASSERT_EXPR)
4083 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
4084 else if (code == SSA_NAME)
4085 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
4086 else if (TREE_CODE_CLASS (code) == tcc_binary)
4087 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
4088 gimple_expr_type (stmt),
4089 gimple_assign_rhs1 (stmt),
4090 gimple_assign_rhs2 (stmt));
4091 else if (TREE_CODE_CLASS (code) == tcc_unary)
4092 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
4093 gimple_expr_type (stmt),
4094 gimple_assign_rhs1 (stmt));
4095 else if (code == COND_EXPR)
4096 extract_range_from_cond_expr (vr, stmt);
4097 else if (TREE_CODE_CLASS (code) == tcc_comparison)
4098 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
4099 gimple_expr_type (stmt),
4100 gimple_assign_rhs1 (stmt),
4101 gimple_assign_rhs2 (stmt));
4102 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
4103 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
4104 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
4105 else
4106 set_value_range_to_varying (vr);
4108 if (vr->type == VR_VARYING)
4109 extract_range_basic (vr, stmt);
4112 /* Given a range VR, a LOOP and a variable VAR, determine whether it
4113 would be profitable to adjust VR using scalar evolution information
4114 for VAR. If so, update VR with the new limits. */
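/* For illustration, for the loop header PHI of I in

     for (i = 0; i < n; ++i)

   scev yields the chrec {0, +, 1}, so INIT is 0, STEP is 1 and the
   direction is EV_DIR_GROWS.  If the latch is known to execute at most
   9 times, a VARYING range for I can be narrowed to [0, 9].  */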
4116 static void
4117 adjust_range_with_scev (value_range *vr, struct loop *loop,
4118 gimple *stmt, tree var)
4120 tree init, step, chrec, tmin, tmax, min, max, type, tem;
4121 enum ev_direction dir;
4123 /* TODO. Don't adjust anti-ranges. An anti-range may provide
4124 better opportunities than a regular range, but I'm not sure. */
4125 if (vr->type == VR_ANTI_RANGE)
4126 return;
4128 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
4130 /* Like in PR19590, scev can return a constant function. */
4131 if (is_gimple_min_invariant (chrec))
4133 set_value_range_to_value (vr, chrec, vr->equiv);
4134 return;
4137 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
4138 return;
4140 init = initial_condition_in_loop_num (chrec, loop->num);
4141 tem = op_with_constant_singleton_value_range (init);
4142 if (tem)
4143 init = tem;
4144 step = evolution_part_in_loop_num (chrec, loop->num);
4145 tem = op_with_constant_singleton_value_range (step);
4146 if (tem)
4147 step = tem;
4149 /* If STEP is symbolic, we can't know whether INIT will be the
4150 minimum or maximum value in the range. Also, unless INIT is
4151 a simple expression, compare_values and possibly other functions
4152 in tree-vrp won't be able to handle it. */
4153 if (step == NULL_TREE
4154 || !is_gimple_min_invariant (step)
4155 || !valid_value_p (init))
4156 return;
4158 dir = scev_direction (chrec);
4159 if (/* Do not adjust ranges if we do not know whether the iv increases
4160 or decreases, ... */
4161 dir == EV_DIR_UNKNOWN
4162 /* ... or if it may wrap. */
4163 || scev_probably_wraps_p (NULL_TREE, init, step, stmt,
4164 get_chrec_loop (chrec), true))
4165 return;
4167 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
4168 negative_overflow_infinity and positive_overflow_infinity,
4169 because we have concluded that the loop probably does not
4170 wrap. */
4172 type = TREE_TYPE (var);
4173 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
4174 tmin = lower_bound_in_type (type, type);
4175 else
4176 tmin = TYPE_MIN_VALUE (type);
4177 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
4178 tmax = upper_bound_in_type (type, type);
4179 else
4180 tmax = TYPE_MAX_VALUE (type);
4182 /* Try to use estimated number of iterations for the loop to constrain the
4183 final value in the evolution. */
4184 if (TREE_CODE (step) == INTEGER_CST
4185 && is_gimple_val (init)
4186 && (TREE_CODE (init) != SSA_NAME
4187 || get_value_range (init)->type == VR_RANGE))
4189 widest_int nit;
4191 /* We are only entering here for loop header PHI nodes, so using
4192 the number of latch executions is the correct thing to use. */
4193 if (max_loop_iterations (loop, &nit))
4195 value_range maxvr = VR_INITIALIZER;
4196 signop sgn = TYPE_SIGN (TREE_TYPE (step));
4197 bool overflow;
4199 widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
4200 &overflow);
4201 /* If the multiplication overflowed we can't do a meaningful
4202 adjustment. Likewise if the result doesn't fit in the type
4203 of the induction variable. For a signed type we have to
4204 check whether the result has the expected signedness which
4205 is that of the step as number of iterations is unsigned. */
4206 if (!overflow
4207 && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
4208 && (sgn == UNSIGNED
4209 || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
4211 tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
4212 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
4213 TREE_TYPE (init), init, tem);
4214 /* Likewise if the addition did. */
4215 if (maxvr.type == VR_RANGE)
4217 value_range initvr = VR_INITIALIZER;
4219 if (TREE_CODE (init) == SSA_NAME)
4220 initvr = *(get_value_range (init));
4221 else if (is_gimple_min_invariant (init))
4222 set_value_range_to_value (&initvr, init, NULL);
4223 else
4224 return;
4226 /* Check if init + nit * step overflows. Even though we checked
4227 that scev {init, step}_loop doesn't wrap, that is not enough
4228 because the loop may exit immediately. Overflow could
4229 happen in the plus expression in this case. */
4230 if ((dir == EV_DIR_DECREASES
4231 && (is_negative_overflow_infinity (maxvr.min)
4232 || compare_values (maxvr.min, initvr.min) != -1))
4233 || (dir == EV_DIR_GROWS
4234 && (is_positive_overflow_infinity (maxvr.max)
4235 || compare_values (maxvr.max, initvr.max) != 1)))
4236 return;
4238 tmin = maxvr.min;
4239 tmax = maxvr.max;
4245 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4247 min = tmin;
4248 max = tmax;
4250 /* For VARYING or UNDEFINED ranges, just about anything we get
4251 from scalar evolutions should be better. */
4253 if (dir == EV_DIR_DECREASES)
4254 max = init;
4255 else
4256 min = init;
4258 else if (vr->type == VR_RANGE)
4260 min = vr->min;
4261 max = vr->max;
4263 if (dir == EV_DIR_DECREASES)
4265 /* INIT is the maximum value. If INIT is lower than VR->MAX
4266 but no smaller than VR->MIN, set VR->MAX to INIT. */
4267 if (compare_values (init, max) == -1)
4268 max = init;
4270 /* According to the loop information, the variable does not
4271 overflow. If we think it does, probably because of an
4272 overflow due to arithmetic on a different INF value,
4273 reset now. */
4274 if (is_negative_overflow_infinity (min)
4275 || compare_values (min, tmin) == -1)
4276 min = tmin;
4279 else
4281 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
4282 if (compare_values (init, min) == 1)
4283 min = init;
4285 if (is_positive_overflow_infinity (max)
4286 || compare_values (tmax, max) == -1)
4287 max = tmax;
4290 else
4291 return;
4293 /* If we just created an invalid range with the minimum
4294 greater than the maximum, we fail conservatively.
4295 This should happen only in unreachable
4296 parts of code, or for invalid programs. */
4297 if (compare_values (min, max) == 1
4298 || (is_negative_overflow_infinity (min)
4299 && is_positive_overflow_infinity (max)))
4300 return;
4302 /* Even for valid range info, the overflow flag will sometimes leak in.
4303 As the GIMPLE IL should have no constants with TREE_OVERFLOW set, we
4304 drop the flag except for +-overflow_infinity, which still needs
4305 special handling in the vrp pass. */
4306 if (TREE_OVERFLOW_P (min)
4307 && ! is_negative_overflow_infinity (min))
4308 min = drop_tree_overflow (min);
4309 if (TREE_OVERFLOW_P (max)
4310 && ! is_positive_overflow_infinity (max))
4311 max = drop_tree_overflow (max);
4313 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
4317 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
4319 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
4320 all the values in the ranges.
4322 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
4324 - Return NULL_TREE if it is not always possible to determine the
4325 value of the comparison.
4327 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
4328 overflow infinity was used in the test. */
4331 static tree
4332 compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
4333 bool *strict_overflow_p)
4335 /* VARYING or UNDEFINED ranges cannot be compared. */
4336 if (vr0->type == VR_VARYING
4337 || vr0->type == VR_UNDEFINED
4338 || vr1->type == VR_VARYING
4339 || vr1->type == VR_UNDEFINED)
4340 return NULL_TREE;
4342 /* Anti-ranges need to be handled separately. */
4343 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
4345 /* If both are anti-ranges, then we cannot compute any
4346 comparison. */
4347 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
4348 return NULL_TREE;
4350 /* These comparisons are never statically computable. */
4351 if (comp == GT_EXPR
4352 || comp == GE_EXPR
4353 || comp == LT_EXPR
4354 || comp == LE_EXPR)
4355 return NULL_TREE;
4357 /* Equality can be computed only between a range and an
4358 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
4359 if (vr0->type == VR_RANGE)
4361 /* To simplify processing, make VR0 the anti-range. */
4362 value_range *tmp = vr0;
4363 vr0 = vr1;
4364 vr1 = tmp;
4367 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
4369 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
4370 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
4371 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4373 return NULL_TREE;
4376 if (!usable_range_p (vr0, strict_overflow_p)
4377 || !usable_range_p (vr1, strict_overflow_p))
4378 return NULL_TREE;
4380 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
4381 operands around and change the comparison code. */
4382 if (comp == GT_EXPR || comp == GE_EXPR)
4384 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
4385 std::swap (vr0, vr1);
4388 if (comp == EQ_EXPR)
4390 /* Equality may only be computed if both ranges represent
4391 exactly one value. */
4392 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
4393 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
4395 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
4396 strict_overflow_p);
4397 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
4398 strict_overflow_p);
4399 if (cmp_min == 0 && cmp_max == 0)
4400 return boolean_true_node;
4401 else if (cmp_min != -2 && cmp_max != -2)
4402 return boolean_false_node;
4404 /* If [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] are disjoint, then V0 != V1. */
4405 else if (compare_values_warnv (vr0->min, vr1->max,
4406 strict_overflow_p) == 1
4407 || compare_values_warnv (vr1->min, vr0->max,
4408 strict_overflow_p) == 1)
4409 return boolean_false_node;
4411 return NULL_TREE;
4413 else if (comp == NE_EXPR)
4415 int cmp1, cmp2;
4417 /* If VR0 is completely to the left or completely to the right
4418 of VR1, they are always different. Notice that we need to
4419 make sure that both comparisons yield similar results to
4420 avoid comparing values that cannot be compared at
4421 compile-time. */
4422 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4423 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4424 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
4425 return boolean_true_node;
4427 /* If VR0 and VR1 represent a single value and are identical,
4428 return false. */
4429 else if (compare_values_warnv (vr0->min, vr0->max,
4430 strict_overflow_p) == 0
4431 && compare_values_warnv (vr1->min, vr1->max,
4432 strict_overflow_p) == 0
4433 && compare_values_warnv (vr0->min, vr1->min,
4434 strict_overflow_p) == 0
4435 && compare_values_warnv (vr0->max, vr1->max,
4436 strict_overflow_p) == 0)
4437 return boolean_false_node;
4439 /* Otherwise, they may or may not be different. */
4440 else
4441 return NULL_TREE;
4443 else if (comp == LT_EXPR || comp == LE_EXPR)
4445 int tst;
4447 /* If VR0 is to the left of VR1, return true. */
4448 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4449 if ((comp == LT_EXPR && tst == -1)
4450 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4452 if (overflow_infinity_range_p (vr0)
4453 || overflow_infinity_range_p (vr1))
4454 *strict_overflow_p = true;
4455 return boolean_true_node;
4458 /* If VR0 is to the right of VR1, return false. */
4459 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4460 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4461 || (comp == LE_EXPR && tst == 1))
4463 if (overflow_infinity_range_p (vr0)
4464 || overflow_infinity_range_p (vr1))
4465 *strict_overflow_p = true;
4466 return boolean_false_node;
4469 /* Otherwise, we don't know. */
4470 return NULL_TREE;
4473 gcc_unreachable ();
4477 /* Given a value range VR, a value VAL and a comparison code COMP, return
4478 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
4479 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
4480 always returns false. Return NULL_TREE if it is not always
4481 possible to determine the value of the comparison. Also set
4482 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
4483 infinity was used in the test. */
4485 static tree
4486 compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
4487 bool *strict_overflow_p)
4489 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4490 return NULL_TREE;
4492 /* Anti-ranges need to be handled separately. */
4493 if (vr->type == VR_ANTI_RANGE)
4495 /* For anti-ranges, the only predicates that we can compute at
4496 compile time are equality and inequality. */
4497 if (comp == GT_EXPR
4498 || comp == GE_EXPR
4499 || comp == LT_EXPR
4500 || comp == LE_EXPR)
4501 return NULL_TREE;
4503 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
4504 if (value_inside_range (val, vr->min, vr->max) == 1)
4505 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4507 return NULL_TREE;
4510 if (!usable_range_p (vr, strict_overflow_p))
4511 return NULL_TREE;
4513 if (comp == EQ_EXPR)
4515 /* EQ_EXPR may only be computed if VR represents exactly
4516 one value. */
4517 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
4519 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
4520 if (cmp == 0)
4521 return boolean_true_node;
4522 else if (cmp == -1 || cmp == 1 || cmp == 2)
4523 return boolean_false_node;
4525 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
4526 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
4527 return boolean_false_node;
4529 return NULL_TREE;
4531 else if (comp == NE_EXPR)
4533 /* If VAL is not inside VR, then they are always different. */
4534 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
4535 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
4536 return boolean_true_node;
4538 /* If VR represents exactly one value equal to VAL, then return
4539 false. */
4540 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
4541 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
4542 return boolean_false_node;
4544 /* Otherwise, they may or may not be different. */
4545 return NULL_TREE;
4547 else if (comp == LT_EXPR || comp == LE_EXPR)
4549 int tst;
4551 /* If VR is to the left of VAL, return true. */
4552 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4553 if ((comp == LT_EXPR && tst == -1)
4554 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4556 if (overflow_infinity_range_p (vr))
4557 *strict_overflow_p = true;
4558 return boolean_true_node;
4561 /* If VR is to the right of VAL, return false. */
4562 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4563 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4564 || (comp == LE_EXPR && tst == 1))
4566 if (overflow_infinity_range_p (vr))
4567 *strict_overflow_p = true;
4568 return boolean_false_node;
4571 /* Otherwise, we don't know. */
4572 return NULL_TREE;
4574 else if (comp == GT_EXPR || comp == GE_EXPR)
4576 int tst;
4578 /* If VR is to the right of VAL, return true. */
4579 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4580 if ((comp == GT_EXPR && tst == 1)
4581 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
4583 if (overflow_infinity_range_p (vr))
4584 *strict_overflow_p = true;
4585 return boolean_true_node;
4588 /* If VR is to the left of VAL, return false. */
4589 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4590 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
4591 || (comp == GE_EXPR && tst == -1))
4593 if (overflow_infinity_range_p (vr))
4594 *strict_overflow_p = true;
4595 return boolean_false_node;
4598 /* Otherwise, we don't know. */
4599 return NULL_TREE;
4602 gcc_unreachable ();
4606 /* Debugging dumps. */
4608 void dump_value_range (FILE *, value_range *);
4609 void debug_value_range (value_range *);
4610 void dump_all_value_ranges (FILE *);
4611 void debug_all_value_ranges (void);
4612 void dump_vr_equiv (FILE *, bitmap);
4613 void debug_vr_equiv (bitmap);
4616 /* Dump value range VR to FILE. */
4618 void
4619 dump_value_range (FILE *file, value_range *vr)
4621 if (vr == NULL)
4622 fprintf (file, "[]");
4623 else if (vr->type == VR_UNDEFINED)
4624 fprintf (file, "UNDEFINED");
4625 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4627 tree type = TREE_TYPE (vr->min);
4629 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
4631 if (is_negative_overflow_infinity (vr->min))
4632 fprintf (file, "-INF(OVF)");
4633 else if (INTEGRAL_TYPE_P (type)
4634 && !TYPE_UNSIGNED (type)
4635 && vrp_val_is_min (vr->min))
4636 fprintf (file, "-INF");
4637 else
4638 print_generic_expr (file, vr->min, 0);
4640 fprintf (file, ", ");
4642 if (is_positive_overflow_infinity (vr->max))
4643 fprintf (file, "+INF(OVF)");
4644 else if (INTEGRAL_TYPE_P (type)
4645 && vrp_val_is_max (vr->max))
4646 fprintf (file, "+INF");
4647 else
4648 print_generic_expr (file, vr->max, 0);
4650 fprintf (file, "]");
4652 if (vr->equiv)
4654 bitmap_iterator bi;
4655 unsigned i, c = 0;
4657 fprintf (file, " EQUIVALENCES: { ");
4659 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4661 print_generic_expr (file, ssa_name (i), 0);
4662 fprintf (file, " ");
4663 c++;
4666 fprintf (file, "} (%u elements)", c);
4669 else if (vr->type == VR_VARYING)
4670 fprintf (file, "VARYING");
4671 else
4672 fprintf (file, "INVALID RANGE");
4676 /* Dump value range VR to stderr. */
4678 DEBUG_FUNCTION void
4679 debug_value_range (value_range *vr)
4681 dump_value_range (stderr, vr);
4682 fprintf (stderr, "\n");
4686 /* Dump value ranges of all SSA_NAMEs to FILE. */
4688 void
4689 dump_all_value_ranges (FILE *file)
4691 size_t i;
4693 for (i = 0; i < num_vr_values; i++)
4695 if (vr_value[i])
4697 print_generic_expr (file, ssa_name (i), 0);
4698 fprintf (file, ": ");
4699 dump_value_range (file, vr_value[i]);
4700 fprintf (file, "\n");
4704 fprintf (file, "\n");
4708 /* Dump all value ranges to stderr. */
4710 DEBUG_FUNCTION void
4711 debug_all_value_ranges (void)
4713 dump_all_value_ranges (stderr);
4717 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4718 create a new SSA name N and return the assertion assignment
4719 'N = ASSERT_EXPR <V, V OP W>'. */
4721 static gimple *
4722 build_assert_expr_for (tree cond, tree v)
4724 tree a;
4725 gassign *assertion;
4727 gcc_assert (TREE_CODE (v) == SSA_NAME
4728 && COMPARISON_CLASS_P (cond));
4730 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
4731 assertion = gimple_build_assign (NULL_TREE, a);
4733 /* The new ASSERT_EXPR creates a new SSA name that replaces the
4734 operand of the ASSERT_EXPR. Create it so that the new name and the old
4735 one are registered in the replacement table and we can fix the SSA web
4736 after adding all the ASSERT_EXPRs. */
4737 create_new_def_for (v, assertion, NULL);
4739 return assertion;
4743 /* Return true if STMT is a conditional whose predicate involves
4744 floating point values. */
4746 static inline bool
4747 fp_predicate (gimple *stmt)
4749 GIMPLE_CHECK (stmt, GIMPLE_COND);
4751 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4754 /* If the range of values taken by OP can be inferred after STMT executes,
4755 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4756 describe the inferred range. Return true if a range could be
4757 inferred. */
4759 static bool
4760 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
4762 *val_p = NULL_TREE;
4763 *comp_code_p = ERROR_MARK;
4765 /* Do not attempt to infer anything in names that flow through
4766 abnormal edges. */
4767 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4768 return false;
4770 /* Similarly, don't infer anything from statements that may throw
4771 exceptions. ??? Relax this requirement? */
4772 if (stmt_could_throw_p (stmt))
4773 return false;
4775 /* If STMT is the last statement of a basic block with no normal
4776 successors, there is no point inferring anything about any of its
4777 operands. We would not be able to find a proper insertion point
4778 for the assertion, anyway. */
4779 if (stmt_ends_bb_p (stmt))
4781 edge_iterator ei;
4782 edge e;
4784 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
4785 if (!(e->flags & EDGE_ABNORMAL))
4786 break;
4787 if (e == NULL)
4788 return false;
4791 if (infer_nonnull_range (stmt, op))
4793 *val_p = build_int_cst (TREE_TYPE (op), 0);
4794 *comp_code_p = NE_EXPR;
4795 return true;
4798 return false;
4802 void dump_asserts_for (FILE *, tree);
4803 void debug_asserts_for (tree);
4804 void dump_all_asserts (FILE *);
4805 void debug_all_asserts (void);
4807 /* Dump all the registered assertions for NAME to FILE. */
4809 void
4810 dump_asserts_for (FILE *file, tree name)
4812 assert_locus *loc;
4814 fprintf (file, "Assertions to be inserted for ");
4815 print_generic_expr (file, name, 0);
4816 fprintf (file, "\n");
4818 loc = asserts_for[SSA_NAME_VERSION (name)];
4819 while (loc)
4821 fprintf (file, "\t");
4822 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4823 fprintf (file, "\n\tBB #%d", loc->bb->index);
4824 if (loc->e)
4826 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4827 loc->e->dest->index);
4828 dump_edge_info (file, loc->e, dump_flags, 0);
4830 fprintf (file, "\n\tPREDICATE: ");
4831 print_generic_expr (file, loc->expr, 0);
4832 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
4833 print_generic_expr (file, loc->val, 0);
4834 fprintf (file, "\n\n");
4835 loc = loc->next;
4838 fprintf (file, "\n");
4842 /* Dump all the registered assertions for NAME to stderr. */
4844 DEBUG_FUNCTION void
4845 debug_asserts_for (tree name)
4847 dump_asserts_for (stderr, name);
4851 /* Dump all the registered assertions for all the names to FILE. */
4853 void
4854 dump_all_asserts (FILE *file)
4856 unsigned i;
4857 bitmap_iterator bi;
4859 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4860 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4861 dump_asserts_for (file, ssa_name (i));
4862 fprintf (file, "\n");
4866 /* Dump all the registered assertions for all the names to stderr. */
4868 DEBUG_FUNCTION void
4869 debug_all_asserts (void)
4871 dump_all_asserts (stderr);
4875 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4876 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4877 E->DEST, then register this location as a possible insertion point
4878 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4880 BB, E and SI provide the exact insertion point for the new
4881 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4882 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4883 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4884 must not be NULL. */
4886 static void
4887 register_new_assert_for (tree name, tree expr,
4888 enum tree_code comp_code,
4889 tree val,
4890 basic_block bb,
4891 edge e,
4892 gimple_stmt_iterator si)
4894 assert_locus *n, *loc, *last_loc;
4895 basic_block dest_bb;
4897 gcc_checking_assert (bb == NULL || e == NULL);
4899 if (e == NULL)
4900 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4901 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4903 /* Never build an assert comparing against an integer constant with
4904 TREE_OVERFLOW set. This confuses our undefined overflow warning
4905 machinery. */
4906 if (TREE_OVERFLOW_P (val))
4907 val = drop_tree_overflow (val);
4909 /* The new assertion A will be inserted at BB or E. We need to
4910 determine if the new location is dominated by a previously
4911 registered location for A. If we are doing an edge insertion,
4912 assume that A will be inserted at E->DEST. Note that this is not
4913 necessarily true.
4915 If E is a critical edge, it will be split. But even if E is
4916 split, the new block will dominate the same set of blocks that
4917 E->DEST dominates.
4919 The reverse, however, is not true: blocks dominated by E->DEST
4920 will not be dominated by the new block created to split E. So,
4921 if the insertion location is on a critical edge, we will not use
4922 the new location to move another assertion previously registered
4923 at a block dominated by E->DEST. */
4924 dest_bb = (bb) ? bb : e->dest;
4926 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4927 VAL at a block dominating DEST_BB, then we don't need to insert a new
4928 one. Similarly, if the same assertion already exists at a block
4929 dominated by DEST_BB and the new location is not on a critical
4930 edge, then update the existing location for the assertion (i.e.,
4931 move the assertion up in the dominance tree).
4933 Note, this is implemented as a simple linked list because there
4934 should not be more than a handful of assertions registered per
4935 name. If this becomes a performance problem, a table hashed by
4936 COMP_CODE and VAL could be implemented. */
4937 loc = asserts_for[SSA_NAME_VERSION (name)];
4938 last_loc = loc;
4939 while (loc)
4941 if (loc->comp_code == comp_code
4942 && (loc->val == val
4943 || operand_equal_p (loc->val, val, 0))
4944 && (loc->expr == expr
4945 || operand_equal_p (loc->expr, expr, 0)))
4947 /* If E is not a critical edge and DEST_BB
4948 dominates the existing location for the assertion, move
4949 the assertion up in the dominance tree by updating its
4950 location information. */
4951 if ((e == NULL || !EDGE_CRITICAL_P (e))
4952 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4954 loc->bb = dest_bb;
4955 loc->e = e;
4956 loc->si = si;
4957 return;
4961 /* Update the last node of the list and move to the next one. */
4962 last_loc = loc;
4963 loc = loc->next;
4966 /* If we didn't find an assertion already registered for
4967 NAME COMP_CODE VAL, add a new one at the end of the list of
4968 assertions associated with NAME. */
4969 n = XNEW (struct assert_locus);
4970 n->bb = dest_bb;
4971 n->e = e;
4972 n->si = si;
4973 n->comp_code = comp_code;
4974 n->val = val;
4975 n->expr = expr;
4976 n->next = NULL;
4978 if (last_loc)
4979 last_loc->next = n;
4980 else
4981 asserts_for[SSA_NAME_VERSION (name)] = n;
4983 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4986 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4987 Extract a suitable test code and value and store them into *CODE_P and
4988 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4990 If no extraction was possible, return FALSE; otherwise return TRUE.
4992 If INVERT is true, then we invert the result stored into *CODE_P. */
4994 static bool
4995 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4996 tree cond_op0, tree cond_op1,
4997 bool invert, enum tree_code *code_p,
4998 tree *val_p)
5000 enum tree_code comp_code;
5001 tree val;
5003 /* We have a comparison of the form NAME COMP VAL
5004 or VAL COMP NAME. */
5005 if (name == cond_op1)
5007 /* If the predicate is of the form VAL COMP NAME, flip
5008 COMP around because we need to register NAME as the
5009 first operand in the predicate. */
5010 comp_code = swap_tree_comparison (cond_code);
5011 val = cond_op0;
5013 else if (name == cond_op0)
5015 /* The comparison is of the form NAME COMP VAL, so the
5016 comparison code remains unchanged. */
5017 comp_code = cond_code;
5018 val = cond_op1;
5020 else
5021 gcc_unreachable ();
5023 /* Invert the comparison code as necessary. */
5024 if (invert)
5025 comp_code = invert_tree_comparison (comp_code, 0);
5027 /* VRP only handles integral and pointer types. */
5028 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
5029 && ! POINTER_TYPE_P (TREE_TYPE (val)))
5030 return false;
5032 /* Do not register always-false predicates.
5033 FIXME: this works around a limitation in fold() when dealing with
5034 enumerations. Given 'enum { N1, N2 } x;', fold will not
5035 fold 'if (x > N2)' to 'if (0)'. */
5036 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
5037 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
5039 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
5040 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
5042 if (comp_code == GT_EXPR
5043 && (!max
5044 || compare_values (val, max) == 0))
5045 return false;
5047 if (comp_code == LT_EXPR
5048 && (!min
5049 || compare_values (val, min) == 0))
5050 return false;
5052 *code_p = comp_code;
5053 *val_p = val;
5054 return true;
5057 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
5058 (otherwise return VAL). VAL and MASK must be zero-extended for
5059 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
5060 (to transform signed values into unsigned) and at the end xor
5061 SGNBIT back. */
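/* For example, with PREC 8, MASK 0xf0, SGNBIT 0 and VAL 0x2a, the
   smallest RES with RES > VAL and (RES & MASK) == RES is 0x30, which
   is what the loop below computes and returns.  */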
5063 static wide_int
5064 masked_increment (const wide_int &val_in, const wide_int &mask,
5065 const wide_int &sgnbit, unsigned int prec)
5067 wide_int bit = wi::one (prec), res;
5068 unsigned int i;
5070 wide_int val = val_in ^ sgnbit;
5071 for (i = 0; i < prec; i++, bit += bit)
5073 res = mask;
5074 if ((res & bit) == 0)
5075 continue;
5076 res = bit - 1;
5077 res = (val + bit).and_not (res);
5078 res &= mask;
5079 if (wi::gtu_p (res, val))
5080 return res ^ sgnbit;
5082 return val ^ sgnbit;
5085 /* Try to register an edge assertion for SSA name NAME on edge E for
5086 the condition COND contributing to the conditional jump pointed to by BSI.
5087 Invert the condition COND if INVERT is true. */
5089 static void
5090 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
5091 enum tree_code cond_code,
5092 tree cond_op0, tree cond_op1, bool invert)
5094 tree val;
5095 enum tree_code comp_code;
5097 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5098 cond_op0,
5099 cond_op1,
5100 invert, &comp_code, &val))
5101 return;
5103 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
5104 reachable from E. */
5105 if (live_on_edge (e, name))
5106 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
5108 /* In the case of NAME <= CST and NAME being defined as
5109 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
5110 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
5111 This catches range and anti-range tests. */
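/* For instance, for a signed Y and the (hypothetical) test

     x = (unsigned) y + 10;
     if (x <= 20)

   the true edge implies Y >= -10 and Y <= 10, and the asserts
   registered below let VRP recover that range for Y.  */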
5112 if ((comp_code == LE_EXPR
5113 || comp_code == GT_EXPR)
5114 && TREE_CODE (val) == INTEGER_CST
5115 && TYPE_UNSIGNED (TREE_TYPE (val)))
5117 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5118 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
5120 /* Extract CST2 from the (optional) addition. */
5121 if (is_gimple_assign (def_stmt)
5122 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
5124 name2 = gimple_assign_rhs1 (def_stmt);
5125 cst2 = gimple_assign_rhs2 (def_stmt);
5126 if (TREE_CODE (name2) == SSA_NAME
5127 && TREE_CODE (cst2) == INTEGER_CST)
5128 def_stmt = SSA_NAME_DEF_STMT (name2);
5131 /* Extract NAME3 from the (optional) sign-changing cast. */
5132 if (gimple_assign_cast_p (def_stmt))
5134 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
5135 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5136 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
5137 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
5138 name3 = gimple_assign_rhs1 (def_stmt);
5141 /* If name3 is used later, create an ASSERT_EXPR for it. */
5142 if (name3 != NULL_TREE
5143 && TREE_CODE (name3) == SSA_NAME
5144 && (cst2 == NULL_TREE
5145 || TREE_CODE (cst2) == INTEGER_CST)
5146 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
5147 && live_on_edge (e, name3))
5149 tree tmp;
5151 /* Build an expression for the range test. */
5152 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
5153 if (cst2 != NULL_TREE)
5154 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5156 if (dump_file)
5158 fprintf (dump_file, "Adding assert for ");
5159 print_generic_expr (dump_file, name3, 0);
5160 fprintf (dump_file, " from ");
5161 print_generic_expr (dump_file, tmp, 0);
5162 fprintf (dump_file, "\n");
5165 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
5168 /* If name2 is used later, create an ASSERT_EXPR for it. */
5169 if (name2 != NULL_TREE
5170 && TREE_CODE (name2) == SSA_NAME
5171 && TREE_CODE (cst2) == INTEGER_CST
5172 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5173 && live_on_edge (e, name2))
5175 tree tmp;
5177 /* Build an expression for the range test. */
5178 tmp = name2;
5179 if (TREE_TYPE (name) != TREE_TYPE (name2))
5180 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
5181 if (cst2 != NULL_TREE)
5182 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5184 if (dump_file)
5186 fprintf (dump_file, "Adding assert for ");
5187 print_generic_expr (dump_file, name2, 0);
5188 fprintf (dump_file, " from ");
5189 print_generic_expr (dump_file, tmp, 0);
5190 fprintf (dump_file, "\n");
5193 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
5197 /* In the case of post-in/decrement tests like if (i++) ... and uses
5198 of the in/decremented value on the edge, the extra name we want to
5199 assert for is not on the def chain of the name being compared. Instead
5200 it is in the set of use stmts.
5201 Similar cases happen for conversions that were simplified through
5202 fold_{sign_changed,widened}_comparison. */
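/* For instance, given the (hypothetical) statements

     i_3 = i_2 + 1;
     if (i_2 != 0)

   the use i_3 is not on the def chain of i_2, yet on the true edge we
   can register the assertion i_3 != 1.  */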
5203 if ((comp_code == NE_EXPR
5204 || comp_code == EQ_EXPR)
5205 && TREE_CODE (val) == INTEGER_CST)
5207 imm_use_iterator ui;
5208 gimple *use_stmt;
5209 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
5211 if (!is_gimple_assign (use_stmt))
5212 continue;
5214 /* Cut off to use-stmts that are dominating the predecessor. */
5215 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
5216 continue;
5218 tree name2 = gimple_assign_lhs (use_stmt);
5219 if (TREE_CODE (name2) != SSA_NAME
5220 || !live_on_edge (e, name2))
5221 continue;
5223 enum tree_code code = gimple_assign_rhs_code (use_stmt);
5224 tree cst;
5225 if (code == PLUS_EXPR
5226 || code == MINUS_EXPR)
5228 cst = gimple_assign_rhs2 (use_stmt);
5229 if (TREE_CODE (cst) != INTEGER_CST)
5230 continue;
5231 cst = int_const_binop (code, val, cst);
5233 else if (CONVERT_EXPR_CODE_P (code))
5235 /* For truncating conversions we cannot record
5236 an inequality. */
5237 if (comp_code == NE_EXPR
5238 && (TYPE_PRECISION (TREE_TYPE (name2))
5239 < TYPE_PRECISION (TREE_TYPE (name))))
5240 continue;
5241 cst = fold_convert (TREE_TYPE (name2), val);
5243 else
5244 continue;
5246 if (TREE_OVERFLOW_P (cst))
5247 cst = drop_tree_overflow (cst);
5248 register_new_assert_for (name2, name2, comp_code, cst,
5249 NULL, e, bsi);
5253 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
5254 && TREE_CODE (val) == INTEGER_CST)
5256 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5257 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
5258 tree val2 = NULL_TREE;
5259 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
5260 wide_int mask = wi::zero (prec);
5261 unsigned int nprec = prec;
5262 enum tree_code rhs_code = ERROR_MARK;
5264 if (is_gimple_assign (def_stmt))
5265 rhs_code = gimple_assign_rhs_code (def_stmt);
5267 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
5268 assert that A != CST1 -+ CST2. */
5269 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
5270 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
5272 tree op0 = gimple_assign_rhs1 (def_stmt);
5273 tree op1 = gimple_assign_rhs2 (def_stmt);
5274 if (TREE_CODE (op0) == SSA_NAME
5275 && TREE_CODE (op1) == INTEGER_CST
5276 && live_on_edge (e, op0))
5278 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
5279 ? MINUS_EXPR : PLUS_EXPR);
5280 op1 = int_const_binop (reverse_op, val, op1);
5281 if (TREE_OVERFLOW (op1))
5282 op1 = drop_tree_overflow (op1);
5283 register_new_assert_for (op0, op0, comp_code, op1, NULL, e, bsi);
5287 /* Add asserts for NAME cmp CST and NAME being defined
5288 as NAME = (int) NAME2. */
5289 if (!TYPE_UNSIGNED (TREE_TYPE (val))
5290 && (comp_code == LE_EXPR || comp_code == LT_EXPR
5291 || comp_code == GT_EXPR || comp_code == GE_EXPR)
5292 && gimple_assign_cast_p (def_stmt))
5294 name2 = gimple_assign_rhs1 (def_stmt);
5295 if (CONVERT_EXPR_CODE_P (rhs_code)
5296 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5297 && TYPE_UNSIGNED (TREE_TYPE (name2))
5298 && prec == TYPE_PRECISION (TREE_TYPE (name2))
5299 && (comp_code == LE_EXPR || comp_code == GT_EXPR
5300 || !tree_int_cst_equal (val,
5301 TYPE_MIN_VALUE (TREE_TYPE (val))))
5302 && live_on_edge (e, name2))
5304 tree tmp, cst;
5305 enum tree_code new_comp_code = comp_code;
5307 cst = fold_convert (TREE_TYPE (name2),
5308 TYPE_MIN_VALUE (TREE_TYPE (val)));
5309 /* Build an expression for the range test. */
5310 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
5311 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
5312 fold_convert (TREE_TYPE (name2), val));
5313 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5315 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
5316 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
5317 build_int_cst (TREE_TYPE (name2), 1));
5320 if (dump_file)
5322 fprintf (dump_file, "Adding assert for ");
5323 print_generic_expr (dump_file, name2, 0);
5324 fprintf (dump_file, " from ");
5325 print_generic_expr (dump_file, tmp, 0);
5326 fprintf (dump_file, "\n");
5329 register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
5330 e, bsi);
5334 /* Add asserts for NAME cmp CST and NAME being defined as
5335 NAME = NAME2 >> CST2.
5337 Extract CST2 from the right shift. */
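/* For example, if NAME = NAME2 >> 4 and the test is NAME == 0x3, then
   MASK is 0xf and VAL2 is 0x30, and the assert registered below
   encodes NAME2 being in [0x30, 0x3f].  */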
5338 if (rhs_code == RSHIFT_EXPR)
5340 name2 = gimple_assign_rhs1 (def_stmt);
5341 cst2 = gimple_assign_rhs2 (def_stmt);
5342 if (TREE_CODE (name2) == SSA_NAME
5343 && tree_fits_uhwi_p (cst2)
5344 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5345 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
5346 && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
5347 && live_on_edge (e, name2))
5349 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
5350 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
5353 if (val2 != NULL_TREE
5354 && TREE_CODE (val2) == INTEGER_CST
5355 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
5356 TREE_TYPE (val),
5357 val2, cst2), val))
5359 enum tree_code new_comp_code = comp_code;
5360 tree tmp, new_val;
5362 tmp = name2;
5363 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
5365 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5367 tree type = build_nonstandard_integer_type (prec, 1);
5368 tmp = build1 (NOP_EXPR, type, name2);
5369 val2 = fold_convert (type, val2);
5371 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
5372 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
5373 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
5375 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5377 wide_int minval
5378 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5379 new_val = val2;
5380 if (minval == new_val)
5381 new_val = NULL_TREE;
5383 else
5385 wide_int maxval
5386 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5387 mask |= val2;
5388 if (mask == maxval)
5389 new_val = NULL_TREE;
5390 else
5391 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
5394 if (new_val)
5396 if (dump_file)
5398 fprintf (dump_file, "Adding assert for ");
5399 print_generic_expr (dump_file, name2, 0);
5400 fprintf (dump_file, " from ");
5401 print_generic_expr (dump_file, tmp, 0);
5402 fprintf (dump_file, "\n");
5405 register_new_assert_for (name2, tmp, new_comp_code, new_val,
5406 NULL, e, bsi);
5410 /* Add asserts for NAME cmp CST and NAME being defined as
5411 NAME = NAME2 & CST2.
5413 Extract CST2 from the and.
5415 Also handle
5416 NAME = (unsigned) NAME2;
5417 casts where NAME's type is unsigned and has smaller precision
5418 than NAME2's type, as if it were NAME = NAME2 & MASK. */
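/* For example, for NAME = NAME2 & 0xf0 and the test NAME == 0x30 on an
   8-bit unsigned type, MINV is 0x30 and MAXV is 0x3f, and the assert
   registered at the end encodes NAME2 being in [0x30, 0x3f].  */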
5419 names[0] = NULL_TREE;
5420 names[1] = NULL_TREE;
5421 cst2 = NULL_TREE;
5422 if (rhs_code == BIT_AND_EXPR
5423 || (CONVERT_EXPR_CODE_P (rhs_code)
5424 && INTEGRAL_TYPE_P (TREE_TYPE (val))
5425 && TYPE_UNSIGNED (TREE_TYPE (val))
5426 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5427 > prec))
5429 name2 = gimple_assign_rhs1 (def_stmt);
5430 if (rhs_code == BIT_AND_EXPR)
5431 cst2 = gimple_assign_rhs2 (def_stmt);
5432 else
5434 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
5435 nprec = TYPE_PRECISION (TREE_TYPE (name2));
5437 if (TREE_CODE (name2) == SSA_NAME
5438 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5439 && TREE_CODE (cst2) == INTEGER_CST
5440 && !integer_zerop (cst2)
5441 && (nprec > 1
5442 || TYPE_UNSIGNED (TREE_TYPE (val))))
5444 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
5445 if (gimple_assign_cast_p (def_stmt2))
5447 names[1] = gimple_assign_rhs1 (def_stmt2);
5448 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
5449 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
5450 || (TYPE_PRECISION (TREE_TYPE (name2))
5451 != TYPE_PRECISION (TREE_TYPE (names[1])))
5452 || !live_on_edge (e, names[1]))
5453 names[1] = NULL_TREE;
5455 if (live_on_edge (e, name2))
5456 names[0] = name2;
5459 if (names[0] || names[1])
5461 wide_int minv, maxv, valv, cst2v;
5462 wide_int tem, sgnbit;
5463 bool valid_p = false, valn, cst2n;
5464 enum tree_code ccode = comp_code;
5466 valv = wide_int::from (val, nprec, UNSIGNED);
5467 cst2v = wide_int::from (cst2, nprec, UNSIGNED);
5468 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
5469 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
5470 /* If CST2 doesn't have the most significant bit set,
5471 but VAL is negative, we have a comparison like
5472 if ((x & 0x123) > -4) (always true). Just give up. */
5473 if (!cst2n && valn)
5474 ccode = ERROR_MARK;
5475 if (cst2n)
5476 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5477 else
5478 sgnbit = wi::zero (nprec);
5479 minv = valv & cst2v;
5480 switch (ccode)
5482 case EQ_EXPR:
5483 /* Minimum unsigned value for equality is VAL & CST2
5484 (should be equal to VAL, otherwise we probably should
5485 have folded the comparison into false) and
5486 maximum unsigned value is VAL | ~CST2. */
5487 maxv = valv | ~cst2v;
5488 valid_p = true;
5489 break;
5491 case NE_EXPR:
5492 tem = valv | ~cst2v;
5493 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
5494 if (valv == 0)
5496 cst2n = false;
5497 sgnbit = wi::zero (nprec);
5498 goto gt_expr;
5500 /* If (VAL | ~CST2) is all ones, handle it as
5501 (X & CST2) < VAL. */
5502 if (tem == -1)
5504 cst2n = false;
5505 valn = false;
5506 sgnbit = wi::zero (nprec);
5507 goto lt_expr;
5509 if (!cst2n && wi::neg_p (cst2v))
5510 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5511 if (sgnbit != 0)
5513 if (valv == sgnbit)
5515 cst2n = true;
5516 valn = true;
5517 goto gt_expr;
5519 if (tem == wi::mask (nprec - 1, false, nprec))
5521 cst2n = true;
5522 goto lt_expr;
5524 if (!cst2n)
5525 sgnbit = wi::zero (nprec);
5527 break;
5529 case GE_EXPR:
5530 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5531 is VAL and maximum unsigned value is ~0. For signed
5532 comparison, if CST2 doesn't have most significant bit
5533 set, handle it similarly. If CST2 has MSB set,
5534 the minimum is the same, and maximum is ~0U/2. */
5535 if (minv != valv)
5537 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5538 VAL. */
5539 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5540 if (minv == valv)
5541 break;
5543 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5544 valid_p = true;
5545 break;
5547 case GT_EXPR:
5548 gt_expr:
5549 /* Find out smallest MINV where MINV > VAL
5550 && (MINV & CST2) == MINV, if any. If VAL is signed and
5551 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
5552 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5553 if (minv == valv)
5554 break;
5555 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5556 valid_p = true;
5557 break;
5559 case LE_EXPR:
5560 /* Minimum unsigned value for <= is 0 and maximum
5561 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5562 Otherwise, find smallest VAL2 where VAL2 > VAL
5563 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5564 as maximum.
5565 For signed comparison, if CST2 doesn't have most
5566 significant bit set, handle it similarly. If CST2 has
5567 MSB set, the maximum is the same and minimum is INT_MIN. */
5568 if (minv == valv)
5569 maxv = valv;
5570 else
5572 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5573 if (maxv == valv)
5574 break;
5575 maxv -= 1;
5577 maxv |= ~cst2v;
5578 minv = sgnbit;
5579 valid_p = true;
5580 break;
5582 case LT_EXPR:
5583 lt_expr:
5584 /* Minimum unsigned value for < is 0 and maximum
5585 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5586 Otherwise, find smallest VAL2 where VAL2 > VAL
5587 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5588 as maximum.
5589 For signed comparison, if CST2 doesn't have most
5590 significant bit set, handle it similarly. If CST2 has
5591 MSB set, the maximum is the same and minimum is INT_MIN. */
5592 if (minv == valv)
5594 if (valv == sgnbit)
5595 break;
5596 maxv = valv;
5598 else
5600 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5601 if (maxv == valv)
5602 break;
5604 maxv -= 1;
5605 maxv |= ~cst2v;
5606 minv = sgnbit;
5607 valid_p = true;
5608 break;
5610 default:
5611 break;
5613 if (valid_p
5614 && (maxv - minv) != -1)
5616 tree tmp, new_val, type;
5617 int i;
5619 for (i = 0; i < 2; i++)
5620 if (names[i])
5622 wide_int maxv2 = maxv;
5623 tmp = names[i];
5624 type = TREE_TYPE (names[i]);
5625 if (!TYPE_UNSIGNED (type))
5627 type = build_nonstandard_integer_type (nprec, 1);
5628 tmp = build1 (NOP_EXPR, type, names[i]);
5630 if (minv != 0)
5632 tmp = build2 (PLUS_EXPR, type, tmp,
5633 wide_int_to_tree (type, -minv));
5634 maxv2 = maxv - minv;
5636 new_val = wide_int_to_tree (type, maxv2);
5638 if (dump_file)
5640 fprintf (dump_file, "Adding assert for ");
5641 print_generic_expr (dump_file, names[i], 0);
5642 fprintf (dump_file, " from ");
5643 print_generic_expr (dump_file, tmp, 0);
5644 fprintf (dump_file, "\n");
5647 register_new_assert_for (names[i], tmp, LE_EXPR,
5648 new_val, NULL, e, bsi);
5655 /* OP is an operand of a truth value expression which is known to have
5656 a particular value. Register any asserts for OP and for any
5657 operands in OP's defining statement.
5659 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
5660 if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */
5662 static void
5663 register_edge_assert_for_1 (tree op, enum tree_code code,
5664 edge e, gimple_stmt_iterator bsi)
5666 gimple *op_def;
5667 tree val;
5668 enum tree_code rhs_code;
5670 /* We only care about SSA_NAMEs. */
5671 if (TREE_CODE (op) != SSA_NAME)
5672 return;
5674 /* We know that OP will have a zero or nonzero value. If OP is
5675 live on this edge, go ahead and register an assert for OP.
5676 if (live_on_edge (e, op))
5678 val = build_int_cst (TREE_TYPE (op), 0);
5679 register_new_assert_for (op, op, code, val, NULL, e, bsi);
5682 /* Now look at how OP is set. If it's set from a comparison,
5683 a truth operation or some bit operations, then we may be able
5684 to register information about the operands of that assignment. */
5685 op_def = SSA_NAME_DEF_STMT (op);
5686 if (gimple_code (op_def) != GIMPLE_ASSIGN)
5687 return;
5689 rhs_code = gimple_assign_rhs_code (op_def);
5691 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5693 bool invert = (code == EQ_EXPR ? true : false);
5694 tree op0 = gimple_assign_rhs1 (op_def);
5695 tree op1 = gimple_assign_rhs2 (op_def);
5697 if (TREE_CODE (op0) == SSA_NAME)
5698 register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, invert);
5699 if (TREE_CODE (op1) == SSA_NAME)
5700 register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, invert);
5702 else if ((code == NE_EXPR
5703 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5704 || (code == EQ_EXPR
5705 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5707 /* Recurse on each operand. */
5708 tree op0 = gimple_assign_rhs1 (op_def);
5709 tree op1 = gimple_assign_rhs2 (op_def);
5710 if (TREE_CODE (op0) == SSA_NAME
5711 && has_single_use (op0))
5712 register_edge_assert_for_1 (op0, code, e, bsi);
5713 if (TREE_CODE (op1) == SSA_NAME
5714 && has_single_use (op1))
5715 register_edge_assert_for_1 (op1, code, e, bsi);
5717 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5718 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5720 /* Recurse, flipping CODE. */
5721 code = invert_tree_comparison (code, false);
5722 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
5724 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5726 /* Recurse through the copy. */
5727 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
5729 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5731 /* Recurse through the type conversion, unless it is a narrowing
5732 conversion or conversion from non-integral type. */
5733 tree rhs = gimple_assign_rhs1 (op_def);
5734 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
5735 && (TYPE_PRECISION (TREE_TYPE (rhs))
5736 <= TYPE_PRECISION (TREE_TYPE (op))))
5737 register_edge_assert_for_1 (rhs, code, e, bsi);
5741 /* Try to register an edge assertion for SSA name NAME on edge E for
5742 the condition COND contributing to the conditional jump pointed to by
5743 SI. */
5745 static void
5746 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
5747 enum tree_code cond_code, tree cond_op0,
5748 tree cond_op1)
5750 tree val;
5751 enum tree_code comp_code;
5752 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
5754 /* Do not attempt to infer anything in names that flow through
5755 abnormal edges. */
5756 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
5757 return;
5759 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5760 cond_op0, cond_op1,
5761 is_else_edge,
5762 &comp_code, &val))
5763 return;
5765 /* Register ASSERT_EXPRs for name. */
5766 register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
5767 cond_op1, is_else_edge);
5770 /* If COND is effectively an equality test of an SSA_NAME against
5771 the value zero or one, then we may be able to assert values
5772 for SSA_NAMEs which flow into COND. */
5774 /* In the case of NAME == 1 or NAME != 0, when the defining statement
5775 of NAME is a BIT_AND_EXPR, we can assert that both operands of the
5776 BIT_AND_EXPR have nonzero value. */
5777 if (((comp_code == EQ_EXPR && integer_onep (val))
5778 || (comp_code == NE_EXPR && integer_zerop (val))))
5780 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5782 if (is_gimple_assign (def_stmt)
5783 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
5785 tree op0 = gimple_assign_rhs1 (def_stmt);
5786 tree op1 = gimple_assign_rhs2 (def_stmt);
5787 register_edge_assert_for_1 (op0, NE_EXPR, e, si);
5788 register_edge_assert_for_1 (op1, NE_EXPR, e, si);
5792 /* In the case of NAME == 0 or NAME != 1, when the defining statement
5793 of NAME is a BIT_IOR_EXPR, we can assert that both operands of the
5794 BIT_IOR_EXPR have zero value. */
5795 if (((comp_code == EQ_EXPR && integer_zerop (val))
5796 || (comp_code == NE_EXPR && integer_onep (val))))
5798 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5800 /* For BIT_IOR_EXPR both operands necessarily have zero value only
5801 if NAME == 0, or if the type precision is one. */
5802 if (is_gimple_assign (def_stmt)
5803 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
5804 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
5805 || comp_code == EQ_EXPR)))
5807 tree op0 = gimple_assign_rhs1 (def_stmt);
5808 tree op1 = gimple_assign_rhs2 (def_stmt);
5809 register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
5810 register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
5816 /* Determine whether the outgoing edges of BB should receive an
5817 ASSERT_EXPR for each of the operands of BB's LAST statement.
5818 The last statement of BB must be a COND_EXPR.
5820 If any of the sub-graphs rooted at BB have an interesting use of
5821 the predicate operands, an assert location node is added to the
5822 list of assertions for the corresponding operands. */
5824 static void
5825 find_conditional_asserts (basic_block bb, gcond *last)
5827 gimple_stmt_iterator bsi;
5828 tree op;
5829 edge_iterator ei;
5830 edge e;
5831 ssa_op_iter iter;
5833 bsi = gsi_for_stmt (last);
5835 /* Look for uses of the operands in each of the sub-graphs
5836 rooted at BB. We need to check each of the outgoing edges
5837 separately, so that we know what kind of ASSERT_EXPR to
5838 insert. */
5839 FOR_EACH_EDGE (e, ei, bb->succs)
5841 if (e->dest == bb)
5842 continue;
5844 /* Register the necessary assertions for each operand in the
5845 conditional predicate. */
5846 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
5847 register_edge_assert_for (op, e, bsi,
5848 gimple_cond_code (last),
5849 gimple_cond_lhs (last),
5850 gimple_cond_rhs (last));
5854 struct case_info
5856 tree expr;
5857 basic_block bb;
5860 /* Compare two case labels sorting first by the destination bb index
5861 and then by the case value. */
5863 static int
5864 compare_case_labels (const void *p1, const void *p2)
5866 const struct case_info *ci1 = (const struct case_info *) p1;
5867 const struct case_info *ci2 = (const struct case_info *) p2;
5868 int idx1 = ci1->bb->index;
5869 int idx2 = ci2->bb->index;
5871 if (idx1 < idx2)
5872 return -1;
5873 else if (idx1 == idx2)
5875 /* Make sure the default label is first in a group. */
5876 if (!CASE_LOW (ci1->expr))
5877 return -1;
5878 else if (!CASE_LOW (ci2->expr))
5879 return 1;
5880 else
5881 return tree_int_cst_compare (CASE_LOW (ci1->expr),
5882 CASE_LOW (ci2->expr));
5884 else
5885 return 1;
5888 /* Determine whether the outgoing edges of BB should receive an
5889 ASSERT_EXPR for each of the operands of BB's LAST statement.
5890 The last statement of BB must be a SWITCH_EXPR.
5892 If any of the sub-graphs rooted at BB have an interesting use of
5893 the predicate operands, an assert location node is added to the
5894 list of assertions for the corresponding operands. */
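/* Illustrative sketch (not part of the original sources): for

     switch (i_2)
       {
       case 1 ... 3: goto <L1>;
       case 7:       goto <L2>;
       default:      goto <L3>;
       }

   the edge to <L1> receives the asserts i_2 >= 1 and i_2 <= 3, the edge to
   <L2> receives i_2 == 7, and, if i_2 is live there, the default edge
   receives assertions describing the anti-ranges ~[1, 3] and ~[7, 7].  */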
5896 static void
5897 find_switch_asserts (basic_block bb, gswitch *last)
5899 gimple_stmt_iterator bsi;
5900 tree op;
5901 edge e;
5902 struct case_info *ci;
5903 size_t n = gimple_switch_num_labels (last);
5904 #if GCC_VERSION >= 4000
5905 unsigned int idx;
5906 #else
5907 /* Work around GCC 3.4 bug (PR 37086). */
5908 volatile unsigned int idx;
5909 #endif
5911 bsi = gsi_for_stmt (last);
5912 op = gimple_switch_index (last);
5913 if (TREE_CODE (op) != SSA_NAME)
5914 return;
5916 /* Build a vector of case labels sorted by destination basic block. */
5917 ci = XNEWVEC (struct case_info, n);
5918 for (idx = 0; idx < n; ++idx)
5920 ci[idx].expr = gimple_switch_label (last, idx);
5921 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
5923 edge default_edge = find_edge (bb, ci[0].bb);
5924 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
5926 for (idx = 0; idx < n; ++idx)
5928 tree min, max;
5929 tree cl = ci[idx].expr;
5930 basic_block cbb = ci[idx].bb;
5932 min = CASE_LOW (cl);
5933 max = CASE_HIGH (cl);
5935 /* If there are multiple case labels with the same destination
5936 we need to combine them to a single value range for the edge. */
5937 if (idx + 1 < n && cbb == ci[idx + 1].bb)
5939 /* Skip labels until the last of the group. */
5940 do {
5941 ++idx;
5942 } while (idx < n && cbb == ci[idx].bb);
5943 --idx;
5945 /* Pick up the maximum of the case label range. */
5946 if (CASE_HIGH (ci[idx].expr))
5947 max = CASE_HIGH (ci[idx].expr);
5948 else
5949 max = CASE_LOW (ci[idx].expr);
5952 /* Can't extract a useful assertion out of a range that includes the
5953 default label. */
5954 if (min == NULL_TREE)
5955 continue;
5957 /* Find the edge to register the assert expr on. */
5958 e = find_edge (bb, cbb);
5960 /* Register the necessary assertions for the operand in the
5961 SWITCH_EXPR. */
5962 register_edge_assert_for (op, e, bsi,
5963 max ? GE_EXPR : EQ_EXPR,
5964 op, fold_convert (TREE_TYPE (op), min));
5965 if (max)
5966 register_edge_assert_for (op, e, bsi, LE_EXPR, op,
5967 fold_convert (TREE_TYPE (op), max));
5970 XDELETEVEC (ci);
5972 if (!live_on_edge (default_edge, op))
5973 return;
5975 /* Now register, along the default edge, assertions that correspond to the
5976 anti-range of each label. */
5977 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
5978 for (idx = 1; idx < n; idx++)
5980 tree min, max;
5981 tree cl = gimple_switch_label (last, idx);
5983 min = CASE_LOW (cl);
5984 max = CASE_HIGH (cl);
5986 /* Combine contiguous case ranges to reduce the number of assertions
5987 to insert. */
5988 for (idx = idx + 1; idx < n; idx++)
5990 tree next_min, next_max;
5991 tree next_cl = gimple_switch_label (last, idx);
5993 next_min = CASE_LOW (next_cl);
5994 next_max = CASE_HIGH (next_cl);
5996 wide_int difference = wi::sub (next_min, max ? max : min);
5997 if (wi::eq_p (difference, 1))
5998 max = next_max ? next_max : next_min;
5999 else
6000 break;
6002 idx--;
6004 if (max == NULL_TREE)
6006 /* Register the assertion OP != MIN. */
6007 min = fold_convert (TREE_TYPE (op), min);
6008 register_edge_assert_for (op, default_edge, bsi, NE_EXPR, op, min);
6010 else
6012 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
6013 which will give OP the anti-range ~[MIN,MAX]. */
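/* Worked example (illustrative, not from the original sources): for a
   combined label range [5, 10] on a signed int OP, the registered assert
   is (unsigned int) OP - 5 > 5, which holds exactly when OP lies outside
   [5, 10], i.e. it gives OP the anti-range ~[5, 10] on the default edge.  */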
6014 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
6015 min = fold_convert (TREE_TYPE (uop), min);
6016 max = fold_convert (TREE_TYPE (uop), max);
6018 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
6019 tree rhs = int_const_binop (MINUS_EXPR, max, min);
6020 register_new_assert_for (op, lhs, GT_EXPR, rhs,
6021 NULL, default_edge, bsi);
6024 if (--insertion_limit == 0)
6025 break;
6030 /* Traverse all the statements in block BB looking for statements that
6031 may generate useful assertions for the SSA names in their operand.
6032 If a statement produces a useful assertion A for name N_i, then the
6033 list of assertions already generated for N_i is scanned to
6034 determine if A is actually needed.
6036 If N_i already had the assertion A at a location dominating the
6037 current location, then nothing needs to be done. Otherwise, the
6038 new location for A is recorded instead.
6040 1- For every statement S in BB, all the variables used by S are
6041 added to bitmap FOUND_IN_SUBGRAPH.
6043 2- If statement S uses an operand N in a way that exposes a known
6044 value range for N, then if N was not already generated by an
6045 ASSERT_EXPR, create a new assert location for N. For instance,
6046 if N is a pointer and the statement dereferences it, we can
6047 assume that N is not NULL.
6049 3- COND_EXPRs are a special case of #2. We can derive range
6050 information from the predicate but need to insert different
6051 ASSERT_EXPRs for each of the sub-graphs rooted at the
6052 conditional block. If the last statement of BB is a conditional
6053 expression of the form 'X op Y', then
6055 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
6057 b) If the conditional is the only entry point to the sub-graph
6058 corresponding to the THEN_CLAUSE, recurse into it. On
6059 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
6060 an ASSERT_EXPR is added for the corresponding variable.
6062 c) Repeat step (b) on the ELSE_CLAUSE.
6064 d) Mark X and Y in FOUND_IN_SUBGRAPH.
6066 For instance,
6068 if (a == 9)
6069 b = a;
6070 else
6071 b = c + 1;
6073 In this case, an assertion on the THEN clause is useful to
6074 determine that 'a' is always 9 on that edge. However, an assertion
6075 on the ELSE clause would be unnecessary.
6077 4- If BB does not end in a conditional expression, then we recurse
6078 into BB's dominator children.
6080 At the end of the recursive traversal, every SSA name will have a
6081 list of locations where ASSERT_EXPRs should be added. When a new
6082 location for name N is found, it is registered by calling
6083 register_new_assert_for. That function keeps track of all the
6084 registered assertions to prevent adding unnecessary assertions.
6085 For instance, if a pointer P_4 is dereferenced more than once in a
6086 dominator tree, only the location dominating all the dereference of
6087 P_4 will receive an ASSERT_EXPR. */
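/* Illustrative sketch of case #2 above (not part of the original comment):

     *p_4 = 1;
     ...
     if (p_4 == 0B) ...

   the store through p_4 lets us infer p_4 != 0B, so an
   ASSERT_EXPR <p_4, p_4 != 0B> is registered right after the dereference,
   and later uses of p_4, including the NULL test, can see that range.  */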
6089 static void
6090 find_assert_locations_1 (basic_block bb, sbitmap live)
6092 gimple *last;
6094 last = last_stmt (bb);
6096 /* If BB's last statement is a conditional statement involving integer
6097 operands, determine if we need to add ASSERT_EXPRs. */
6098 if (last
6099 && gimple_code (last) == GIMPLE_COND
6100 && !fp_predicate (last)
6101 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
6102 find_conditional_asserts (bb, as_a <gcond *> (last));
6104 /* If BB's last statement is a switch statement involving integer
6105 operands, determine if we need to add ASSERT_EXPRs. */
6106 if (last
6107 && gimple_code (last) == GIMPLE_SWITCH
6108 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
6109 find_switch_asserts (bb, as_a <gswitch *> (last));
6111 /* Traverse all the statements in BB marking used names and looking
6112 for statements that may infer assertions for their used operands. */
6113 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
6114 gsi_prev (&si))
6116 gimple *stmt;
6117 tree op;
6118 ssa_op_iter i;
6120 stmt = gsi_stmt (si);
6122 if (is_gimple_debug (stmt))
6123 continue;
6125 /* See if we can derive an assertion for any of STMT's operands. */
6126 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6128 tree value;
6129 enum tree_code comp_code;
6131 /* If op is not live beyond this stmt, do not bother to insert
6132 asserts for it. */
6133 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
6134 continue;
6136 /* If OP is used in such a way that we can infer a value
6137 range for it, and we don't find a previous assertion for
6138 it, create a new assertion location node for OP. */
6139 if (infer_value_range (stmt, op, &comp_code, &value))
6141 /* If we are able to infer a nonzero value range for OP,
6142 then walk backwards through the use-def chain to see if OP
6143 was set via a typecast.
6145 If so, then we can also infer a nonzero value range
6146 for the operand of the NOP_EXPR. */
6147 if (comp_code == NE_EXPR && integer_zerop (value))
6149 tree t = op;
6150 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
6152 while (is_gimple_assign (def_stmt)
6153 && CONVERT_EXPR_CODE_P
6154 (gimple_assign_rhs_code (def_stmt))
6155 && TREE_CODE
6156 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
6157 && POINTER_TYPE_P
6158 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
6160 t = gimple_assign_rhs1 (def_stmt);
6161 def_stmt = SSA_NAME_DEF_STMT (t);
6163 /* Note we want to register the assert for the
6164 operand of the NOP_EXPR after SI, not after the
6165 conversion. */
6166 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
6167 register_new_assert_for (t, t, comp_code, value,
6168 bb, NULL, si);
6172 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
6176 /* Update live. */
6177 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6178 bitmap_set_bit (live, SSA_NAME_VERSION (op));
6179 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
6180 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
6183 /* Traverse all PHI nodes in BB, updating live. */
6184 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
6185 gsi_next (&si))
6187 use_operand_p arg_p;
6188 ssa_op_iter i;
6189 gphi *phi = si.phi ();
6190 tree res = gimple_phi_result (phi);
6192 if (virtual_operand_p (res))
6193 continue;
6195 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
6197 tree arg = USE_FROM_PTR (arg_p);
6198 if (TREE_CODE (arg) == SSA_NAME)
6199 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
6202 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
6206 /* Do an RPO walk over the function computing SSA name liveness
6207 on-the-fly and deciding on assert expressions to insert. */
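/* Sketch of the intent (illustrative): blocks are processed from the end
   of the reverse post-order towards the entry, so a block's live set
   already reflects the uses in its successors; that set is then OR-ed
   into each non-back-edge predecessor and released once the last
   predecessor that needs it (tracked in LAST_RPO) has been visited.  */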
6209 static void
6210 find_assert_locations (void)
6212 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6213 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6214 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
6215 int rpo_cnt, i;
6217 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
6218 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
6219 for (i = 0; i < rpo_cnt; ++i)
6220 bb_rpo[rpo[i]] = i;
6222 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
6223 the order we compute liveness and insert asserts we otherwise
6224 fail to insert asserts into the loop latch. */
6225 loop_p loop;
6226 FOR_EACH_LOOP (loop, 0)
6228 i = loop->latch->index;
6229 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
6230 for (gphi_iterator gsi = gsi_start_phis (loop->header);
6231 !gsi_end_p (gsi); gsi_next (&gsi))
6233 gphi *phi = gsi.phi ();
6234 if (virtual_operand_p (gimple_phi_result (phi)))
6235 continue;
6236 tree arg = gimple_phi_arg_def (phi, j);
6237 if (TREE_CODE (arg) == SSA_NAME)
6239 if (live[i] == NULL)
6241 live[i] = sbitmap_alloc (num_ssa_names);
6242 bitmap_clear (live[i]);
6244 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
6249 for (i = rpo_cnt - 1; i >= 0; --i)
6251 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
6252 edge e;
6253 edge_iterator ei;
6255 if (!live[rpo[i]])
6257 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
6258 bitmap_clear (live[rpo[i]]);
6261 /* Process BB and update the live information with uses in
6262 this block. */
6263 find_assert_locations_1 (bb, live[rpo[i]]);
6265 /* Merge liveness into the predecessor blocks and free it. */
6266 if (!bitmap_empty_p (live[rpo[i]]))
6268 int pred_rpo = i;
6269 FOR_EACH_EDGE (e, ei, bb->preds)
6271 int pred = e->src->index;
6272 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
6273 continue;
6275 if (!live[pred])
6277 live[pred] = sbitmap_alloc (num_ssa_names);
6278 bitmap_clear (live[pred]);
6280 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
6282 if (bb_rpo[pred] < pred_rpo)
6283 pred_rpo = bb_rpo[pred];
6286 /* Record the RPO number of the last visited block that needs
6287 live information from this block. */
6288 last_rpo[rpo[i]] = pred_rpo;
6290 else
6292 sbitmap_free (live[rpo[i]]);
6293 live[rpo[i]] = NULL;
6296 /* We can free all successors live bitmaps if all their
6297 predecessors have been visited already. */
6298 FOR_EACH_EDGE (e, ei, bb->succs)
6299 if (last_rpo[e->dest->index] == i
6300 && live[e->dest->index])
6302 sbitmap_free (live[e->dest->index]);
6303 live[e->dest->index] = NULL;
6307 XDELETEVEC (rpo);
6308 XDELETEVEC (bb_rpo);
6309 XDELETEVEC (last_rpo);
6310 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
6311 if (live[i])
6312 sbitmap_free (live[i]);
6313 XDELETEVEC (live);
6316 /* Create an ASSERT_EXPR for NAME and insert it in the location
6317 indicated by LOC. Return true if we made any edge insertions. */
6319 static bool
6320 process_assert_insertions_for (tree name, assert_locus *loc)
6322 /* Build the comparison expression NAME_i COMP_CODE VAL. */
6323 gimple *stmt;
6324 tree cond;
6325 gimple *assert_stmt;
6326 edge_iterator ei;
6327 edge e;
6329 /* If we have X <=> X do not insert an assert expr for that. */
6330 if (loc->expr == loc->val)
6331 return false;
6333 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
6334 assert_stmt = build_assert_expr_for (cond, name);
6335 if (loc->e)
6337 /* We have been asked to insert the assertion on an edge. This
6338 is used only by COND_EXPR and SWITCH_EXPR assertions. */
6339 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
6340 || (gimple_code (gsi_stmt (loc->si))
6341 == GIMPLE_SWITCH));
6343 gsi_insert_on_edge (loc->e, assert_stmt);
6344 return true;
6347 /* Otherwise, we can insert right after LOC->SI, provided the
6348 statement is not the last statement in the block. */
6349 stmt = gsi_stmt (loc->si);
6350 if (!stmt_ends_bb_p (stmt))
6352 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
6353 return false;
6356 /* If STMT must be the last statement in BB, we can only insert new
6357 assertions on the non-abnormal edge out of BB. Note that since
6358 STMT is not control flow, there may only be one non-abnormal edge
6359 out of BB. */
6360 FOR_EACH_EDGE (e, ei, loc->bb->succs)
6361 if (!(e->flags & EDGE_ABNORMAL))
6363 gsi_insert_on_edge (e, assert_stmt);
6364 return true;
6367 gcc_unreachable ();
6371 /* Process all the insertions registered for every name N_i registered
6372 in NEED_ASSERT_FOR. The list of assertions to be inserted are
6373 found in ASSERTS_FOR[i]. */
6375 static void
6376 process_assert_insertions (void)
6378 unsigned i;
6379 bitmap_iterator bi;
6380 bool update_edges_p = false;
6381 int num_asserts = 0;
6383 if (dump_file && (dump_flags & TDF_DETAILS))
6384 dump_all_asserts (dump_file);
6386 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
6388 assert_locus *loc = asserts_for[i];
6389 gcc_assert (loc);
6391 while (loc)
6393 assert_locus *next = loc->next;
6394 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
6395 free (loc);
6396 loc = next;
6397 num_asserts++;
6401 if (update_edges_p)
6402 gsi_commit_edge_inserts ();
6404 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
6405 num_asserts);
6409 /* Traverse the flowgraph looking for conditional jumps to insert range
6410 expressions. These range expressions are meant to provide information
6411 to optimizations that need to reason in terms of value ranges. They
6412 will not be expanded into RTL. For instance, given:
6414 x = ...
6415 y = ...
6416 if (x < y)
6417 y = x - 2;
6418 else
6419 x = y + 3;
6421 this pass will transform the code into:
6423 x = ...
6424 y = ...
6425 if (x < y)
6427 x = ASSERT_EXPR <x, x < y>
6428 y = x - 2
6430 else
6432 y = ASSERT_EXPR <y, x >= y>
6433 x = y + 3
6436 The idea is that once copy and constant propagation have run, other
6437 optimizations will be able to determine what ranges of values can 'x'
6438 take in different paths of the code, simply by checking the reaching
6439 definition of 'x'. */
6441 static void
6442 insert_range_assertions (void)
6444 need_assert_for = BITMAP_ALLOC (NULL);
6445 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
6447 calculate_dominance_info (CDI_DOMINATORS);
6449 find_assert_locations ();
6450 if (!bitmap_empty_p (need_assert_for))
6452 process_assert_insertions ();
6453 update_ssa (TODO_update_ssa_no_phi);
6456 if (dump_file && (dump_flags & TDF_DETAILS))
6458 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
6459 dump_function_to_file (current_function_decl, dump_file, dump_flags);
6462 free (asserts_for);
6463 BITMAP_FREE (need_assert_for);
6466 /* Check one ARRAY_REF in REF, located at LOCATION. Ignore flexible arrays
6467 and "struct" hacks. If VRP can determine that the
6468 array subscript is a constant, check if it is outside the valid
6469 range. If the array subscript is a RANGE, warn if it is
6470 non-overlapping with the valid range.
6471 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
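/* Illustrative example (an assumption, not taken from the sources): with
   -Warray-bounds, given

     int a[10];
     ... a[10] ...    -> "array subscript is above array bounds"
     ... a[-1] ...    -> "array subscript is below array bounds"

   whereas taking the address &a[10] is accepted, because
   IGNORE_OFF_BY_ONE is true for ARRAY_REFs found underneath an
   ADDR_EXPR.  */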
6473 static void
6474 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
6476 value_range *vr = NULL;
6477 tree low_sub, up_sub;
6478 tree low_bound, up_bound, up_bound_p1;
6480 if (TREE_NO_WARNING (ref))
6481 return;
6483 low_sub = up_sub = TREE_OPERAND (ref, 1);
6484 up_bound = array_ref_up_bound (ref);
6486 /* Can not check flexible arrays. */
6487 if (!up_bound
6488 || TREE_CODE (up_bound) != INTEGER_CST)
6489 return;
6491 /* Accesses to trailing arrays via pointers may access storage
6492 beyond the type's array bounds. */
6493 if (warn_array_bounds < 2
6494 && array_at_struct_end_p (ref))
6495 return;
6497 low_bound = array_ref_low_bound (ref);
6498 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
6499 build_int_cst (TREE_TYPE (up_bound), 1));
6501 /* Empty array. */
6502 if (tree_int_cst_equal (low_bound, up_bound_p1))
6504 warning_at (location, OPT_Warray_bounds,
6505 "array subscript is above array bounds");
6506 TREE_NO_WARNING (ref) = 1;
6509 if (TREE_CODE (low_sub) == SSA_NAME)
6511 vr = get_value_range (low_sub);
6512 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
6514 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
6515 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
6519 if (vr && vr->type == VR_ANTI_RANGE)
6521 if (TREE_CODE (up_sub) == INTEGER_CST
6522 && (ignore_off_by_one
6523 ? tree_int_cst_lt (up_bound, up_sub)
6524 : tree_int_cst_le (up_bound, up_sub))
6525 && TREE_CODE (low_sub) == INTEGER_CST
6526 && tree_int_cst_le (low_sub, low_bound))
6528 warning_at (location, OPT_Warray_bounds,
6529 "array subscript is outside array bounds");
6530 TREE_NO_WARNING (ref) = 1;
6533 else if (TREE_CODE (up_sub) == INTEGER_CST
6534 && (ignore_off_by_one
6535 ? !tree_int_cst_le (up_sub, up_bound_p1)
6536 : !tree_int_cst_le (up_sub, up_bound)))
6538 if (dump_file && (dump_flags & TDF_DETAILS))
6540 fprintf (dump_file, "Array bound warning for ");
6541 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6542 fprintf (dump_file, "\n");
6544 warning_at (location, OPT_Warray_bounds,
6545 "array subscript is above array bounds");
6546 TREE_NO_WARNING (ref) = 1;
6548 else if (TREE_CODE (low_sub) == INTEGER_CST
6549 && tree_int_cst_lt (low_sub, low_bound))
6551 if (dump_file && (dump_flags & TDF_DETAILS))
6553 fprintf (dump_file, "Array bound warning for ");
6554 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6555 fprintf (dump_file, "\n");
6557 warning_at (location, OPT_Warray_bounds,
6558 "array subscript is below array bounds");
6559 TREE_NO_WARNING (ref) = 1;
6563 /* Search the expression T, located at LOCATION, for computations of the
6564 address of an ARRAY_REF, and call check_array_ref on each one found. */
6566 static void
6567 search_for_addr_array (tree t, location_t location)
6569 /* Check each ARRAY_REFs in the reference chain. */
6572 if (TREE_CODE (t) == ARRAY_REF)
6573 check_array_ref (location, t, true /*ignore_off_by_one*/);
6575 t = TREE_OPERAND (t, 0);
6577 while (handled_component_p (t));
6579 if (TREE_CODE (t) == MEM_REF
6580 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
6581 && !TREE_NO_WARNING (t))
6583 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
6584 tree low_bound, up_bound, el_sz;
6585 offset_int idx;
6586 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
6587 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
6588 || !TYPE_DOMAIN (TREE_TYPE (tem)))
6589 return;
6591 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6592 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6593 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
6594 if (!low_bound
6595 || TREE_CODE (low_bound) != INTEGER_CST
6596 || !up_bound
6597 || TREE_CODE (up_bound) != INTEGER_CST
6598 || !el_sz
6599 || TREE_CODE (el_sz) != INTEGER_CST)
6600 return;
6602 idx = mem_ref_offset (t);
6603 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
6604 if (idx < 0)
6606 if (dump_file && (dump_flags & TDF_DETAILS))
6608 fprintf (dump_file, "Array bound warning for ");
6609 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6610 fprintf (dump_file, "\n");
6612 warning_at (location, OPT_Warray_bounds,
6613 "array subscript is below array bounds");
6614 TREE_NO_WARNING (t) = 1;
6616 else if (idx > (wi::to_offset (up_bound)
6617 - wi::to_offset (low_bound) + 1))
6619 if (dump_file && (dump_flags & TDF_DETAILS))
6621 fprintf (dump_file, "Array bound warning for ");
6622 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6623 fprintf (dump_file, "\n");
6625 warning_at (location, OPT_Warray_bounds,
6626 "array subscript is above array bounds");
6627 TREE_NO_WARNING (t) = 1;
6632 /* walk_tree() callback that checks if *TP is
6633 an ARRAY_REF inside an ADDR_EXPR (in which an array
6634 subscript one outside the valid range is allowed). Call
6635 check_array_ref for each ARRAY_REF found. The location is
6636 passed in DATA. */
6638 static tree
6639 check_array_bounds (tree *tp, int *walk_subtree, void *data)
6641 tree t = *tp;
6642 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6643 location_t location;
6645 if (EXPR_HAS_LOCATION (t))
6646 location = EXPR_LOCATION (t);
6647 else
6649 location_t *locp = (location_t *) wi->info;
6650 location = *locp;
6653 *walk_subtree = TRUE;
6655 if (TREE_CODE (t) == ARRAY_REF)
6656 check_array_ref (location, t, false /*ignore_off_by_one*/);
6658 else if (TREE_CODE (t) == ADDR_EXPR)
6660 search_for_addr_array (t, location);
6661 *walk_subtree = FALSE;
6664 return NULL_TREE;
6667 /* Walk over all statements of all reachable BBs and call check_array_bounds
6668 on them. */
6670 static void
6671 check_all_array_refs (void)
6673 basic_block bb;
6674 gimple_stmt_iterator si;
6676 FOR_EACH_BB_FN (bb, cfun)
6678 edge_iterator ei;
6679 edge e;
6680 bool executable = false;
6682 /* Skip blocks that were found to be unreachable. */
6683 FOR_EACH_EDGE (e, ei, bb->preds)
6684 executable |= !!(e->flags & EDGE_EXECUTABLE);
6685 if (!executable)
6686 continue;
6688 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6690 gimple *stmt = gsi_stmt (si);
6691 struct walk_stmt_info wi;
6692 if (!gimple_has_location (stmt)
6693 || is_gimple_debug (stmt))
6694 continue;
6696 memset (&wi, 0, sizeof (wi));
6698 location_t loc = gimple_location (stmt);
6699 wi.info = &loc;
6701 walk_gimple_op (gsi_stmt (si),
6702 check_array_bounds,
6703 &wi);
6708 /* Return true if all imm uses of VAR are either in STMT, or
6709 feed (optionally through a chain of single imm uses) the GIMPLE_COND
6710 in basic block COND_BB. */
6712 static bool
6713 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
6715 use_operand_p use_p, use2_p;
6716 imm_use_iterator iter;
6718 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
6719 if (USE_STMT (use_p) != stmt)
6721 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
6722 if (is_gimple_debug (use_stmt))
6723 continue;
6724 while (is_gimple_assign (use_stmt)
6725 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
6726 && single_imm_use (gimple_assign_lhs (use_stmt),
6727 &use2_p, &use_stmt2))
6728 use_stmt = use_stmt2;
6729 if (gimple_code (use_stmt) != GIMPLE_COND
6730 || gimple_bb (use_stmt) != cond_bb)
6731 return false;
6733 return true;
6736 /* Handle
6737 _4 = x_3 & 31;
6738 if (_4 != 0)
6739 goto <bb 6>;
6740 else
6741 goto <bb 7>;
6742 <bb 6>:
6743 __builtin_unreachable ();
6744 <bb 7>:
6745 x_5 = ASSERT_EXPR <x_3, ...>;
6746 If x_3 has no other immediate uses (checked by caller),
6747 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
6748 from the non-zero bitmask. */
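/* Worked example (illustrative): reaching <bb 7> in the pattern above
   implies x_3 & 31 == 0, i.e. the low 5 bits of x_3 are zero, so a
   recorded nonzero-bits mask of 0xff for x_3 can be narrowed to
   0xff & ~31 == 0xe0.  */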
6750 static void
6751 maybe_set_nonzero_bits (basic_block bb, tree var)
6753 edge e = single_pred_edge (bb);
6754 basic_block cond_bb = e->src;
6755 gimple *stmt = last_stmt (cond_bb);
6756 tree cst;
6758 if (stmt == NULL
6759 || gimple_code (stmt) != GIMPLE_COND
6760 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
6761 ? EQ_EXPR : NE_EXPR)
6762 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
6763 || !integer_zerop (gimple_cond_rhs (stmt)))
6764 return;
6766 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
6767 if (!is_gimple_assign (stmt)
6768 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
6769 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
6770 return;
6771 if (gimple_assign_rhs1 (stmt) != var)
6773 gimple *stmt2;
6775 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
6776 return;
6777 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
6778 if (!gimple_assign_cast_p (stmt2)
6779 || gimple_assign_rhs1 (stmt2) != var
6780 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
6781 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
6782 != TYPE_PRECISION (TREE_TYPE (var))))
6783 return;
6785 cst = gimple_assign_rhs2 (stmt);
6786 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
6789 /* Convert range assertion expressions into the implied copies and
6790 copy propagate away the copies. Doing the trivial copy propagation
6791 here avoids the need to run the full copy propagation pass after
6792 VRP.
6794 FIXME, this will eventually lead to copy propagation removing the
6795 names that had useful range information attached to them. For
6796 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
6797 then N_i will have the range [3, +INF].
6799 However, by converting the assertion into the implied copy
6800 operation N_i = N_j, we will then copy-propagate N_j into the uses
6801 of N_i and lose the range information. We may want to hold on to
6802 ASSERT_EXPRs a little while longer as the ranges could be used in
6803 things like jump threading.
6805 The problem with keeping ASSERT_EXPRs around is that passes after
6806 VRP need to handle them appropriately.
6808 Another approach would be to make the range information a first
6809 class property of the SSA_NAME so that it can be queried from
6810 any pass. This is made somewhat more complex by the need for
6811 multiple ranges to be associated with one SSA_NAME. */
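/* Illustrative sketch (not part of the original comment): an assertion

     x_8 = ASSERT_EXPR <x_7, x_7 > 3>;
     ... uses of x_8 ...

   is removed by redirecting every use of x_8 to x_7 and deleting the
   statement; in the __builtin_unreachable pattern handled below, the
   range computed for x_8 is first copied over to x_7.  */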
6813 static void
6814 remove_range_assertions (void)
6816 basic_block bb;
6817 gimple_stmt_iterator si;
6818 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
6819 a basic block preceded by GIMPLE_COND branching to it and
6820 __builtin_trap, -1 if not yet checked, 0 otherwise. */
6821 int is_unreachable;
6823 /* Note that the BSI iterator bump happens at the bottom of the
6824 loop and no bump is necessary if we're removing the statement
6825 referenced by the current BSI. */
6826 FOR_EACH_BB_FN (bb, cfun)
6827 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
6829 gimple *stmt = gsi_stmt (si);
6830 gimple *use_stmt;
6832 if (is_gimple_assign (stmt)
6833 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
6835 tree lhs = gimple_assign_lhs (stmt);
6836 tree rhs = gimple_assign_rhs1 (stmt);
6837 tree var;
6838 use_operand_p use_p;
6839 imm_use_iterator iter;
6841 var = ASSERT_EXPR_VAR (rhs);
6842 gcc_assert (TREE_CODE (var) == SSA_NAME);
6844 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
6845 && SSA_NAME_RANGE_INFO (lhs))
6847 if (is_unreachable == -1)
6849 is_unreachable = 0;
6850 if (single_pred_p (bb)
6851 && assert_unreachable_fallthru_edge_p
6852 (single_pred_edge (bb)))
6853 is_unreachable = 1;
6855 /* Handle
6856 if (x_7 >= 10 && x_7 < 20)
6857 __builtin_unreachable ();
6858 x_8 = ASSERT_EXPR <x_7, ...>;
6859 if the only uses of x_7 are in the ASSERT_EXPR and
6860 in the condition. In that case, we can copy the
6861 range info from x_8 computed in this pass also
6862 for x_7. */
6863 if (is_unreachable
6864 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
6865 single_pred (bb)))
6867 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
6868 SSA_NAME_RANGE_INFO (lhs)->get_min (),
6869 SSA_NAME_RANGE_INFO (lhs)->get_max ());
6870 maybe_set_nonzero_bits (bb, var);
6874 /* Propagate the RHS into every use of the LHS. */
6875 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
6876 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
6877 SET_USE (use_p, var);
6879 /* And finally, remove the copy, it is not needed. */
6880 gsi_remove (&si, true);
6881 release_defs (stmt);
6883 else
6885 if (!is_gimple_debug (gsi_stmt (si)))
6886 is_unreachable = 0;
6887 gsi_next (&si);
6893 /* Return true if STMT is interesting for VRP. */
6895 static bool
6896 stmt_interesting_for_vrp (gimple *stmt)
6898 if (gimple_code (stmt) == GIMPLE_PHI)
6900 tree res = gimple_phi_result (stmt);
6901 return (!virtual_operand_p (res)
6902 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
6903 || POINTER_TYPE_P (TREE_TYPE (res))));
6905 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6907 tree lhs = gimple_get_lhs (stmt);
6909 /* In general, assignments with virtual operands are not useful
6910 for deriving ranges, with the obvious exception of calls to
6911 builtin functions. */
6912 if (lhs && TREE_CODE (lhs) == SSA_NAME
6913 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6914 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6915 && (is_gimple_call (stmt)
6916 || !gimple_vuse (stmt)))
6917 return true;
6918 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
6919 switch (gimple_call_internal_fn (stmt))
6921 case IFN_ADD_OVERFLOW:
6922 case IFN_SUB_OVERFLOW:
6923 case IFN_MUL_OVERFLOW:
6924 /* These internal calls return _Complex integer type,
6925 but are interesting to VRP nevertheless. */
6926 if (lhs && TREE_CODE (lhs) == SSA_NAME)
6927 return true;
6928 break;
6929 default:
6930 break;
6933 else if (gimple_code (stmt) == GIMPLE_COND
6934 || gimple_code (stmt) == GIMPLE_SWITCH)
6935 return true;
6937 return false;
6941 /* Initialize local data structures for VRP. */
6943 static void
6944 vrp_initialize (void)
6946 basic_block bb;
6948 values_propagated = false;
6949 num_vr_values = num_ssa_names;
6950 vr_value = XCNEWVEC (value_range *, num_vr_values);
6951 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
6953 FOR_EACH_BB_FN (bb, cfun)
6955 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
6956 gsi_next (&si))
6958 gphi *phi = si.phi ();
6959 if (!stmt_interesting_for_vrp (phi))
6961 tree lhs = PHI_RESULT (phi);
6962 set_value_range_to_varying (get_value_range (lhs));
6963 prop_set_simulate_again (phi, false);
6965 else
6966 prop_set_simulate_again (phi, true);
6969 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
6970 gsi_next (&si))
6972 gimple *stmt = gsi_stmt (si);
6974 /* If the statement is a control insn, then we must make sure
6975 it is simulated at least once. Failing to do so means that
6976 its outgoing edges will never get added. */
6977 if (stmt_ends_bb_p (stmt))
6978 prop_set_simulate_again (stmt, true);
6979 else if (!stmt_interesting_for_vrp (stmt))
6981 ssa_op_iter i;
6982 tree def;
6983 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
6984 set_value_range_to_varying (get_value_range (def));
6985 prop_set_simulate_again (stmt, false);
6987 else
6988 prop_set_simulate_again (stmt, true);
6993 /* Return the singleton value-range for NAME, or NAME itself if no singleton range is known. */
6995 static inline tree
6996 vrp_valueize (tree name)
6998 if (TREE_CODE (name) == SSA_NAME)
7000 value_range *vr = get_value_range (name);
7001 if (vr->type == VR_RANGE
7002 && (vr->min == vr->max
7003 || operand_equal_p (vr->min, vr->max, 0)))
7004 return vr->min;
7006 return name;
7009 /* Return the singleton value-range for NAME if that is a constant
7010 but signal to not follow SSA edges. */
7012 static inline tree
7013 vrp_valueize_1 (tree name)
7015 if (TREE_CODE (name) == SSA_NAME)
7017 /* If the definition may be simulated again we cannot follow
7018 this SSA edge as the SSA propagator does not necessarily
7019 re-visit the use. */
7020 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
7021 if (!gimple_nop_p (def_stmt)
7022 && prop_simulate_again_p (def_stmt))
7023 return NULL_TREE;
7024 value_range *vr = get_value_range (name);
7025 if (range_int_cst_singleton_p (vr))
7026 return vr->min;
7028 return name;
7031 /* Visit assignment STMT. If it produces an interesting range, record
7032 the SSA name in *OUTPUT_P. */
7034 static enum ssa_prop_result
7035 vrp_visit_assignment_or_call (gimple *stmt, tree *output_p)
7037 tree def, lhs;
7038 ssa_op_iter iter;
7039 enum gimple_code code = gimple_code (stmt);
7040 lhs = gimple_get_lhs (stmt);
7042 /* We only keep track of ranges in integral and pointer types. */
7043 if (TREE_CODE (lhs) == SSA_NAME
7044 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
7045 /* It is valid to have NULL MIN/MAX values on a type. See
7046 build_range_type. */
7047 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
7048 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
7049 || POINTER_TYPE_P (TREE_TYPE (lhs))))
7051 value_range new_vr = VR_INITIALIZER;
7053 /* Try folding the statement to a constant first. */
7054 tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
7055 vrp_valueize_1);
7056 if (tem && is_gimple_min_invariant (tem))
7057 set_value_range_to_value (&new_vr, tem, NULL);
7058 /* Then dispatch to value-range extracting functions. */
7059 else if (code == GIMPLE_CALL)
7060 extract_range_basic (&new_vr, stmt);
7061 else
7062 extract_range_from_assignment (&new_vr, as_a <gassign *> (stmt));
7064 if (update_value_range (lhs, &new_vr))
7066 *output_p = lhs;
7068 if (dump_file && (dump_flags & TDF_DETAILS))
7070 fprintf (dump_file, "Found new range for ");
7071 print_generic_expr (dump_file, lhs, 0);
7072 fprintf (dump_file, ": ");
7073 dump_value_range (dump_file, &new_vr);
7074 fprintf (dump_file, "\n");
7077 if (new_vr.type == VR_VARYING)
7078 return SSA_PROP_VARYING;
7080 return SSA_PROP_INTERESTING;
7083 return SSA_PROP_NOT_INTERESTING;
7085 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
7086 switch (gimple_call_internal_fn (stmt))
7088 case IFN_ADD_OVERFLOW:
7089 case IFN_SUB_OVERFLOW:
7090 case IFN_MUL_OVERFLOW:
7091 /* These internal calls return _Complex integer type,
7092 which VRP does not track, but the immediate uses
7093 thereof might be interesting. */
7094 if (lhs && TREE_CODE (lhs) == SSA_NAME)
7096 imm_use_iterator iter;
7097 use_operand_p use_p;
7098 enum ssa_prop_result res = SSA_PROP_VARYING;
7100 set_value_range_to_varying (get_value_range (lhs));
7102 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
7104 gimple *use_stmt = USE_STMT (use_p);
7105 if (!is_gimple_assign (use_stmt))
7106 continue;
7107 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
7108 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
7109 continue;
7110 tree rhs1 = gimple_assign_rhs1 (use_stmt);
7111 tree use_lhs = gimple_assign_lhs (use_stmt);
7112 if (TREE_CODE (rhs1) != rhs_code
7113 || TREE_OPERAND (rhs1, 0) != lhs
7114 || TREE_CODE (use_lhs) != SSA_NAME
7115 || !stmt_interesting_for_vrp (use_stmt)
7116 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
7117 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
7118 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
7119 continue;
7121 /* If there is a change in the value range for any of the
7122 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
7123 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
7124 or IMAGPART_EXPR immediate uses, but none of them have
7125 a change in their value ranges, return
7126 SSA_PROP_NOT_INTERESTING. If there are no
7127 {REAL,IMAG}PART_EXPR uses at all,
7128 return SSA_PROP_VARYING. */
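/* For instance (illustrative), given

     _5 = ADD_OVERFLOW (x_2, y_3);
     _6 = REALPART_EXPR <_5>;
     _7 = IMAGPART_EXPR <_5>;

   no range is tracked for the _Complex value _5 itself, but new ranges
   may be recorded for _6 (the arithmetic result) and _7 (the overflow
   flag), which is what makes the call interesting.  */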
7129 value_range new_vr = VR_INITIALIZER;
7130 extract_range_basic (&new_vr, use_stmt);
7131 value_range *old_vr = get_value_range (use_lhs);
7132 if (old_vr->type != new_vr.type
7133 || !vrp_operand_equal_p (old_vr->min, new_vr.min)
7134 || !vrp_operand_equal_p (old_vr->max, new_vr.max)
7135 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
7136 res = SSA_PROP_INTERESTING;
7137 else
7138 res = SSA_PROP_NOT_INTERESTING;
7139 BITMAP_FREE (new_vr.equiv);
7140 if (res == SSA_PROP_INTERESTING)
7142 *output_p = lhs;
7143 return res;
7147 return res;
7149 break;
7150 default:
7151 break;
7154 /* Every other statement produces no useful ranges. */
7155 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
7156 set_value_range_to_varying (get_value_range (def));
7158 return SSA_PROP_VARYING;
7161 /* Helper that gets the value range of the SSA_NAME with version I,
7162 or, if that range is varying or undefined, a symbolic range
7163 containing just the SSA_NAME itself. */
7165 static inline value_range
7166 get_vr_for_comparison (int i)
7168 value_range vr = *get_value_range (ssa_name (i));
7170 /* If name N_i does not have a valid range, use N_i as its own
7171 range. This allows us to compare against names that may
7172 have N_i in their ranges. */
7173 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
7175 vr.type = VR_RANGE;
7176 vr.min = ssa_name (i);
7177 vr.max = ssa_name (i);
7180 return vr;
7183 /* Compare all the value ranges for names equivalent to VAR with VAL
7184 using comparison code COMP. Return the same value returned by
7185 compare_range_with_value, including the setting of
7186 *STRICT_OVERFLOW_P. */
7188 static tree
7189 compare_name_with_value (enum tree_code comp, tree var, tree val,
7190 bool *strict_overflow_p, bool use_equiv_p)
7192 bitmap_iterator bi;
7193 unsigned i;
7194 bitmap e;
7195 tree retval, t;
7196 int used_strict_overflow;
7197 bool sop;
7198 value_range equiv_vr;
7200 /* Get the set of equivalences for VAR. */
7201 e = get_value_range (var)->equiv;
7203 /* Start at -1. Set it to 0 if we do a comparison without relying
7204 on overflow, or 1 if all comparisons rely on overflow. */
7205 used_strict_overflow = -1;
7207 /* Compare vars' value range with val. */
7208 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
7209 sop = false;
7210 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
7211 if (retval)
7212 used_strict_overflow = sop ? 1 : 0;
7214 /* If the equiv set is empty we have done all work we need to do. */
7215 if (e == NULL)
7217 if (retval
7218 && used_strict_overflow > 0)
7219 *strict_overflow_p = true;
7220 return retval;
7223 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
7225 if (! use_equiv_p
7226 && ! SSA_NAME_IS_DEFAULT_DEF (ssa_name (i))
7227 && prop_simulate_again_p (SSA_NAME_DEF_STMT (ssa_name (i))))
7228 continue;
7230 equiv_vr = get_vr_for_comparison (i);
7231 sop = false;
7232 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
7233 if (t)
7235 /* If we get different answers from different members
7236 of the equivalence set this check must be in a dead
7237 code region. Folding it to a trap representation
7238 would be correct here. For now just return don't-know. */
7239 if (retval != NULL
7240 && t != retval)
7242 retval = NULL_TREE;
7243 break;
7245 retval = t;
7247 if (!sop)
7248 used_strict_overflow = 0;
7249 else if (used_strict_overflow < 0)
7250 used_strict_overflow = 1;
7254 if (retval
7255 && used_strict_overflow > 0)
7256 *strict_overflow_p = true;
7258 return retval;
7262 /* Given a comparison code COMP and names N1 and N2, compare all the
7263 ranges equivalent to N1 against all the ranges equivalent to N2
7264 to determine the value of N1 COMP N2. Return the same value
7265 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
7266 whether we relied on an overflow infinity in the comparison. */
7269 static tree
7270 compare_names (enum tree_code comp, tree n1, tree n2,
7271 bool *strict_overflow_p)
7273 tree t, retval;
7274 bitmap e1, e2;
7275 bitmap_iterator bi1, bi2;
7276 unsigned i1, i2;
7277 int used_strict_overflow;
7278 static bitmap_obstack *s_obstack = NULL;
7279 static bitmap s_e1 = NULL, s_e2 = NULL;
7281 /* Compare the ranges of every name equivalent to N1 against the
7282 ranges of every name equivalent to N2. */
7283 e1 = get_value_range (n1)->equiv;
7284 e2 = get_value_range (n2)->equiv;
7286 /* Use the fake bitmaps if e1 or e2 are not available. */
7287 if (s_obstack == NULL)
7289 s_obstack = XNEW (bitmap_obstack);
7290 bitmap_obstack_initialize (s_obstack);
7291 s_e1 = BITMAP_ALLOC (s_obstack);
7292 s_e2 = BITMAP_ALLOC (s_obstack);
7294 if (e1 == NULL)
7295 e1 = s_e1;
7296 if (e2 == NULL)
7297 e2 = s_e2;
7299 /* Add N1 and N2 to their own set of equivalences to avoid
7300 duplicating the body of the loop just to check N1 and N2
7301 ranges. */
7302 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
7303 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
7305 /* If the equivalence sets have a common intersection, then the two
7306 names can be compared without checking their ranges. */
7307 if (bitmap_intersect_p (e1, e2))
7309 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7310 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7312 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
7313 ? boolean_true_node
7314 : boolean_false_node;
7317 /* Start at -1. Set it to 0 if we do a comparison without relying
7318 on overflow, or 1 if all comparisons rely on overflow. */
7319 used_strict_overflow = -1;
7321 /* Otherwise, compare all the equivalent ranges. N1 and N2 have
7322 already been added to their own sets of equivalences above, so the
7323 loops below also cover their ranges. */
7324 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
7326 value_range vr1 = get_vr_for_comparison (i1);
7328 t = retval = NULL_TREE;
7329 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
7331 bool sop = false;
7333 value_range vr2 = get_vr_for_comparison (i2);
7335 t = compare_ranges (comp, &vr1, &vr2, &sop);
7336 if (t)
7338 /* If we get different answers from different members
7339 of the equivalence set this check must be in a dead
7340 code region. Folding it to a trap representation
7341 would be correct here. For now just return don't-know. */
7342 if (retval != NULL
7343 && t != retval)
7345 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7346 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7347 return NULL_TREE;
7349 retval = t;
7351 if (!sop)
7352 used_strict_overflow = 0;
7353 else if (used_strict_overflow < 0)
7354 used_strict_overflow = 1;
7358 if (retval)
7360 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7361 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7362 if (used_strict_overflow > 0)
7363 *strict_overflow_p = true;
7364 return retval;
7368 /* None of the equivalent ranges are useful in computing this
7369 comparison. */
7370 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7371 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7372 return NULL_TREE;
7375 /* Helper function for vrp_evaluate_conditional_warnv & other
7376 optimizers. */
7378 static tree
7379 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
7380 tree op0, tree op1,
7381 bool * strict_overflow_p)
7383 value_range *vr0, *vr1;
7385 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
7386 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
7388 tree res = NULL_TREE;
7389 if (vr0 && vr1)
7390 res = compare_ranges (code, vr0, vr1, strict_overflow_p);
7391 if (!res && vr0)
7392 res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
7393 if (!res && vr1)
7394 res = (compare_range_with_value
7395 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
7396 return res;
7399 /* Helper function for vrp_evaluate_conditional_warnv. */
7401 static tree
7402 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
7403 tree op1, bool use_equiv_p,
7404 bool *strict_overflow_p, bool *only_ranges)
7406 tree ret;
7407 if (only_ranges)
7408 *only_ranges = true;
7410 /* We only deal with integral and pointer types. */
7411 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
7412 && !POINTER_TYPE_P (TREE_TYPE (op0)))
7413 return NULL_TREE;
7415 if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
7416 (code, op0, op1, strict_overflow_p)))
7417 return ret;
7418 if (only_ranges)
7419 *only_ranges = false;
7420 /* Do not use compare_names during propagation, it's quadratic. */
7421 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME
7422 && use_equiv_p)
7423 return compare_names (code, op0, op1, strict_overflow_p);
7424 else if (TREE_CODE (op0) == SSA_NAME)
7425 return compare_name_with_value (code, op0, op1,
7426 strict_overflow_p, use_equiv_p);
7427 else if (TREE_CODE (op1) == SSA_NAME)
7428 return compare_name_with_value (swap_tree_comparison (code), op1, op0,
7429 strict_overflow_p, use_equiv_p);
7430 return NULL_TREE;
7433 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
7434 information. Return NULL if the conditional cannot be evaluated.
7435 The ranges of all the names equivalent with the operands in COND
7436 will be used when trying to compute the value. If the result is
7437 based on undefined signed overflow, issue a warning if
7438 appropriate. */
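/* Illustrative example (an assumption, not taken from the sources): for a
   signed int i, the test

     if (i + 1 > i)

   can only be folded to true by assuming that signed overflow never
   happens; when that assumption is used, -Wstrict-overflow emits
   "assuming signed overflow does not occur when simplifying conditional
   to constant".  */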
7440 static tree
7441 vrp_evaluate_conditional (tree_code code, tree op0, tree op1, gimple *stmt)
7443 bool sop;
7444 tree ret;
7445 bool only_ranges;
7447 /* Some passes and foldings leak constants with overflow flag set
7448 into the IL. Avoid doing wrong things with these and bail out. */
7449 if ((TREE_CODE (op0) == INTEGER_CST
7450 && TREE_OVERFLOW (op0))
7451 || (TREE_CODE (op1) == INTEGER_CST
7452 && TREE_OVERFLOW (op1)))
7453 return NULL_TREE;
7455 sop = false;
7456 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
7457 &only_ranges);
7459 if (ret && sop)
7461 enum warn_strict_overflow_code wc;
7462 const char* warnmsg;
7464 if (is_gimple_min_invariant (ret))
7466 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
7467 warnmsg = G_("assuming signed overflow does not occur when "
7468 "simplifying conditional to constant");
7470 else
7472 wc = WARN_STRICT_OVERFLOW_COMPARISON;
7473 warnmsg = G_("assuming signed overflow does not occur when "
7474 "simplifying conditional");
7477 if (issue_strict_overflow_warning (wc))
7479 location_t location;
7481 if (!gimple_has_location (stmt))
7482 location = input_location;
7483 else
7484 location = gimple_location (stmt);
7485 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
7489 if (warn_type_limits
7490 && ret && only_ranges
7491 && TREE_CODE_CLASS (code) == tcc_comparison
7492 && TREE_CODE (op0) == SSA_NAME)
7494 /* If the comparison is being folded and the operand on the LHS
7495 is being compared against a constant value that is outside of
7496 the natural range of OP0's type, then the predicate will
7497 always fold regardless of the value of OP0. If -Wtype-limits
7498 was specified, emit a warning. */
7499 tree type = TREE_TYPE (op0);
7500 value_range *vr0 = get_value_range (op0);
7502 if (vr0->type == VR_RANGE
7503 && INTEGRAL_TYPE_P (type)
7504 && vrp_val_is_min (vr0->min)
7505 && vrp_val_is_max (vr0->max)
7506 && is_gimple_min_invariant (op1))
7508 location_t location;
7510 if (!gimple_has_location (stmt))
7511 location = input_location;
7512 else
7513 location = gimple_location (stmt);
7515 warning_at (location, OPT_Wtype_limits,
7516 integer_zerop (ret)
7517 ? G_("comparison always false "
7518 "due to limited range of data type")
7519 : G_("comparison always true "
7520 "due to limited range of data type"));
7524 return ret;
7528 /* Visit conditional statement STMT. If we can determine which edge
7529 will be taken out of STMT's basic block, record it in
7530 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
7531 SSA_PROP_VARYING. */
7533 static enum ssa_prop_result
7534 vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
7536 tree val;
7537 bool sop;
7539 *taken_edge_p = NULL;
7541 if (dump_file && (dump_flags & TDF_DETAILS))
7543 tree use;
7544 ssa_op_iter i;
7546 fprintf (dump_file, "\nVisiting conditional with predicate: ");
7547 print_gimple_stmt (dump_file, stmt, 0, 0);
7548 fprintf (dump_file, "\nWith known ranges\n");
7550 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
7552 fprintf (dump_file, "\t");
7553 print_generic_expr (dump_file, use, 0);
7554 fprintf (dump_file, ": ");
7555 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
7558 fprintf (dump_file, "\n");
7561 /* Compute the value of the predicate COND by checking the known
7562 ranges of each of its operands.
7564 Note that we cannot evaluate all the equivalent ranges here
7565 because those ranges may not yet be final and with the current
7566 propagation strategy, we cannot determine when the value ranges
7567 of the names in the equivalence set have changed.
7569 For instance, given the following code fragment
7571 i_5 = PHI <8, i_13>
7573 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
7574 if (i_14 == 1)
7577 Assume that on the first visit to i_14, i_5 has the temporary
7578 range [8, 8] because the second argument to the PHI function is
7579 not yet executable. We derive the range ~[0, 0] for i_14 and the
7580 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
7581 the first time, since i_14 is equivalent to the range [8, 8], we
7582 determine that the predicate is always false.
7584 On the next round of propagation, i_13 is determined to be
7585 VARYING, which causes i_5 to drop down to VARYING. So, another
7586 visit to i_14 is scheduled. In this second visit, we compute the
7587 exact same range and equivalence set for i_14, namely ~[0, 0] and
7588 { i_5 }. But we did not have the previous range for i_5
7589 registered, so vrp_visit_assignment thinks that the range for
7590 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
7591 is not visited again, which stops propagation from visiting
7592 statements in the THEN clause of that if().
7594 To properly fix this we would need to keep the previous range
7595 value for the names in the equivalence set. This way we would've
7596 discovered that from one visit to the other i_5 changed from
7597 range [8, 8] to VR_VARYING.
7599 However, fixing this apparent limitation may not be worth the
7600 additional checking. Testing on several code bases (GCC, DLV,
7601 MICO, TRAMP3D and SPEC2000) showed that doing this results in
7602 4 more predicates folded in SPEC. */
7603 sop = false;
7605 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
7606 gimple_cond_lhs (stmt),
7607 gimple_cond_rhs (stmt),
7608 false, &sop, NULL);
7609 if (val)
7611 if (!sop)
7612 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
7613 else
7615 if (dump_file && (dump_flags & TDF_DETAILS))
7616 fprintf (dump_file,
7617 "\nIgnoring predicate evaluation because "
7618 "it assumes that signed overflow is undefined");
7619 val = NULL_TREE;
7623 if (dump_file && (dump_flags & TDF_DETAILS))
7625 fprintf (dump_file, "\nPredicate evaluates to: ");
7626 if (val == NULL_TREE)
7627 fprintf (dump_file, "DON'T KNOW\n");
7628 else
7629 print_generic_stmt (dump_file, val, 0);
7632 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
7635 /* Searches STMT's case label vector for the index *IDX of the CASE_LABEL
7636 that includes the value VAL. The search is restricted to the range
7637 [START_IDX, n - 1] where n is the number of case labels in STMT.
7639 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
7640 returned.
7642 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
7643 it is placed in IDX and false is returned.
7645 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
7646 returned. */
7648 static bool
7649 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
7651 size_t n = gimple_switch_num_labels (stmt);
7652 size_t low, high;
7654 /* Find case label for minimum of the value range or the next one.
7655 At each iteration we are searching in [low, high - 1]. */
7657 for (low = start_idx, high = n; high != low; )
7659 tree t;
7660 int cmp;
7661 /* Note that i != high, so we never ask for n. */
7662 size_t i = (high + low) / 2;
7663 t = gimple_switch_label (stmt, i);
7665 /* Cache the result of comparing CASE_LOW and val. */
7666 cmp = tree_int_cst_compare (CASE_LOW (t), val);
7668 if (cmp == 0)
7670 /* Ranges cannot be empty. */
7671 *idx = i;
7672 return true;
7674 else if (cmp > 0)
7675 high = i;
7676 else
7678 low = i + 1;
7679 if (CASE_HIGH (t) != NULL
7680 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
7682 *idx = i;
7683 return true;
7688 *idx = high;
7689 return false;
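/* Illustrative sketch, not GCC code: the same search contract as
   find_case_label_index above, but on a plain sorted array of disjoint
   [LOW, HIGH] intervals.  All names and types below are hypothetical;
   they only model CASE_LOW/CASE_HIGH of the switch labels.  */
#include <stddef.h>

struct toy_case { long low, high; };   /* low <= high; intervals disjoint,
                                          sorted by low */

/* Search CASES[START .. N-1] for VAL.  On a hit store the interval's index
   in *IDX and return 1; otherwise store the index of the first interval
   whose LOW is greater than VAL (or N) and return 0.  */
static int
toy_find_case_index (const struct toy_case *cases, size_t start, size_t n,
                     long val, size_t *idx)
{
  size_t low = start, high = n;

  while (high != low)
    {
      size_t i = (high + low) / 2;     /* i < high <= n, so cases[i] is valid */

      if (val < cases[i].low)
        high = i;                      /* interval i lies entirely above val */
      else if (val > cases[i].high)
        low = i + 1;                   /* interval i lies entirely below val */
      else
        {
          *idx = i;                    /* val is covered by interval i */
          return 1;
        }
    }

  *idx = high;                         /* first interval above val, or n */
  return 0;
}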
7692 /* Searches STMT's case label vector for the range of CASE_LABELs that is used
7693 for values between MIN and MAX. The first index is placed in MIN_IDX. The
7694 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
7695 then MAX_IDX < MIN_IDX.
7696 Returns true if the default label is not needed. */
7698 static bool
7699 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
7700 size_t *max_idx)
7702 size_t i, j;
7703 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
7704 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
7706 if (i == j
7707 && min_take_default
7708 && max_take_default)
7710 /* Only the default case label reached.
7711 Return an empty range. */
7712 *min_idx = 1;
7713 *max_idx = 0;
7714 return false;
7716 else
7718 bool take_default = min_take_default || max_take_default;
7719 tree low, high;
7720 size_t k;
7722 if (max_take_default)
7723 j--;
7725 /* If the case label range is continuous, we do not need
7726 the default case label. Verify that. */
7727 high = CASE_LOW (gimple_switch_label (stmt, i));
7728 if (CASE_HIGH (gimple_switch_label (stmt, i)))
7729 high = CASE_HIGH (gimple_switch_label (stmt, i));
7730 for (k = i + 1; k <= j; ++k)
7732 low = CASE_LOW (gimple_switch_label (stmt, k));
7733 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
7735 take_default = true;
7736 break;
7738 high = low;
7739 if (CASE_HIGH (gimple_switch_label (stmt, k)))
7740 high = CASE_HIGH (gimple_switch_label (stmt, k));
7743 *min_idx = i;
7744 *max_idx = j;
7745 return !take_default;
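/* Illustrative sketch, not GCC code: the gap test performed by the loop
   above, on parallel arrays of label bounds.  A run of sorted, disjoint
   intervals needs no default label only if each LOW is exactly one above
   the previous HIGH.  Hypothetical names; bounds are assumed to stay well
   inside their type, so PREV_HIGH + 1 cannot overflow.  */
#include <stddef.h>

static int
toy_labels_contiguous_p (const long *low, const long *high, size_t i, size_t j)
{
  long prev_high = high[i];
  size_t k;

  for (k = i + 1; k <= j; ++k)
    {
      if (low[k] != prev_high + 1)
        return 0;                /* a gap: the default label is still needed */
      prev_high = high[k];
    }
  return 1;                      /* labels i..j cover one contiguous block */
}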
7749 /* Searches STMT's case label vector for the ranges of CASE_LABELs that are
7750 used in range VR. The indices are placed in MIN_IDX1, MAX_IDX1, MIN_IDX2 and
7751 MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
7752 Returns true if the default label is not needed. */
7754 static bool
7755 find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
7756 size_t *max_idx1, size_t *min_idx2,
7757 size_t *max_idx2)
7759 size_t i, j, k, l;
7760 unsigned int n = gimple_switch_num_labels (stmt);
7761 bool take_default;
7762 tree case_low, case_high;
7763 tree min = vr->min, max = vr->max;
7765 gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
7767 take_default = !find_case_label_range (stmt, min, max, &i, &j);
7769 /* Set second range to empty. */
7770 *min_idx2 = 1;
7771 *max_idx2 = 0;
7773 if (vr->type == VR_RANGE)
7775 *min_idx1 = i;
7776 *max_idx1 = j;
7777 return !take_default;
7780 /* Set first range to all case labels. */
7781 *min_idx1 = 1;
7782 *max_idx1 = n - 1;
7784 if (i > j)
7785 return false;
7787 /* Make sure all the values of case labels [i , j] are contained in
7788 range [MIN, MAX]. */
7789 case_low = CASE_LOW (gimple_switch_label (stmt, i));
7790 case_high = CASE_HIGH (gimple_switch_label (stmt, j));
7791 if (tree_int_cst_compare (case_low, min) < 0)
7792 i += 1;
7793 if (case_high != NULL_TREE
7794 && tree_int_cst_compare (max, case_high) < 0)
7795 j -= 1;
7797 if (i > j)
7798 return false;
7800 /* If the range spans case labels [i, j], the corresponding anti-range spans
7801 the labels [1, i - 1] and [j + 1, n - 1]. */
7802 k = j + 1;
7803 l = n - 1;
7804 if (k > l)
7806 k = 1;
7807 l = 0;
7810 j = i - 1;
7811 i = 1;
7812 if (i > j)
7814 i = k;
7815 j = l;
7816 k = 1;
7817 l = 0;
7820 *min_idx1 = i;
7821 *max_idx1 = j;
7822 *min_idx2 = k;
7823 *max_idx2 = l;
7824 return false;
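/* Illustrative sketch, not GCC code: the index arithmetic above.  If the
   excluded block [MIN, MAX] of an anti-range selects the labels I..J out of
   1..N-1 (label 0 is the default), the anti-range itself selects the two
   blocks [1, I-1] and [J+1, N-1]; an empty block is encoded as lo = 1,
   hi = 0.  The final step above, which moves the second block into the
   first slot when the first one is empty, is left out here.  Hypothetical
   helper.  */
#include <stddef.h>

static void
toy_complement_label_block (size_t i, size_t j, size_t n,
                            size_t *lo1, size_t *hi1,
                            size_t *lo2, size_t *hi2)
{
  *lo1 = 1;
  *hi1 = i - 1;                  /* labels strictly before the block */
  if (*lo1 > *hi1)
    {
      *lo1 = 1;
      *hi1 = 0;                  /* canonical empty encoding */
    }

  *lo2 = j + 1;
  *hi2 = n - 1;                  /* labels strictly after the block */
  if (*lo2 > *hi2)
    {
      *lo2 = 1;
      *hi2 = 0;
    }
}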
7827 /* Visit switch statement STMT. If we can determine which edge
7828 will be taken out of STMT's basic block, record it in
7829 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
7830 SSA_PROP_VARYING. */
7832 static enum ssa_prop_result
7833 vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
7835 tree op, val;
7836 value_range *vr;
7837 size_t i = 0, j = 0, k, l;
7838 bool take_default;
7840 *taken_edge_p = NULL;
7841 op = gimple_switch_index (stmt);
7842 if (TREE_CODE (op) != SSA_NAME)
7843 return SSA_PROP_VARYING;
7845 vr = get_value_range (op);
7846 if (dump_file && (dump_flags & TDF_DETAILS))
7848 fprintf (dump_file, "\nVisiting switch expression with operand ");
7849 print_generic_expr (dump_file, op, 0);
7850 fprintf (dump_file, " with known range ");
7851 dump_value_range (dump_file, vr);
7852 fprintf (dump_file, "\n");
7855 if ((vr->type != VR_RANGE
7856 && vr->type != VR_ANTI_RANGE)
7857 || symbolic_range_p (vr))
7858 return SSA_PROP_VARYING;
7860 /* Find the single edge that is taken from the switch expression. */
7861 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
7863 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
7864 label. */
7865 if (j < i)
7867 gcc_assert (take_default);
7868 val = gimple_switch_default_label (stmt);
7870 else
7872 /* Check if labels with index i to j and maybe the default label
7873 are all reaching the same label. */
7875 val = gimple_switch_label (stmt, i);
7876 if (take_default
7877 && CASE_LABEL (gimple_switch_default_label (stmt))
7878 != CASE_LABEL (val))
7880 if (dump_file && (dump_flags & TDF_DETAILS))
7881 fprintf (dump_file, " not a single destination for this "
7882 "range\n");
7883 return SSA_PROP_VARYING;
7885 for (++i; i <= j; ++i)
7887 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
7889 if (dump_file && (dump_flags & TDF_DETAILS))
7890 fprintf (dump_file, " not a single destination for this "
7891 "range\n");
7892 return SSA_PROP_VARYING;
7895 for (; k <= l; ++k)
7897 if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
7899 if (dump_file && (dump_flags & TDF_DETAILS))
7900 fprintf (dump_file, " not a single destination for this "
7901 "range\n");
7902 return SSA_PROP_VARYING;
7907 *taken_edge_p = find_edge (gimple_bb (stmt),
7908 label_to_block (CASE_LABEL (val)));
7910 if (dump_file && (dump_flags & TDF_DETAILS))
7912 fprintf (dump_file, " will take edge to ");
7913 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
7916 return SSA_PROP_INTERESTING;
7920 /* Evaluate statement STMT. If the statement produces a useful range,
7921 return SSA_PROP_INTERESTING and record the SSA name with the
7922 interesting range into *OUTPUT_P.
7924 If STMT is a conditional branch and we can determine its truth
7925 value, the taken edge is recorded in *TAKEN_EDGE_P.
7927 If STMT produces a varying value, return SSA_PROP_VARYING. */
7929 static enum ssa_prop_result
7930 vrp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
7932 tree def;
7933 ssa_op_iter iter;
7935 if (dump_file && (dump_flags & TDF_DETAILS))
7937 fprintf (dump_file, "\nVisiting statement:\n");
7938 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
7941 if (!stmt_interesting_for_vrp (stmt))
7942 gcc_assert (stmt_ends_bb_p (stmt));
7943 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7944 return vrp_visit_assignment_or_call (stmt, output_p);
7945 else if (gimple_code (stmt) == GIMPLE_COND)
7946 return vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
7947 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7948 return vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
7950 /* All other statements produce nothing of interest for VRP, so mark
7951 their outputs varying and prevent further simulation. */
7952 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
7953 set_value_range_to_varying (get_value_range (def));
7955 return SSA_PROP_VARYING;
7958 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7959 { VR1TYPE, VR1MIN, VR1MAX } and store the result
7960 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
7961 possible such range. The resulting range is not canonicalized. */
7963 static void
7964 union_ranges (enum value_range_type *vr0type,
7965 tree *vr0min, tree *vr0max,
7966 enum value_range_type vr1type,
7967 tree vr1min, tree vr1max)
7969 bool mineq = operand_equal_p (*vr0min, vr1min, 0);
7970 bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
7972 /* [] is vr0, () is vr1 in the following classification comments. */
7973 if (mineq && maxeq)
7975 /* [( )] */
7976 if (*vr0type == vr1type)
7977 /* Nothing to do for equal ranges. */
7979 else if ((*vr0type == VR_RANGE
7980 && vr1type == VR_ANTI_RANGE)
7981 || (*vr0type == VR_ANTI_RANGE
7982 && vr1type == VR_RANGE))
7984 /* For anti-range with range union the result is varying. */
7985 goto give_up;
7987 else
7988 gcc_unreachable ();
7990 else if (operand_less_p (*vr0max, vr1min) == 1
7991 || operand_less_p (vr1max, *vr0min) == 1)
7993 /* [ ] ( ) or ( ) [ ]
7994 If the underlying ranges do not intersect, the result of the union
7995 is the anti-range when one operand is an anti-range, and it covers
7996 everything when both operands are anti-ranges. */
7997 if (*vr0type == VR_ANTI_RANGE
7998 && vr1type == VR_ANTI_RANGE)
7999 goto give_up;
8000 else if (*vr0type == VR_ANTI_RANGE
8001 && vr1type == VR_RANGE)
8003 else if (*vr0type == VR_RANGE
8004 && vr1type == VR_ANTI_RANGE)
8006 *vr0type = vr1type;
8007 *vr0min = vr1min;
8008 *vr0max = vr1max;
8010 else if (*vr0type == VR_RANGE
8011 && vr1type == VR_RANGE)
8013 /* The result is the convex hull of both ranges. */
8014 if (operand_less_p (*vr0max, vr1min) == 1)
8016 /* If the result can be an anti-range, create one. */
8017 if (TREE_CODE (*vr0max) == INTEGER_CST
8018 && TREE_CODE (vr1min) == INTEGER_CST
8019 && vrp_val_is_min (*vr0min)
8020 && vrp_val_is_max (vr1max))
8022 tree min = int_const_binop (PLUS_EXPR,
8023 *vr0max,
8024 build_int_cst (TREE_TYPE (*vr0max), 1));
8025 tree max = int_const_binop (MINUS_EXPR,
8026 vr1min,
8027 build_int_cst (TREE_TYPE (vr1min), 1));
8028 if (!operand_less_p (max, min))
8030 *vr0type = VR_ANTI_RANGE;
8031 *vr0min = min;
8032 *vr0max = max;
8034 else
8035 *vr0max = vr1max;
8037 else
8038 *vr0max = vr1max;
8040 else
8042 /* If the result can be an anti-range, create one. */
8043 if (TREE_CODE (vr1max) == INTEGER_CST
8044 && TREE_CODE (*vr0min) == INTEGER_CST
8045 && vrp_val_is_min (vr1min)
8046 && vrp_val_is_max (*vr0max))
8048 tree min = int_const_binop (PLUS_EXPR,
8049 vr1max,
8050 build_int_cst (TREE_TYPE (vr1max), 1));
8051 tree max = int_const_binop (MINUS_EXPR,
8052 *vr0min,
8053 build_int_cst (TREE_TYPE (*vr0min), 1));
8054 if (!operand_less_p (max, min))
8056 *vr0type = VR_ANTI_RANGE;
8057 *vr0min = min;
8058 *vr0max = max;
8060 else
8061 *vr0min = vr1min;
8063 else
8064 *vr0min = vr1min;
8067 else
8068 gcc_unreachable ();
8070 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
8071 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
8073 /* [ ( ) ] or [( ) ] or [ ( )] */
8074 if (*vr0type == VR_RANGE
8075 && vr1type == VR_RANGE)
8077 else if (*vr0type == VR_ANTI_RANGE
8078 && vr1type == VR_ANTI_RANGE)
8080 *vr0type = vr1type;
8081 *vr0min = vr1min;
8082 *vr0max = vr1max;
8084 else if (*vr0type == VR_ANTI_RANGE
8085 && vr1type == VR_RANGE)
8087 /* Arbitrarily choose the right or left gap. */
8088 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
8089 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8090 build_int_cst (TREE_TYPE (vr1min), 1));
8091 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
8092 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8093 build_int_cst (TREE_TYPE (vr1max), 1));
8094 else
8095 goto give_up;
8097 else if (*vr0type == VR_RANGE
8098 && vr1type == VR_ANTI_RANGE)
8099 /* The result covers everything. */
8100 goto give_up;
8101 else
8102 gcc_unreachable ();
8104 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
8105 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
8107 /* ( [ ] ) or ([ ] ) or ( [ ]) */
8108 if (*vr0type == VR_RANGE
8109 && vr1type == VR_RANGE)
8111 *vr0type = vr1type;
8112 *vr0min = vr1min;
8113 *vr0max = vr1max;
8115 else if (*vr0type == VR_ANTI_RANGE
8116 && vr1type == VR_ANTI_RANGE)
8118 else if (*vr0type == VR_RANGE
8119 && vr1type == VR_ANTI_RANGE)
8121 *vr0type = VR_ANTI_RANGE;
8122 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
8124 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8125 build_int_cst (TREE_TYPE (*vr0min), 1));
8126 *vr0min = vr1min;
8128 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
8130 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8131 build_int_cst (TREE_TYPE (*vr0max), 1));
8132 *vr0max = vr1max;
8134 else
8135 goto give_up;
8137 else if (*vr0type == VR_ANTI_RANGE
8138 && vr1type == VR_RANGE)
8139 /* The result covers everything. */
8140 goto give_up;
8141 else
8142 gcc_unreachable ();
8144 else if ((operand_less_p (vr1min, *vr0max) == 1
8145 || operand_equal_p (vr1min, *vr0max, 0))
8146 && operand_less_p (*vr0min, vr1min) == 1
8147 && operand_less_p (*vr0max, vr1max) == 1)
8149 /* [ ( ] ) or [ ]( ) */
8150 if (*vr0type == VR_RANGE
8151 && vr1type == VR_RANGE)
8152 *vr0max = vr1max;
8153 else if (*vr0type == VR_ANTI_RANGE
8154 && vr1type == VR_ANTI_RANGE)
8155 *vr0min = vr1min;
8156 else if (*vr0type == VR_ANTI_RANGE
8157 && vr1type == VR_RANGE)
8159 if (TREE_CODE (vr1min) == INTEGER_CST)
8160 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8161 build_int_cst (TREE_TYPE (vr1min), 1));
8162 else
8163 goto give_up;
8165 else if (*vr0type == VR_RANGE
8166 && vr1type == VR_ANTI_RANGE)
8168 if (TREE_CODE (*vr0max) == INTEGER_CST)
8170 *vr0type = vr1type;
8171 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8172 build_int_cst (TREE_TYPE (*vr0max), 1));
8173 *vr0max = vr1max;
8175 else
8176 goto give_up;
8178 else
8179 gcc_unreachable ();
8181 else if ((operand_less_p (*vr0min, vr1max) == 1
8182 || operand_equal_p (*vr0min, vr1max, 0))
8183 && operand_less_p (vr1min, *vr0min) == 1
8184 && operand_less_p (vr1max, *vr0max) == 1)
8186 /* ( [ ) ] or ( )[ ] */
8187 if (*vr0type == VR_RANGE
8188 && vr1type == VR_RANGE)
8189 *vr0min = vr1min;
8190 else if (*vr0type == VR_ANTI_RANGE
8191 && vr1type == VR_ANTI_RANGE)
8192 *vr0max = vr1max;
8193 else if (*vr0type == VR_ANTI_RANGE
8194 && vr1type == VR_RANGE)
8196 if (TREE_CODE (vr1max) == INTEGER_CST)
8197 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8198 build_int_cst (TREE_TYPE (vr1max), 1));
8199 else
8200 goto give_up;
8202 else if (*vr0type == VR_RANGE
8203 && vr1type == VR_ANTI_RANGE)
8205 if (TREE_CODE (*vr0min) == INTEGER_CST)
8207 *vr0type = vr1type;
8208 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8209 build_int_cst (TREE_TYPE (*vr0min), 1));
8210 *vr0min = vr1min;
8212 else
8213 goto give_up;
8215 else
8216 gcc_unreachable ();
8218 else
8219 goto give_up;
8221 return;
8223 give_up:
8224 *vr0type = VR_VARYING;
8225 *vr0min = NULL_TREE;
8226 *vr0max = NULL_TREE;
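/* Illustrative sketch, not GCC code: the VR_RANGE/VR_RANGE corner of the
   union above on plain 32-bit bounds, for the case where [A_MIN, A_MAX]
   lies entirely below [B_MIN, B_MAX].  When the two ranges already touch
   both type bounds, the gap between them can be kept exactly as an
   anti-range; otherwise only the convex hull can be returned, which may
   over-approximate.  Hypothetical names throughout.  */
#include <limits.h>

enum toy_kind { TOY_RANGE, TOY_ANTI_RANGE };
struct toy_vr { enum toy_kind kind; int min, max; };

static struct toy_vr
toy_union_disjoint (int a_min, int a_max, int b_min, int b_max)
{
  struct toy_vr r;

  /* Assumes a_min <= a_max < b_min <= b_max.  */
  if (a_min == INT_MIN && b_max == INT_MAX && a_max + 1 <= b_min - 1)
    {
      r.kind = TOY_ANTI_RANGE;           /* everything except the gap */
      r.min = a_max + 1;
      r.max = b_min - 1;
    }
  else
    {
      r.kind = TOY_RANGE;                /* convex hull, possibly imprecise */
      r.min = a_min;
      r.max = b_max;
    }
  return r;
}

/* E.g. [INT_MIN, -1] U [1, INT_MAX] becomes ~[0, 0] ("known nonzero"),
   while [0, 3] U [10, 20] can only be kept as [0, 20].  */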
8229 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
8230 { VR1TYPE, VR1MIN, VR1MAX } and store the result
8231 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
8232 possible such range. The resulting range is not canonicalized. */
8234 static void
8235 intersect_ranges (enum value_range_type *vr0type,
8236 tree *vr0min, tree *vr0max,
8237 enum value_range_type vr1type,
8238 tree vr1min, tree vr1max)
8240 bool mineq = operand_equal_p (*vr0min, vr1min, 0);
8241 bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
8243 /* [] is vr0, () is vr1 in the following classification comments. */
8244 if (mineq && maxeq)
8246 /* [( )] */
8247 if (*vr0type == vr1type)
8248 /* Nothing to do for equal ranges. */
8250 else if ((*vr0type == VR_RANGE
8251 && vr1type == VR_ANTI_RANGE)
8252 || (*vr0type == VR_ANTI_RANGE
8253 && vr1type == VR_RANGE))
8255 /* For anti-range with range intersection the result is empty. */
8256 *vr0type = VR_UNDEFINED;
8257 *vr0min = NULL_TREE;
8258 *vr0max = NULL_TREE;
8260 else
8261 gcc_unreachable ();
8263 else if (operand_less_p (*vr0max, vr1min) == 1
8264 || operand_less_p (vr1max, *vr0min) == 1)
8266 /* [ ] ( ) or ( ) [ ]
8267 If the underlying ranges do not intersect, intersecting a range
8268 with an anti-range yields the range unchanged, while intersecting
8269 two plain ranges yields the empty range. */
8270 if (*vr0type == VR_RANGE
8271 && vr1type == VR_ANTI_RANGE)
8273 else if (*vr0type == VR_ANTI_RANGE
8274 && vr1type == VR_RANGE)
8276 *vr0type = vr1type;
8277 *vr0min = vr1min;
8278 *vr0max = vr1max;
8280 else if (*vr0type == VR_RANGE
8281 && vr1type == VR_RANGE)
8283 *vr0type = VR_UNDEFINED;
8284 *vr0min = NULL_TREE;
8285 *vr0max = NULL_TREE;
8287 else if (*vr0type == VR_ANTI_RANGE
8288 && vr1type == VR_ANTI_RANGE)
8290 /* If the anti-ranges are adjacent to each other merge them. */
8291 if (TREE_CODE (*vr0max) == INTEGER_CST
8292 && TREE_CODE (vr1min) == INTEGER_CST
8293 && operand_less_p (*vr0max, vr1min) == 1
8294 && integer_onep (int_const_binop (MINUS_EXPR,
8295 vr1min, *vr0max)))
8296 *vr0max = vr1max;
8297 else if (TREE_CODE (vr1max) == INTEGER_CST
8298 && TREE_CODE (*vr0min) == INTEGER_CST
8299 && operand_less_p (vr1max, *vr0min) == 1
8300 && integer_onep (int_const_binop (MINUS_EXPR,
8301 *vr0min, vr1max)))
8302 *vr0min = vr1min;
8303 /* Else arbitrarily take VR0. */
8306 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
8307 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
8309 /* [ ( ) ] or [( ) ] or [ ( )] */
8310 if (*vr0type == VR_RANGE
8311 && vr1type == VR_RANGE)
8313 /* If both are ranges the result is the inner one. */
8314 *vr0type = vr1type;
8315 *vr0min = vr1min;
8316 *vr0max = vr1max;
8318 else if (*vr0type == VR_RANGE
8319 && vr1type == VR_ANTI_RANGE)
8321 /* Choose the right gap if the left one is empty. */
8322 if (mineq)
8324 if (TREE_CODE (vr1max) == INTEGER_CST)
8325 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8326 build_int_cst (TREE_TYPE (vr1max), 1));
8327 else
8328 *vr0min = vr1max;
8330 /* Choose the left gap if the right one is empty. */
8331 else if (maxeq)
8333 if (TREE_CODE (vr1min) == INTEGER_CST)
8334 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8335 build_int_cst (TREE_TYPE (vr1min), 1));
8336 else
8337 *vr0max = vr1min;
8339 /* Choose the anti-range if the range is effectively varying. */
8340 else if (vrp_val_is_min (*vr0min)
8341 && vrp_val_is_max (*vr0max))
8343 *vr0type = vr1type;
8344 *vr0min = vr1min;
8345 *vr0max = vr1max;
8347 /* Else choose the range. */
8349 else if (*vr0type == VR_ANTI_RANGE
8350 && vr1type == VR_ANTI_RANGE)
8351 /* If both are anti-ranges the result is the outer one. */
8353 else if (*vr0type == VR_ANTI_RANGE
8354 && vr1type == VR_RANGE)
8356 /* The intersection is empty. */
8357 *vr0type = VR_UNDEFINED;
8358 *vr0min = NULL_TREE;
8359 *vr0max = NULL_TREE;
8361 else
8362 gcc_unreachable ();
8364 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
8365 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
8367 /* ( [ ] ) or ([ ] ) or ( [ ]) */
8368 if (*vr0type == VR_RANGE
8369 && vr1type == VR_RANGE)
8370 /* Choose the inner range. */
8372 else if (*vr0type == VR_ANTI_RANGE
8373 && vr1type == VR_RANGE)
8375 /* Choose the right gap if the left is empty. */
8376 if (mineq)
8378 *vr0type = VR_RANGE;
8379 if (TREE_CODE (*vr0max) == INTEGER_CST)
8380 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8381 build_int_cst (TREE_TYPE (*vr0max), 1));
8382 else
8383 *vr0min = *vr0max;
8384 *vr0max = vr1max;
8386 /* Choose the left gap if the right is empty. */
8387 else if (maxeq)
8389 *vr0type = VR_RANGE;
8390 if (TREE_CODE (*vr0min) == INTEGER_CST)
8391 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8392 build_int_cst (TREE_TYPE (*vr0min), 1));
8393 else
8394 *vr0max = *vr0min;
8395 *vr0min = vr1min;
8397 /* Choose the anti-range if the range is effectively varying. */
8398 else if (vrp_val_is_min (vr1min)
8399 && vrp_val_is_max (vr1max))
8401 /* Else choose the range. */
8402 else
8404 *vr0type = vr1type;
8405 *vr0min = vr1min;
8406 *vr0max = vr1max;
8409 else if (*vr0type == VR_ANTI_RANGE
8410 && vr1type == VR_ANTI_RANGE)
8412 /* If both are anti-ranges the result is the outer one. */
8413 *vr0type = vr1type;
8414 *vr0min = vr1min;
8415 *vr0max = vr1max;
8417 else if (vr1type == VR_ANTI_RANGE
8418 && *vr0type == VR_RANGE)
8420 /* The intersection is empty. */
8421 *vr0type = VR_UNDEFINED;
8422 *vr0min = NULL_TREE;
8423 *vr0max = NULL_TREE;
8425 else
8426 gcc_unreachable ();
8428 else if ((operand_less_p (vr1min, *vr0max) == 1
8429 || operand_equal_p (vr1min, *vr0max, 0))
8430 && operand_less_p (*vr0min, vr1min) == 1)
8432 /* [ ( ] ) or [ ]( ) */
8433 if (*vr0type == VR_ANTI_RANGE
8434 && vr1type == VR_ANTI_RANGE)
8435 *vr0max = vr1max;
8436 else if (*vr0type == VR_RANGE
8437 && vr1type == VR_RANGE)
8438 *vr0min = vr1min;
8439 else if (*vr0type == VR_RANGE
8440 && vr1type == VR_ANTI_RANGE)
8442 if (TREE_CODE (vr1min) == INTEGER_CST)
8443 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8444 build_int_cst (TREE_TYPE (vr1min), 1));
8445 else
8446 *vr0max = vr1min;
8448 else if (*vr0type == VR_ANTI_RANGE
8449 && vr1type == VR_RANGE)
8451 *vr0type = VR_RANGE;
8452 if (TREE_CODE (*vr0max) == INTEGER_CST)
8453 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8454 build_int_cst (TREE_TYPE (*vr0max), 1));
8455 else
8456 *vr0min = *vr0max;
8457 *vr0max = vr1max;
8459 else
8460 gcc_unreachable ();
8462 else if ((operand_less_p (*vr0min, vr1max) == 1
8463 || operand_equal_p (*vr0min, vr1max, 0))
8464 && operand_less_p (vr1min, *vr0min) == 1)
8466 /* ( [ ) ] or ( )[ ] */
8467 if (*vr0type == VR_ANTI_RANGE
8468 && vr1type == VR_ANTI_RANGE)
8469 *vr0min = vr1min;
8470 else if (*vr0type == VR_RANGE
8471 && vr1type == VR_RANGE)
8472 *vr0max = vr1max;
8473 else if (*vr0type == VR_RANGE
8474 && vr1type == VR_ANTI_RANGE)
8476 if (TREE_CODE (vr1max) == INTEGER_CST)
8477 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8478 build_int_cst (TREE_TYPE (vr1max), 1));
8479 else
8480 *vr0min = vr1max;
8482 else if (*vr0type == VR_ANTI_RANGE
8483 && vr1type == VR_RANGE)
8485 *vr0type = VR_RANGE;
8486 if (TREE_CODE (*vr0min) == INTEGER_CST)
8487 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8488 build_int_cst (TREE_TYPE (*vr0min), 1));
8489 else
8490 *vr0max = *vr0min;
8491 *vr0min = vr1min;
8493 else
8494 gcc_unreachable ();
8497 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
8498 result for the intersection. That's always a conservative
8499 correct estimate. */
8501 return;
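/* Illustrative sketch, not GCC code: the "[ ( ) ]" corner of the
   intersection above -- a plain range [*R_MIN, *R_MAX] intersected with an
   anti-range ~[A_MIN, A_MAX] whose excluded block is nested inside it.
   Only one of the two leftover gaps fits in a single range, so a gap is
   taken only when the other one is empty; otherwise the range is kept
   unchanged as a sound over-approximation (the real code additionally
   prefers the anti-range when the range spans the whole type).
   Hypothetical helper; assumes *R_MIN <= A_MIN, A_MAX <= *R_MAX and that
   the excluded block is a proper subset of the range.  */
static void
toy_intersect_range_anti (int *r_min, int *r_max, int a_min, int a_max)
{
  if (*r_min == a_min)
    *r_min = a_max + 1;          /* left gap empty: keep the right gap */
  else if (*r_max == a_max)
    *r_max = a_min - 1;          /* right gap empty: keep the left gap */
  /* Otherwise both gaps are non-empty; a single range cannot describe
     them, so [*r_min, *r_max] is simply left untouched.  */
}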
8505 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
8506 in *VR0. This may not be the smallest possible such range. */
8508 static void
8509 vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
8511 value_range saved;
8513 /* If either range is VR_VARYING the other one wins. */
8514 if (vr1->type == VR_VARYING)
8515 return;
8516 if (vr0->type == VR_VARYING)
8518 copy_value_range (vr0, vr1);
8519 return;
8522 /* When either range is VR_UNDEFINED the resulting range is
8523 VR_UNDEFINED, too. */
8524 if (vr0->type == VR_UNDEFINED)
8525 return;
8526 if (vr1->type == VR_UNDEFINED)
8528 set_value_range_to_undefined (vr0);
8529 return;
8532 /* Save the original vr0 so we can return it as conservative intersection
8533 result when our worker turns things to varying. */
8534 saved = *vr0;
8535 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
8536 vr1->type, vr1->min, vr1->max);
8537 /* Make sure to canonicalize the result though as the inversion of a
8538 VR_RANGE can still be a VR_RANGE. */
8539 set_and_canonicalize_value_range (vr0, vr0->type,
8540 vr0->min, vr0->max, vr0->equiv);
8541 /* If that failed, use the saved original VR0. */
8542 if (vr0->type == VR_VARYING)
8544 *vr0 = saved;
8545 return;
8547 /* If the result is VR_UNDEFINED there is no need to mess with
8548 the equivalencies. */
8549 if (vr0->type == VR_UNDEFINED)
8550 return;
8552 /* The resulting set of equivalences for range intersection is the union of
8553 the two sets. */
8554 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8555 bitmap_ior_into (vr0->equiv, vr1->equiv);
8556 else if (vr1->equiv && !vr0->equiv)
8557 bitmap_copy (vr0->equiv, vr1->equiv);
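/* Illustrative sketch, not GCC code: the lattice shortcuts taken above
   before the real bound work starts.  For an intersection, VR_UNDEFINED
   (the empty set) absorbs everything and VR_VARYING (the whole type) is
   the neutral element.  Hypothetical names; TOY_KNOWN stands for "has real
   bounds, so the full intersection routine must run".  */
enum toy_vr_kind { TOY_UNDEFINED, TOY_KNOWN, TOY_VARYING };

static enum toy_vr_kind
toy_intersect_kind (enum toy_vr_kind a, enum toy_vr_kind b)
{
  if (a == TOY_UNDEFINED || b == TOY_UNDEFINED)
    return TOY_UNDEFINED;        /* empty set absorbs */
  if (a == TOY_VARYING)
    return b;                    /* whole type is the identity */
  if (b == TOY_VARYING)
    return a;
  return TOY_KNOWN;              /* both have bounds: do the real work */
}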
8560 static void
8561 vrp_intersect_ranges (value_range *vr0, value_range *vr1)
8563 if (dump_file && (dump_flags & TDF_DETAILS))
8565 fprintf (dump_file, "Intersecting\n ");
8566 dump_value_range (dump_file, vr0);
8567 fprintf (dump_file, "\nand\n ");
8568 dump_value_range (dump_file, vr1);
8569 fprintf (dump_file, "\n");
8571 vrp_intersect_ranges_1 (vr0, vr1);
8572 if (dump_file && (dump_flags & TDF_DETAILS))
8574 fprintf (dump_file, "to\n ");
8575 dump_value_range (dump_file, vr0);
8576 fprintf (dump_file, "\n");
8580 /* Meet operation for value ranges. Given two value ranges VR0 and
8581 VR1, store in VR0 a range that contains both VR0 and VR1. This
8582 may not be the smallest possible such range. */
8584 static void
8585 vrp_meet_1 (value_range *vr0, value_range *vr1)
8587 value_range saved;
8589 if (vr0->type == VR_UNDEFINED)
8591 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
8592 return;
8595 if (vr1->type == VR_UNDEFINED)
8597 /* VR0 already has the resulting range. */
8598 return;
8601 if (vr0->type == VR_VARYING)
8603 /* Nothing to do. VR0 already has the resulting range. */
8604 return;
8607 if (vr1->type == VR_VARYING)
8609 set_value_range_to_varying (vr0);
8610 return;
8613 saved = *vr0;
8614 union_ranges (&vr0->type, &vr0->min, &vr0->max,
8615 vr1->type, vr1->min, vr1->max);
8616 if (vr0->type == VR_VARYING)
8618 /* Failed to find an efficient meet. Before giving up and setting
8619 the result to VARYING, see if we can at least derive a useful
8620 anti-range. FIXME, all this nonsense about distinguishing
8621 anti-ranges from ranges is necessary because of the odd
8622 semantics of range_includes_zero_p and friends. */
8623 if (((saved.type == VR_RANGE
8624 && range_includes_zero_p (saved.min, saved.max) == 0)
8625 || (saved.type == VR_ANTI_RANGE
8626 && range_includes_zero_p (saved.min, saved.max) == 1))
8627 && ((vr1->type == VR_RANGE
8628 && range_includes_zero_p (vr1->min, vr1->max) == 0)
8629 || (vr1->type == VR_ANTI_RANGE
8630 && range_includes_zero_p (vr1->min, vr1->max) == 1)))
8632 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
8634 /* Since this meet operation did not result from the meeting of
8635 two equivalent names, VR0 cannot have any equivalences. */
8636 if (vr0->equiv)
8637 bitmap_clear (vr0->equiv);
8638 return;
8641 set_value_range_to_varying (vr0);
8642 return;
8644 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
8645 vr0->equiv);
8646 if (vr0->type == VR_VARYING)
8647 return;
8649 /* The resulting set of equivalences is always the intersection of
8650 the two sets. */
8651 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8652 bitmap_and_into (vr0->equiv, vr1->equiv);
8653 else if (vr0->equiv && !vr1->equiv)
8654 bitmap_clear (vr0->equiv);
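/* Illustrative sketch, not GCC code: the fallback above when union_ranges
   gives up.  Zero belongs to a union only if it belongs to one of the
   operands, so if both operands are known to exclude zero (e.g. nonnull
   pointers), recording ~[0, 0] is still sound even though the exact
   bounds are lost.  Toy ranges with hypothetical names; an anti-range
   ~[LO, HI] means "anything outside [LO, HI]".  */
static int
toy_excludes_zero (int is_anti, long lo, long hi)
{
  int zero_inside_block = (lo <= 0 && 0 <= hi);
  return is_anti ? zero_inside_block : !zero_inside_block;
}

/* A union is zero-free exactly when toy_excludes_zero holds for both
   operands, which mirrors the range_includes_zero_p tests above.  */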
8657 static void
8658 vrp_meet (value_range *vr0, value_range *vr1)
8660 if (dump_file && (dump_flags & TDF_DETAILS))
8662 fprintf (dump_file, "Meeting\n ");
8663 dump_value_range (dump_file, vr0);
8664 fprintf (dump_file, "\nand\n ");
8665 dump_value_range (dump_file, vr1);
8666 fprintf (dump_file, "\n");
8668 vrp_meet_1 (vr0, vr1);
8669 if (dump_file && (dump_flags & TDF_DETAILS))
8671 fprintf (dump_file, "to\n ");
8672 dump_value_range (dump_file, vr0);
8673 fprintf (dump_file, "\n");
8678 /* Visit all arguments for PHI node PHI that flow through executable
8679 edges. If a valid value range can be derived from all the incoming
8680 value ranges, set a new range for the LHS of PHI. */
8682 static enum ssa_prop_result
8683 vrp_visit_phi_node (gphi *phi)
8685 size_t i;
8686 tree lhs = PHI_RESULT (phi);
8687 value_range *lhs_vr = get_value_range (lhs);
8688 value_range vr_result = VR_INITIALIZER;
8689 bool first = true;
8690 int edges, old_edges;
8691 struct loop *l;
8693 if (dump_file && (dump_flags & TDF_DETAILS))
8695 fprintf (dump_file, "\nVisiting PHI node: ");
8696 print_gimple_stmt (dump_file, phi, 0, dump_flags);
8699 edges = 0;
8700 for (i = 0; i < gimple_phi_num_args (phi); i++)
8702 edge e = gimple_phi_arg_edge (phi, i);
8704 if (dump_file && (dump_flags & TDF_DETAILS))
8706 fprintf (dump_file,
8707 " Argument #%d (%d -> %d %sexecutable)\n",
8708 (int) i, e->src->index, e->dest->index,
8709 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
8712 if (e->flags & EDGE_EXECUTABLE)
8714 tree arg = PHI_ARG_DEF (phi, i);
8715 value_range vr_arg;
8717 ++edges;
8719 if (TREE_CODE (arg) == SSA_NAME)
8721 vr_arg = *(get_value_range (arg));
8722 /* Do not allow equivalences or symbolic ranges to leak in from
8723 backedges. That creates invalid equivalencies.
8724 See PR53465 and PR54767. */
8725 if (e->flags & EDGE_DFS_BACK)
8727 if (vr_arg.type == VR_RANGE
8728 || vr_arg.type == VR_ANTI_RANGE)
8730 vr_arg.equiv = NULL;
8731 if (symbolic_range_p (&vr_arg))
8733 vr_arg.type = VR_VARYING;
8734 vr_arg.min = NULL_TREE;
8735 vr_arg.max = NULL_TREE;
8739 else
8741 /* If the non-backedge argument's range is VR_VARYING then
8742 we can still try recording a simple equivalence. */
8743 if (vr_arg.type == VR_VARYING)
8745 vr_arg.type = VR_RANGE;
8746 vr_arg.min = arg;
8747 vr_arg.max = arg;
8748 vr_arg.equiv = NULL;
8752 else
8754 if (TREE_OVERFLOW_P (arg))
8755 arg = drop_tree_overflow (arg);
8757 vr_arg.type = VR_RANGE;
8758 vr_arg.min = arg;
8759 vr_arg.max = arg;
8760 vr_arg.equiv = NULL;
8763 if (dump_file && (dump_flags & TDF_DETAILS))
8765 fprintf (dump_file, "\t");
8766 print_generic_expr (dump_file, arg, dump_flags);
8767 fprintf (dump_file, ": ");
8768 dump_value_range (dump_file, &vr_arg);
8769 fprintf (dump_file, "\n");
8772 if (first)
8773 copy_value_range (&vr_result, &vr_arg);
8774 else
8775 vrp_meet (&vr_result, &vr_arg);
8776 first = false;
8778 if (vr_result.type == VR_VARYING)
8779 break;
8783 if (vr_result.type == VR_VARYING)
8784 goto varying;
8785 else if (vr_result.type == VR_UNDEFINED)
8786 goto update_range;
8788 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
8789 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
8791 /* To prevent infinite iterations in the algorithm, derive ranges
8792 when the new value is slightly bigger or smaller than the
8793 previous one. We don't do this if we have seen a new executable
8794 edge; this helps us avoid an overflow infinity for conditionals
8795 which are not in a loop. If the old value-range was VR_UNDEFINED
8796 use the updated range and iterate one more time. */
8797 if (edges > 0
8798 && gimple_phi_num_args (phi) > 1
8799 && edges == old_edges
8800 && lhs_vr->type != VR_UNDEFINED)
8802 /* Compare old and new ranges, fall back to varying if the
8803 values are not comparable. */
8804 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
8805 if (cmp_min == -2)
8806 goto varying;
8807 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
8808 if (cmp_max == -2)
8809 goto varying;
8811 /* For non VR_RANGE or for pointers fall back to varying if
8812 the range changed. */
8813 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
8814 || POINTER_TYPE_P (TREE_TYPE (lhs)))
8815 && (cmp_min != 0 || cmp_max != 0))
8816 goto varying;
8818 /* If the new minimum is larger than the previous one
8819 retain the old value. If the new minimum value is smaller
8820 than the previous one and not -INF go all the way to -INF + 1.
8821 In the first case, to avoid infinite bouncing between different
8822 minimums, and in the other case to avoid iterating millions of
8823 times to reach -INF. Going to -INF + 1 also lets the following
8824 iteration compute whether there will be any overflow, at the
8825 expense of one additional iteration. */
8826 if (cmp_min < 0)
8827 vr_result.min = lhs_vr->min;
8828 else if (cmp_min > 0
8829 && !vrp_val_is_min (vr_result.min))
8830 vr_result.min
8831 = int_const_binop (PLUS_EXPR,
8832 vrp_val_min (TREE_TYPE (vr_result.min)),
8833 build_int_cst (TREE_TYPE (vr_result.min), 1));
8835 /* Similarly for the maximum value. */
8836 if (cmp_max > 0)
8837 vr_result.max = lhs_vr->max;
8838 else if (cmp_max < 0
8839 && !vrp_val_is_max (vr_result.max))
8840 vr_result.max
8841 = int_const_binop (MINUS_EXPR,
8842 vrp_val_max (TREE_TYPE (vr_result.min)),
8843 build_int_cst (TREE_TYPE (vr_result.min), 1));
8845 /* If we dropped either bound to +-INF then if this is a loop
8846 PHI node SCEV may know more about its value-range. */
8847 if (cmp_min > 0 || cmp_min < 0
8848 || cmp_max < 0 || cmp_max > 0)
8849 goto scev_check;
8851 goto infinite_check;
8854 /* If the new range is different than the previous value, keep
8855 iterating. */
8856 update_range:
8857 if (update_value_range (lhs, &vr_result))
8859 if (dump_file && (dump_flags & TDF_DETAILS))
8861 fprintf (dump_file, "Found new range for ");
8862 print_generic_expr (dump_file, lhs, 0);
8863 fprintf (dump_file, ": ");
8864 dump_value_range (dump_file, &vr_result);
8865 fprintf (dump_file, "\n");
8868 if (vr_result.type == VR_VARYING)
8869 return SSA_PROP_VARYING;
8871 return SSA_PROP_INTERESTING;
8874 /* Nothing changed, don't add outgoing edges. */
8875 return SSA_PROP_NOT_INTERESTING;
8877 varying:
8878 set_value_range_to_varying (&vr_result);
8880 scev_check:
8881 /* If this is a loop PHI node SCEV may know more about its value-range.
8882 scev_check can be reached from two paths, one is a fall through from above
8883 "varying" label, the other is direct goto from code block which tries to
8884 avoid infinite simulation. */
8885 if ((l = loop_containing_stmt (phi))
8886 && l->header == gimple_bb (phi))
8887 adjust_range_with_scev (&vr_result, l, phi, lhs);
8889 infinite_check:
8890 /* If we will end up with a (-INF, +INF) range, set it to
8891 VARYING. Same if the previous max value was invalid for
8892 the type and we end up with vr_result.min > vr_result.max. */
8893 if ((vr_result.type == VR_RANGE || vr_result.type == VR_ANTI_RANGE)
8894 && !((vrp_val_is_max (vr_result.max) && vrp_val_is_min (vr_result.min))
8895 || compare_values (vr_result.min, vr_result.max) > 0))
8896 goto update_range;
8898 /* No match found. Set the LHS to VARYING. */
8899 set_value_range_to_varying (lhs_vr);
8900 return SSA_PROP_VARYING;
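/* Illustrative sketch, not GCC code: the widening heuristic described
   above, on plain ints.  Once a bound keeps changing between iterations,
   either freeze it (it tried to move "inwards") or jump it close to the
   type bound (it is heading "outwards" anyway), so the propagation
   converges after a couple of visits instead of walking the bound one
   step per iteration.  Hypothetical helper.  */
#include <limits.h>

static int
toy_widen_min (int old_min, int new_min)
{
  if (new_min > old_min)
    return old_min;              /* do not let the minimum bounce upwards */
  if (new_min < old_min && new_min != INT_MIN)
    return INT_MIN + 1;          /* already dropping: jump near -INF; one
                                    more iteration can still detect that
                                    the true minimum is -INF itself */
  return new_min;                /* unchanged, or already at -INF */
}

/* The maximum is treated symmetrically (freeze, or jump to INT_MAX - 1).  */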
8903 /* Simplify boolean operations if the source is known
8904 to be already a boolean. */
8905 static bool
8906 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
8908 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8909 tree lhs, op0, op1;
8910 bool need_conversion;
8912 /* We handle only !=/== case here. */
8913 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
8915 op0 = gimple_assign_rhs1 (stmt);
8916 if (!op_with_boolean_value_range_p (op0))
8917 return false;
8919 op1 = gimple_assign_rhs2 (stmt);
8920 if (!op_with_boolean_value_range_p (op1))
8921 return false;
8923 /* Reduce number of cases to handle to NE_EXPR. As there is no
8924 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
8925 if (rhs_code == EQ_EXPR)
8927 if (TREE_CODE (op1) == INTEGER_CST)
8928 op1 = int_const_binop (BIT_XOR_EXPR, op1,
8929 build_int_cst (TREE_TYPE (op1), 1));
8930 else
8931 return false;
8934 lhs = gimple_assign_lhs (stmt);
8935 need_conversion
8936 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
8938 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
8939 if (need_conversion
8940 && !TYPE_UNSIGNED (TREE_TYPE (op0))
8941 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
8942 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
8943 return false;
8945 /* For A != 0 we can substitute A itself. */
8946 if (integer_zerop (op1))
8947 gimple_assign_set_rhs_with_ops (gsi,
8948 need_conversion
8949 ? NOP_EXPR : TREE_CODE (op0), op0);
8950 /* For A != B we substitute A ^ B. Either with conversion. */
8951 else if (need_conversion)
8953 tree tem = make_ssa_name (TREE_TYPE (op0));
8954 gassign *newop
8955 = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
8956 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
8957 if (INTEGRAL_TYPE_P (TREE_TYPE (tem))
8958 && TYPE_PRECISION (TREE_TYPE (tem)) > 1)
8959 set_range_info (tem, VR_RANGE,
8960 wi::zero (TYPE_PRECISION (TREE_TYPE (tem))),
8961 wi::one (TYPE_PRECISION (TREE_TYPE (tem))));
8962 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
8964 /* Or without. */
8965 else
8966 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
8967 update_stmt (gsi_stmt (*gsi));
8969 return true;
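/* Illustrative sketch, not GCC code: why the rewrites above are valid once
   both operands are known to be 0 or 1.  For such values A != B is exactly
   A ^ B, and A == B is exactly A ^ (B ^ 1), which is what the EQ_EXPR path
   builds by first flipping the constant operand.  A brute-force check
   (hypothetical helper):  */
static int
toy_check_bool_rewrites (void)
{
  int a, b;

  for (a = 0; a <= 1; ++a)
    for (b = 0; b <= 1; ++b)
      {
        if ((a != b) != (a ^ b))
          return 0;
        if ((a == b) != (a ^ (b ^ 1)))
          return 0;
      }
  return 1;                      /* both identities hold for all four cases */
}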
8972 /* Simplify a division or modulo operator to a right shift or
8973 bitwise and if the first operand is unsigned or is greater
8974 than zero and the second operand is an exact power of two.
8975 For TRUNC_MOD_EXPR op0 % op1 with constant op1, optimize it
8976 into just op0 if op0's range is known to be a subset of
8977 [-op1 + 1, op1 - 1] for signed and [0, op1 - 1] for unsigned
8978 modulo. */
8980 static bool
8981 simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
8983 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8984 tree val = NULL;
8985 tree op0 = gimple_assign_rhs1 (stmt);
8986 tree op1 = gimple_assign_rhs2 (stmt);
8987 value_range *vr = get_value_range (op0);
8989 if (rhs_code == TRUNC_MOD_EXPR
8990 && TREE_CODE (op1) == INTEGER_CST
8991 && tree_int_cst_sgn (op1) == 1
8992 && range_int_cst_p (vr)
8993 && tree_int_cst_lt (vr->max, op1))
8995 if (TYPE_UNSIGNED (TREE_TYPE (op0))
8996 || tree_int_cst_sgn (vr->min) >= 0
8997 || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1), op1),
8998 vr->min))
9000 /* If op0 already has the range op0 % op1 has,
9001 then TRUNC_MOD_EXPR won't change anything. */
9002 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
9003 gimple_assign_set_rhs_from_tree (&gsi, op0);
9004 update_stmt (stmt);
9005 return true;
9009 if (!integer_pow2p (op1))
9011 /* X % -Y can be only optimized into X % Y either if
9012 X is not INT_MIN, or Y is not -1. Fold it now, as after
9013 remove_range_assertions the range info might be not available
9014 anymore. */
9015 if (rhs_code == TRUNC_MOD_EXPR
9016 && fold_stmt (gsi, follow_single_use_edges))
9017 return true;
9018 return false;
9021 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
9022 val = integer_one_node;
9023 else
9025 bool sop = false;
9027 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
9029 if (val
9030 && sop
9031 && integer_onep (val)
9032 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9034 location_t location;
9036 if (!gimple_has_location (stmt))
9037 location = input_location;
9038 else
9039 location = gimple_location (stmt);
9040 warning_at (location, OPT_Wstrict_overflow,
9041 "assuming signed overflow does not occur when "
9042 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
9046 if (val && integer_onep (val))
9048 tree t;
9050 if (rhs_code == TRUNC_DIV_EXPR)
9052 t = build_int_cst (integer_type_node, tree_log2 (op1));
9053 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
9054 gimple_assign_set_rhs1 (stmt, op0);
9055 gimple_assign_set_rhs2 (stmt, t);
9057 else
9059 t = build_int_cst (TREE_TYPE (op1), 1);
9060 t = int_const_binop (MINUS_EXPR, op1, t);
9061 t = fold_convert (TREE_TYPE (op0), t);
9063 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
9064 gimple_assign_set_rhs1 (stmt, op0);
9065 gimple_assign_set_rhs2 (stmt, t);
9068 update_stmt (stmt);
9069 return true;
9072 return false;
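/* Illustrative sketch, not GCC code: the two identities the rewrite above
   relies on.  For X >= 0 and OP1 == 2**K, X / OP1 equals X >> K and
   X % OP1 equals X & (OP1 - 1); both can fail for negative X under C's
   truncating division, which is why the code first proves the operand
   unsigned or non-negative from its range.  Hypothetical helper; assumes
   0 <= K < 62.  */
static int
toy_pow2_identities_hold (long long x, unsigned k)
{
  long long op1 = 1LL << k;

  if (x < 0)
    return 1;                    /* nothing is claimed for negative X */
  return (x / op1 == x >> k) && (x % op1 == (x & (op1 - 1)));
}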
9075 /* Simplify a min or max if the ranges of the two operands are
9076 disjoint. Return true if we do simplify. */
9078 static bool
9079 simplify_min_or_max_using_ranges (gimple *stmt)
9081 tree op0 = gimple_assign_rhs1 (stmt);
9082 tree op1 = gimple_assign_rhs2 (stmt);
9083 bool sop = false;
9084 tree val;
9086 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9087 (LE_EXPR, op0, op1, &sop));
9088 if (!val)
9090 sop = false;
9091 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9092 (LT_EXPR, op0, op1, &sop));
9095 if (val)
9097 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9099 location_t location;
9101 if (!gimple_has_location (stmt))
9102 location = input_location;
9103 else
9104 location = gimple_location (stmt);
9105 warning_at (location, OPT_Wstrict_overflow,
9106 "assuming signed overflow does not occur when "
9107 "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
9110 /* VAL == TRUE -> OP0 < or <= op1
9111 VAL == FALSE -> OP0 > or >= op1. */
9112 tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
9113 == integer_zerop (val)) ? op0 : op1;
9114 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
9115 gimple_assign_set_rhs_from_tree (&gsi, res);
9116 update_stmt (stmt);
9117 return true;
9120 return false;
9123 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
9124 ABS_EXPR. If the operand is <= 0, then simplify the
9125 ABS_EXPR into a NEGATE_EXPR. */
9127 static bool
9128 simplify_abs_using_ranges (gimple *stmt)
9130 tree op = gimple_assign_rhs1 (stmt);
9131 value_range *vr = get_value_range (op);
9133 if (vr)
9135 tree val = NULL;
9136 bool sop = false;
9138 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
9139 if (!val)
9141 /* The range is neither <= 0 nor > 0. Now see if it is
9142 either < 0 or >= 0. */
9143 sop = false;
9144 val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
9145 &sop);
9148 if (val)
9150 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9152 location_t location;
9154 if (!gimple_has_location (stmt))
9155 location = input_location;
9156 else
9157 location = gimple_location (stmt);
9158 warning_at (location, OPT_Wstrict_overflow,
9159 "assuming signed overflow does not occur when "
9160 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
9163 gimple_assign_set_rhs1 (stmt, op);
9164 if (integer_zerop (val))
9165 gimple_assign_set_rhs_code (stmt, SSA_NAME);
9166 else
9167 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
9168 update_stmt (stmt);
9169 return true;
9173 return false;
9176 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
9177 If all the bits that are being cleared by & are already
9178 known to be zero from VR, or all the bits that are being
9179 set by | are already known to be one from VR, the bit
9180 operation is redundant. */
9182 static bool
9183 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9185 tree op0 = gimple_assign_rhs1 (stmt);
9186 tree op1 = gimple_assign_rhs2 (stmt);
9187 tree op = NULL_TREE;
9188 value_range vr0 = VR_INITIALIZER;
9189 value_range vr1 = VR_INITIALIZER;
9190 wide_int may_be_nonzero0, may_be_nonzero1;
9191 wide_int must_be_nonzero0, must_be_nonzero1;
9192 wide_int mask;
9194 if (TREE_CODE (op0) == SSA_NAME)
9195 vr0 = *(get_value_range (op0));
9196 else if (is_gimple_min_invariant (op0))
9197 set_value_range_to_value (&vr0, op0, NULL);
9198 else
9199 return false;
9201 if (TREE_CODE (op1) == SSA_NAME)
9202 vr1 = *(get_value_range (op1));
9203 else if (is_gimple_min_invariant (op1))
9204 set_value_range_to_value (&vr1, op1, NULL);
9205 else
9206 return false;
9208 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
9209 &must_be_nonzero0))
9210 return false;
9211 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
9212 &must_be_nonzero1))
9213 return false;
9215 switch (gimple_assign_rhs_code (stmt))
9217 case BIT_AND_EXPR:
9218 mask = may_be_nonzero0.and_not (must_be_nonzero1);
9219 if (mask == 0)
9221 op = op0;
9222 break;
9224 mask = may_be_nonzero1.and_not (must_be_nonzero0);
9225 if (mask == 0)
9227 op = op1;
9228 break;
9230 break;
9231 case BIT_IOR_EXPR:
9232 mask = may_be_nonzero0.and_not (must_be_nonzero1);
9233 if (mask == 0)
9235 op = op1;
9236 break;
9238 mask = may_be_nonzero1.and_not (must_be_nonzero0);
9239 if (mask == 0)
9241 op = op0;
9242 break;
9244 break;
9245 default:
9246 gcc_unreachable ();
9249 if (op == NULL_TREE)
9250 return false;
9252 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
9253 update_stmt (gsi_stmt (*gsi));
9254 return true;
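/* Illustrative sketch, not GCC code: the mask test above on plain machine
   words.  MAY0 is the set of bits that may be set in the first operand and
   MUST1 the set of bits that must be set in the second (both derivable
   from their value ranges).  If no bit can be set in op0 while possibly
   being clear in op1, the AND cannot clear anything and the OR cannot add
   anything, so op0 & op1 == op0 and op0 | op1 == op1.  Hypothetical
   helper.  */
static int
toy_bit_op_redundant_p (unsigned may0, unsigned must1)
{
  return (may0 & ~must1) == 0;   /* ok to fold the AND to op0, the OR to op1 */
}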
9257 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
9258 a known value range VR.
9260 If there is one and only one value which will satisfy the
9261 conditional, then return that value. Else return NULL.
9263 If signed overflow must be undefined for the value to satisfy
9264 the conditional, then set *STRICT_OVERFLOW_P to true. */
9266 static tree
9267 test_for_singularity (enum tree_code cond_code, tree op0,
9268 tree op1, value_range *vr,
9269 bool *strict_overflow_p)
9271 tree min = NULL;
9272 tree max = NULL;
9274 /* Extract minimum/maximum values which satisfy the conditional as it was
9275 written. */
9276 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
9278 /* This should not be negative infinity; there is no overflow
9279 here. */
9280 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
9282 max = op1;
9283 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
9285 tree one = build_int_cst (TREE_TYPE (op0), 1);
9286 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
9287 if (EXPR_P (max))
9288 TREE_NO_WARNING (max) = 1;
9291 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
9293 /* This should not be positive infinity; there is no overflow
9294 here. */
9295 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
9297 min = op1;
9298 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
9300 tree one = build_int_cst (TREE_TYPE (op0), 1);
9301 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
9302 if (EXPR_P (min))
9303 TREE_NO_WARNING (min) = 1;
9307 /* Now refine the minimum and maximum values using any
9308 value range information we have for op0. */
9309 if (min && max)
9311 if (compare_values (vr->min, min) == 1)
9312 min = vr->min;
9313 if (compare_values (vr->max, max) == -1)
9314 max = vr->max;
9316 /* If the new min/max values have converged to a single value,
9317 then there is only one value which can satisfy the condition,
9318 return that value. */
9319 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
9321 if ((cond_code == LE_EXPR || cond_code == LT_EXPR)
9322 && is_overflow_infinity (vr->max))
9323 *strict_overflow_p = true;
9324 if ((cond_code == GE_EXPR || cond_code == GT_EXPR)
9325 && is_overflow_infinity (vr->min))
9326 *strict_overflow_p = true;
9328 return min;
9331 return NULL;
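/* Illustrative sketch, not GCC code: the singularity test above on plain
   ints, restricted to the strict forms X < C and X > C (the real code also
   handles <= and >= and overflow infinities).  Clamp the values allowed by
   the comparison against X's known range; if exactly one value survives,
   the relational test is really an equality test.  E.g. with X in [0, 5],
   "X > 4" can only hold for X == 5.  Hypothetical helper.  */
#include <limits.h>

static int
toy_singularity (int vr_min, int vr_max, int c, int is_less_than, int *only)
{
  int lo, hi;

  if (is_less_than)              /* X < C  <=>  X in [INT_MIN, C - 1] */
    {
      if (c == INT_MIN)
        return 0;                /* nothing satisfies X < INT_MIN */
      lo = vr_min;
      hi = c - 1 < vr_max ? c - 1 : vr_max;
    }
  else                           /* X > C  <=>  X in [C + 1, INT_MAX] */
    {
      if (c == INT_MAX)
        return 0;
      lo = c + 1 > vr_min ? c + 1 : vr_min;
      hi = vr_max;
    }

  if (lo == hi)
    {
      *only = lo;                /* exactly one candidate value remains */
      return 1;
    }
  return 0;                      /* zero or several candidates remain */
}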
9334 /* Return whether the value range *VR fits in an integer type specified
9335 by DEST_PRECISION and DEST_SGN. */
9337 static bool
9338 range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
9340 tree src_type;
9341 unsigned src_precision;
9342 widest_int tem;
9343 signop src_sgn;
9345 /* We can only handle integral and pointer types. */
9346 src_type = TREE_TYPE (vr->min);
9347 if (!INTEGRAL_TYPE_P (src_type)
9348 && !POINTER_TYPE_P (src_type))
9349 return false;
9351 /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
9352 and so is an identity transform. */
9353 src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
9354 src_sgn = TYPE_SIGN (src_type);
9355 if ((src_precision < dest_precision
9356 && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
9357 || (src_precision == dest_precision && src_sgn == dest_sgn))
9358 return true;
9360 /* Now we can only handle ranges with constant bounds. */
9361 if (vr->type != VR_RANGE
9362 || TREE_CODE (vr->min) != INTEGER_CST
9363 || TREE_CODE (vr->max) != INTEGER_CST)
9364 return false;
9366 /* For sign changes, the MSB of the wide_int has to be clear.
9367 An unsigned value with its MSB set cannot be represented by
9368 a signed wide_int, while a negative value cannot be represented
9369 by an unsigned wide_int. */
9370 if (src_sgn != dest_sgn
9371 && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
9372 return false;
9374 /* Then we can perform the conversion on both ends and compare
9375 the result for equality. */
9376 tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
9377 if (tem != wi::to_widest (vr->min))
9378 return false;
9379 tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
9380 if (tem != wi::to_widest (vr->max))
9381 return false;
9383 return true;
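/* Illustrative sketch, not GCC code: the round-trip test above, done on a
   64-bit working value (two's complement assumed).  A constant range fits
   a destination of DEST_PREC bits with the given signedness iff truncating
   and re-extending each bound gives the bound back; e.g. [0, 200] does not
   fit a signed 8-bit type because 200 sign-extends back to -56.
   Hypothetical helper; assumes 1 <= DEST_PREC <= 62.  */
static int
toy_range_fits_p (long long lo, long long hi,
                  unsigned dest_prec, int dest_signed)
{
  long long mask = (1LL << dest_prec) - 1;
  int which;

  for (which = 0; which < 2; ++which)
    {
      long long v = which ? hi : lo;
      long long t = v & mask;                    /* truncate */

      if (dest_signed && (t & (1LL << (dest_prec - 1))))
        t -= 1LL << dest_prec;                   /* sign-extend back */
      if (t != v)
        return 0;                                /* bound does not survive */
    }
  return 1;
}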
9386 /* Simplify a conditional using a relational operator to an equality
9387 test if the range information indicates only one value can satisfy
9388 the original conditional. */
9390 static bool
9391 simplify_cond_using_ranges (gcond *stmt)
9393 tree op0 = gimple_cond_lhs (stmt);
9394 tree op1 = gimple_cond_rhs (stmt);
9395 enum tree_code cond_code = gimple_cond_code (stmt);
9397 if (cond_code != NE_EXPR
9398 && cond_code != EQ_EXPR
9399 && TREE_CODE (op0) == SSA_NAME
9400 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
9401 && is_gimple_min_invariant (op1))
9403 value_range *vr = get_value_range (op0);
9405 /* If we have range information for OP0, then we might be
9406 able to simplify this conditional. */
9407 if (vr->type == VR_RANGE)
9409 enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
9410 bool sop = false;
9411 tree new_tree = test_for_singularity (cond_code, op0, op1, vr, &sop);
9413 if (new_tree
9414 && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
9416 if (dump_file)
9418 fprintf (dump_file, "Simplified relational ");
9419 print_gimple_stmt (dump_file, stmt, 0, 0);
9420 fprintf (dump_file, " into ");
9423 gimple_cond_set_code (stmt, EQ_EXPR);
9424 gimple_cond_set_lhs (stmt, op0);
9425 gimple_cond_set_rhs (stmt, new_tree);
9427 update_stmt (stmt);
9429 if (dump_file)
9431 print_gimple_stmt (dump_file, stmt, 0, 0);
9432 fprintf (dump_file, "\n");
9435 if (sop && issue_strict_overflow_warning (wc))
9437 location_t location = input_location;
9438 if (gimple_has_location (stmt))
9439 location = gimple_location (stmt);
9441 warning_at (location, OPT_Wstrict_overflow,
9442 "assuming signed overflow does not occur when "
9443 "simplifying conditional");
9446 return true;
9449 /* Try again after inverting the condition. We only deal
9450 with integral types here, so no need to worry about
9451 issues with inverting FP comparisons. */
9452 sop = false;
9453 new_tree = test_for_singularity
9454 (invert_tree_comparison (cond_code, false),
9455 op0, op1, vr, &sop);
9457 if (new_tree
9458 && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
9460 if (dump_file)
9462 fprintf (dump_file, "Simplified relational ");
9463 print_gimple_stmt (dump_file, stmt, 0, 0);
9464 fprintf (dump_file, " into ");
9467 gimple_cond_set_code (stmt, NE_EXPR);
9468 gimple_cond_set_lhs (stmt, op0);
9469 gimple_cond_set_rhs (stmt, new_tree);
9471 update_stmt (stmt);
9473 if (dump_file)
9475 print_gimple_stmt (dump_file, stmt, 0, 0);
9476 fprintf (dump_file, "\n");
9479 if (sop && issue_strict_overflow_warning (wc))
9481 location_t location = input_location;
9482 if (gimple_has_location (stmt))
9483 location = gimple_location (stmt);
9485 warning_at (location, OPT_Wstrict_overflow,
9486 "assuming signed overflow does not occur when "
9487 "simplifying conditional");
9490 return true;
9495 /* If we have a comparison of an SSA_NAME (OP0) against a constant,
9496 see if OP0 was set by a type conversion where the source of
9497 the conversion is another SSA_NAME with a range that fits
9498 into the range of OP0's type.
9500 If so, the conversion is redundant as the earlier SSA_NAME can be
9501 used for the comparison directly if we just massage the constant in the
9502 comparison. */
9503 if (TREE_CODE (op0) == SSA_NAME
9504 && TREE_CODE (op1) == INTEGER_CST)
9506 gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
9507 tree innerop;
9509 if (!is_gimple_assign (def_stmt)
9510 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
9511 return false;
9513 innerop = gimple_assign_rhs1 (def_stmt);
9515 if (TREE_CODE (innerop) == SSA_NAME
9516 && !POINTER_TYPE_P (TREE_TYPE (innerop))
9517 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
9518 && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
9520 value_range *vr = get_value_range (innerop);
9522 if (range_int_cst_p (vr)
9523 && range_fits_type_p (vr,
9524 TYPE_PRECISION (TREE_TYPE (op0)),
9525 TYPE_SIGN (TREE_TYPE (op0)))
9526 && int_fits_type_p (op1, TREE_TYPE (innerop))
9527 /* The range must not have overflowed, or if it did overflow
9528 we must not be wrapping/trapping overflow and optimizing
9529 with strict overflow semantics. */
9530 && ((!is_negative_overflow_infinity (vr->min)
9531 && !is_positive_overflow_infinity (vr->max))
9532 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
9534 /* If the range overflowed and the user has asked for warnings
9535 when strict overflow semantics were used to optimize code,
9536 issue an appropriate warning. */
9537 if (cond_code != EQ_EXPR && cond_code != NE_EXPR
9538 && (is_negative_overflow_infinity (vr->min)
9539 || is_positive_overflow_infinity (vr->max))
9540 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
9542 location_t location;
9544 if (!gimple_has_location (stmt))
9545 location = input_location;
9546 else
9547 location = gimple_location (stmt);
9548 warning_at (location, OPT_Wstrict_overflow,
9549 "assuming signed overflow does not occur when "
9550 "simplifying conditional");
9553 tree newconst = fold_convert (TREE_TYPE (innerop), op1);
9554 gimple_cond_set_lhs (stmt, innerop);
9555 gimple_cond_set_rhs (stmt, newconst);
9556 return true;
9561 return false;
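/* Illustrative sketch, not GCC code: the conversion-dropping rewrite above
   on concrete types.  With C of type signed char, X = (int) C, and a
   constant K that fits in signed char, "X < K" and "C < (signed char) K"
   agree for every value of C, so the comparison can be done on C directly
   and the widening conversion becomes dead.  Hypothetical helper.  */
static int
toy_compare_through_conversion (signed char c, int k)
{
  int x = (int) c;                          /* the conversion feeding the
                                               original comparison */
  /* Assumes K fits in signed char, mirroring the int_fits_type_p and
     range_fits_type_p checks above; always returns 1 under that
     assumption.  */
  return (x < k) == (c < (signed char) k);
}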
9564 /* Simplify a switch statement using the value range of the switch
9565 argument. */
9567 static bool
9568 simplify_switch_using_ranges (gswitch *stmt)
9570 tree op = gimple_switch_index (stmt);
9571 value_range *vr;
9572 bool take_default;
9573 edge e;
9574 edge_iterator ei;
9575 size_t i = 0, j = 0, n, n2;
9576 tree vec2;
9577 switch_update su;
9578 size_t k = 1, l = 0;
9580 if (TREE_CODE (op) == SSA_NAME)
9582 vr = get_value_range (op);
9584 /* We can only handle integer ranges. */
9585 if ((vr->type != VR_RANGE
9586 && vr->type != VR_ANTI_RANGE)
9587 || symbolic_range_p (vr))
9588 return false;
9590 /* Find case label for min/max of the value range. */
9591 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
9593 else if (TREE_CODE (op) == INTEGER_CST)
9595 take_default = !find_case_label_index (stmt, 1, op, &i);
9596 if (take_default)
9598 i = 1;
9599 j = 0;
9601 else
9603 j = i;
9606 else
9607 return false;
9609 n = gimple_switch_num_labels (stmt);
9611 /* Bail out if this is just all edges taken. */
9612 if (i == 1
9613 && j == n - 1
9614 && take_default)
9615 return false;
9617 /* Build a new vector of taken case labels. */
9618 vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
9619 n2 = 0;
9621 /* Add the default edge, if necessary. */
9622 if (take_default)
9623 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
9625 for (; i <= j; ++i, ++n2)
9626 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
9628 for (; k <= l; ++k, ++n2)
9629 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
9631 /* Mark needed edges. */
9632 for (i = 0; i < n2; ++i)
9634 e = find_edge (gimple_bb (stmt),
9635 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
9636 e->aux = (void *)-1;
9639 /* Queue not needed edges for later removal. */
9640 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
9642 if (e->aux == (void *)-1)
9644 e->aux = NULL;
9645 continue;
9648 if (dump_file && (dump_flags & TDF_DETAILS))
9650 fprintf (dump_file, "removing unreachable case label\n");
9652 to_remove_edges.safe_push (e);
9653 e->flags &= ~EDGE_EXECUTABLE;
9656 /* And queue an update for the stmt. */
9657 su.stmt = stmt;
9658 su.vec = vec2;
9659 to_update_switch_stmts.safe_push (su);
9660 return false;
9663 /* Simplify an integral conversion from an SSA name in STMT. */
9665 static bool
9666 simplify_conversion_using_ranges (gimple *stmt)
9668 tree innerop, middleop, finaltype;
9669 gimple *def_stmt;
9670 signop inner_sgn, middle_sgn, final_sgn;
9671 unsigned inner_prec, middle_prec, final_prec;
9672 widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
9674 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
9675 if (!INTEGRAL_TYPE_P (finaltype))
9676 return false;
9677 middleop = gimple_assign_rhs1 (stmt);
9678 def_stmt = SSA_NAME_DEF_STMT (middleop);
9679 if (!is_gimple_assign (def_stmt)
9680 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
9681 return false;
9682 innerop = gimple_assign_rhs1 (def_stmt);
9683 if (TREE_CODE (innerop) != SSA_NAME
9684 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
9685 return false;
9687 /* Get the value-range of the inner operand. Use get_range_info in
9688 case innerop was created during substitute-and-fold. */
9689 wide_int imin, imax;
9690 if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))
9691 || get_range_info (innerop, &imin, &imax) != VR_RANGE)
9692 return false;
9693 innermin = widest_int::from (imin, TYPE_SIGN (TREE_TYPE (innerop)));
9694 innermax = widest_int::from (imax, TYPE_SIGN (TREE_TYPE (innerop)));
9696 /* Simulate the conversion chain to check whether the result stays
9697 the same when the middle conversion is removed. */
9698 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
9699 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
9700 final_prec = TYPE_PRECISION (finaltype);
9702 /* If the first conversion is not injective, the second must not
9703 be widening. */
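/* (A non-injective first conversion collapses distinct inner values;
   that is only tolerated when the second conversion is not wider than
   the middle type, so the collapsed bits are discarded anyway.)  */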
9704 if (wi::gtu_p (innermax - innermin,
9705 wi::mask <widest_int> (middle_prec, false))
9706 && middle_prec < final_prec)
9707 return false;
9708 /* We also want a medium value so that we can track the effect that
9709 narrowing conversions with sign change have. */
9710 inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
9711 if (inner_sgn == UNSIGNED)
9712 innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
9713 else
9714 innermed = 0;
9715 if (wi::cmp (innermin, innermed, inner_sgn) >= 0
9716 || wi::cmp (innermed, innermax, inner_sgn) >= 0)
9717 innermed = innermin;
9719 middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
9720 middlemin = wi::ext (innermin, middle_prec, middle_sgn);
9721 middlemed = wi::ext (innermed, middle_prec, middle_sgn);
9722 middlemax = wi::ext (innermax, middle_prec, middle_sgn);
9724 /* Require that the final conversion applied to both the original
9725 and the intermediate range produces the same result. */
9726 final_sgn = TYPE_SIGN (finaltype);
9727 if (wi::ext (middlemin, final_prec, final_sgn)
9728 != wi::ext (innermin, final_prec, final_sgn)
9729 || wi::ext (middlemed, final_prec, final_sgn)
9730 != wi::ext (innermed, final_prec, final_sgn)
9731 || wi::ext (middlemax, final_prec, final_sgn)
9732 != wi::ext (innermax, final_prec, final_sgn))
9733 return false;
9735 gimple_assign_set_rhs1 (stmt, innerop);
9736 update_stmt (stmt);
9737 return true;
9740 /* Simplify a conversion from integral SSA name to float in STMT. */
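/* For example (types and range are illustrative), an unsigned int
   operand known to be in [0, 1000] can be converted to double through
   a signed temporary: a NOP_EXPR to a signed integer type is inserted
   before the FLOAT_EXPR, letting the target use a signed int-to-float
   instruction even when no unsigned variant exists.  */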
9742 static bool
9743 simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
9744 gimple *stmt)
9746 tree rhs1 = gimple_assign_rhs1 (stmt);
9747 value_range *vr = get_value_range (rhs1);
9748 machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
9749 machine_mode mode;
9750 tree tem;
9751 gassign *conv;
9753 /* We can only handle constant ranges. */
9754 if (vr->type != VR_RANGE
9755 || TREE_CODE (vr->min) != INTEGER_CST
9756 || TREE_CODE (vr->max) != INTEGER_CST)
9757 return false;
9759 /* First check if we can use a signed type in place of an unsigned. */
9760 if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
9761 && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
9762 != CODE_FOR_nothing)
9763 && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
9764 mode = TYPE_MODE (TREE_TYPE (rhs1));
9765 /* If we can do the conversion in the current input mode, do nothing. */
9766 else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
9767 TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
9768 return false;
9769 /* Otherwise search for a mode we can use, starting from the narrowest
9770 integer mode available. */
9771 else
9773 mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
9776 /* If we cannot do a signed conversion to float from mode
9777 or if the value-range does not fit in the signed type,
9778 try with a wider mode. */
9779 if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
9780 && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
9781 break;
9783 mode = GET_MODE_WIDER_MODE (mode);
9784 /* But do not widen the input. Instead leave that to the
9785 optabs expansion code. */
9786 if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
9787 return false;
9789 while (mode != VOIDmode);
9790 if (mode == VOIDmode)
9791 return false;
9794 /* It works; insert a truncation or sign-change before the
9795 float conversion. */
9796 tem = make_ssa_name (build_nonstandard_integer_type
9797 (GET_MODE_PRECISION (mode), 0));
9798 conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
9799 gsi_insert_before (gsi, conv, GSI_SAME_STMT);
9800 gimple_assign_set_rhs1 (stmt, tem);
9801 update_stmt (stmt);
9803 return true;
9806 /* Simplify an internal fn call using ranges if possible. */
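/* Two cases are handled, both using the operand ranges to settle the
   overflow question:

   - IFN_UBSAN_CHECK_{ADD,SUB,MUL} calls that provably cannot overflow
     are replaced by the corresponding plain arithmetic;

   - IFN_{ADD,SUB,MUL}_OVERFLOW calls whose overflow status can be
     determined are replaced by the arithmetic (carried out in an
     unsigned type when needed) and a COMPLEX_EXPR that packs the
     result with a constant overflow flag.  */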
9808 static bool
9809 simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9811 enum tree_code subcode;
9812 bool is_ubsan = false;
9813 bool ovf = false;
9814 switch (gimple_call_internal_fn (stmt))
9816 case IFN_UBSAN_CHECK_ADD:
9817 subcode = PLUS_EXPR;
9818 is_ubsan = true;
9819 break;
9820 case IFN_UBSAN_CHECK_SUB:
9821 subcode = MINUS_EXPR;
9822 is_ubsan = true;
9823 break;
9824 case IFN_UBSAN_CHECK_MUL:
9825 subcode = MULT_EXPR;
9826 is_ubsan = true;
9827 break;
9828 case IFN_ADD_OVERFLOW:
9829 subcode = PLUS_EXPR;
9830 break;
9831 case IFN_SUB_OVERFLOW:
9832 subcode = MINUS_EXPR;
9833 break;
9834 case IFN_MUL_OVERFLOW:
9835 subcode = MULT_EXPR;
9836 break;
9837 default:
9838 return false;
9841 tree op0 = gimple_call_arg (stmt, 0);
9842 tree op1 = gimple_call_arg (stmt, 1);
9843 tree type;
9844 if (is_ubsan)
9845 type = TREE_TYPE (op0);
9846 else if (gimple_call_lhs (stmt) == NULL_TREE)
9847 return false;
9848 else
9849 type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
9850 if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
9851 || (is_ubsan && ovf))
9852 return false;
9854 gimple *g;
9855 location_t loc = gimple_location (stmt);
9856 if (is_ubsan)
9857 g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
9858 else
9860 int prec = TYPE_PRECISION (type);
9861 tree utype = type;
9862 if (ovf
9863 || !useless_type_conversion_p (type, TREE_TYPE (op0))
9864 || !useless_type_conversion_p (type, TREE_TYPE (op1)))
9865 utype = build_nonstandard_integer_type (prec, 1);
9866 if (TREE_CODE (op0) == INTEGER_CST)
9867 op0 = fold_convert (utype, op0);
9868 else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
9870 g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
9871 gimple_set_location (g, loc);
9872 gsi_insert_before (gsi, g, GSI_SAME_STMT);
9873 op0 = gimple_assign_lhs (g);
9875 if (TREE_CODE (op1) == INTEGER_CST)
9876 op1 = fold_convert (utype, op1);
9877 else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
9879 g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
9880 gimple_set_location (g, loc);
9881 gsi_insert_before (gsi, g, GSI_SAME_STMT);
9882 op1 = gimple_assign_lhs (g);
9884 g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
9885 gimple_set_location (g, loc);
9886 gsi_insert_before (gsi, g, GSI_SAME_STMT);
9887 if (utype != type)
9889 g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
9890 gimple_assign_lhs (g));
9891 gimple_set_location (g, loc);
9892 gsi_insert_before (gsi, g, GSI_SAME_STMT);
9894 g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
9895 gimple_assign_lhs (g),
9896 build_int_cst (type, ovf));
9898 gimple_set_location (g, loc);
9899 gsi_replace (gsi, g, false);
9900 return true;
9903 /* Simplify STMT using ranges if possible. */
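/* This is the simplification driver used from substitute_and_fold:
   assignments are dispatched on their rhs code to the helpers above,
   GIMPLE_COND and GIMPLE_SWITCH statements have dedicated simplifiers,
   and internal function calls are handled separately.  */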
9905 static bool
9906 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
9908 gimple *stmt = gsi_stmt (*gsi);
9909 if (is_gimple_assign (stmt))
9911 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9912 tree rhs1 = gimple_assign_rhs1 (stmt);
9914 switch (rhs_code)
9916 case EQ_EXPR:
9917 case NE_EXPR:
9918 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
9919 if the RHS is zero or one and the LHS is known to take only
9920 boolean values. */
9921 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9922 return simplify_truth_ops_using_ranges (gsi, stmt);
9923 break;
9925 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
9926 and BIT_AND_EXPR respectively if the first operand is greater
9927 than zero and the second operand is an exact power of two.
9928 Also optimize TRUNC_MOD_EXPR away if the second operand is
9929 constant and the first operand already has the right value
9930 range. */
9931 case TRUNC_DIV_EXPR:
9932 case TRUNC_MOD_EXPR:
9933 if (TREE_CODE (rhs1) == SSA_NAME
9934 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9935 return simplify_div_or_mod_using_ranges (gsi, stmt);
9936 break;
9938 /* Transform ABS (X) into X or -X as appropriate. */
9939 case ABS_EXPR:
9940 if (TREE_CODE (rhs1) == SSA_NAME
9941 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9942 return simplify_abs_using_ranges (stmt);
9943 break;
9945 case BIT_AND_EXPR:
9946 case BIT_IOR_EXPR:
9947 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
9948 if all the bits being cleared are already cleared or
9949 all the bits being set are already set. */
9950 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9951 return simplify_bit_ops_using_ranges (gsi, stmt);
9952 break;
9954 CASE_CONVERT:
9955 if (TREE_CODE (rhs1) == SSA_NAME
9956 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9957 return simplify_conversion_using_ranges (stmt);
9958 break;
9960 case FLOAT_EXPR:
9961 if (TREE_CODE (rhs1) == SSA_NAME
9962 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9963 return simplify_float_conversion_using_ranges (gsi, stmt);
9964 break;
9966 case MIN_EXPR:
9967 case MAX_EXPR:
9968 return simplify_min_or_max_using_ranges (stmt);
9969 break;
9971 default:
9972 break;
9975 else if (gimple_code (stmt) == GIMPLE_COND)
9976 return simplify_cond_using_ranges (as_a <gcond *> (stmt));
9977 else if (gimple_code (stmt) == GIMPLE_SWITCH)
9978 return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
9979 else if (is_gimple_call (stmt)
9980 && gimple_call_internal_p (stmt))
9981 return simplify_internal_call_using_ranges (gsi, stmt);
9983 return false;
9986 /* If the statement pointed by SI has a predicate whose value can be
9987 computed using the value range information computed by VRP, compute
9988 its value and return true. Otherwise, return false. */
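/* For instance (names and range are illustrative), if i_2 is known to
   be in [0, 9], the predicate of

     if (i_2 > 20)

   evaluates to false and the GIMPLE_COND is folded accordingly; for a
   comparison on the RHS of an assignment the computed value is first
   converted to the type of the LHS.  */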
9990 static bool
9991 fold_predicate_in (gimple_stmt_iterator *si)
9993 bool assignment_p = false;
9994 tree val;
9995 gimple *stmt = gsi_stmt (*si);
9997 if (is_gimple_assign (stmt)
9998 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
10000 assignment_p = true;
10001 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
10002 gimple_assign_rhs1 (stmt),
10003 gimple_assign_rhs2 (stmt),
10004 stmt);
10006 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
10007 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
10008 gimple_cond_lhs (cond_stmt),
10009 gimple_cond_rhs (cond_stmt),
10010 stmt);
10011 else
10012 return false;
10014 if (val)
10016 if (assignment_p)
10017 val = fold_convert (gimple_expr_type (stmt), val);
10019 if (dump_file)
10021 fprintf (dump_file, "Folding predicate ");
10022 print_gimple_expr (dump_file, stmt, 0, 0);
10023 fprintf (dump_file, " to ");
10024 print_generic_expr (dump_file, val, 0);
10025 fprintf (dump_file, "\n");
10028 if (is_gimple_assign (stmt))
10029 gimple_assign_set_rhs_from_tree (si, val);
10030 else
10032 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
10033 gcond *cond_stmt = as_a <gcond *> (stmt);
10034 if (integer_zerop (val))
10035 gimple_cond_make_false (cond_stmt);
10036 else if (integer_onep (val))
10037 gimple_cond_make_true (cond_stmt);
10038 else
10039 gcc_unreachable ();
10042 return true;
10045 return false;
10048 /* Callback for substitute_and_fold folding the stmt at *SI. */
10050 static bool
10051 vrp_fold_stmt (gimple_stmt_iterator *si)
10053 if (fold_predicate_in (si))
10054 return true;
10056 return simplify_stmt_using_ranges (si);
10059 /* Unwindable const/copy equivalences. */
10060 const_and_copies *equiv_stack;
10062 /* A trivial wrapper so that we can present the generic jump threading
10063 code with a simple API for simplifying statements. STMT is the
10064 statement we want to simplify, WITHIN_STMT provides the location
10065 for any overflow warnings. */
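/* Only two statement forms are handled: a GIMPLE_COND is evaluated via
   vrp_evaluate_conditional, and an assignment with an integral or
   pointer LHS is replaced by a constant when its computed range
   degenerates to a single value; anything else yields NULL_TREE.  */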
10067 static tree
10068 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
10069 class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED)
10071 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
10072 return vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
10073 gimple_cond_lhs (cond_stmt),
10074 gimple_cond_rhs (cond_stmt),
10075 within_stmt);
10077 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
10079 value_range new_vr = VR_INITIALIZER;
10080 tree lhs = gimple_assign_lhs (assign_stmt);
10082 if (TREE_CODE (lhs) == SSA_NAME
10083 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
10084 || POINTER_TYPE_P (TREE_TYPE (lhs))))
10086 extract_range_from_assignment (&new_vr, assign_stmt);
10087 if (range_int_cst_singleton_p (&new_vr))
10088 return new_vr.min;
10092 return NULL_TREE;
10095 /* Blocks which have more than one predecessor and more than
10096 one successor present jump threading opportunities, i.e.,
10097 when the block is reached from a specific predecessor, we
10098 may be able to determine which of the outgoing edges will
10099 be traversed. When this optimization applies, we are able
10100 to avoid conditionals at runtime and we may expose secondary
10101 optimization opportunities.
10103 This routine is effectively a driver for the generic jump
10104 threading code. It basically just presents the generic code
10105 with edges that may be suitable for jump threading.
10107 Unlike DOM, we do not iterate VRP if jump threading was successful.
10108 While iterating may expose new opportunities for VRP, it is expected
10109 those opportunities would be very limited and the compile time cost
10110 to expose those opportunities would be significant.
10112 As jump threading opportunities are discovered, they are registered
10113 for later realization. */
10115 static void
10116 identify_jump_threads (void)
10118 basic_block bb;
10119 gcond *dummy;
10120 int i;
10121 edge e;
10123 /* Ugh. When substituting values earlier in this pass we can
10124 wipe the dominance information. So rebuild the dominator
10125 information as we need it within the jump threading code. */
10126 calculate_dominance_info (CDI_DOMINATORS);
10128 /* We do not allow VRP information to be used for jump threading
10129 across a back edge in the CFG. Otherwise it becomes too
10130 difficult to avoid eliminating loop exit tests. Of course
10131 EDGE_DFS_BACK is not accurate at this time so we have to
10132 recompute it. */
10133 mark_dfs_back_edges ();
10135 /* Do not thread across edges we are about to remove. Just marking
10136 them as EDGE_IGNORE will do. */
10137 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10138 e->flags |= EDGE_IGNORE;
10140 /* Allocate our unwinder stack to unwind any temporary equivalences
10141 that might be recorded. */
10142 equiv_stack = new const_and_copies ();
10144 /* To avoid lots of silly node creation, we create a single
10145 conditional and just modify it in-place when attempting to
10146 thread jumps. */
10147 dummy = gimple_build_cond (EQ_EXPR,
10148 integer_zero_node, integer_zero_node,
10149 NULL, NULL);
10151 /* Walk through all the blocks finding those which present a
10152 potential jump threading opportunity. We could set this up
10153 as a dominator walker and record data during the walk, but
10154 I doubt it's worth the effort for the classes of jump
10155 threading opportunities we are trying to identify at this
10156 point in compilation. */
10157 FOR_EACH_BB_FN (bb, cfun)
10159 gimple *last;
10161 /* If the generic jump threading code does not find this block
10162 interesting, then there is nothing to do. */
10163 if (! potentially_threadable_block (bb))
10164 continue;
10166 last = last_stmt (bb);
10168 /* We're basically looking for a switch or any kind of conditional with
10169 integral or pointer type arguments. Note the type of the second
10170 argument will be the same as the first argument, so no need to
10171 check it explicitly.
10173 We also handle the case where there are no statements in the
10174 block. This comes up with forwarder blocks that are not
10175 optimized away because they lead to a loop header. But we do
10176 want to thread through them as we can sometimes thread to the
10177 loop exit which is obviously profitable. */
10178 if (!last
10179 || gimple_code (last) == GIMPLE_SWITCH
10180 || (gimple_code (last) == GIMPLE_COND
10181 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
10182 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
10183 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
10184 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
10185 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
10187 edge_iterator ei;
10189 /* We've got a block with multiple predecessors and multiple
10190 successors which also ends in a suitable conditional or
10191 switch statement. For each predecessor, see if we can thread
10192 it to a specific successor. */
10193 FOR_EACH_EDGE (e, ei, bb->preds)
10195 /* Do not thread across edges marked to be ignored, nor across
10196 abnormal edges in the CFG. */
10197 if (e->flags & (EDGE_IGNORE | EDGE_COMPLEX))
10198 continue;
10200 thread_across_edge (dummy, e, true, equiv_stack, NULL,
10201 simplify_stmt_for_jump_threading);
10206 /* Clear EDGE_IGNORE. */
10207 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10208 e->flags &= ~EDGE_IGNORE;
10210 /* We do not actually update the CFG or SSA graphs at this point as
10211 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
10212 handle ASSERT_EXPRs gracefully. */
10215 /* We identified all the jump threading opportunities earlier, but could
10216 not transform the CFG at that time. This routine transforms the
10217 CFG and arranges for the dominator tree to be rebuilt if necessary.
10219 Note the SSA graph update will occur during the normal TODO
10220 processing by the pass manager. */
10221 static void
10222 finalize_jump_threads (void)
10224 thread_through_all_blocks (false);
10225 delete equiv_stack;
10229 /* Traverse all the blocks folding conditionals with known ranges. */
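/* Besides driving the folding itself, this exports the computed ranges
   as global SSA_NAME range info, runs the -Warray-bounds checks when
   requested, identifies jump threading opportunities while the VRP
   data is still available, and finally releases the lattice.  */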
10231 static void
10232 vrp_finalize (bool warn_array_bounds_p)
10234 size_t i;
10236 values_propagated = true;
10238 if (dump_file)
10240 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
10241 dump_all_value_ranges (dump_file);
10242 fprintf (dump_file, "\n");
10245 /* Record the computed ranges as global range info on non-pointer SSA_NAMEs. */
10246 for (i = 0; i < num_vr_values; i++)
10247 if (vr_value[i])
10249 tree name = ssa_name (i);
10251 if (!name
10252 || POINTER_TYPE_P (TREE_TYPE (name))
10253 || (vr_value[i]->type == VR_VARYING)
10254 || (vr_value[i]->type == VR_UNDEFINED))
10255 continue;
10257 if ((TREE_CODE (vr_value[i]->min) == INTEGER_CST)
10258 && (TREE_CODE (vr_value[i]->max) == INTEGER_CST)
10259 && (vr_value[i]->type == VR_RANGE
10260 || vr_value[i]->type == VR_ANTI_RANGE))
10261 set_range_info (name, vr_value[i]->type, vr_value[i]->min,
10262 vr_value[i]->max);
10265 substitute_and_fold (op_with_constant_singleton_value_range,
10266 vrp_fold_stmt, false);
10268 if (warn_array_bounds && warn_array_bounds_p)
10269 check_all_array_refs ();
10271 /* We must identify jump threading opportunities before we release
10272 the datastructures built by VRP. */
10273 identify_jump_threads ();
10275 /* Free allocated memory. */
10276 for (i = 0; i < num_vr_values; i++)
10277 if (vr_value[i])
10279 BITMAP_FREE (vr_value[i]->equiv);
10280 free (vr_value[i]);
10283 free (vr_value);
10284 free (vr_phi_edge_counts);
10286 /* So that we can distinguish between VRP data being available
10287 and not available. */
10288 vr_value = NULL;
10289 vr_phi_edge_counts = NULL;
10293 /* Main entry point to VRP (Value Range Propagation). This pass is
10294 loosely based on J. R. C. Patterson, ``Accurate Static Branch
10295 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
10296 Programming Language Design and Implementation, pp. 67-78, 1995.
10297 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
10299 This is essentially an SSA-CCP pass modified to deal with ranges
10300 instead of constants.
10302 While propagating ranges, we may find that two or more SSA names
10303 have equivalent, though distinct, ranges. For instance,
10305 1 x_9 = p_3->a;
10306 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
10307 3 if (p_4 == q_2)
10308 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
10309 5 endif
10310 6 if (q_2)
10312 In the code above, pointer p_5 has range [q_2, q_2], but from the
10313 code we can also determine that p_5 cannot be NULL and, if q_2 had
10314 a non-varying range, p_5's range should also be compatible with it.
10316 These equivalences are created by two expressions: ASSERT_EXPR and
10317 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
10318 result of another assertion, then we can use the fact that p_5 and
10319 p_4 are equivalent when evaluating p_5's range.
10321 Together with value ranges, we also propagate these equivalences
10322 between names so that we can take advantage of information from
10323 multiple ranges when doing final replacement. Note that this
10324 equivalency relation is transitive but not symmetric.
10326 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
10327 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
10328 in contexts where that assertion does not hold (e.g., in line 6).
10330 TODO, the main difference between this pass and Patterson's is that
10331 we do not propagate edge probabilities. We only compute whether
10332 edges can be taken or not. That is, instead of having a spectrum
10333 of jump probabilities between 0 and 1, we only deal with 0, 1 and
10334 DON'T KNOW. In the future, it may be worthwhile to propagate
10335 probabilities to aid branch prediction. */
10337 static unsigned int
10338 execute_vrp (bool warn_array_bounds_p)
10340 int i;
10341 edge e;
10342 switch_update *su;
10344 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
10345 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
10346 scev_initialize ();
10348 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
10349 Inserting assertions may split edges which will invalidate
10350 EDGE_DFS_BACK. */
10351 insert_range_assertions ();
10353 to_remove_edges.create (10);
10354 to_update_switch_stmts.create (5);
10355 threadedge_initialize_values ();
10357 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
10358 mark_dfs_back_edges ();
10360 vrp_initialize ();
10361 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
10362 vrp_finalize (warn_array_bounds_p);
10364 free_numbers_of_iterations_estimates (cfun);
10366 /* ASSERT_EXPRs must be removed before finalizing jump threads
10367 as finalizing jump threads calls the CFG cleanup code which
10368 does not properly handle ASSERT_EXPRs. */
10369 remove_range_assertions ();
10371 /* If we exposed any new variables, go ahead and put them into
10372 SSA form now, before we handle jump threading. This simplifies
10373 interactions between rewriting of _DECL nodes into SSA form
10374 and rewriting SSA_NAME nodes into SSA form after block
10375 duplication and CFG manipulation. */
10376 update_ssa (TODO_update_ssa);
10378 finalize_jump_threads ();
10380 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
10381 CFG in a broken state and requires a cfg_cleanup run. */
10382 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10383 remove_edge (e);
10384 /* Update SWITCH_EXPR case label vector. */
10385 FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
10387 size_t j;
10388 size_t n = TREE_VEC_LENGTH (su->vec);
10389 tree label;
10390 gimple_switch_set_num_labels (su->stmt, n);
10391 for (j = 0; j < n; j++)
10392 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
10393 /* As we may have replaced the default label with a regular one,
10394 make sure to make it a real default label again. This ensures
10395 optimal expansion. */
10396 label = gimple_switch_label (su->stmt, 0);
10397 CASE_LOW (label) = NULL_TREE;
10398 CASE_HIGH (label) = NULL_TREE;
10401 if (to_remove_edges.length () > 0)
10403 free_dominance_info (CDI_DOMINATORS);
10404 loops_state_set (LOOPS_NEED_FIXUP);
10407 to_remove_edges.release ();
10408 to_update_switch_stmts.release ();
10409 threadedge_finalize_values ();
10411 scev_finalize ();
10412 loop_optimizer_finalize ();
10413 return 0;
10416 namespace {
10418 const pass_data pass_data_vrp =
10420 GIMPLE_PASS, /* type */
10421 "vrp", /* name */
10422 OPTGROUP_NONE, /* optinfo_flags */
10423 TV_TREE_VRP, /* tv_id */
10424 PROP_ssa, /* properties_required */
10425 0, /* properties_provided */
10426 0, /* properties_destroyed */
10427 0, /* todo_flags_start */
10428 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
10431 class pass_vrp : public gimple_opt_pass
10433 public:
10434 pass_vrp (gcc::context *ctxt)
10435 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
10438 /* opt_pass methods: */
10439 opt_pass * clone () { return new pass_vrp (m_ctxt); }
10440 void set_pass_param (unsigned int n, bool param)
10442 gcc_assert (n == 0);
10443 warn_array_bounds_p = param;
10445 virtual bool gate (function *) { return flag_tree_vrp != 0; }
10446 virtual unsigned int execute (function *)
10447 { return execute_vrp (warn_array_bounds_p); }
10449 private:
10450 bool warn_array_bounds_p;
10451 }; // class pass_vrp
10453 } // anon namespace
10455 gimple_opt_pass *
10456 make_pass_vrp (gcc::context *ctxt)
10458 return new pass_vrp (ctxt);