2017-02-20 Paul Thomas <pault@gcc.gnu.org>
[official-gcc.git] / gcc / tree-vrp.c
blob95bf1cf86969f78bec11659fdb92b6b7e4fec368
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2017 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimple-walk.h"
44 #include "tree-cfg.h"
45 #include "tree-ssa-loop-manip.h"
46 #include "tree-ssa-loop-niter.h"
47 #include "tree-ssa-loop.h"
48 #include "tree-into-ssa.h"
49 #include "tree-ssa.h"
50 #include "intl.h"
51 #include "cfgloop.h"
52 #include "tree-scalar-evolution.h"
53 #include "tree-ssa-propagate.h"
54 #include "tree-chrec.h"
55 #include "tree-ssa-threadupdate.h"
56 #include "tree-ssa-scopedtables.h"
57 #include "tree-ssa-threadedge.h"
58 #include "omp-general.h"
59 #include "target.h"
60 #include "case-cfn-macros.h"
61 #include "params.h"
62 #include "alloc-pool.h"
63 #include "domwalk.h"
64 #include "tree-cfgcleanup.h"
/* Initializer for a stack-allocated value_range: VR_UNDEFINED with no
   bounds and no equivalence bitmap.  */
66 #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
68 /* Allocation pools for tree-vrp allocations. */
69 static object_allocator<value_range> vrp_value_range_pool ("Tree VRP value ranges");
70 static bitmap_obstack vrp_equiv_obstack;
72 /* Set of SSA names found live during the RPO traversal of the function
73 for still active basic-blocks. */
/* Indexed by basic-block index; an entry may be NULL for blocks that are
   no longer active (see live_on_edge).  */
74 static sbitmap *live;
76 /* Return true if the SSA name NAME is live on the edge E. */
78 static bool
79 live_on_edge (edge e, tree name)
81 return (live[e->dest->index]
82 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
85 /* Local functions. */
86 static int compare_values (tree val1, tree val2);
87 static int compare_values_warnv (tree val1, tree val2, bool *);
88 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
89 tree, tree, bool, bool *,
90 bool *);
92 /* Location information for ASSERT_EXPRs. Each instance of this
93 structure describes an ASSERT_EXPR for an SSA name. Since a single
94 SSA name may have more than one assertion associated with it, these
95 locations are kept in a linked list attached to the corresponding
96 SSA name. */
97 struct assert_locus
99 /* Basic block where the assertion would be inserted. */
100 basic_block bb;
102 /* Some assertions need to be inserted on an edge (e.g., assertions
103 generated by COND_EXPRs). In those cases, BB will be NULL. */
104 edge e;
106 /* Pointer to the statement that generated this assertion. */
107 gimple_stmt_iterator si;
109 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
110 enum tree_code comp_code;
112 /* Value being compared against. */
113 tree val;
115 /* Expression to compare. */
116 tree expr;
118 /* Next node in the linked list. */
119 assert_locus *next;
122 /* If bit I is present, it means that SSA name N_i has a list of
123 assertions that should be inserted in the IL. */
124 static bitmap need_assert_for;
126 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
127 holds a list of ASSERT_LOCUS_T nodes that describe where
128 ASSERT_EXPRs for SSA name N_I should be inserted. */
129 static assert_locus **asserts_for;
131 /* Value range array. After propagation, VR_VALUE[I] holds the range
132 of values that SSA name N_I may take. */
133 static unsigned num_vr_values;
134 static value_range **vr_value;
/* True once propagation has finished; after that point get_value_range
   stops allocating new lattice entries.  */
135 static bool values_propagated;
137 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
138 number of executable edges we saw the last time we visited the
139 node. */
140 static int *vr_phi_edge_counts;
/* Pairs a GIMPLE switch with a replacement tree VEC; presumably the
   new case-label vector to install after propagation — confirm against
   the code that drains to_update_switch_stmts.  */
142 struct switch_update {
143 gswitch *stmt;
144 tree vec;
/* Work lists filled during propagation and processed at the end of the
   pass.  */
147 static vec<edge> to_remove_edges;
148 static vec<switch_update> to_update_switch_stmts;
151 /* Return the maximum value for TYPE. */
153 static inline tree
154 vrp_val_max (const_tree type)
156 if (!INTEGRAL_TYPE_P (type))
157 return NULL_TREE;
159 return TYPE_MAX_VALUE (type);
162 /* Return the minimum value for TYPE. */
164 static inline tree
165 vrp_val_min (const_tree type)
167 if (!INTEGRAL_TYPE_P (type))
168 return NULL_TREE;
170 return TYPE_MIN_VALUE (type);
173 /* Return whether VAL is equal to the maximum value of its type. This
174 will be true for a positive overflow infinity. We can't do a
175 simple equality comparison with TYPE_MAX_VALUE because C typedefs
176 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
177 to the integer constant with the same value in the type. */
179 static inline bool
180 vrp_val_is_max (const_tree val)
182 tree type_max = vrp_val_max (TREE_TYPE (val));
183 return (val == type_max
184 || (type_max != NULL_TREE
185 && operand_equal_p (val, type_max, 0)));
188 /* Return whether VAL is equal to the minimum value of its type. This
189 will be true for a negative overflow infinity. */
191 static inline bool
192 vrp_val_is_min (const_tree val)
194 tree type_min = vrp_val_min (TREE_TYPE (val));
195 return (val == type_min
196 || (type_min != NULL_TREE
197 && operand_equal_p (val, type_min, 0)));
201 /* Return whether TYPE should use an overflow infinity distinct from
202 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
203 represent a signed overflow during VRP computations. An infinity
204 is distinct from a half-range, which will go from some number to
205 TYPE_{MIN,MAX}_VALUE. */
207 static inline bool
208 needs_overflow_infinity (const_tree type)
210 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
213 /* Return whether TYPE can support our overflow infinity
214 representation: we use the TREE_OVERFLOW flag, which only exists
215 for constants. If TYPE doesn't support this, we don't optimize
216 cases which would require signed overflow--we drop them to
217 VARYING. */
219 static inline bool
220 supports_overflow_infinity (const_tree type)
222 tree min = vrp_val_min (type), max = vrp_val_max (type);
223 gcc_checking_assert (needs_overflow_infinity (type));
224 return (min != NULL_TREE
225 && CONSTANT_CLASS_P (min)
226 && max != NULL_TREE
227 && CONSTANT_CLASS_P (max));
230 /* VAL is the maximum or minimum value of a type. Return a
231 corresponding overflow infinity. */
233 static inline tree
234 make_overflow_infinity (tree val)
236 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
237 val = copy_node (val);
238 TREE_OVERFLOW (val) = 1;
239 return val;
242 /* Return a negative overflow infinity for TYPE. */
244 static inline tree
245 negative_overflow_infinity (tree type)
247 gcc_checking_assert (supports_overflow_infinity (type));
248 return make_overflow_infinity (vrp_val_min (type));
251 /* Return a positive overflow infinity for TYPE. */
253 static inline tree
254 positive_overflow_infinity (tree type)
256 gcc_checking_assert (supports_overflow_infinity (type));
257 return make_overflow_infinity (vrp_val_max (type));
260 /* Return whether VAL is a negative overflow infinity. */
262 static inline bool
263 is_negative_overflow_infinity (const_tree val)
265 return (TREE_OVERFLOW_P (val)
266 && needs_overflow_infinity (TREE_TYPE (val))
267 && vrp_val_is_min (val));
270 /* Return whether VAL is a positive overflow infinity. */
272 static inline bool
273 is_positive_overflow_infinity (const_tree val)
275 return (TREE_OVERFLOW_P (val)
276 && needs_overflow_infinity (TREE_TYPE (val))
277 && vrp_val_is_max (val));
280 /* Return whether VAL is a positive or negative overflow infinity. */
282 static inline bool
283 is_overflow_infinity (const_tree val)
285 return (TREE_OVERFLOW_P (val)
286 && needs_overflow_infinity (TREE_TYPE (val))
287 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
290 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
292 static inline bool
293 stmt_overflow_infinity (gimple *stmt)
295 if (is_gimple_assign (stmt)
296 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
297 GIMPLE_SINGLE_RHS)
298 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
299 return false;
302 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
303 the same value with TREE_OVERFLOW clear. This can be used to avoid
304 confusing a regular value with an overflow value. */
306 static inline tree
307 avoid_overflow_infinity (tree val)
309 if (!is_overflow_infinity (val))
310 return val;
312 if (vrp_val_is_max (val))
313 return vrp_val_max (TREE_TYPE (val));
314 else
316 gcc_checking_assert (vrp_val_is_min (val));
317 return vrp_val_min (TREE_TYPE (val));
322 /* Set value range VR to VR_UNDEFINED. */
324 static inline void
325 set_value_range_to_undefined (value_range *vr)
327 vr->type = VR_UNDEFINED;
328 vr->min = vr->max = NULL_TREE;
329 if (vr->equiv)
330 bitmap_clear (vr->equiv);
334 /* Set value range VR to VR_VARYING. */
336 static inline void
337 set_value_range_to_varying (value_range *vr)
339 vr->type = VR_VARYING;
340 vr->min = vr->max = NULL_TREE;
341 if (vr->equiv)
342 bitmap_clear (vr->equiv);
346 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
348 static void
349 set_value_range (value_range *vr, enum value_range_type t, tree min,
350 tree max, bitmap equiv)
352 /* Check the validity of the range. */
353 if (flag_checking
354 && (t == VR_RANGE || t == VR_ANTI_RANGE))
356 int cmp;
358 gcc_assert (min && max);
/* TREE_OVERFLOW is only allowed on a bound when it is one of our
   overflow-infinity encodings.  */
360 gcc_assert ((!TREE_OVERFLOW_P (min) || is_overflow_infinity (min))
361 && (!TREE_OVERFLOW_P (max) || is_overflow_infinity (max)));
/* An integral anti-range covering the whole type would be an empty
   range, which we do not represent.  */
363 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
364 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
/* MIN must be <= MAX, or incomparable (-2, symbolic bounds).  */
366 cmp = compare_values (min, max);
367 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
370 if (flag_checking
371 && (t == VR_UNDEFINED || t == VR_VARYING))
373 gcc_assert (min == NULL_TREE && max == NULL_TREE);
374 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
377 vr->type = t;
378 vr->min = min;
379 vr->max = max;
381 /* Since updating the equivalence set involves deep copying the
382 bitmaps, only do it if absolutely necessary. */
383 if (vr->equiv == NULL
384 && equiv != NULL)
385 vr->equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
/* If EQUIV is VR's own bitmap, leave it alone; otherwise copy or
   clear VR's (possibly just-allocated) bitmap.  */
387 if (equiv != vr->equiv)
389 if (equiv && !bitmap_empty_p (equiv))
390 bitmap_copy (vr->equiv, equiv)
391 else
392 bitmap_clear (vr->equiv);
397 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
398 This means adjusting T, MIN and MAX representing the case of a
399 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
400 as anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
401 In corner cases where MAX+1 or MIN-1 wraps this will fall back
402 to varying.
403 This routine exists to ease canonicalization in the case where we
404 extract ranges from var + CST op limit. */
406 static void
407 set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
408 tree min, tree max, bitmap equiv)
410 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
411 if (t == VR_UNDEFINED)
413 set_value_range_to_undefined (vr);
414 return;
416 else if (t == VR_VARYING)
418 set_value_range_to_varying (vr);
419 return;
422 /* Nothing to canonicalize for symbolic ranges. */
423 if (TREE_CODE (min) != INTEGER_CST
424 || TREE_CODE (max) != INTEGER_CST)
426 set_value_range (vr, t, min, max, equiv);
427 return;
430 /* Wrong order for min and max, to swap them and the VR type we need
431 to adjust them. */
432 if (tree_int_cst_lt (max, min))
434 tree one, tmp;
436 /* For one bit precision if max < min, then the swapped
437 range covers all values, so for VR_RANGE it is varying and
438 for VR_ANTI_RANGE empty range, so drop to varying as well. */
439 if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
441 set_value_range_to_varying (vr);
442 return;
/* Rewrite [MIN, MAX] with MAX < MIN as the complement [MAX+1, MIN-1]
   and flip the range kind below.  */
445 one = build_int_cst (TREE_TYPE (min), 1);
446 tmp = int_const_binop (PLUS_EXPR, max, one);
447 max = int_const_binop (MINUS_EXPR, min, one);
448 min = tmp;
450 /* There's one corner case, if we had [C+1, C] before we now have
451 that again. But this represents an empty value range, so drop
452 to varying in this case. */
453 if (tree_int_cst_lt (max, min))
455 set_value_range_to_varying (vr);
456 return;
459 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
462 /* Anti-ranges that can be represented as ranges should be so. */
463 if (t == VR_ANTI_RANGE)
465 bool is_min = vrp_val_is_min (min);
466 bool is_max = vrp_val_is_max (max);
468 if (is_min && is_max)
470 /* We cannot deal with empty ranges, drop to varying.
471 ??? This could be VR_UNDEFINED instead. */
472 set_value_range_to_varying (vr);
473 return;
475 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
476 && (is_min || is_max))
478 /* Non-empty boolean ranges can always be represented
479 as a singleton range. */
480 if (is_min)
481 min = max = vrp_val_max (TREE_TYPE (min));
482 else
483 min = max = vrp_val_min (TREE_TYPE (min));
484 t = VR_RANGE;
/* ~[TYPE_MIN, MAX] becomes [MAX+1, TYPE_MAX] ... */
486 else if (is_min
487 /* As a special exception preserve non-null ranges. */
488 && !(TYPE_UNSIGNED (TREE_TYPE (min))
489 && integer_zerop (max)))
491 tree one = build_int_cst (TREE_TYPE (max), 1);
492 min = int_const_binop (PLUS_EXPR, max, one);
493 max = vrp_val_max (TREE_TYPE (max));
494 t = VR_RANGE;
/* ... and ~[MIN, TYPE_MAX] becomes [TYPE_MIN, MIN-1].  */
496 else if (is_max)
498 tree one = build_int_cst (TREE_TYPE (min), 1);
499 max = int_const_binop (MINUS_EXPR, min, one);
500 min = vrp_val_min (TREE_TYPE (min));
501 t = VR_RANGE;
505 /* Do not drop [-INF(OVF), +INF(OVF)] to varying. (OVF) has to be sticky
506 to make sure VRP iteration terminates, otherwise we can get into
507 oscillations. */
509 set_value_range (vr, t, min, max, equiv);
512 /* Copy value range FROM into value range TO. */
514 static inline void
515 copy_value_range (value_range *to, value_range *from)
/* Delegates to set_value_range so TO's equivalence bitmap is deep
   copied / cleared as needed rather than aliased to FROM's.  */
517 set_value_range (to, from->type, from->min, from->max, from->equiv);
520 /* Set value range VR to a single value. This function is only called
521 with values we get from statements, and exists to clear the
522 TREE_OVERFLOW flag so that we don't think we have an overflow
523 infinity when we shouldn't. */
525 static inline void
526 set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
528 gcc_assert (is_gimple_min_invariant (val));
529 if (TREE_OVERFLOW_P (val))
530 val = drop_tree_overflow (val);
531 set_value_range (vr, VR_RANGE, val, val, equiv);
534 /* Set value range VR to a non-negative range of type TYPE.
535 OVERFLOW_INFINITY indicates whether to use an overflow infinity
536 rather than TYPE_MAX_VALUE; this should be true if we determine
537 that the range is nonnegative based on the assumption that signed
538 overflow does not occur. */
540 static inline void
541 set_value_range_to_nonnegative (value_range *vr, tree type,
542 bool overflow_infinity)
544 tree zero;
546 if (overflow_infinity && !supports_overflow_infinity (type))
548 set_value_range_to_varying (vr);
549 return;
552 zero = build_int_cst (type, 0);
553 set_value_range (vr, VR_RANGE, zero,
554 (overflow_infinity
555 ? positive_overflow_infinity (type)
556 : TYPE_MAX_VALUE (type)),
557 vr->equiv);
560 /* Set value range VR to a non-NULL range of type TYPE. */
562 static inline void
563 set_value_range_to_nonnull (value_range *vr, tree type)
565 tree zero = build_int_cst (type, 0);
/* ~[0, 0] is the canonical "not null / not zero" range.  */
566 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
570 /* Set value range VR to a NULL range of type TYPE. */
572 static inline void
573 set_value_range_to_null (value_range *vr, tree type)
/* [0, 0]; goes through set_value_range_to_value so TREE_OVERFLOW is
   stripped from the constant.  */
575 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
579 /* Set value range VR to a range of a truthvalue of type TYPE. */
581 static inline void
582 set_value_range_to_truthvalue (value_range *vr, tree type)
584 if (TYPE_PRECISION (type) == 1)
585 set_value_range_to_varying (vr);
586 else
587 set_value_range (vr, VR_RANGE,
588 build_int_cst (type, 0), build_int_cst (type, 1),
589 vr->equiv);
593 /* If abs (min) < abs (max), set VR to [-max, max], if
594 abs (min) >= abs (max), set VR to [-min, min]. */
596 static void
597 abs_extent_range (value_range *vr, tree min, tree max)
599 int cmp;
/* Only meaningful for constant bounds of a signed integral type.  */
601 gcc_assert (TREE_CODE (min) == INTEGER_CST);
602 gcc_assert (TREE_CODE (max) == INTEGER_CST);
603 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
604 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
605 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
606 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
/* ABS of the most negative value overflows; punt to VARYING then.  */
607 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
609 set_value_range_to_varying (vr);
610 return;
/* Keep the larger magnitude M and build the symmetric range [-M, M].  */
612 cmp = compare_values (min, max);
613 if (cmp == -1)
614 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
615 else if (cmp == 0 || cmp == 1)
617 max = min;
618 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
620 else
622 set_value_range_to_varying (vr);
623 return;
625 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
629 /* Return value range information for VAR.
631 If we have no values ranges recorded (ie, VRP is not running), then
632 return NULL. Otherwise create an empty range if none existed for VAR. */
634 static value_range *
635 get_value_range (const_tree var)
/* Shared read-only VARYING range handed out when we must not create or
   mutate lattice entries; callers check vr->type before writing.  */
637 static const value_range vr_const_varying
638 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
639 value_range *vr;
640 tree sym;
641 unsigned ver = SSA_NAME_VERSION (var);
643 /* If we have no recorded ranges, then return NULL. */
644 if (! vr_value)
645 return NULL;
647 /* If we query the range for a new SSA name return an unmodifiable VARYING.
648 We should get here at most from the substitute-and-fold stage which
649 will never try to change values. */
650 if (ver >= num_vr_values)
651 return CONST_CAST (value_range *, &vr_const_varying);
653 vr = vr_value[ver];
654 if (vr)
655 return vr;
657 /* After propagation finished do not allocate new value-ranges. */
658 if (values_propagated)
659 return CONST_CAST (value_range *, &vr_const_varying);
661 /* Create a default value range. */
662 vr_value[ver] = vr = vrp_value_range_pool.allocate ();
663 memset (vr, 0, sizeof (*vr));
665 /* Defer allocating the equivalence set. */
666 vr->equiv = NULL;
668 /* If VAR is a default definition of a parameter, the variable can
669 take any value in VAR's type. */
670 if (SSA_NAME_IS_DEFAULT_DEF (var))
672 sym = SSA_NAME_VAR (var);
673 if (TREE_CODE (sym) == PARM_DECL)
675 /* Try to use the "nonnull" attribute to create ~[0, 0]
676 anti-ranges for pointers. Note that this is only valid with
677 default definitions of PARM_DECLs. */
678 if (POINTER_TYPE_P (TREE_TYPE (sym))
679 && (nonnull_arg_p (sym)
680 || get_ptr_nonnull (var)))
681 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
682 else if (INTEGRAL_TYPE_P (TREE_TYPE (sym)))
/* Seed integral parameters from range info recorded by earlier
   passes, if any.  */
684 wide_int min, max;
685 value_range_type rtype = get_range_info (var, &min, &max);
686 if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
687 set_value_range (vr, rtype,
688 wide_int_to_tree (TREE_TYPE (var), min),
689 wide_int_to_tree (TREE_TYPE (var), max),
690 NULL);
691 else
692 set_value_range_to_varying (vr);
694 else
695 set_value_range_to_varying (vr);
/* The DECL_BY_REFERENCE result pointer is never null.  */
697 else if (TREE_CODE (sym) == RESULT_DECL
698 && DECL_BY_REFERENCE (sym))
699 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
702 return vr;
705 /* Set value-ranges of all SSA names defined by STMT to varying. */
707 static void
708 set_defs_to_varying (gimple *stmt)
710 ssa_op_iter i;
711 tree def;
712 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
714 value_range *vr = get_value_range (def);
715 /* Avoid writing to vr_const_varying get_value_range may return. */
716 if (vr->type != VR_VARYING)
717 set_value_range_to_varying (vr);
722 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
724 static inline bool
725 vrp_operand_equal_p (const_tree val1, const_tree val2)
727 if (val1 == val2)
728 return true;
729 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
730 return false;
731 return is_overflow_infinity (val1) == is_overflow_infinity (val2);
734 /* Return true, if the bitmaps B1 and B2 are equal. */
736 static inline bool
737 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
739 return (b1 == b2
740 || ((!b1 || bitmap_empty_p (b1))
741 && (!b2 || bitmap_empty_p (b2)))
742 || (b1 && b2
743 && bitmap_equal_p (b1, b2)));
746 /* Update the value range and equivalence set for variable VAR to
747 NEW_VR. Return true if NEW_VR is different from VAR's previous
748 value.
750 NOTE: This function assumes that NEW_VR is a temporary value range
751 object created for the sole purpose of updating VAR's range. The
752 storage used by the equivalence set from NEW_VR will be freed by
753 this function. Do not call update_value_range when NEW_VR
754 is the range object associated with another SSA name. */
756 static inline bool
757 update_value_range (const_tree var, value_range *new_vr)
759 value_range *old_vr;
760 bool is_new;
762 /* If there is a value-range on the SSA name from earlier analysis
763 factor that in. */
764 if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
766 wide_int min, max;
767 value_range_type rtype = get_range_info (var, &min, &max);
768 if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
770 tree nr_min, nr_max;
771 /* Range info on SSA names doesn't carry overflow information
772 so make sure to preserve the overflow bit on the lattice. */
/* If the recorded bound equals the type bound and NEW_VR already uses
   (or allows) an overflow infinity there, keep the infinity form.  */
773 if (rtype == VR_RANGE
774 && needs_overflow_infinity (TREE_TYPE (var))
775 && (new_vr->type == VR_VARYING
776 || (new_vr->type == VR_RANGE
777 && is_negative_overflow_infinity (new_vr->min)))
778 && wi::eq_p (vrp_val_min (TREE_TYPE (var)), min))
779 nr_min = negative_overflow_infinity (TREE_TYPE (var));
780 else
781 nr_min = wide_int_to_tree (TREE_TYPE (var), min);
782 if (rtype == VR_RANGE
783 && needs_overflow_infinity (TREE_TYPE (var))
784 && (new_vr->type == VR_VARYING
785 || (new_vr->type == VR_RANGE
786 && is_positive_overflow_infinity (new_vr->max)))
787 && wi::eq_p (vrp_val_max (TREE_TYPE (var)), max))
788 nr_max = positive_overflow_infinity (TREE_TYPE (var));
789 else
790 nr_max = wide_int_to_tree (TREE_TYPE (var), max);
791 value_range nr = VR_INITIALIZER;
792 set_and_canonicalize_value_range (&nr, rtype, nr_min, nr_max, NULL);
793 vrp_intersect_ranges (new_vr, &nr);
797 /* Update the value range, if necessary. */
798 old_vr = get_value_range (var);
799 is_new = old_vr->type != new_vr->type
800 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
801 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
802 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
804 if (is_new)
806 /* Do not allow transitions up the lattice. The following
807 is slightly more awkward than just new_vr->type < old_vr->type
808 because VR_RANGE and VR_ANTI_RANGE need to be considered
809 the same. We may not have is_new when transitioning to
810 UNDEFINED. If old_vr->type is VARYING, we shouldn't be
811 called. */
812 if (new_vr->type == VR_UNDEFINED)
/* Going (back) to UNDEFINED would move up the lattice; pin both
   ranges at VARYING instead to guarantee termination.  */
814 BITMAP_FREE (new_vr->equiv);
815 set_value_range_to_varying (old_vr);
816 set_value_range_to_varying (new_vr);
817 return true;
819 else
820 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
821 new_vr->equiv);
/* NEW_VR is a throwaway; this function owns and frees its equiv.  */
824 BITMAP_FREE (new_vr->equiv);
826 return is_new;
830 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
831 point where equivalence processing can be turned on/off. */
833 static void
834 add_equivalence (bitmap *equiv, const_tree var)
836 unsigned ver = SSA_NAME_VERSION (var);
837 value_range *vr = get_value_range (var);
839 if (*equiv == NULL)
840 *equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
841 bitmap_set_bit (*equiv, ver);
842 if (vr && vr->equiv)
843 bitmap_ior_into (*equiv, vr->equiv);
847 /* Return true if VR is ~[0, 0]. */
849 static inline bool
850 range_is_nonnull (value_range *vr)
852 return vr->type == VR_ANTI_RANGE
853 && integer_zerop (vr->min)
854 && integer_zerop (vr->max);
858 /* Return true if VR is [0, 0]. */
860 static inline bool
861 range_is_null (value_range *vr)
863 return vr->type == VR_RANGE
864 && integer_zerop (vr->min)
865 && integer_zerop (vr->max);
868 /* Return true if max and min of VR are INTEGER_CST. It's not necessary
869 a singleton. */
871 static inline bool
872 range_int_cst_p (value_range *vr)
874 return (vr->type == VR_RANGE
875 && TREE_CODE (vr->max) == INTEGER_CST
876 && TREE_CODE (vr->min) == INTEGER_CST);
879 /* Return true if VR is a INTEGER_CST singleton. */
881 static inline bool
882 range_int_cst_singleton_p (value_range *vr)
884 return (range_int_cst_p (vr)
885 && !is_overflow_infinity (vr->min)
886 && !is_overflow_infinity (vr->max)
887 && tree_int_cst_equal (vr->min, vr->max));
890 /* Return true if value range VR involves at least one symbol. */
892 static inline bool
893 symbolic_range_p (value_range *vr)
895 return (!is_gimple_min_invariant (vr->min)
896 || !is_gimple_min_invariant (vr->max));
899 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
900 otherwise. We only handle additive operations and set NEG to true if the
901 symbol is negated and INV to the invariant part, if any. */
903 static tree
904 get_single_symbol (tree t, bool *neg, tree *inv)
906 bool neg_;
907 tree inv_;
909 *inv = NULL_TREE;
910 *neg = false;
/* Strip one level of "INV +/- X" or "X + INV", remembering the
   invariant part and whether the remaining operand is negated.  */
912 if (TREE_CODE (t) == PLUS_EXPR
913 || TREE_CODE (t) == POINTER_PLUS_EXPR
914 || TREE_CODE (t) == MINUS_EXPR)
916 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
/* INV - X: the symbol is negated.  */
918 neg_ = (TREE_CODE (t) == MINUS_EXPR);
919 inv_ = TREE_OPERAND (t, 0);
920 t = TREE_OPERAND (t, 1);
922 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
924 neg_ = false;
925 inv_ = TREE_OPERAND (t, 1);
926 t = TREE_OPERAND (t, 0);
928 else
929 return NULL_TREE;
931 else
933 neg_ = false;
934 inv_ = NULL_TREE;
/* An explicit negation flips the sign we report.  */
937 if (TREE_CODE (t) == NEGATE_EXPR)
939 t = TREE_OPERAND (t, 0);
940 neg_ = !neg_;
943 if (TREE_CODE (t) != SSA_NAME)
944 return NULL_TREE;
/* Only commit the outputs once we know a single SSA_NAME remains.  */
946 *neg = neg_;
947 *inv = inv_;
948 return t;
951 /* The reverse operation: build a symbolic expression with TYPE
952 from symbol SYM, negated according to NEG, and invariant INV. */
954 static tree
955 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
957 const bool pointer_p = POINTER_TYPE_P (type);
958 tree t = sym;
960 if (neg)
961 t = build1 (NEGATE_EXPR, type, t);
963 if (integer_zerop (inv))
964 return t;
966 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
969 /* Return true if value range VR involves exactly one symbol SYM. */
971 static bool
972 symbolic_range_based_on_p (value_range *vr, const_tree sym)
974 bool neg, min_has_symbol, max_has_symbol;
975 tree inv;
977 if (is_gimple_min_invariant (vr->min))
978 min_has_symbol = false;
979 else if (get_single_symbol (vr->min, &neg, &inv) == sym)
980 min_has_symbol = true;
981 else
982 return false;
984 if (is_gimple_min_invariant (vr->max))
985 max_has_symbol = false;
986 else if (get_single_symbol (vr->max, &neg, &inv) == sym)
987 max_has_symbol = true;
988 else
989 return false;
991 return (min_has_symbol || max_has_symbol);
994 /* Return true if value range VR uses an overflow infinity. */
996 static inline bool
997 overflow_infinity_range_p (value_range *vr)
999 return (vr->type == VR_RANGE
1000 && (is_overflow_infinity (vr->min)
1001 || is_overflow_infinity (vr->max)));
1004 /* Return false if we can not make a valid comparison based on VR;
1005 this will be the case if it uses an overflow infinity and overflow
1006 is not undefined (i.e., -fno-strict-overflow is in effect).
1007 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
1008 uses an overflow infinity. */
1010 static bool
1011 usable_range_p (value_range *vr, bool *strict_overflow_p)
1013 gcc_assert (vr->type == VR_RANGE);
1014 if (is_overflow_infinity (vr->min))
1016 *strict_overflow_p = true;
1017 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
1018 return false;
1020 if (is_overflow_infinity (vr->max))
1022 *strict_overflow_p = true;
1023 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
1024 return false;
1026 return true;
1029 /* Return true if the result of assignment STMT is know to be non-zero.
1030 If the return value is based on the assumption that signed overflow is
1031 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1032 *STRICT_OVERFLOW_P.*/
1034 static bool
1035 gimple_assign_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
1037 enum tree_code code = gimple_assign_rhs_code (stmt);
1038 switch (get_gimple_rhs_class (code))
1040 case GIMPLE_UNARY_RHS:
1041 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1042 gimple_expr_type (stmt),
1043 gimple_assign_rhs1 (stmt),
1044 strict_overflow_p);
1045 case GIMPLE_BINARY_RHS:
1046 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1047 gimple_expr_type (stmt),
1048 gimple_assign_rhs1 (stmt),
1049 gimple_assign_rhs2 (stmt),
1050 strict_overflow_p);
1051 case GIMPLE_TERNARY_RHS:
1052 return false;
1053 case GIMPLE_SINGLE_RHS:
1054 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
1055 strict_overflow_p);
1056 case GIMPLE_INVALID_RHS:
1057 gcc_unreachable ();
1058 default:
1059 gcc_unreachable ();
1063 /* Return true if STMT is known to compute a non-zero value.
1064 If the return value is based on the assumption that signed overflow is
1065 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1066 *STRICT_OVERFLOW_P.*/
1068 static bool
1069 gimple_stmt_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
1071 switch (gimple_code (stmt))
1073 case GIMPLE_ASSIGN:
1074 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
1075 case GIMPLE_CALL:
1077 tree fndecl = gimple_call_fndecl (stmt);
1078 if (!fndecl) return false;
/* Throwing operator new never returns null (non-throwing new may).  */
1079 if (flag_delete_null_pointer_checks && !flag_check_new
1080 && DECL_IS_OPERATOR_NEW (fndecl)
1081 && !TREE_NOTHROW (fndecl)
1082 return true;
1083 /* References are always non-NULL. */
1084 if (flag_delete_null_pointer_checks
1085 && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE)
1086 return true;
/* The returns_nonnull attribute is a direct promise of a non-null
   result.  */
1087 if (flag_delete_null_pointer_checks &&
1088 lookup_attribute ("returns_nonnull",
1089 TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
1090 return true;
/* If the call returns one of its arguments, the result is nonzero
   when that argument is known non-null by attribute.  */
1092 gcall *call_stmt = as_a<gcall *> (stmt);
1093 unsigned rf = gimple_call_return_flags (call_stmt);
1094 if (rf & ERF_RETURNS_ARG)
1096 unsigned argnum = rf & ERF_RETURN_ARG_MASK;
1097 if (argnum < gimple_call_num_args (call_stmt))
1099 tree arg = gimple_call_arg (call_stmt, argnum);
1100 if (SSA_VAR_P (arg)
1101 && infer_nonnull_range_by_attribute (stmt, arg))
1102 return true;
/* alloca always returns a non-null pointer.  */
1105 return gimple_alloca_call_p (stmt);
1107 default:
1108 gcc_unreachable ();
1112 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1113 obtained so far. */
1115 static bool
1116 vrp_stmt_computes_nonzero (gimple *stmt, bool *strict_overflow_p)
1118 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1119 return true;
1121 /* If we have an expression of the form &X->a, then the expression
1122 is nonnull if X is nonnull. */
1123 if (is_gimple_assign (stmt)
1124 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1126 tree expr = gimple_assign_rhs1 (stmt);
1127 tree base = get_base_address (TREE_OPERAND (expr, 0));
1129 if (base != NULL_TREE
1130 && TREE_CODE (base) == MEM_REF
1131 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1133 value_range *vr = get_value_range (TREE_OPERAND (base, 0));
1134 if (range_is_nonnull (vr))
1135 return true;
1139 return false;
1142 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1143 a gimple invariant, or SSA_NAME +- CST. */
1145 static bool
1146 valid_value_p (tree expr)
1148 if (TREE_CODE (expr) == SSA_NAME)
1149 return true;
1151 if (TREE_CODE (expr) == PLUS_EXPR
1152 || TREE_CODE (expr) == MINUS_EXPR)
1153 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1154 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1156 return is_gimple_min_invariant (expr);
1159 /* Return
1160 1 if VAL < VAL2
1161 0 if !(VAL < VAL2)
1162 -2 if those are incomparable. */
1163 static inline int
1164 operand_less_p (tree val, tree val2)
1166 /* LT is folded faster than GE and others. Inline the common case. */
1167 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1169 if (! is_positive_overflow_infinity (val2))
1170 return tree_int_cst_lt (val, val2);
1172 else
1174 tree tcmp;
1176 fold_defer_overflow_warnings ();
1178 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1180 fold_undefer_and_ignore_overflow_warnings ();
1182 if (!tcmp
1183 || TREE_CODE (tcmp) != INTEGER_CST)
1184 return -2;
1186 if (!integer_zerop (tcmp))
1187 return 1;
1190 /* val >= val2, not considering overflow infinity. */
1191 if (is_negative_overflow_infinity (val))
1192 return is_negative_overflow_infinity (val2) ? 0 : 1;
1193 else if (is_positive_overflow_infinity (val2))
1194 return is_positive_overflow_infinity (val) ? 0 : 1;
1196 return 0;
1199 /* Compare two values VAL1 and VAL2. Return
1201 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1202 -1 if VAL1 < VAL2,
1203 0 if VAL1 == VAL2,
1204 +1 if VAL1 > VAL2, and
1205 +2 if VAL1 != VAL2
1207 This is similar to tree_int_cst_compare but supports pointer values
1208 and values that cannot be compared at compile time.
1210 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1211 true if the return value is only valid if we assume that signed
1212 overflow is undefined. */
/* NOTE(review): the extraction of this file dropped blank and brace-only
   lines and fused the upstream line numbers into each line; restore the
   formatting from the upstream source before compiling.  */
1214 static int
1215 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1217 if (val1 == val2)
1218 return 0;
1220 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1221 both integers. */
1222 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1223 == POINTER_TYPE_P (TREE_TYPE (val2)));
1225 /* Convert the two values into the same type. This is needed because
1226 sizetype causes sign extension even for unsigned types. */
1227 val2 = fold_convert (TREE_TYPE (val1), val2);
1228 STRIP_USELESS_TYPE_CONVERSION (val2);
1230 const bool overflow_undefined
1231 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
1232 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
1233 tree inv1, inv2;
1234 bool neg1, neg2;
1235 tree sym1 = get_single_symbol (val1, &neg1, &inv1);
1236 tree sym2 = get_single_symbol (val2, &neg2, &inv2);
1238 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
1239 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
1240 if (sym1 && sym2)
1242 /* Both values must use the same name with the same sign. */
1243 if (sym1 != sym2 || neg1 != neg2)
1244 return -2;
1246 /* [-]NAME + CST == [-]NAME + CST. */
1247 if (inv1 == inv2)
1248 return 0;
1250 /* If overflow is defined we cannot simplify more. */
1251 if (!overflow_undefined)
1252 return -2;
1254 if (strict_overflow_p != NULL
1255 && (!inv1 || !TREE_NO_WARNING (val1))
1256 && (!inv2 || !TREE_NO_WARNING (val2)))
1257 *strict_overflow_p = true;
1259 if (!inv1)
1260 inv1 = build_int_cst (TREE_TYPE (val1), 0)
1261 if (!inv2)
1262 inv2 = build_int_cst (TREE_TYPE (val2), 0);
/* Same symbol on both sides: recurse to compare just the invariant
   offsets.  */
1264 return compare_values_warnv (inv1, inv2, strict_overflow_p);
/* Check whether each value is a compile-time constant (gimple
   invariant).  */
1267 const bool cst1 = is_gimple_min_invariant (val1);
1268 const bool cst2 = is_gimple_min_invariant (val2);
1270 /* If one is of the form '[-]NAME + CST' and the other is constant, then
1271 it might be possible to say something depending on the constants. */
1272 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
1274 if (!overflow_undefined)
1275 return -2;
1277 if (strict_overflow_p != NULL
1278 && (!sym1 || !TREE_NO_WARNING (val1))
1279 && (!sym2 || !TREE_NO_WARNING (val2)))
1280 *strict_overflow_p = true;
1282 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
1283 tree cst = cst1 ? val1 : val2;
1284 tree inv = cst1 ? inv2 : inv1;
1286 /* Compute the difference between the constants. If it overflows or
1287 underflows, this means that we can trivially compare the NAME with
1288 it and, consequently, the two values with each other. */
1289 wide_int diff = wi::sub (cst, inv);
1290 if (wi::cmp (0, inv, sgn) != wi::cmp (diff, cst, sgn))
1292 const int res = wi::cmp (cst, inv, sgn);
1293 return cst1 ? res : -res;
1296 return -2;
1299 /* We cannot say anything more for non-constants. */
1300 if (!cst1 || !cst2)
1301 return -2;
1303 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1305 /* We cannot compare overflowed values, except for overflow
1306 infinities. */
1307 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1309 if (strict_overflow_p != NULL)
1310 *strict_overflow_p = true;
1311 if (is_negative_overflow_infinity (val1))
1312 return is_negative_overflow_infinity (val2) ? 0 : -1;
1313 else if (is_negative_overflow_infinity (val2))
1314 return 1;
1315 else if (is_positive_overflow_infinity (val1))
1316 return is_positive_overflow_infinity (val2) ? 0 : 1;
1317 else if (is_positive_overflow_infinity (val2))
1318 return -1;
1319 return -2;
1322 return tree_int_cst_compare (val1, val2);
/* Pointer comparison.  */
1324 else
1326 tree t;
1328 /* First see if VAL1 and VAL2 are not the same. */
1329 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1330 return 0;
1332 /* If VAL1 is a lower address than VAL2, return -1. */
1333 if (operand_less_p (val1, val2) == 1)
1334 return -1;
1336 /* If VAL1 is a higher address than VAL2, return +1. */
1337 if (operand_less_p (val2, val1) == 1)
1338 return 1;
1340 /* If VAL1 is different than VAL2, return +2.
1341 For integer constants we either have already returned -1 or 1
1342 or they are equivalent. We still might succeed in proving
1343 something about non-trivial operands. */
1344 if (TREE_CODE (val1) != INTEGER_CST
1345 || TREE_CODE (val2) != INTEGER_CST)
1347 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1348 if (t && integer_onep (t))
1349 return 2;
1352 return -2;
1356 /* Compare values like compare_values_warnv, but treat comparisons of
1357 nonconstants which rely on undefined overflow as incomparable. */
1359 static int
1360 compare_values (tree val1, tree val2)
1362 bool sop;
1363 int ret;
1365 sop = false;
1366 ret = compare_values_warnv (val1, val2, &sop);
1367 if (sop
1368 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1369 ret = -2;
1370 return ret;
1374 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1375 0 if VAL is not inside [MIN, MAX],
1376 -2 if we cannot tell either way.
1378 Benchmark compile/20001226-1.c compilation time after changing this
1379 function. */
1381 static inline int
1382 value_inside_range (tree val, tree min, tree max)
1384 int cmp1, cmp2;
1386 cmp1 = operand_less_p (val, min);
1387 if (cmp1 == -2)
1388 return -2;
1389 if (cmp1 == 1)
1390 return 0;
1392 cmp2 = operand_less_p (max, val);
1393 if (cmp2 == -2)
1394 return -2;
1396 return !cmp2;
1400 /* Return true if value ranges VR0 and VR1 have a non-empty
1401 intersection.
1403 Benchmark compile/20001226-1.c compilation time after changing this
1404 function.
1407 static inline bool
1408 value_ranges_intersect_p (value_range *vr0, value_range *vr1)
1410 /* The value ranges do not intersect if the maximum of the first range is
1411 less than the minimum of the second range or vice versa.
1412 When those relations are unknown, we can't do any better. */
1413 if (operand_less_p (vr0->max, vr1->min) != 0)
1414 return false;
1415 if (operand_less_p (vr1->max, vr0->min) != 0)
1416 return false;
1417 return true;
1421 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1422 include the value zero, -2 if we cannot tell. */
1424 static inline int
1425 range_includes_zero_p (tree min, tree max)
1427 tree zero = build_int_cst (TREE_TYPE (min), 0);
1428 return value_inside_range (zero, min, max);
1431 /* Return true if *VR is know to only contain nonnegative values. */
1433 static inline bool
1434 value_range_nonnegative_p (value_range *vr)
1436 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1437 which would return a useful value should be encoded as a
1438 VR_RANGE. */
1439 if (vr->type == VR_RANGE)
1441 int result = compare_values (vr->min, integer_zero_node);
1442 return (result == 0 || result == 1);
1445 return false;
1448 /* If *VR has a value rante that is a single constant value return that,
1449 otherwise return NULL_TREE. */
1451 static tree
1452 value_range_constant_singleton (value_range *vr)
1454 if (vr->type == VR_RANGE
1455 && vrp_operand_equal_p (vr->min, vr->max)
1456 && is_gimple_min_invariant (vr->min))
1457 return vr->min;
1459 return NULL_TREE;
1462 /* If OP has a value range with a single constant value return that,
1463 otherwise return NULL_TREE. This returns OP itself if OP is a
1464 constant. */
1466 static tree
1467 op_with_constant_singleton_value_range (tree op)
1469 if (is_gimple_min_invariant (op))
1470 return op;
1472 if (TREE_CODE (op) != SSA_NAME)
1473 return NULL_TREE;
1475 return value_range_constant_singleton (get_value_range (op));
1478 /* Return true if op is in a boolean [0, 1] value-range. */
1480 static bool
1481 op_with_boolean_value_range_p (tree op)
1483 value_range *vr;
1485 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1486 return true;
1488 if (integer_zerop (op)
1489 || integer_onep (op))
1490 return true;
1492 if (TREE_CODE (op) != SSA_NAME)
1493 return false;
1495 vr = get_value_range (op);
1496 return (vr->type == VR_RANGE
1497 && integer_zerop (vr->min)
1498 && integer_onep (vr->max));
1501 /* Extract value range information for VAR when (OP COND_CODE LIMIT) is
1502 true and store it in *VR_P. */
/* NOTE(review): the extraction of this file dropped blank and brace-only
   lines and fused the upstream line numbers into each line; restore the
   formatting from the upstream source before compiling.  */
1504 static void
1505 extract_range_for_var_from_comparison_expr (tree var, enum tree_code cond_code,
1506 tree op, tree limit,
1507 value_range *vr_p)
1509 tree min, max, type;
1510 value_range *limit_vr;
1511 limit = avoid_overflow_infinity (limit);
1512 type = TREE_TYPE (var);
1513 gcc_assert (limit != var);
1515 /* For pointer arithmetic, we only keep track of pointer equality
1516 and inequality. */
1517 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1519 set_value_range_to_varying (vr_p);
1520 return;
1523 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1524 try to use LIMIT's range to avoid creating symbolic ranges
1525 unnecessarily. */
1526 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1528 /* LIMIT's range is only interesting if it has any useful information. */
1529 if (! limit_vr
1530 || limit_vr->type == VR_UNDEFINED
1531 || limit_vr->type == VR_VARYING
1532 || (symbolic_range_p (limit_vr)
1533 && ! (limit_vr->type == VR_RANGE
1534 && (limit_vr->min == limit_vr->max
1535 || operand_equal_p (limit_vr->min, limit_vr->max, 0)))))
1536 limit_vr = NULL;
1538 /* Initially, the new range has the same set of equivalences of
1539 VAR's range. This will be revised before returning the final
1540 value. Since assertions may be chained via mutually exclusive
1541 predicates, we will need to trim the set of equivalences before
1542 we are done. */
1543 gcc_assert (vr_p->equiv == NULL);
1544 add_equivalence (&vr_p->equiv, var);
1546 /* Extract a new range based on the asserted comparison for VAR and
1547 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1548 will only use it for equality comparisons (EQ_EXPR). For any
1549 other kind of assertion, we cannot derive a range from LIMIT's
1550 anti-range that can be used to describe the new range. For
1551 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1552 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1553 no single range for x_2 that could describe LE_EXPR, so we might
1554 as well build the range [b_4, +INF] for it.
1555 One special case we handle is extracting a range from a
1556 range test encoded as (unsigned)var + CST <= limit. */
1557 if (TREE_CODE (op) == NOP_EXPR
1558 || TREE_CODE (op) == PLUS_EXPR)
1560 if (TREE_CODE (op) == PLUS_EXPR)
1562 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (op, 1)),
1563 TREE_OPERAND (op, 1));
1564 max = int_const_binop (PLUS_EXPR, limit, min);
1565 op = TREE_OPERAND (op, 0);
1567 else
1569 min = build_int_cst (TREE_TYPE (var), 0);
1570 max = limit;
1573 /* Make sure to not set TREE_OVERFLOW on the final type
1574 conversion. We are willingly interpreting large positive
1575 unsigned values as negative signed values here. */
1576 min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
1577 max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
1579 /* We can transform a max, min range to an anti-range or
1580 vice-versa. Use set_and_canonicalize_value_range which does
1581 this for us. */
1582 if (cond_code == LE_EXPR)
1583 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1584 min, max, vr_p->equiv)
1585 else if (cond_code == GT_EXPR)
1586 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1587 min, max, vr_p->equiv);
1588 else
1589 gcc_unreachable ();
/* Equality: VAR's range becomes LIMIT's range, or the singleton
   [LIMIT, LIMIT] when LIMIT has no useful range of its own.  */
1591 else if (cond_code == EQ_EXPR)
1593 enum value_range_type range_type;
1595 if (limit_vr)
1597 range_type = limit_vr->type;
1598 min = limit_vr->min;
1599 max = limit_vr->max;
1601 else
1603 range_type = VR_RANGE;
1604 min = limit;
1605 max = limit;
1608 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1610 /* When asserting the equality VAR == LIMIT and LIMIT is another
1611 SSA name, the new range will also inherit the equivalence set
1612 from LIMIT. */
1613 if (TREE_CODE (limit) == SSA_NAME)
1614 add_equivalence (&vr_p->equiv, limit);
1616 else if (cond_code == NE_EXPR)
1618 /* As described above, when LIMIT's range is an anti-range and
1619 this assertion is an inequality (NE_EXPR), then we cannot
1620 derive anything from the anti-range. For instance, if
1621 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1622 not imply that VAR's range is [0, 0]. So, in the case of
1623 anti-ranges, we just assert the inequality using LIMIT and
1624 not its anti-range.
1626 If LIMIT_VR is a range, we can only use it to build a new
1627 anti-range if LIMIT_VR is a single-valued range. For
1628 instance, if LIMIT_VR is [0, 1], the predicate
1629 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1630 Rather, it means that for value 0 VAR should be ~[0, 0]
1631 and for value 1, VAR should be ~[1, 1]. We cannot
1632 represent these ranges.
1634 The only situation in which we can build a valid
1635 anti-range is when LIMIT_VR is a single-valued range
1636 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1637 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1638 if (limit_vr
1639 && limit_vr->type == VR_RANGE
1640 && compare_values (limit_vr->min, limit_vr->max) == 0)
1642 min = limit_vr->min;
1643 max = limit_vr->max;
1645 else
1647 /* In any other case, we cannot use LIMIT's range to build a
1648 valid anti-range. */
1649 min = max = limit;
1652 /* If MIN and MAX cover the whole range for their type, then
1653 just use the original LIMIT. */
1654 if (INTEGRAL_TYPE_P (type)
1655 && vrp_val_is_min (min)
1656 && vrp_val_is_max (max))
1657 min = max = limit;
1659 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1660 min, max, vr_p->equiv);
/* VAR < LIMIT or VAR <= LIMIT: the result is [TYPE_MIN, MAX] with MAX
   derived from LIMIT (or LIMIT's range).  */
1662 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1664 min = TYPE_MIN_VALUE (type);
1666 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1667 max = limit;
1668 else
1670 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1671 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1672 LT_EXPR. */
1673 max = limit_vr->max;
1676 /* If the maximum value forces us to be out of bounds, simply punt.
1677 It would be pointless to try and do anything more since this
1678 all should be optimized away above us. */
1679 if ((cond_code == LT_EXPR
1680 && compare_values (max, min) == 0)
1681 || is_overflow_infinity (max))
1682 set_value_range_to_varying (vr_p);
1683 else
1685 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1686 if (cond_code == LT_EXPR)
1688 if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1689 && !TYPE_UNSIGNED (TREE_TYPE (max)))
1690 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1691 build_int_cst (TREE_TYPE (max), -1));
1692 else
1693 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1694 build_int_cst (TREE_TYPE (max), 1));
1695 if (EXPR_P (max))
1696 TREE_NO_WARNING (max) = 1;
1699 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
/* VAR > LIMIT or VAR >= LIMIT: the result is [MIN, TYPE_MAX] with MIN
   derived from LIMIT (or LIMIT's range); mirror of the case above.  */
1702 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1704 max = TYPE_MAX_VALUE (type);
1706 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1707 min = limit;
1708 else
1710 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1711 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1712 GT_EXPR. */
1713 min = limit_vr->min;
1716 /* If the minimum value forces us to be out of bounds, simply punt.
1717 It would be pointless to try and do anything more since this
1718 all should be optimized away above us. */
1719 if ((cond_code == GT_EXPR
1720 && compare_values (min, max) == 0)
1721 || is_overflow_infinity (min))
1722 set_value_range_to_varying (vr_p);
1723 else
1725 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1726 if (cond_code == GT_EXPR)
1728 if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1729 && !TYPE_UNSIGNED (TREE_TYPE (min)))
1730 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1731 build_int_cst (TREE_TYPE (min), -1));
1732 else
1733 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1734 build_int_cst (TREE_TYPE (min), 1));
1735 if (EXPR_P (min))
1736 TREE_NO_WARNING (min) = 1;
1739 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1742 else
1743 gcc_unreachable ();
1745 /* Finally intersect the new range with what we already know about var. */
1746 vrp_intersect_ranges (vr_p, get_value_range (var));
1749 /* Extract value range information from an ASSERT_EXPR EXPR and store
1750 it in *VR_P. */
1752 static void
1753 extract_range_from_assert (value_range *vr_p, tree expr)
1755 tree var = ASSERT_EXPR_VAR (expr);
1756 tree cond = ASSERT_EXPR_COND (expr);
1757 tree limit, op;
1758 enum tree_code cond_code;
1759 gcc_assert (COMPARISON_CLASS_P (cond));
1761 /* Find VAR in the ASSERT_EXPR conditional. */
1762 if (var == TREE_OPERAND (cond, 0)
1763 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1764 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1766 /* If the predicate is of the form VAR COMP LIMIT, then we just
1767 take LIMIT from the RHS and use the same comparison code. */
1768 cond_code = TREE_CODE (cond);
1769 limit = TREE_OPERAND (cond, 1);
1770 op = TREE_OPERAND (cond, 0);
1772 else
1774 /* If the predicate is of the form LIMIT COMP VAR, then we need
1775 to flip around the comparison code to create the proper range
1776 for VAR. */
1777 cond_code = swap_tree_comparison (TREE_CODE (cond));
1778 limit = TREE_OPERAND (cond, 0);
1779 op = TREE_OPERAND (cond, 1);
1781 extract_range_for_var_from_comparison_expr (var, cond_code, op,
1782 limit, vr_p);
1785 /* Extract range information from SSA name VAR and store it in VR. If
1786 VAR has an interesting range, use it. Otherwise, create the
1787 range [VAR, VAR] and return it. This is useful in situations where
1788 we may have conditionals testing values of VARYING names. For
1789 instance,
1791 x_3 = y_5;
1792 if (x_3 > y_5)
1795 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1796 always false. */
1798 static void
1799 extract_range_from_ssa_name (value_range *vr, tree var)
1801 value_range *var_vr = get_value_range (var);
1803 if (var_vr->type != VR_VARYING)
1804 copy_value_range (vr, var_vr);
1805 else
1806 set_value_range (vr, VR_RANGE, var, var, NULL);
1808 add_equivalence (&vr->equiv, var);
1812 /* Wrapper around int_const_binop. If the operation overflows and we
1813 are not using wrapping arithmetic, then adjust the result to be
1814 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1815 NULL_TREE if we need to use an overflow infinity representation but
1816 the type does not support it. */
/* NOTE(review): the extraction of this file dropped blank and brace-only
   lines and fused the upstream line numbers into each line; restore the
   formatting from the upstream source before compiling.  */
1818 static tree
1819 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1821 tree res;
1823 res = int_const_binop (code, val1, val2);
1825 /* If we are using unsigned arithmetic, operate symbolically
1826 on -INF and +INF as int_const_binop only handles signed overflow. */
1827 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1829 int checkz = compare_values (res, val1);
1830 bool overflow = false;
1832 /* Ensure that res = val1 [+*] val2 >= val1
1833 or that res = val1 - val2 <= val1. */
1834 if ((code == PLUS_EXPR
1835 && !(checkz == 1 || checkz == 0))
1836 || (code == MINUS_EXPR
1837 && !(checkz == 0 || checkz == -1)))
1839 overflow = true;
1841 /* Checking for multiplication overflow is done by dividing the
1842 output of the multiplication by the first input of the
1843 multiplication. If the result of that division operation is
1844 not equal to the second input of the multiplication, then the
1845 multiplication overflowed. */
1846 else if (code == MULT_EXPR && !integer_zerop (val1))
1848 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1849 res,
1850 val1);
1851 int check = compare_values (tmp, val2);
1853 if (check != 0)
1854 overflow = true;
1857 if (overflow)
1859 res = copy_node (res);
1860 TREE_OVERFLOW (res) = 1;
1864 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1865 /* If the signed operation wraps then int_const_binop has done
1866 everything we want. */
1868 /* Signed division of -1/0 overflows and by the time it gets here
1869 returns NULL_TREE. */
1870 else if (!res)
1871 return NULL_TREE;
1872 else if ((TREE_OVERFLOW (res)
1873 && !TREE_OVERFLOW (val1)
1874 && !TREE_OVERFLOW (val2))
1875 || is_overflow_infinity (val1)
1876 || is_overflow_infinity (val2))
1878 /* If the operation overflowed but neither VAL1 nor VAL2 are
1879 overflown, return -INF or +INF depending on the operation
1880 and the combination of signs of the operands. */
1881 int sgn1 = tree_int_cst_sgn (val1);
1882 int sgn2 = tree_int_cst_sgn (val2);
1884 if (needs_overflow_infinity (TREE_TYPE (res))
1885 && !supports_overflow_infinity (TREE_TYPE (res)))
1886 return NULL_TREE;
1888 /* We have to punt on adding infinities of different signs,
1889 since we can't tell what the sign of the result should be.
1890 Likewise for subtracting infinities of the same sign. */
1891 if (((code == PLUS_EXPR && sgn1 != sgn2)
1892 || (code == MINUS_EXPR && sgn1 == sgn2))
1893 && is_overflow_infinity (val1)
1894 && is_overflow_infinity (val2))
1895 return NULL_TREE;
1897 /* Don't try to handle division or shifting of infinities. */
1898 if ((code == TRUNC_DIV_EXPR
1899 || code == FLOOR_DIV_EXPR
1900 || code == CEIL_DIV_EXPR
1901 || code == EXACT_DIV_EXPR
1902 || code == ROUND_DIV_EXPR
1903 || code == RSHIFT_EXPR)
1904 && (is_overflow_infinity (val1)
1905 || is_overflow_infinity (val2)))
1906 return NULL_TREE;
1908 /* Notice that we only need to handle the restricted set of
1909 operations handled by extract_range_from_binary_expr.
1910 Among them, only multiplication, addition and subtraction
1911 can yield overflow without overflown operands because we
1912 are working with integral types only... except in the
1913 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1914 for division too. */
1916 /* For multiplication, the sign of the overflow is given
1917 by the comparison of the signs of the operands. */
1918 if ((code == MULT_EXPR && sgn1 == sgn2)
1919 /* For addition, the operands must be of the same sign
1920 to yield an overflow. Its sign is therefore that
1921 of one of the operands, for example the first. For
1922 infinite operands X + -INF is negative, not positive. */
1923 || (code == PLUS_EXPR
1924 && (sgn1 >= 0
1925 ? !is_negative_overflow_infinity (val2)
1926 : is_positive_overflow_infinity (val2)))
1927 /* For subtraction, non-infinite operands must be of
1928 different signs to yield an overflow. Its sign is
1929 therefore that of the first operand or the opposite of
1930 that of the second operand. A first operand of 0 counts
1931 as positive here, for the corner case 0 - (-INF), which
1932 overflows, but must yield +INF. For infinite operands 0
1933 - INF is negative, not positive. */
1934 || (code == MINUS_EXPR
1935 && (sgn1 >= 0
1936 ? !is_positive_overflow_infinity (val2)
1937 : is_negative_overflow_infinity (val2)))
1938 /* We only get in here with positive shift count, so the
1939 overflow direction is the same as the sign of val1.
1940 Actually rshift does not overflow at all, but we only
1941 handle the case of shifting overflowed -INF and +INF. */
1942 || (code == RSHIFT_EXPR
1943 && sgn1 >= 0)
1944 /* For division, the only case is -INF / -1 = +INF. */
1945 || code == TRUNC_DIV_EXPR
1946 || code == FLOOR_DIV_EXPR
1947 || code == CEIL_DIV_EXPR
1948 || code == EXACT_DIV_EXPR
1949 || code == ROUND_DIV_EXPR)
1950 return (needs_overflow_infinity (TREE_TYPE (res))
1951 ? positive_overflow_infinity (TREE_TYPE (res))
1952 : TYPE_MAX_VALUE (TREE_TYPE (res)));
1953 else
1954 return (needs_overflow_infinity (TREE_TYPE (res))
1955 ? negative_overflow_infinity (TREE_TYPE (res))
1956 : TYPE_MIN_VALUE (TREE_TYPE (res)));
1959 return res;
1963 /* For range VR compute two wide_int bitmasks. In *MAY_BE_NONZERO
1964 bitmask if some bit is unset, it means for all numbers in the range
1965 the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO
1966 bitmask if some bit is set, it means for all numbers in the range
1967 the bit is 1, otherwise it might be 0 or 1. */
1969 static bool
1970 zero_nonzero_bits_from_vr (const tree expr_type,
1971 value_range *vr,
1972 wide_int *may_be_nonzero,
1973 wide_int *must_be_nonzero)
1975 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
1976 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
1977 if (!range_int_cst_p (vr)
1978 || is_overflow_infinity (vr->min)
1979 || is_overflow_infinity (vr->max))
1980 return false;
1982 if (range_int_cst_singleton_p (vr))
1984 *may_be_nonzero = vr->min;
1985 *must_be_nonzero = *may_be_nonzero;
1987 else if (tree_int_cst_sgn (vr->min) >= 0
1988 || tree_int_cst_sgn (vr->max) < 0)
1990 wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
1991 *may_be_nonzero = wi::bit_or (vr->min, vr->max);
1992 *must_be_nonzero = wi::bit_and (vr->min, vr->max);
1993 if (xor_mask != 0)
1995 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
1996 may_be_nonzero->get_precision ());
1997 *may_be_nonzero = *may_be_nonzero | mask;
1998 *must_be_nonzero = must_be_nonzero->and_not (mask);
2002 return true;
2005 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
2006 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
2007 false otherwise. If *AR can be represented with a single range
2008 *VR1 will be VR_UNDEFINED. */
2010 static bool
2011 ranges_from_anti_range (value_range *ar,
2012 value_range *vr0, value_range *vr1)
2014 tree type = TREE_TYPE (ar->min);
2016 vr0->type = VR_UNDEFINED;
2017 vr1->type = VR_UNDEFINED;
2019 if (ar->type != VR_ANTI_RANGE
2020 || TREE_CODE (ar->min) != INTEGER_CST
2021 || TREE_CODE (ar->max) != INTEGER_CST
2022 || !vrp_val_min (type)
2023 || !vrp_val_max (type))
2024 return false;
2026 if (!vrp_val_is_min (ar->min))
2028 vr0->type = VR_RANGE;
2029 vr0->min = vrp_val_min (type);
2030 vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1));
2032 if (!vrp_val_is_max (ar->max))
2034 vr1->type = VR_RANGE;
2035 vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1));
2036 vr1->max = vrp_val_max (type);
2038 if (vr0->type == VR_UNDEFINED)
2040 *vr0 = *vr1;
2041 vr1->type = VR_UNDEFINED;
2044 return vr0->type != VR_UNDEFINED;
2047 /* Helper to extract a value-range *VR for a multiplicative operation
2048 *VR0 CODE *VR1. */
/* NOTE(review): the extraction of this file dropped blank and brace-only
   lines and fused the upstream line numbers into each line; restore the
   formatting from the upstream source before compiling.  */
2050 static void
2051 extract_range_from_multiplicative_op_1 (value_range *vr,
2052 enum tree_code code,
2053 value_range *vr0, value_range *vr1)
2055 enum value_range_type type;
2056 tree val[4];
2057 size_t i;
2058 tree min, max;
2059 bool sop;
2060 int cmp;
2062 /* Multiplications, divisions and shifts are a bit tricky to handle,
2063 depending on the mix of signs we have in the two ranges, we
2064 need to operate on different values to get the minimum and
2065 maximum values for the new range. One approach is to figure
2066 out all the variations of range combinations and do the
2067 operations.
2069 However, this involves several calls to compare_values and it
2070 is pretty convoluted. It's simpler to do the 4 operations
2071 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
2072 MAX1) and then figure the smallest and largest values to form
2073 the new range. */
2074 gcc_assert (code == MULT_EXPR
2075 || code == TRUNC_DIV_EXPR
2076 || code == FLOOR_DIV_EXPR
2077 || code == CEIL_DIV_EXPR
2078 || code == EXACT_DIV_EXPR
2079 || code == ROUND_DIV_EXPR
2080 || code == RSHIFT_EXPR
2081 || code == LSHIFT_EXPR);
2082 gcc_assert ((vr0->type == VR_RANGE
2083 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2084 && vr0->type == vr1->type);
2086 type = vr0->type;
2088 /* Compute the 4 cross operations. */
2089 sop = false;
2090 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2091 if (val[0] == NULL_TREE)
2092 sop = true;
2094 if (vr1->max == vr1->min)
2095 val[1] = NULL_TREE;
2096 else
2098 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2099 if (val[1] == NULL_TREE)
2100 sop = true;
2103 if (vr0->max == vr0->min)
2104 val[2] = NULL_TREE;
2105 else
2107 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2108 if (val[2] == NULL_TREE)
2109 sop = true;
2112 if (vr0->min == vr0->max || vr1->min == vr1->max)
2113 val[3] = NULL_TREE;
2114 else
2116 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2117 if (val[3] == NULL_TREE)
2118 sop = true;
2121 if (sop)
2123 set_value_range_to_varying (vr);
2124 return;
2127 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2128 of VAL[i]. */
/* A NULL entry in VAL[] marks a cross product that was skipped above
   because it duplicates another (singleton operand range).  */
2129 min = val[0];
2130 max = val[0];
2131 for (i = 1; i < 4; i++)
2133 if (!is_gimple_min_invariant (min)
2134 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2135 || !is_gimple_min_invariant (max)
2136 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2137 break;
2139 if (val[i])
2141 if (!is_gimple_min_invariant (val[i])
2142 || (TREE_OVERFLOW (val[i])
2143 && !is_overflow_infinity (val[i])))
2145 /* If we found an overflowed value, set MIN and MAX
2146 to it so that we set the resulting range to
2147 VARYING. */
2148 min = max = val[i];
2149 break;
2152 if (compare_values (val[i], min) == -1)
2153 min = val[i];
2155 if (compare_values (val[i], max) == 1)
2156 max = val[i];
2160 /* If either MIN or MAX overflowed, then set the resulting range to
2161 VARYING. But we do accept an overflow infinity
2162 representation. */
2163 if (min == NULL_TREE
2164 || !is_gimple_min_invariant (min)
2165 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2166 || max == NULL_TREE
2167 || !is_gimple_min_invariant (max)
2168 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2170 set_value_range_to_varying (vr);
2171 return;
2174 /* We punt if:
2175 1) [-INF, +INF]
2176 2) [-INF, +-INF(OVF)]
2177 3) [+-INF(OVF), +INF]
2178 4) [+-INF(OVF), +-INF(OVF)]
2179 We learn nothing when we have INF and INF(OVF) on both sides.
2180 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2181 overflow. */
2182 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2183 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2185 set_value_range_to_varying (vr);
2186 return;
2189 cmp = compare_values (min, max);
2190 if (cmp == -2 || cmp == 1)
2192 /* If the new range has its limits swapped around (MIN > MAX),
2193 then the operation caused one of them to wrap around, mark
2194 the new range VARYING. */
2195 set_value_range_to_varying (vr);
2197 else
2198 set_value_range (vr, type, min, max, NULL);
/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.

   This is the workhorse of VRP's binary-expression handling: it
   dispatches on CODE and computes the tightest VR_RANGE/VR_ANTI_RANGE
   it can justify, falling back to VR_VARYING whenever overflow,
   symbolic bounds or mixed range kinds make the result unprovable.
   Note that VR0_/VR1_ are not modified; local copies are taken so
   anti-ranges can be canonicalized in place.  */

static void
extract_range_from_binary_expr_1 (value_range *vr,
				  enum tree_code code, tree expr_type,
				  value_range *vr0_, value_range *vr1_)
{
  value_range vr0 = *vr0_, vr1 = *vr1_;
  value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
  enum value_range_type type;
  tree min = NULL_TREE, max = NULL_TREE;
  int cmp;

  /* Ranges are only meaningful for integral and pointer types here.  */
  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If both ranges are UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  /* If one of the ranges is UNDEFINED drop it to VARYING for the following
     code.  At some point we may want to special-case operations that
     have UNDEFINED result for all or some value-ranges of the not UNDEFINED
     operand.  */
  else if (vr0.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr0);
  else if (vr1.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr1);

  /* We get imprecise results from ranges_from_anti_range when
     code is EXACT_DIV_EXPR.  We could mask out bits in the resulting
     range, but then we also need to hack up vrp_meet.  It's just
     easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR.  */
  if (code == EXACT_DIV_EXPR
      && vr0.type == VR_ANTI_RANGE
      && vr0.min == vr0.max
      && integer_zerop (vr0.min))
    {
      /* Nonzero exactly divided by anything stays nonzero.  */
      set_value_range_to_nonnull (vr, expr_type);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    &vrtem1, vr1_);
	  /* Union of the two half-range results.  */
	  vrp_meet (vr, &vrres);
	}
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    vr0_, &vrtem1);
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.type;

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or symbolic range.  Similarly for
     divisions, MIN/MAX and PLUS/MINUS.

     TODO, we may be able to derive anti-ranges in some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != RSHIFT_EXPR
      && (vr0.type == VR_VARYING
	  || vr1.type == VR_VARYING
	  || vr0.type != vr1.type
	  || symbolic_range_p (&vr0)
	  || symbolic_range_p (&vr1)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Now evaluate the expression to determine the new range.  */
  if (POINTER_TYPE_P (expr_type))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
	{
	  /* For MIN/MAX expressions with pointers, we only care about
	     nullness, if both are non null, then the result is nonnull.
	     If both are null, then the result is null. Otherwise they
	     are varying.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == POINTER_PLUS_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == BIT_AND_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) || range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else
	set_value_range_to_varying (vr);

      return;
    }

  /* For integer ranges, apply the operation to each end of the
     range and see what we end up with.  */
  if (code == PLUS_EXPR || code == MINUS_EXPR)
    {
      const bool minus_p = (code == MINUS_EXPR);
      /* For MINUS the lower bound pairs vr0.min with vr1.max and the
	 upper bound pairs vr0.max with vr1.min, since subtracting a
	 larger value yields a smaller result.  */
      tree min_op0 = vr0.min;
      tree min_op1 = minus_p ? vr1.max : vr1.min;
      tree max_op0 = vr0.max;
      tree max_op1 = minus_p ? vr1.min : vr1.max;
      tree sym_min_op0 = NULL_TREE;
      tree sym_min_op1 = NULL_TREE;
      tree sym_max_op0 = NULL_TREE;
      tree sym_max_op1 = NULL_TREE;
      bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;

      /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
	 single-symbolic ranges, try to compute the precise resulting range,
	 but only if we know that this resulting range will also be constant
	 or single-symbolic.  */
      if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
	  && (TREE_CODE (min_op0) == INTEGER_CST
	      || (sym_min_op0
		  = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
	  && (TREE_CODE (min_op1) == INTEGER_CST
	      || (sym_min_op1
		  = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
	  && (!(sym_min_op0 && sym_min_op1)
	      || (sym_min_op0 == sym_min_op1
		  && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
	  && (TREE_CODE (max_op0) == INTEGER_CST
	      || (sym_max_op0
		  = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
	  && (TREE_CODE (max_op1) == INTEGER_CST
	      || (sym_max_op1
		  = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
	  && (!(sym_max_op0 && sym_max_op1)
	      || (sym_max_op0 == sym_max_op1
		  && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
	{
	  const signop sgn = TYPE_SIGN (expr_type);
	  const unsigned int prec = TYPE_PRECISION (expr_type);
	  wide_int type_min, type_max, wmin, wmax;
	  /* Overflow encoding used below: -1 = wrapped/under type_min,
	     +1 = wrapped/over type_max, 0 = bound is representable.  */
	  int min_ovf = 0;
	  int max_ovf = 0;

	  /* Get the lower and upper bounds of the type.  */
	  if (TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      type_min = wi::min_value (prec, sgn);
	      type_max = wi::max_value (prec, sgn);
	    }
	  else
	    {
	      type_min = vrp_val_min (expr_type);
	      type_max = vrp_val_max (expr_type);
	    }

	  /* Combine the lower bounds, if any.  */
	  if (min_op0 && min_op1)
	    {
	      if (minus_p)
		{
		  wmin = wi::sub (min_op0, min_op1);

		  /* Check for overflow.  */
		  if (wi::cmp (0, min_op1, sgn)
		      != wi::cmp (wmin, min_op0, sgn))
		    min_ovf = wi::cmp (min_op0, min_op1, sgn);
		}
	      else
		{
		  wmin = wi::add (min_op0, min_op1);

		  /* Check for overflow.  */
		  if (wi::cmp (min_op1, 0, sgn)
		      != wi::cmp (wmin, min_op0, sgn))
		    min_ovf = wi::cmp (min_op0, wmin, sgn);
		}
	    }
	  else if (min_op0)
	    wmin = min_op0;
	  else if (min_op1)
	    wmin = minus_p ? wi::neg (min_op1) : min_op1;
	  else
	    wmin = wi::shwi (0, prec);

	  /* Combine the upper bounds, if any.  */
	  if (max_op0 && max_op1)
	    {
	      if (minus_p)
		{
		  wmax = wi::sub (max_op0, max_op1);

		  /* Check for overflow.  */
		  if (wi::cmp (0, max_op1, sgn)
		      != wi::cmp (wmax, max_op0, sgn))
		    max_ovf = wi::cmp (max_op0, max_op1, sgn);
		}
	      else
		{
		  wmax = wi::add (max_op0, max_op1);

		  if (wi::cmp (max_op1, 0, sgn)
		      != wi::cmp (wmax, max_op0, sgn))
		    max_ovf = wi::cmp (max_op0, wmax, sgn);
		}
	    }
	  else if (max_op0)
	    wmax = max_op0;
	  else if (max_op1)
	    wmax = minus_p ? wi::neg (max_op1) : max_op1;
	  else
	    wmax = wi::shwi (0, prec);

	  /* Check for type overflow.  */
	  if (min_ovf == 0)
	    {
	      if (wi::cmp (wmin, type_min, sgn) == -1)
		min_ovf = -1;
	      else if (wi::cmp (wmin, type_max, sgn) == 1)
		min_ovf = 1;
	    }
	  if (max_ovf == 0)
	    {
	      if (wi::cmp (wmax, type_min, sgn) == -1)
		max_ovf = -1;
	      else if (wi::cmp (wmax, type_max, sgn) == 1)
		max_ovf = 1;
	    }

	  /* If we have overflow for the constant part and the resulting
	     range will be symbolic, drop to VR_VARYING.  */
	  if ((min_ovf && sym_min_op0 != sym_min_op1)
	      || (max_ovf && sym_max_op0 != sym_max_op1))
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }

	  if (TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      /* If overflow wraps, truncate the values and adjust the
		 range kind and bounds appropriately.  */
	      wide_int tmin = wide_int::from (wmin, prec, sgn);
	      wide_int tmax = wide_int::from (wmax, prec, sgn);
	      if (min_ovf == max_ovf)
		{
		  /* No overflow or both overflow or underflow.  The
		     range kind stays VR_RANGE.  */
		  min = wide_int_to_tree (expr_type, tmin);
		  max = wide_int_to_tree (expr_type, tmax);
		}
	      else if ((min_ovf == -1 && max_ovf == 0)
		       || (max_ovf == 1 && min_ovf == 0))
		{
		  /* Min underflow or max overflow.  The range kind
		     changes to VR_ANTI_RANGE.  */
		  bool covers = false;
		  wide_int tem = tmin;
		  type = VR_ANTI_RANGE;
		  /* The excluded hole is (old max, old min) after the
		     wrap: [tmax + 1, tmin - 1].  */
		  tmin = tmax + 1;
		  if (wi::cmp (tmin, tmax, sgn) < 0)
		    covers = true;
		  tmax = tem - 1;
		  if (wi::cmp (tmax, tem, sgn) > 0)
		    covers = true;
		  /* If the anti-range would cover nothing, drop to varying.
		     Likewise if the anti-range bounds are outside of the
		     types values.  */
		  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
		    {
		      set_value_range_to_varying (vr);
		      return;
		    }
		  min = wide_int_to_tree (expr_type, tmin);
		  max = wide_int_to_tree (expr_type, tmax);
		}
	      else
		{
		  /* Other underflow and/or overflow, drop to VR_VARYING.  */
		  set_value_range_to_varying (vr);
		  return;
		}
	    }
	  else
	    {
	      /* If overflow does not wrap, saturate to the types min/max
		 value.  */
	      if (min_ovf == -1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    min = negative_overflow_infinity (expr_type);
		  else
		    min = wide_int_to_tree (expr_type, type_min);
		}
	      else if (min_ovf == 1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    min = positive_overflow_infinity (expr_type);
		  else
		    min = wide_int_to_tree (expr_type, type_max);
		}
	      else
		min = wide_int_to_tree (expr_type, wmin);

	      if (max_ovf == -1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    max = negative_overflow_infinity (expr_type);
		  else
		    max = wide_int_to_tree (expr_type, type_min);
		}
	      else if (max_ovf == 1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    max = positive_overflow_infinity (expr_type);
		  else
		    max = wide_int_to_tree (expr_type, type_max);
		}
	      else
		max = wide_int_to_tree (expr_type, wmax);
	    }

	  /* Propagate overflow-infinity markers from the operands to
	     the result bounds.  */
	  if (needs_overflow_infinity (expr_type)
	      && supports_overflow_infinity (expr_type))
	    {
	      if ((min_op0 && is_negative_overflow_infinity (min_op0))
		  || (min_op1
		      && (minus_p
			  ? is_positive_overflow_infinity (min_op1)
			  : is_negative_overflow_infinity (min_op1))))
		min = negative_overflow_infinity (expr_type);
	      if ((max_op0 && is_positive_overflow_infinity (max_op0))
		  || (max_op1
		      && (minus_p
			  ? is_negative_overflow_infinity (max_op1)
			  : is_positive_overflow_infinity (max_op1))))
		max = positive_overflow_infinity (expr_type);
	    }

	  /* If the result lower bound is constant, we're done;
	     otherwise, build the symbolic lower bound.  */
	  if (sym_min_op0 == sym_min_op1)
	    ;	/* Symbols equal (or both absent): they cancelled, the
		   constant MIN already is the bound.  */
	  else if (sym_min_op0)
	    min = build_symbolic_expr (expr_type, sym_min_op0,
				       neg_min_op0, min);
	  else if (sym_min_op1)
	    min = build_symbolic_expr (expr_type, sym_min_op1,
				       neg_min_op1 ^ minus_p, min);

	  /* Likewise for the upper bound.  */
	  if (sym_max_op0 == sym_max_op1)
	    ;
	  else if (sym_max_op0)
	    max = build_symbolic_expr (expr_type, sym_max_op0,
				       neg_max_op0, max);
	  else if (sym_max_op1)
	    max = build_symbolic_expr (expr_type, sym_max_op1,
				       neg_max_op1 ^ minus_p, max);
	}
      else
	{
	  /* For other cases, for example if we have a PLUS_EXPR with two
	     VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
	     to compute a precise range for such a case.
	     ??? General even mixed range kind operations can be expressed
	     by for example transforming ~[3, 5] + [1, 2] to range-only
	     operations and a union primitive:
	       [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
	           [-INF+1, 4]     U    [6, +INF(OVF)]
	     though usually the union is not exactly representable with
	     a single range or anti-range as the above is
	     [-INF+1, +INF(OVF)] intersected with ~[5, 5]
	     but one could use a scheme similar to equivalences for this. */
	  set_value_range_to_varying (vr);
	  return;
	}
    }
  else if (code == MIN_EXPR
	   || code == MAX_EXPR)
    {
      if (vr0.type == VR_RANGE
	  && !symbolic_range_p (&vr0))
	{
	  type = VR_RANGE;
	  if (vr1.type == VR_RANGE
	      && !symbolic_range_p (&vr1))
	    {
	      /* For operations that make the resulting range directly
		 proportional to the original ranges, apply the operation to
		 the same end of each range.  */
	      min = vrp_int_const_binop (code, vr0.min, vr1.min);
	      max = vrp_int_const_binop (code, vr0.max, vr1.max);
	    }
	  else if (code == MIN_EXPR)
	    {
	      /* Only vr0 is usable: MIN is bounded above by vr0.max.  */
	      min = vrp_val_min (expr_type);
	      max = vr0.max;
	    }
	  else if (code == MAX_EXPR)
	    {
	      /* Only vr0 is usable: MAX is bounded below by vr0.min.  */
	      min = vr0.min;
	      max = vrp_val_max (expr_type);
	    }
	}
      else if (vr1.type == VR_RANGE
	       && !symbolic_range_p (&vr1))
	{
	  type = VR_RANGE;
	  if (code == MIN_EXPR)
	    {
	      min = vrp_val_min (expr_type);
	      max = vr1.max;
	    }
	  else if (code == MAX_EXPR)
	    {
	      min = vr1.min;
	      max = vrp_val_max (expr_type);
	    }
	}
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}
    }
  else if (code == MULT_EXPR)
    {
      /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
	 drop to varying.  This test requires 2*prec bits if both
	 operands are signed and 2*prec + 2 bits if either is not.  */

      signop sign = TYPE_SIGN (expr_type);
      unsigned int prec = TYPE_PRECISION (expr_type);

      if (range_int_cst_p (&vr0)
	  && range_int_cst_p (&vr1)
	  && TYPE_OVERFLOW_WRAPS (expr_type))
	{
	  /* Double-width integers so products cannot overflow.  */
	  typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
	  typedef generic_wide_int
	     <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
	  vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
	  vrp_int size = sizem1 + 1;

	  /* Extend the values using the sign of the result to PREC2.
	     From here on out, everthing is just signed math no matter
	     what the input types were.  */
	  vrp_int min0 = vrp_int_cst (vr0.min);
	  vrp_int max0 = vrp_int_cst (vr0.max);
	  vrp_int min1 = vrp_int_cst (vr1.min);
	  vrp_int max1 = vrp_int_cst (vr1.max);
	  /* Canonicalize the intervals.  */
	  if (sign == UNSIGNED)
	    {
	      /* Shift intervals whose midpoint is above size/2 down by
		 the modulus so they are centered around zero.  */
	      if (wi::ltu_p (size, min0 + max0))
		{
		  min0 -= size;
		  max0 -= size;
		}

	      if (wi::ltu_p (size, min1 + max1))
		{
		  min1 -= size;
		  max1 -= size;
		}
	    }

	  vrp_int prod0 = min0 * min1;
	  vrp_int prod1 = min0 * max1;
	  vrp_int prod2 = max0 * min1;
	  vrp_int prod3 = max0 * max1;

	  /* Sort the 4 products so that min is in prod0 and max is in
	     prod3.  */
	  /* min0min1 > max0max1 */
	  if (prod0 > prod3)
	    std::swap (prod0, prod3);

	  /* min0max1 > max0min1 */
	  if (prod1 > prod2)
	    std::swap (prod1, prod2);

	  if (prod0 > prod1)
	    std::swap (prod0, prod1);

	  if (prod2 > prod3)
	    std::swap (prod2, prod3);

	  /* diff = max - min.  */
	  prod2 = prod3 - prod0;
	  if (wi::geu_p (prod2, sizem1))
	    {
	      /* the range covers all values.  */
	      set_value_range_to_varying (vr);
	      return;
	    }

	  /* The following should handle the wrapping and selecting
	     VR_ANTI_RANGE for us.  */
	  min = wide_int_to_tree (expr_type, prod0);
	  max = wide_int_to_tree (expr_type, prod3);
	  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
	  return;
	}

      /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
	 drop to VR_VARYING.  It would take more effort to compute a
	 precise range for such a case.  For example, if we have
	 op0 == 65536 and op1 == 65536 with their ranges both being
	 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
	 we cannot claim that the product is in ~[0,0].  Note that we
	 are guaranteed to have vr0.type == vr1.type at this
	 point.  */
      if (vr0.type == VR_ANTI_RANGE
	  && !TYPE_OVERFLOW_UNDEFINED (expr_type))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
      return;
    }
  else if (code == RSHIFT_EXPR
	   || code == LSHIFT_EXPR)
    {
      /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
	 then drop to VR_VARYING.  Outside of this range we get undefined
	 behavior from the shift operation.  We cannot even trust
	 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
	 shifts, and the operation at the tree level may be widened.  */
      if (range_int_cst_p (&vr1)
	  && compare_tree_int (vr1.min, 0) >= 0
	  && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
	{
	  if (code == RSHIFT_EXPR)
	    {
	      /* Even if vr0 is VARYING or otherwise not usable, we can derive
		 useful ranges just from the shift count.  E.g.
		 x >> 63 for signed 64-bit x is always [-1, 0].  */
	      if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
		{
		  vr0.type = type = VR_RANGE;
		  vr0.min = vrp_val_min (expr_type);
		  vr0.max = vrp_val_max (expr_type);
		}
	      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
	      return;
	    }
	  /* We can map lshifts by constants to MULT_EXPR handling.  */
	  else if (code == LSHIFT_EXPR
		   && range_int_cst_singleton_p (&vr1))
	    {
	      bool saved_flag_wrapv;
	      value_range vr1p = VR_INITIALIZER;
	      vr1p.type = VR_RANGE;
	      /* Build the constant 1 << shift-count as the multiplier.  */
	      vr1p.min = (wide_int_to_tree
			  (expr_type,
			   wi::set_bit_in_zero (tree_to_shwi (vr1.min),
						TYPE_PRECISION (expr_type))));
	      vr1p.max = vr1p.min;
	      /* We have to use a wrapping multiply though as signed overflow
		 on lshifts is implementation defined in C89.  */
	      saved_flag_wrapv = flag_wrapv;
	      flag_wrapv = 1;
	      extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
						&vr0, &vr1p);
	      flag_wrapv = saved_flag_wrapv;
	      return;
	    }
	  else if (code == LSHIFT_EXPR
		   && range_int_cst_p (&vr0))
	    {
	      int prec = TYPE_PRECISION (expr_type);
	      int overflow_pos = prec;
	      int bound_shift;
	      wide_int low_bound, high_bound;
	      bool uns = TYPE_UNSIGNED (expr_type);
	      bool in_bounds = false;

	      if (!uns)
		overflow_pos -= 1;

	      bound_shift = overflow_pos - tree_to_shwi (vr1.max);
	      /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
		 overflow.  However, for that to happen, vr1.max needs to be
		 zero, which means vr1 is a singleton range of zero, which
		 means it should be handled by the previous LSHIFT_EXPR
		 if-clause.  */
	      wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
	      wide_int complement = ~(bound - 1);

	      if (uns)
		{
		  low_bound = bound;
		  high_bound = complement;
		  if (wi::ltu_p (vr0.max, low_bound))
		    {
		      /* [5, 6] << [1, 2] == [10, 24].  */
		      /* We're shifting out only zeroes, the value increases
			 monotonically.  */
		      in_bounds = true;
		    }
		  else if (wi::ltu_p (high_bound, vr0.min))
		    {
		      /* [0xffffff00, 0xffffffff] << [1, 2]
			 == [0xfffffc00, 0xfffffffe].  */
		      /* We're shifting out only ones, the value decreases
			 monotonically.  */
		      in_bounds = true;
		    }
		}
	      else
		{
		  /* [-1, 1] << [1, 2] == [-4, 4].  */
		  low_bound = complement;
		  high_bound = bound;
		  if (wi::lts_p (vr0.max, high_bound)
		      && wi::lts_p (low_bound, vr0.min))
		    {
		      /* For non-negative numbers, we're shifting out only
			 zeroes, the value increases monotonically.
			 For negative numbers, we're shifting out only ones, the
			 value decreases monotomically.  */
		      in_bounds = true;
		    }
		}

	      if (in_bounds)
		{
		  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
		  return;
		}
	    }
	}
      set_value_range_to_varying (vr);
      return;
    }
  else if (code == TRUNC_DIV_EXPR
	   || code == FLOOR_DIV_EXPR
	   || code == CEIL_DIV_EXPR
	   || code == EXACT_DIV_EXPR
	   || code == ROUND_DIV_EXPR)
    {
      if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
	{
	  /* For division, if op1 has VR_RANGE but op0 does not, something
	     can be deduced just from that range.  Say [min, max] / [4, max]
	     gives [min / 4, max / 4] range.  */
	  if (vr1.type == VR_RANGE
	      && !symbolic_range_p (&vr1)
	      && range_includes_zero_p (vr1.min, vr1.max) == 0)
	    {
	      vr0.type = type = VR_RANGE;
	      vr0.min = vrp_val_min (expr_type);
	      vr0.max = vrp_val_max (expr_type);
	    }
	  else
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }
	}

      /* For divisions, if flag_non_call_exceptions is true, we must
	 not eliminate a division by zero.  */
      if (cfun->can_throw_non_call_exceptions
	  && (vr1.type != VR_RANGE
	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* For divisions, if op0 is VR_RANGE, we can deduce a range
	 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
	 include 0.  */
      if (vr0.type == VR_RANGE
	  && (vr1.type != VR_RANGE
	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
	{
	  tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
	  int cmp;

	  min = NULL_TREE;
	  max = NULL_TREE;
	  if (TYPE_UNSIGNED (expr_type)
	      || value_range_nonnegative_p (&vr1))
	    {
	      /* For unsigned division or when divisor is known
		 to be non-negative, the range has to cover
		 all numbers from 0 to max for positive max
		 and all numbers from min to 0 for negative min.  */
	      cmp = compare_values (vr0.max, zero);
	      if (cmp == -1)
		{
		  /* When vr0.max < 0, vr1.min != 0 and value
		     ranges for dividend and divisor are available.  */
		  if (vr1.type == VR_RANGE
		      && !symbolic_range_p (&vr0)
		      && !symbolic_range_p (&vr1)
		      && compare_values (vr1.min, zero) != 0)
		    max = int_const_binop (code, vr0.max, vr1.min);
		  else
		    max = zero;
		}
	      else if (cmp == 0 || cmp == 1)
		max = vr0.max;
	      else
		type = VR_VARYING;
	      cmp = compare_values (vr0.min, zero);
	      if (cmp == 1)
		{
		  /* For unsigned division when value ranges for dividend
		     and divisor are available.  */
		  if (vr1.type == VR_RANGE
		      && !symbolic_range_p (&vr0)
		      && !symbolic_range_p (&vr1)
		      && compare_values (vr1.max, zero) != 0)
		    min = int_const_binop (code, vr0.min, vr1.max);
		  else
		    min = zero;
		}
	      else if (cmp == 0 || cmp == -1)
		min = vr0.min;
	      else
		type = VR_VARYING;
	    }
	  else
	    {
	      /* Otherwise the range is -max .. max or min .. -min
		 depending on which bound is bigger in absolute value,
		 as the division can change the sign.  */
	      abs_extent_range (vr, vr0.min, vr0.max);
	      return;
	    }
	  if (type == VR_VARYING)
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }
	}
      else if (!symbolic_range_p (&vr0) && !symbolic_range_p (&vr1))
	{
	  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
	  return;
	}
    }
  else if (code == TRUNC_MOD_EXPR)
    {
      /* X % 0 is undefined, so a null divisor range yields UNDEFINED.  */
      if (range_is_null (&vr1))
	{
	  set_value_range_to_undefined (vr);
	  return;
	}
      /* ABS (A % B) < ABS (B) and either
	 0 <= A % B <= A or A <= A % B <= 0.  */
      type = VR_RANGE;
      signop sgn = TYPE_SIGN (expr_type);
      unsigned int prec = TYPE_PRECISION (expr_type);
      wide_int wmin, wmax, tmp;
      wide_int zero = wi::zero (prec);
      wide_int one = wi::one (prec);
      if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
	{
	  wmax = wi::sub (vr1.max, one);
	  if (sgn == SIGNED)
	    {
	      /* A negative divisor widens the bound: |B| - 1 where B may
		 be vr1.min.  */
	      tmp = wi::sub (wi::minus_one (prec), vr1.min);
	      wmax = wi::smax (wmax, tmp);
	    }
	}
      else
	{
	  wmax = wi::max_value (prec, sgn);
	  /* X % INT_MIN may be INT_MAX.  */
	  if (sgn == UNSIGNED)
	    wmax = wmax - one;
	}

      if (sgn == UNSIGNED)
	wmin = zero;
      else
	{
	  wmin = -wmax;
	  if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
	    {
	      tmp = vr0.min;
	      if (wi::gts_p (tmp, zero))
		tmp = zero;
	      wmin = wi::smax (wmin, tmp);
	    }
	}

      if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
	{
	  tmp = vr0.max;
	  if (sgn == SIGNED && wi::neg_p (tmp))
	    tmp = zero;
	  wmax = wi::min (wmax, tmp, sgn);
	}

      min = wide_int_to_tree (expr_type, wmin);
      max = wide_int_to_tree (expr_type, wmax);
    }
  else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
    {
      bool int_cst_range0, int_cst_range1;
      wide_int may_be_nonzero0, may_be_nonzero1;
      wide_int must_be_nonzero0, must_be_nonzero1;

      /* Derive known-zero / known-one bit masks from each operand range.  */
      int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
						  &may_be_nonzero0,
						  &must_be_nonzero0);
      int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
						  &may_be_nonzero1,
						  &must_be_nonzero1);

      type = VR_RANGE;
      if (code == BIT_AND_EXPR)
	{
	  min = wide_int_to_tree (expr_type,
				  must_be_nonzero0 & must_be_nonzero1);
	  wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
	  /* If both input ranges contain only negative values we can
	     truncate the result range maximum to the minimum of the
	     input range maxima.  */
	  if (int_cst_range0 && int_cst_range1
	      && tree_int_cst_sgn (vr0.max) < 0
	      && tree_int_cst_sgn (vr1.max) < 0)
	    {
	      wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
	      wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
	    }
	  /* If either input range contains only non-negative values
	     we can truncate the result range maximum to the respective
	     maximum of the input range.  */
	  if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
	    wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
	  if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
	    wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
	  max = wide_int_to_tree (expr_type, wmax);
	  cmp = compare_values (min, max);
	  /* PR68217: In case of signed & sign-bit-CST should
	     result in [-INF, 0] instead of [-INF, INF].  */
	  if (cmp == -2 || cmp == 1)
	    {
	      wide_int sign_bit
		= wi::set_bit_in_zero (TYPE_PRECISION (expr_type) - 1,
				       TYPE_PRECISION (expr_type));
	      if (!TYPE_UNSIGNED (expr_type)
		  && ((value_range_constant_singleton (&vr0)
		       && !wi::cmps (vr0.min, sign_bit))
		      || (value_range_constant_singleton (&vr1)
			  && !wi::cmps (vr1.min, sign_bit))))
		{
		  min = TYPE_MIN_VALUE (expr_type);
		  max = build_int_cst (expr_type, 0);
		}
	    }
	}
      else if (code == BIT_IOR_EXPR)
	{
	  max = wide_int_to_tree (expr_type,
				  may_be_nonzero0 | may_be_nonzero1);
	  wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
	  /* If the input ranges contain only positive values we can
	     truncate the minimum of the result range to the maximum
	     of the input range minima.  */
	  if (int_cst_range0 && int_cst_range1
	      && tree_int_cst_sgn (vr0.min) >= 0
	      && tree_int_cst_sgn (vr1.min) >= 0)
	    {
	      wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
	      wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
	    }
	  /* If either input range contains only negative values
	     we can truncate the minimum of the result range to the
	     respective minimum range.  */
	  if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
	    wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
	  if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
	    wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
	  min = wide_int_to_tree (expr_type, wmin);
	}
      else if (code == BIT_XOR_EXPR)
	{
	  /* A result bit is certainly zero where both inputs must be
	     set or both must be clear; certainly one where exactly one
	     side must be set and the other cannot be.  */
	  wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
				       | ~(may_be_nonzero0 | may_be_nonzero1));
	  wide_int result_one_bits
	    = (must_be_nonzero0.and_not (may_be_nonzero1)
	       | must_be_nonzero1.and_not (may_be_nonzero0));
	  max = wide_int_to_tree (expr_type, ~result_zero_bits);
	  min = wide_int_to_tree (expr_type, result_one_bits);
	  /* If the range has all positive or all negative values the
	     result is better than VARYING.  */
	  if (tree_int_cst_sgn (min) < 0
	      || tree_int_cst_sgn (max) >= 0)
	    ;
	  else
	    max = min = NULL_TREE;
	}
    }
  else
    gcc_unreachable ();

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  But we do accept an overflow infinity representation.  */
  if (min == NULL_TREE
      || (TREE_OVERFLOW_P (min) && !is_overflow_infinity (min))
      || max == NULL_TREE
      || (TREE_OVERFLOW_P (max) && !is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt if:
     1) [-INF, +INF]
     2) [-INF, +-INF(OVF)]
     3) [+-INF(OVF), +INF]
     4) [+-INF(OVF), +-INF(OVF)]
     We learn nothing when we have INF and INF(OVF) on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF] without
     overflow.  */
  if ((vrp_val_is_min (min) || is_overflow_infinity (min))
      && (vrp_val_is_max (max) || is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
	 then the operation caused one of them to wrap around, mark
	 the new range VARYING.  */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}
/* Extract range information from a binary expression OP0 CODE OP1 based on
   the ranges of each of its operands with resulting type EXPR_TYPE.
   The resulting range is stored in *VR.  */

static void
extract_range_from_binary_expr (value_range *vr,
				enum tree_code code,
				tree expr_type, tree op0, tree op1)
{
  value_range vr0 = VR_INITIALIZER;
  value_range vr1 = VR_INITIALIZER;

  /* Get value ranges for each operand.  For constant operands, create
     a new value range with the operand to simplify processing.  */
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *(get_value_range (op0));
  else if (is_gimple_min_invariant (op0))
    set_value_range_to_value (&vr0, op0, NULL);
  else
    set_value_range_to_varying (&vr0);

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *(get_value_range (op1));
  else if (is_gimple_min_invariant (op1))
    set_value_range_to_value (&vr1, op1, NULL);
  else
    set_value_range_to_varying (&vr1);

  /* First attempt: combine the two operand ranges directly.  */
  extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);

  /* Try harder for PLUS and MINUS if the range of one operand is symbolic
     and based on the other operand, for example if it was deduced from a
     symbolic comparison.  When a bound of the range of the first operand
     is invariant, we set the corresponding bound of the new range to INF
     in order to avoid recursing on the range of the second operand.  */
  if (vr->type == VR_VARYING
      && (code == PLUS_EXPR || code == MINUS_EXPR)
      && TREE_CODE (op1) == SSA_NAME
      && vr0.type == VR_RANGE
      && symbolic_range_based_on_p (&vr0, op1))
    {
      const bool minus_p = (code == MINUS_EXPR);
      value_range n_vr1 = VR_INITIALIZER;

      /* Try with VR0 and [-INF, OP1].  */
      if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
	set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);

      /* Try with VR0 and [OP1, +INF].  */
      else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
	set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);

      /* Try with VR0 and [OP1, OP1].  */
      else
	set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);

      extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
    }

  /* Mirror of the block above with the roles of OP0 and OP1 swapped.  */
  if (vr->type == VR_VARYING
      && (code == PLUS_EXPR || code == MINUS_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && vr1.type == VR_RANGE
      && symbolic_range_based_on_p (&vr1, op0))
    {
      const bool minus_p = (code == MINUS_EXPR);
      value_range n_vr0 = VR_INITIALIZER;

      /* Try with [-INF, OP0] and VR1.  */
      if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
	set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);

      /* Try with [OP0, +INF] and VR1.  */
      else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
	set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);

      /* Try with [OP0, OP0] and VR1.  */
      else
	set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);

      extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
    }

  /* If we didn't derive a range for MINUS_EXPR, and
     op1's range is ~[op0,op0] or vice-versa, then we
     can derive a non-null range.  This happens often for
     pointer subtraction.  */
  if (vr->type == VR_VARYING
      && code == MINUS_EXPR
      && TREE_CODE (op0) == SSA_NAME
      && ((vr0.type == VR_ANTI_RANGE
	   && vr0.min == op1
	   && vr0.min == vr0.max)
	  || (vr1.type == VR_ANTI_RANGE
	      && vr1.min == op0
	      && vr1.min == vr1.max)))
    set_value_range_to_nonnull (vr, TREE_TYPE (op0));
}
/* Extract range information from a unary operation CODE based on
   the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
   The resulting range is stored in *VR.  */

void
extract_range_from_unary_expr (value_range *vr,
			       enum tree_code code, tree type,
			       value_range *vr0_, tree op0_type)
{
  /* Work on a copy of the operand range since it may be modified below
     (e.g. widened for varying conversions).  */
  value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;

  /* VRP only operates on integral and pointer types.  */
  if (!(INTEGRAL_TYPE_P (op0_type)
	|| POINTER_TYPE_P (op0_type))
      || !(INTEGRAL_TYPE_P (type)
	   || POINTER_TYPE_P (type)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If VR0 is UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }

  /* Handle operations that we express in terms of others.  */
  if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
    {
      /* PAREN_EXPR and OBJ_TYPE_REF are simple copies.  */
      copy_value_range (vr, &vr0);
      return;
    }
  else if (code == NEGATE_EXPR)
    {
      /* -X is simply 0 - X, so re-use existing code that also handles
	 anti-ranges fine.  */
      value_range zero = VR_INITIALIZER;
      set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
      return;
    }
  else if (code == BIT_NOT_EXPR)
    {
      /* ~X is simply -1 - X, so re-use existing code that also handles
	 anti-ranges fine.  */
      value_range minusone = VR_INITIALIZER;
      set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
					type, &minusone, &vr0);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express op ~[]  as (op []') U (op []'').  The recursion is
     bounded: the pieces produced by ranges_from_anti_range are plain
     ranges, so the recursive calls take the non-anti-range paths.  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range vrres = VR_INITIALIZER;
	  extract_range_from_unary_expr (&vrres, code, type,
					 &vrtem1, op0_type);
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  if (CONVERT_EXPR_CODE_P (code))
    {
      tree inner_type = op0_type;
      tree outer_type = type;

      /* If the expression evaluates to a pointer, we are only interested in
	 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).  */
      if (POINTER_TYPE_P (type))
	{
	  if (range_is_nonnull (&vr0))
	    set_value_range_to_nonnull (vr, type);
	  else if (range_is_null (&vr0))
	    set_value_range_to_null (vr, type);
	  else
	    set_value_range_to_varying (vr);
	  return;
	}

      /* If VR0 is varying and we increase the type precision, assume
	 a full range for the following transformation.  */
      if (vr0.type == VR_VARYING
	  && INTEGRAL_TYPE_P (inner_type)
	  && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
	{
	  vr0.type = VR_RANGE;
	  vr0.min = TYPE_MIN_VALUE (inner_type);
	  vr0.max = TYPE_MAX_VALUE (inner_type);
	}

      /* If VR0 is a constant range or anti-range and the conversion is
	 not truncating we can convert the min and max values and
	 canonicalize the resulting range.  Otherwise we can do the
	 conversion if the size of the range is less than what the
	 precision of the target type can represent and the range is
	 not an anti-range.  */
      if ((vr0.type == VR_RANGE
	   || vr0.type == VR_ANTI_RANGE)
	  && TREE_CODE (vr0.min) == INTEGER_CST
	  && TREE_CODE (vr0.max) == INTEGER_CST
	  && (!is_overflow_infinity (vr0.min)
	      || (vr0.type == VR_RANGE
		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
		  && needs_overflow_infinity (outer_type)
		  && supports_overflow_infinity (outer_type)))
	  && (!is_overflow_infinity (vr0.max)
	      || (vr0.type == VR_RANGE
		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
		  && needs_overflow_infinity (outer_type)
		  && supports_overflow_infinity (outer_type)))
	  && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
	      || (vr0.type == VR_RANGE
		  /* (max - min) >> outer_prec == 0 means the span of the
		     range fits in the precision of the target type.  */
		  && integer_zerop (int_const_binop (RSHIFT_EXPR,
		       int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
			 size_int (TYPE_PRECISION (outer_type)))))))
	{
	  tree new_min, new_max;
	  if (is_overflow_infinity (vr0.min))
	    new_min = negative_overflow_infinity (outer_type);
	  else
	    new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
				      0, false);
	  if (is_overflow_infinity (vr0.max))
	    new_max = positive_overflow_infinity (outer_type);
	  else
	    new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
				      0, false);
	  set_and_canonicalize_value_range (vr, vr0.type,
					    new_min, new_max, NULL);
	  return;
	}

      set_value_range_to_varying (vr);
      return;
    }
  else if (code == ABS_EXPR)
    {
      tree min, max;
      int cmp;

      /* Pass through vr0 in the easy cases.  */
      if (TYPE_UNSIGNED (type)
	  || value_range_nonnegative_p (&vr0))
	{
	  copy_value_range (vr, &vr0);
	  return;
	}

      /* For the remaining varying or symbolic ranges we can't do anything
	 useful.  */
      if (vr0.type == VR_VARYING
	  || symbolic_range_p (&vr0))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
         useful range.  */
      if (!TYPE_OVERFLOW_UNDEFINED (type)
	  && ((vr0.type == VR_RANGE
	       && vrp_val_is_min (vr0.min))
	      || (vr0.type == VR_ANTI_RANGE
		  && !vrp_val_is_min (vr0.min))))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* ABS_EXPR may flip the range around, if the original range
	 included negative values.  */
      if (is_overflow_infinity (vr0.min))
	min = positive_overflow_infinity (type);
      else if (!vrp_val_is_min (vr0.min))
	min = fold_unary_to_constant (code, type, vr0.min);
      else if (!needs_overflow_infinity (type))
	min = TYPE_MAX_VALUE (type);
      else if (supports_overflow_infinity (type))
	min = positive_overflow_infinity (type);
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      if (is_overflow_infinity (vr0.max))
	max = positive_overflow_infinity (type);
      else if (!vrp_val_is_min (vr0.max))
	max = fold_unary_to_constant (code, type, vr0.max);
      else if (!needs_overflow_infinity (type))
	max = TYPE_MAX_VALUE (type);
      else if (supports_overflow_infinity (type)
	       /* We shouldn't generate [+INF, +INF] as set_value_range
		  doesn't like this and ICEs.  */
	       && !is_positive_overflow_infinity (min))
	max = positive_overflow_infinity (type);
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      cmp = compare_values (min, max);

      /* If a VR_ANTI_RANGEs contains zero, then we have
	 ~[-INF, min(MIN, MAX)].  */
      if (vr0.type == VR_ANTI_RANGE)
	{
	  if (range_includes_zero_p (vr0.min, vr0.max) == 1)
	    {
	      /* Take the lower of the two values.  */
	      if (cmp != 1)
		max = min;

	      /* Create ~[-INF, min (abs(MIN), abs(MAX))]
	         or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
		 flag_wrapv is set and the original anti-range doesn't include
	         TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE.  */
	      if (TYPE_OVERFLOW_WRAPS (type))
		{
		  tree type_min_value = TYPE_MIN_VALUE (type);

		  min = (vr0.min != type_min_value
			 ? int_const_binop (PLUS_EXPR, type_min_value,
					    build_int_cst (TREE_TYPE (type_min_value), 1))
			 : type_min_value);
		}
	      else
		{
		  if (overflow_infinity_range_p (&vr0))
		    min = negative_overflow_infinity (type);
		  else
		    min = TYPE_MIN_VALUE (type);
		}
	    }
	  else
	    {
	      /* All else has failed, so create the range [0, INF], even for
	         flag_wrapv since TYPE_MIN_VALUE is in the original
	         anti-range.  */
	      vr0.type = VR_RANGE;
	      min = build_int_cst (type, 0);
	      if (needs_overflow_infinity (type))
		{
		  if (supports_overflow_infinity (type))
		    max = positive_overflow_infinity (type);
		  else
		    {
		      set_value_range_to_varying (vr);
		      return;
		    }
		}
	      else
		max = TYPE_MAX_VALUE (type);
	    }
	}

      /* If the range contains zero then we know that the minimum value in the
         range will be zero.  */
      else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
	{
	  if (cmp == 1)
	    max = min;
	  min = build_int_cst (type, 0);
	}
      else
	{
	  /* If the range was reversed, swap MIN and MAX.  */
	  if (cmp == 1)
	    std::swap (min, max);
	}

      cmp = compare_values (min, max);
      if (cmp == -2 || cmp == 1)
	{
	  /* If the new range has its limits swapped around (MIN > MAX),
	     then the operation caused one of them to wrap around, mark
	     the new range VARYING.  */
	  set_value_range_to_varying (vr);
	}
      else
	set_value_range (vr, vr0.type, min, max, NULL);
      return;
    }

  /* For unhandled operations fall back to varying.  */
  set_value_range_to_varying (vr);
  return;
}
3632 /* Extract range information from a unary expression CODE OP0 based on
3633 the range of its operand with resulting type TYPE.
3634 The resulting range is stored in *VR. */
3636 static void
3637 extract_range_from_unary_expr (value_range *vr, enum tree_code code,
3638 tree type, tree op0)
3640 value_range vr0 = VR_INITIALIZER;
3642 /* Get value ranges for the operand. For constant operands, create
3643 a new value range with the operand to simplify processing. */
3644 if (TREE_CODE (op0) == SSA_NAME)
3645 vr0 = *(get_value_range (op0));
3646 else if (is_gimple_min_invariant (op0))
3647 set_value_range_to_value (&vr0, op0, NULL);
3648 else
3649 set_value_range_to_varying (&vr0);
3651 extract_range_from_unary_expr (vr, code, type, &vr0, TREE_TYPE (op0));
3655 /* Extract range information from a conditional expression STMT based on
3656 the ranges of each of its operands and the expression code. */
3658 static void
3659 extract_range_from_cond_expr (value_range *vr, gassign *stmt)
3661 tree op0, op1;
3662 value_range vr0 = VR_INITIALIZER;
3663 value_range vr1 = VR_INITIALIZER;
3665 /* Get value ranges for each operand. For constant operands, create
3666 a new value range with the operand to simplify processing. */
3667 op0 = gimple_assign_rhs2 (stmt);
3668 if (TREE_CODE (op0) == SSA_NAME)
3669 vr0 = *(get_value_range (op0));
3670 else if (is_gimple_min_invariant (op0))
3671 set_value_range_to_value (&vr0, op0, NULL);
3672 else
3673 set_value_range_to_varying (&vr0);
3675 op1 = gimple_assign_rhs3 (stmt);
3676 if (TREE_CODE (op1) == SSA_NAME)
3677 vr1 = *(get_value_range (op1));
3678 else if (is_gimple_min_invariant (op1))
3679 set_value_range_to_value (&vr1, op1, NULL);
3680 else
3681 set_value_range_to_varying (&vr1);
3683 /* The resulting value range is the union of the operand ranges */
3684 copy_value_range (vr, &vr0);
3685 vrp_meet (vr, &vr1);
3689 /* Extract range information from a comparison expression EXPR based
3690 on the range of its operand and the expression code. */
3692 static void
3693 extract_range_from_comparison (value_range *vr, enum tree_code code,
3694 tree type, tree op0, tree op1)
3696 bool sop = false;
3697 tree val;
3699 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3700 NULL);
3702 /* A disadvantage of using a special infinity as an overflow
3703 representation is that we lose the ability to record overflow
3704 when we don't have an infinity. So we have to ignore a result
3705 which relies on overflow. */
3707 if (val && !is_overflow_infinity (val) && !sop)
3709 /* Since this expression was found on the RHS of an assignment,
3710 its type may be different from _Bool. Convert VAL to EXPR's
3711 type. */
3712 val = fold_convert (type, val);
3713 if (is_gimple_min_invariant (val))
3714 set_value_range_to_value (vr, val, vr->equiv);
3715 else
3716 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3718 else
3719 /* The result of a comparison is always true or false. */
3720 set_value_range_to_truthvalue (vr, type);
/* Helper function for simplify_internal_call_using_ranges and
   extract_range_basic.  Return true if OP0 SUBCODE OP1 for
   SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
   always overflow.  Set *OVF to true if it is known to always
   overflow.  */

static bool
check_for_binary_op_overflow (enum tree_code subcode, tree type,
			      tree op0, tree op1, bool *ovf)
{
  value_range vr0 = VR_INITIALIZER;
  value_range vr1 = VR_INITIALIZER;
  /* Obtain ranges for both operands; non-SSA, non-constant operands
     are treated as varying.  */
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *get_value_range (op0);
  else if (TREE_CODE (op0) == INTEGER_CST)
    set_value_range_to_value (&vr0, op0, NULL);
  else
    set_value_range_to_varying (&vr0);

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *get_value_range (op1);
  else if (TREE_CODE (op1) == INTEGER_CST)
    set_value_range_to_value (&vr1, op1, NULL);
  else
    set_value_range_to_varying (&vr1);

  /* Fall back to the full range of the operand's type when the range
     is not a usable constant range.  */
  if (!range_int_cst_p (&vr0)
      || TREE_OVERFLOW (vr0.min)
      || TREE_OVERFLOW (vr0.max))
    {
      vr0.min = vrp_val_min (TREE_TYPE (op0));
      vr0.max = vrp_val_max (TREE_TYPE (op0));
    }
  if (!range_int_cst_p (&vr1)
      || TREE_OVERFLOW (vr1.min)
      || TREE_OVERFLOW (vr1.max))
    {
      vr1.min = vrp_val_min (TREE_TYPE (op1));
      vr1.max = vrp_val_max (TREE_TYPE (op1));
    }
  /* Probe the extreme corners of the operand ranges.  For MINUS_EXPR
     the minimum of the result pairs vr0.min with vr1.max (and vice
     versa), hence the swapped second operand.  */
  *ovf = arith_overflowed_p (subcode, type, vr0.min,
			     subcode == MINUS_EXPR ? vr1.max : vr1.min);
  if (arith_overflowed_p (subcode, type, vr0.max,
			  subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
    return false;
  /* Multiplication is not monotonic in either operand when signs mix,
     so the other two corners must agree as well.  */
  if (subcode == MULT_EXPR)
    {
      if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
	  || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
	return false;
    }
  if (*ovf)
    {
      /* So far we found that there is an overflow on the boundaries.
	 That doesn't prove that there is an overflow even for all values
	 in between the boundaries.  For that compute widest_int range
	 of the result and see if it doesn't overlap the range of
	 type.  */
      widest_int wmin, wmax;
      widest_int w[4];
      int i;
      w[0] = wi::to_widest (vr0.min);
      w[1] = wi::to_widest (vr0.max);
      w[2] = wi::to_widest (vr1.min);
      w[3] = wi::to_widest (vr1.max);
      /* Enumerate the four corner combinations: i & 1 selects
	 vr0.min/vr0.max, (i & 2) / 2 selects vr1.min/vr1.max.  */
      for (i = 0; i < 4; i++)
	{
	  widest_int wt;
	  switch (subcode)
	    {
	    case PLUS_EXPR:
	      wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
	      break;
	    case MINUS_EXPR:
	      wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
	      break;
	    case MULT_EXPR:
	      wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  if (i == 0)
	    {
	      wmin = wt;
	      wmax = wt;
	    }
	  else
	    {
	      wmin = wi::smin (wmin, wt);
	      wmax = wi::smax (wmax, wt);
	    }
	}
      /* The result of op0 CODE op1 is known to be in range
	 [wmin, wmax].  */
      widest_int wtmin = wi::to_widest (vrp_val_min (type));
      widest_int wtmax = wi::to_widest (vrp_val_max (type));
      /* If all values in [wmin, wmax] are smaller than
	 [wtmin, wtmax] or all are larger than [wtmin, wtmax],
	 the arithmetic operation will always overflow.  */
      if (wmax < wtmin || wmin > wtmax)
	return true;
      return false;
    }
  return true;
}
/* Try to derive a nonnegative or nonzero range out of STMT relying
   primarily on generic routines in fold in conjunction with range data.
   Store the result in *VR.  */

static void
extract_range_basic (value_range *vr, gimple *stmt)
{
  bool sop = false;
  tree type = gimple_expr_type (stmt);

  if (is_gimple_call (stmt))
    {
      tree arg;
      int mini, maxi, zerov = 0, prec;
      /* SUBCODE is set to the arithmetic code of a UBSAN_CHECK_* call
	 so the common handling after the switch can process it.  */
      enum tree_code subcode = ERROR_MARK;
      combined_fn cfn = gimple_call_combined_fn (stmt);

      switch (cfn)
	{
	case CFN_BUILT_IN_CONSTANT_P:
	  /* If the call is __builtin_constant_p and the argument is a
	     function parameter resolve it to false.  This avoids bogus
	     array bound warnings.
	     ??? We could do this as early as inlining is finished.  */
	  arg = gimple_call_arg (stmt, 0);
	  if (TREE_CODE (arg) == SSA_NAME
	      && SSA_NAME_IS_DEFAULT_DEF (arg)
	      && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL
	      && cfun->after_inlining)
	    {
	      set_value_range_to_null (vr, type);
	      return;
	    }
	  break;
	  /* Both __builtin_ffs* and __builtin_popcount return
	     [0, prec].  */
	CASE_CFN_FFS:
	CASE_CFN_POPCOUNT:
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then ffs or popcount
		 are non-zero.  */
	      if (((vr0->type == VR_RANGE
		    && range_includes_zero_p (vr0->min, vr0->max) == 0)
		   || (vr0->type == VR_ANTI_RANGE
		       && range_includes_zero_p (vr0->min, vr0->max) == 1))
		  && !is_overflow_infinity (vr0->min)
		  && !is_overflow_infinity (vr0->max))
		mini = 1;
	      /* If some high bits are known to be zero,
		 we can decrease the maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !operand_less_p (vr0->min,
				      build_zero_cst (TREE_TYPE (vr0->min)))
		  && !is_overflow_infinity (vr0->max))
		maxi = tree_floor_log2 (vr0->max) + 1;
	    }
	  goto bitop_builtin;
	  /* __builtin_parity* returns [0, 1].  */
	CASE_CFN_PARITY:
	  mini = 0;
	  maxi = 1;
	  goto bitop_builtin;
	  /* __builtin_c[lt]z* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     On many targets where the CLZ RTL or optab value is defined
	     for 0 the value is prec, so include that in the range
	     by default.  */
	CASE_CFN_CLZ:
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
	      != CODE_FOR_nothing
	      && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
					    zerov)
	      /* Handle only the single common value.  */
	      && zerov != prec)
	    /* Magic value to give up, unless vr0 proves
	       arg is non-zero.  */
	    mini = -2;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range *vr0 = get_value_range (arg);
	      /* From clz of VR_RANGE minimum we can compute
		 result maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->min) == INTEGER_CST
		  && !is_overflow_infinity (vr0->min))
		{
		  maxi = prec - 1 - tree_floor_log2 (vr0->min);
		  if (maxi != prec)
		    mini = 0;
		}
	      else if (vr0->type == VR_ANTI_RANGE
		       && integer_zerop (vr0->min)
		       && !is_overflow_infinity (vr0->min))
		{
		  maxi = prec - 1;
		  mini = 0;
		}
	      if (mini == -2)
		break;
	      /* From clz of VR_RANGE maximum we can compute
		 result minimum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !is_overflow_infinity (vr0->max))
		{
		  mini = prec - 1 - tree_floor_log2 (vr0->max);
		  if (mini == prec)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_ctz* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     If there is a ctz optab for this mode and
	     CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
	     otherwise just assume 0 won't be seen.  */
	CASE_CFN_CTZ:
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
	      != CODE_FOR_nothing
	      && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
					    zerov))
	    {
	      /* Handle only the two common values.  */
	      if (zerov == -1)
		mini = -1;
	      else if (zerov == prec)
		maxi = prec;
	      else
		/* Magic value to give up, unless vr0 proves
		   arg is non-zero.  */
		mini = -2;
	    }
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then use [0, prec - 1].  */
	      if (((vr0->type == VR_RANGE
		    && integer_nonzerop (vr0->min))
		   || (vr0->type == VR_ANTI_RANGE
		       && integer_zerop (vr0->min)))
		  && !is_overflow_infinity (vr0->min))
		{
		  mini = 0;
		  maxi = prec - 1;
		}
	      /* If some high bits are known to be zero,
		 we can decrease the result maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !is_overflow_infinity (vr0->max))
		{
		  maxi = tree_floor_log2 (vr0->max);
		  /* For vr0 [0, 0] give up.  */
		  if (maxi == -1)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_clrsb* returns [0, prec-1].  */
	CASE_CFN_CLRSB:
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  goto bitop_builtin;
	bitop_builtin:
	  /* Common exit for the bit-operation builtins above: record
	     the computed [mini, maxi] range.  */
	  set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
			   build_int_cst (type, maxi), NULL);
	  return;
	case CFN_UBSAN_CHECK_ADD:
	  subcode = PLUS_EXPR;
	  break;
	case CFN_UBSAN_CHECK_SUB:
	  subcode = MINUS_EXPR;
	  break;
	case CFN_UBSAN_CHECK_MUL:
	  subcode = MULT_EXPR;
	  break;
	case CFN_GOACC_DIM_SIZE:
	case CFN_GOACC_DIM_POS:
	  /* Optimizing these two internal functions helps the loop
	     optimizer eliminate outer comparisons.  Size is [1,N]
	     and pos is [0,N-1].  */
	  {
	    bool is_pos = cfn == CFN_GOACC_DIM_POS;
	    int axis = oacc_get_ifn_dim_arg (stmt);
	    int size = oacc_get_fn_dim_size (current_function_decl, axis);

	    if (!size)
	      /* If it's dynamic, the backend might know a hardware
		 limitation.  */
	      size = targetm.goacc.dim_limit (axis);

	    tree type = TREE_TYPE (gimple_call_lhs (stmt));
	    set_value_range (vr, VR_RANGE,
			     build_int_cst (type, is_pos ? 0 : 1),
			     size ? build_int_cst (type, size - is_pos)
				  : vrp_val_max (type), NULL);
	  }
	  return;
	case CFN_BUILT_IN_STRLEN:
	  /* strlen results fit in [0, PTRDIFF_MAX - 1] when the lhs
	     precision matches ptrdiff_t.  */
	  if (tree lhs = gimple_call_lhs (stmt))
	    if (ptrdiff_type_node
		&& (TYPE_PRECISION (ptrdiff_type_node)
		    == TYPE_PRECISION (TREE_TYPE (lhs))))
	      {
		tree type = TREE_TYPE (lhs);
		tree max = vrp_val_max (ptrdiff_type_node);
		wide_int wmax = wi::to_wide (max, TYPE_PRECISION (TREE_TYPE (max)));
		tree range_min = build_zero_cst (type);
		tree range_max = wide_int_to_tree (type, wmax - 1);
		set_value_range (vr, VR_RANGE, range_min, range_max, NULL);
		return;
	      }
	  break;
	default:
	  break;
	}
      if (subcode != ERROR_MARK)
	{
	  bool saved_flag_wrapv = flag_wrapv;
	  /* Pretend the arithmetics is wrapping.  If there is
	     any overflow, we'll complain, but will actually do
	     wrapping operation.  */
	  flag_wrapv = 1;
	  extract_range_from_binary_expr (vr, subcode, type,
					  gimple_call_arg (stmt, 0),
					  gimple_call_arg (stmt, 1));
	  flag_wrapv = saved_flag_wrapv;

	  /* If for both arguments vrp_valueize returned non-NULL,
	     this should have been already folded and if not, it
	     wasn't folded because of overflow.  Avoid removing the
	     UBSAN_CHECK_* calls in that case.  */
	  if (vr->type == VR_RANGE
	      && (vr->min == vr->max
		  || operand_equal_p (vr->min, vr->max, 0)))
	    set_value_range_to_varying (vr);
	  return;
	}
    }
  /* Handle extraction of the two results (result of arithmetics and
     a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW
     internal function.  */
  else if (is_gimple_assign (stmt)
	   && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
	       || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
	   && INTEGRAL_TYPE_P (type))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      tree op = gimple_assign_rhs1 (stmt);
      if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
	{
	  gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
	  if (is_gimple_call (g) && gimple_call_internal_p (g))
	    {
	      enum tree_code subcode = ERROR_MARK;
	      switch (gimple_call_internal_fn (g))
		{
		case IFN_ADD_OVERFLOW:
		  subcode = PLUS_EXPR;
		  break;
		case IFN_SUB_OVERFLOW:
		  subcode = MINUS_EXPR;
		  break;
		case IFN_MUL_OVERFLOW:
		  subcode = MULT_EXPR;
		  break;
		default:
		  break;
		}
	      if (subcode != ERROR_MARK)
		{
		  tree op0 = gimple_call_arg (g, 0);
		  tree op1 = gimple_call_arg (g, 1);
		  if (code == IMAGPART_EXPR)
		    {
		      /* IMAGPART_EXPR extracts the overflow flag.  */
		      bool ovf = false;
		      if (check_for_binary_op_overflow (subcode, type,
							op0, op1, &ovf))
			set_value_range_to_value (vr,
						  build_int_cst (type, ovf),
						  NULL);
		      else if (TYPE_PRECISION (type) == 1
			       && !TYPE_UNSIGNED (type))
			set_value_range_to_varying (vr);
		      else
			set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
					 build_int_cst (type, 1), NULL);
		    }
		  else if (types_compatible_p (type, TREE_TYPE (op0))
			   && types_compatible_p (type, TREE_TYPE (op1)))
		    {
		      bool saved_flag_wrapv = flag_wrapv;
		      /* Pretend the arithmetics is wrapping.  If there is
			 any overflow, IMAGPART_EXPR will be set.  */
		      flag_wrapv = 1;
		      extract_range_from_binary_expr (vr, subcode, type,
						      op0, op1);
		      flag_wrapv = saved_flag_wrapv;
		    }
		  else
		    {
		      /* Operand types differ from the result type; convert
			 the operand ranges first.  */
		      value_range vr0 = VR_INITIALIZER;
		      value_range vr1 = VR_INITIALIZER;
		      bool saved_flag_wrapv = flag_wrapv;
		      /* Pretend the arithmetics is wrapping.  If there is
			 any overflow, IMAGPART_EXPR will be set.  */
		      flag_wrapv = 1;
		      extract_range_from_unary_expr (&vr0, NOP_EXPR,
						     type, op0);
		      extract_range_from_unary_expr (&vr1, NOP_EXPR,
						     type, op1);
		      extract_range_from_binary_expr_1 (vr, subcode, type,
							&vr0, &vr1);
		      flag_wrapv = saved_flag_wrapv;
		    }
		  return;
		}
	    }
	}
    }
  /* Fallback: ask fold whether the statement is known nonnegative or
     nonzero.  */
  if (INTEGRAL_TYPE_P (type)
      && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
    set_value_range_to_nonnegative (vr, type,
				    sop || stmt_overflow_infinity (stmt));
  else if (vrp_stmt_computes_nonzero (stmt, &sop)
	   && !sop)
    set_value_range_to_nonnull (vr, type);
  else
    set_value_range_to_varying (vr);
}
/* Try to compute a useful range out of assignment STMT and store it
   in *VR.  Dispatches on the RHS code of the assignment; the branch
   order matters since e.g. an SSA_NAME RHS is also a GIMPLE_SINGLE_RHS.  */

static void
extract_range_from_assignment (value_range *vr, gassign *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);

  if (code == ASSERT_EXPR)
    extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
  else if (code == SSA_NAME)
    extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
  else if (TREE_CODE_CLASS (code) == tcc_binary)
    extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
				    gimple_expr_type (stmt),
				    gimple_assign_rhs1 (stmt),
				    gimple_assign_rhs2 (stmt));
  else if (TREE_CODE_CLASS (code) == tcc_unary)
    extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
				   gimple_expr_type (stmt),
				   gimple_assign_rhs1 (stmt));
  else if (code == COND_EXPR)
    extract_range_from_cond_expr (vr, stmt);
  else if (TREE_CODE_CLASS (code) == tcc_comparison)
    extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
				   gimple_expr_type (stmt),
				   gimple_assign_rhs1 (stmt),
				   gimple_assign_rhs2 (stmt));
  else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
	   && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
    set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
  else
    set_value_range_to_varying (vr);

  /* If the code-specific extraction learned nothing, fall back to the
     generic fold-based heuristics.  */
  if (vr->type == VR_VARYING)
    extract_range_basic (vr, stmt);
}
/* Given a range VR, a LOOP and a variable VAR, determine whether it
   would be profitable to adjust VR using scalar evolution information
   for VAR.  If so, update VR with the new limits.

   STMT is the statement at which VAR's range is being queried; it is
   used when checking whether the evolution may wrap.  On exit VR is
   either untouched (when no adjustment was possible) or replaced with
   a VR_RANGE derived from the chrec of VAR in LOOP.  */

static void
adjust_range_with_scev (value_range *vr, struct loop *loop,
			gimple *stmt, tree var)
{
  tree init, step, chrec, tmin, tmax, min, max, type, tem;
  enum ev_direction dir;

  /* TODO.  Don't adjust anti-ranges.  An anti-range may provide
     better opportunities than a regular range, but I'm not sure.  */
  if (vr->type == VR_ANTI_RANGE)
    return;

  chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));

  /* Like in PR19590, scev can return a constant function.  */
  if (is_gimple_min_invariant (chrec))
    {
      set_value_range_to_value (vr, chrec, vr->equiv);
      return;
    }

  /* Only polynomial chrecs ({init, +, step}_loop) are handled below.  */
  if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
    return;

  init = initial_condition_in_loop_num (chrec, loop->num);
  /* If INIT/STEP are SSA names with singleton constant ranges, use the
     constants instead — they are easier for compare_values below.  */
  tem = op_with_constant_singleton_value_range (init);
  if (tem)
    init = tem;
  step = evolution_part_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (step);
  if (tem)
    step = tem;

  /* If STEP is symbolic, we can't know whether INIT will be the
     minimum or maximum value in the range.  Also, unless INIT is
     a simple expression, compare_values and possibly other functions
     in tree-vrp won't be able to handle it.  */
  if (step == NULL_TREE
      || !is_gimple_min_invariant (step)
      || !valid_value_p (init))
    return;

  dir = scev_direction (chrec);
  if (/* Do not adjust ranges if we do not know whether the iv increases
	 or decreases,  ... */
      dir == EV_DIR_UNKNOWN
      /* ... or if it may wrap.  */
      || scev_probably_wraps_p (NULL_TREE, init, step, stmt,
				get_chrec_loop (chrec), true))
    return;

  /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
     negative_overflow_infinity and positive_overflow_infinity,
     because we have concluded that the loop probably does not
     wrap.  */

  type = TREE_TYPE (var);
  if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
    tmin = lower_bound_in_type (type, type);
  else
    tmin = TYPE_MIN_VALUE (type);
  if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
    tmax = upper_bound_in_type (type, type);
  else
    tmax = TYPE_MAX_VALUE (type);

  /* Try to use estimated number of iterations for the loop to constrain the
     final value in the evolution.  */
  if (TREE_CODE (step) == INTEGER_CST
      && is_gimple_val (init)
      && (TREE_CODE (init) != SSA_NAME
	  || get_value_range (init)->type == VR_RANGE))
    {
      widest_int nit;

      /* We are only entering here for loop header PHI nodes, so using
	 the number of latch executions is the correct thing to use.  */
      if (max_loop_iterations (loop, &nit))
	{
	  value_range maxvr = VR_INITIALIZER;
	  signop sgn = TYPE_SIGN (TREE_TYPE (step));
	  bool overflow;

	  widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
				     &overflow);
	  /* If the multiplication overflowed we can't do a meaningful
	     adjustment.  Likewise if the result doesn't fit in the type
	     of the induction variable.  For a signed type we have to
	     check whether the result has the expected signedness which
	     is that of the step as number of iterations is unsigned.  */
	  if (!overflow
	      && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
	      && (sgn == UNSIGNED
		  || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
	    {
	      tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
	      extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
					      TREE_TYPE (init), init, tem);
	      /* Likewise if the addition did.  */
	      if (maxvr.type == VR_RANGE)
		{
		  value_range initvr = VR_INITIALIZER;

		  if (TREE_CODE (init) == SSA_NAME)
		    initvr = *(get_value_range (init));
		  else if (is_gimple_min_invariant (init))
		    set_value_range_to_value (&initvr, init, NULL);
		  else
		    return;

		  /* Check if init + nit * step overflows.  Though we checked
		     scev {init, step}_loop doesn't wrap, it is not enough
		     because the loop may exit immediately.  Overflow could
		     happen in the plus expression in this case.  */
		  if ((dir == EV_DIR_DECREASES
		       && (is_negative_overflow_infinity (maxvr.min)
			   || compare_values (maxvr.min, initvr.min) != -1))
		      || (dir == EV_DIR_GROWS
			  && (is_positive_overflow_infinity (maxvr.max)
			      || compare_values (maxvr.max, initvr.max) != 1)))
		    return;

		  tmin = maxvr.min;
		  tmax = maxvr.max;
		}
	    }
	}
    }

  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    {
      min = tmin;
      max = tmax;

      /* For VARYING or UNDEFINED ranges, just about anything we get
	 from scalar evolutions should be better.  */

      if (dir == EV_DIR_DECREASES)
	max = init;
      else
	min = init;
    }
  else if (vr->type == VR_RANGE)
    {
      min = vr->min;
      max = vr->max;

      if (dir == EV_DIR_DECREASES)
	{
	  /* INIT is the maximum value.  If INIT is lower than VR->MAX
	     but no smaller than VR->MIN, set VR->MAX to INIT.  */
	  if (compare_values (init, max) == -1)
	    max = init;

	  /* According to the loop information, the variable does not
	     overflow.  If we think it does, probably because of an
	     overflow due to arithmetic on a different INF value,
	     reset now.  */
	  if (is_negative_overflow_infinity (min)
	      || compare_values (min, tmin) == -1)
	    min = tmin;
	}
      else
	{
	  /* If INIT is bigger than VR->MIN, set VR->MIN to INIT.  */
	  if (compare_values (init, min) == 1)
	    min = init;

	  if (is_positive_overflow_infinity (max)
	      || compare_values (tmax, max) == -1)
	    max = tmax;
	}
    }
  else
    return;

  /* If we just created an invalid range with the minimum
     greater than the maximum, we fail conservatively.
     This should happen only in unreachable
     parts of code, or for invalid programs.  */
  if (compare_values (min, max) == 1
      || (is_negative_overflow_infinity (min)
	  && is_positive_overflow_infinity (max)))
    return;

  /* Even for valid range info, sometimes overflow flag will leak in.
     As GIMPLE IL should have no constants with TREE_OVERFLOW set, we
     drop them except for +-overflow_infinity which still need special
     handling in vrp pass.  */
  if (TREE_OVERFLOW_P (min)
      && ! is_negative_overflow_infinity (min))
    min = drop_tree_overflow (min);
  if (TREE_OVERFLOW_P (max)
      && ! is_positive_overflow_infinity (max))
    max = drop_tree_overflow (max);

  set_value_range (vr, VR_RANGE, min, max, vr->equiv);
}
/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:

   - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
     all the values in the ranges.

   - Return BOOLEAN_FALSE_NODE if the comparison always returns false.

   - Return NULL_TREE if it is not always possible to determine the
     value of the comparison.

   Also set *STRICT_OVERFLOW_P to indicate whether a range with an
   overflow infinity was used in the test.

   NOTE(review): the -2/-1/0/1/2 values tested below follow the
   convention of compare_values/compare_values_warnv (-2 appears to
   mean "not comparable at compile time" — confirm against their
   definitions earlier in this file).  */

static tree
compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
		bool *strict_overflow_p)
{
  /* VARYING or UNDEFINED ranges cannot be compared.  */
  if (vr0->type == VR_VARYING
      || vr0->type == VR_UNDEFINED
      || vr1->type == VR_VARYING
      || vr1->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
    {
      /* If both are anti-ranges, then we cannot compute any
	 comparison.  */
      if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
	return NULL_TREE;

      /* These comparisons are never statically computable.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* Equality can be computed only between a range and an
	 anti-range.  ~[VAL1, VAL2] == [VAL1, VAL2] is always false.  */
      if (vr0->type == VR_RANGE)
	{
	  /* To simplify processing, make VR0 the anti-range.  */
	  value_range *tmp = vr0;
	  vr0 = vr1;
	  vr1 = tmp;
	}

      gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);

      /* The anti-range and range exclude/cover exactly the same set of
	 values only when both endpoints match.  */
      if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
	  && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  if (!usable_range_p (vr0, strict_overflow_p)
      || !usable_range_p (vr1, strict_overflow_p))
    return NULL_TREE;

  /* Simplify processing.  If COMP is GT_EXPR or GE_EXPR, switch the
     operands around and change the comparison code.  */
  if (comp == GT_EXPR || comp == GE_EXPR)
    {
      comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
      std::swap (vr0, vr1);
    }

  if (comp == EQ_EXPR)
    {
      /* Equality may only be computed if both ranges represent
	 exactly one value.  */
      if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
	{
	  int cmp_min = compare_values_warnv (vr0->min, vr1->min,
					      strict_overflow_p);
	  int cmp_max = compare_values_warnv (vr0->max, vr1->max,
					      strict_overflow_p);
	  if (cmp_min == 0 && cmp_max == 0)
	    return boolean_true_node;
	  else if (cmp_min != -2 && cmp_max != -2)
	    return boolean_false_node;
	}
      /* If [V0_MIN, V0_MAX] < [V1_MIN, V1_MAX] then V0 != V1.  */
      else if (compare_values_warnv (vr0->min, vr1->max,
				     strict_overflow_p) == 1
	       || compare_values_warnv (vr1->min, vr0->max,
					strict_overflow_p) == 1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      int cmp1, cmp2;

      /* If VR0 is completely to the left or completely to the right
	 of VR1, they are always different.  Notice that we need to
	 make sure that both comparisons yield similar results to
	 avoid comparing values that cannot be compared at
	 compile-time.  */
      cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
	return boolean_true_node;

      /* If VR0 and VR1 represent a single value and are identical,
	 return false.  */
      else if (compare_values_warnv (vr0->min, vr0->max,
				     strict_overflow_p) == 0
	       && compare_values_warnv (vr1->min, vr1->max,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->min, vr1->min,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->max, vr1->max,
					strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      else
	return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR0 is to the left of VR1, return true.  */
      tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR0 is to the right of VR1, return false.  */
      tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* GT_EXPR/GE_EXPR were canonicalized to LT_EXPR/LE_EXPR above, and
     every remaining comparison code was handled, so this point is
     unreachable for valid inputs.  */
  gcc_unreachable ();
}
/* Given a value range VR, a value VAL and a comparison code COMP, return
   BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
   values in VR.  Return BOOLEAN_FALSE_NODE if the comparison
   always returns false.  Return NULL_TREE if it is not always
   possible to determine the value of the comparison.  Also set
   *STRICT_OVERFLOW_P to indicate whether a range with an overflow
   infinity was used in the test.  */

static tree
compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
			  bool *strict_overflow_p)
{
  /* VARYING and UNDEFINED ranges carry no usable information.  */
  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr->type == VR_ANTI_RANGE)
    {
      /* For anti-ranges, the only predicates that we can compute at
	 compile time are equality and inequality.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2.  */
      if (value_inside_range (val, vr->min, vr->max) == 1)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  if (!usable_range_p (vr, strict_overflow_p))
    return NULL_TREE;

  if (comp == EQ_EXPR)
    {
      /* EQ_EXPR may only be computed if VR represents exactly
	 one value.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
	{
	  int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
	  if (cmp == 0)
	    return boolean_true_node;
	  else if (cmp == -1 || cmp == 1 || cmp == 2)
	    return boolean_false_node;
	}
      /* Otherwise, equality is impossible when VAL lies strictly
	 outside VR's endpoints.  */
      else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
	       || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      /* If VAL is not inside VR, then they are always different.  */
      if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
	  || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
	return boolean_true_node;

      /* If VR represents exactly one value equal to VAL, then return
	 false.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR is to the left of VAL, return true.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the right of VAL, return false.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }
  else if (comp == GT_EXPR || comp == GE_EXPR)
    {
      int tst;

      /* If VR is to the right of VAL, return true.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == GT_EXPR && tst == 1)
	  || (comp == GE_EXPR && (tst == 0 || tst == 1)))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the left of VAL, return false.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == GT_EXPR && (tst == -1 || tst == 0))
	  || (comp == GE_EXPR && tst == -1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* All comparison codes handled above end in a return; any other
     code reaching here is a caller bug.  */
  gcc_unreachable ();
}
4715 /* Debugging dumps. */
4717 void dump_value_range (FILE *, const value_range *);
4718 void debug_value_range (value_range *);
4719 void dump_all_value_ranges (FILE *);
4720 void debug_all_value_ranges (void);
4721 void dump_vr_equiv (FILE *, bitmap);
4722 void debug_vr_equiv (bitmap);
/* Dump value range VR to FILE.

   Output format (no trailing newline):
     "[]" for a null pointer, "UNDEFINED"/"VARYING" for those lattice
     states, and otherwise "[min, max]" (or "~[min, max]" for an
     anti-range), with -INF/+INF printed for type extremes and
     -INF(OVF)/+INF(OVF) for overflow infinities, followed by the
     equivalence set when one is recorded.  */

void
dump_value_range (FILE *file, const value_range *vr)
{
  if (vr == NULL)
    fprintf (file, "[]");
  else if (vr->type == VR_UNDEFINED)
    fprintf (file, "UNDEFINED");
  else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
    {
      tree type = TREE_TYPE (vr->min);

      fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");

      /* Minimum bound: overflow infinity, type minimum, or the value.  */
      if (is_negative_overflow_infinity (vr->min))
	fprintf (file, "-INF(OVF)");
      else if (INTEGRAL_TYPE_P (type)
	       && !TYPE_UNSIGNED (type)
	       && vrp_val_is_min (vr->min))
	fprintf (file, "-INF");
      else
	print_generic_expr (file, vr->min, 0);

      fprintf (file, ", ");

      /* Maximum bound, symmetrically.  */
      if (is_positive_overflow_infinity (vr->max))
	fprintf (file, "+INF(OVF)");
      else if (INTEGRAL_TYPE_P (type)
	       && vrp_val_is_max (vr->max))
	fprintf (file, "+INF");
      else
	print_generic_expr (file, vr->max, 0);

      fprintf (file, "]");

      if (vr->equiv)
	{
	  bitmap_iterator bi;
	  unsigned i, c = 0;

	  fprintf (file, " EQUIVALENCES: { ");

	  EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
	    {
	      print_generic_expr (file, ssa_name (i), 0);
	      fprintf (file, " ");
	      c++;
	    }

	  fprintf (file, "} (%u elements)", c);
	}
    }
  else if (vr->type == VR_VARYING)
    fprintf (file, "VARYING");
  else
    fprintf (file, "INVALID RANGE");
}
4785 /* Dump value range VR to stderr. */
4787 DEBUG_FUNCTION void
4788 debug_value_range (value_range *vr)
4790 dump_value_range (stderr, vr);
4791 fprintf (stderr, "\n");
/* Dump value ranges of all SSA_NAMEs to FILE.

   Iterates over the global VR_VALUE table (indexed by SSA name
   version); entries with no computed range are skipped.  */

void
dump_all_value_ranges (FILE *file)
{
  size_t i;

  for (i = 0; i < num_vr_values; i++)
    {
      if (vr_value[i])
	{
	  print_generic_expr (file, ssa_name (i), 0);
	  fprintf (file, ": ");
	  dump_value_range (file, vr_value[i]);
	  fprintf (file, "\n");
	}
    }

  fprintf (file, "\n");
}
/* Dump all value ranges to stderr.  Convenience wrapper for use from
   a debugger.  */

DEBUG_FUNCTION void
debug_all_value_ranges (void)
{
  dump_all_value_ranges (stderr);
}
/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
   create a new SSA name N and return the assertion assignment
   'N = ASSERT_EXPR <V, V OP W>'.

   The returned statement is not yet inserted into the IL; the caller
   is responsible for placing it.  */

static gimple *
build_assert_expr_for (tree cond, tree v)
{
  tree a;
  gassign *assertion;

  gcc_assert (TREE_CODE (v) == SSA_NAME
	      && COMPARISON_CLASS_P (cond));

  a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
  /* LHS is NULL_TREE here; create_new_def_for below supplies the new
     SSA name as the definition of this statement.  */
  assertion = gimple_build_assign (NULL_TREE, a);

  /* The new ASSERT_EXPR, creates a new SSA name that replaces the
     operand of the ASSERT_EXPR.  Create it so the new name and the old one
     are registered in the replacement table so that we can fix the SSA web
     after adding all the ASSERT_EXPRs.  */
  create_new_def_for (v, assertion, NULL);

  return assertion;
}
/* Return true if STMT is a GIMPLE_COND whose comparison involves
   floating point values.  (Previous comment claimed "return false",
   contradicting the implementation: FLOAT_TYPE_P yields true for
   floating-point LHS types.)  */

static inline bool
fp_predicate (gimple *stmt)
{
  GIMPLE_CHECK (stmt, GIMPLE_COND);

  return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
}
/* If the range of values taken by OP can be inferred after STMT executes,
   return the comparison code (COMP_CODE_P) and value (VAL_P) that
   describes the inferred range.  Return true if a range could be
   inferred.

   Currently only non-nullness is inferred: on success *COMP_CODE_P is
   NE_EXPR and *VAL_P is the zero constant of OP's type.  */

static bool
infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
{
  *val_p = NULL_TREE;
  *comp_code_p = ERROR_MARK;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  Moving stmts in this way can easily corrupt SSA
     form for names live across such edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
    return false;

  /* If STMT is the last statement of a basic block with no normal
     successors, there is no point inferring anything about any of its
     operands.  We would not be able to find a proper insertion point
     for the assertion, anyway.  */
  if (stmt_ends_bb_p (stmt))
    {
      edge_iterator ei;
      edge e;

      /* E is NULL after the loop iff no normal (non-abnormal, non-EH)
	 successor edge exists.  */
      FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
	if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
	  break;
      if (e == NULL)
	return false;
    }

  if (infer_nonnull_range (stmt, op))
    {
      *val_p = build_int_cst (TREE_TYPE (op), 0);
      *comp_code_p = NE_EXPR;
      return true;
    }

  return false;
}
4906 void dump_asserts_for (FILE *, tree);
4907 void debug_asserts_for (tree);
4908 void dump_all_asserts (FILE *);
4909 void debug_all_asserts (void);
/* Dump all the registered assertions for NAME to FILE.

   Walks the singly-linked list stored in the global ASSERTS_FOR table,
   indexed by NAME's SSA version.  */

void
dump_asserts_for (FILE *file, tree name)
{
  assert_locus *loc;

  fprintf (file, "Assertions to be inserted for ");
  print_generic_expr (file, name, 0);
  fprintf (file, "\n");

  loc = asserts_for[SSA_NAME_VERSION (name)];
  while (loc)
    {
      fprintf (file, "\t");
      print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
      fprintf (file, "\n\tBB #%d", loc->bb->index);
      /* An assertion registered on an edge also reports that edge.  */
      if (loc->e)
	{
	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
		   loc->e->dest->index);
	  dump_edge_info (file, loc->e, dump_flags, 0);
	}
      fprintf (file, "\n\tPREDICATE: ");
      print_generic_expr (file, loc->expr, 0);
      fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
      print_generic_expr (file, loc->val, 0);
      fprintf (file, "\n\n");
      loc = loc->next;
    }

  fprintf (file, "\n");
}
/* Dump all the registered assertions for NAME to stderr.  Convenience
   wrapper for use from a debugger.  */

DEBUG_FUNCTION void
debug_asserts_for (tree name)
{
  dump_asserts_for (stderr, name);
}
/* Dump all the registered assertions for all the names to FILE.

   NEED_ASSERT_FOR is the global bitmap of SSA versions that have at
   least one assertion registered.  */

void
dump_all_asserts (FILE *file)
{
  unsigned i;
  bitmap_iterator bi;

  fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
  EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
    dump_asserts_for (file, ssa_name (i));
  fprintf (file, "\n");
}
/* Dump all the registered assertions for all the names to stderr.
   Convenience wrapper for use from a debugger.  */

DEBUG_FUNCTION void
debug_all_asserts (void)
{
  dump_all_asserts (stderr);
}
/* If NAME doesn't have an ASSERT_EXPR registered for asserting
   'EXPR COMP_CODE VAL' at a location that dominates block BB or
   E->DEST, then register this location as a possible insertion point
   for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.

   BB, E and SI provide the exact insertion point for the new
   ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
   on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
   BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
   must not be NULL.  */

static void
register_new_assert_for (tree name, tree expr,
			 enum tree_code comp_code,
			 tree val,
			 basic_block bb,
			 edge e,
			 gimple_stmt_iterator si)
{
  assert_locus *n, *loc, *last_loc;
  basic_block dest_bb;

  /* Exactly one of BB and E is provided.  */
  gcc_checking_assert (bb == NULL || e == NULL);

  if (e == NULL)
    gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);

  /* Never build an assert comparing against an integer constant with
     TREE_OVERFLOW set.  This confuses our undefined overflow warning
     machinery.  */
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);

  /* The new assertion A will be inserted at BB or E.  We need to
     determine if the new location is dominated by a previously
     registered location for A.  If we are doing an edge insertion,
     assume that A will be inserted at E->DEST.  Note that this is not
     necessarily true.

     If E is a critical edge, it will be split.  But even if E is
     split, the new block will dominate the same set of blocks that
     E->DEST dominates.

     The reverse, however, is not true, blocks dominated by E->DEST
     will not be dominated by the new block created to split E.  So,
     if the insertion location is on a critical edge, we will not use
     the new location to move another assertion previously registered
     at a block dominated by E->DEST.  */
  dest_bb = (bb) ? bb : e->dest;

  /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
     VAL at a block dominating DEST_BB, then we don't need to insert a new
     one.  Similarly, if the same assertion already exists at a block
     dominated by DEST_BB and the new location is not on a critical
     edge, then update the existing location for the assertion (i.e.,
     move the assertion up in the dominance tree).

     Note, this is implemented as a simple linked list because there
     should not be more than a handful of assertions registered per
     name.  If this becomes a performance problem, a table hashed by
     COMP_CODE and VAL could be implemented.  */
  loc = asserts_for[SSA_NAME_VERSION (name)];
  last_loc = loc;
  while (loc)
    {
      if (loc->comp_code == comp_code
	  && (loc->val == val
	      || operand_equal_p (loc->val, val, 0))
	  && (loc->expr == expr
	      || operand_equal_p (loc->expr, expr, 0)))
	{
	  /* If E is not a critical edge and DEST_BB
	     dominates the existing location for the assertion, move
	     the assertion up in the dominance tree by updating its
	     location information.  */
	  if ((e == NULL || !EDGE_CRITICAL_P (e))
	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
	    {
	      loc->bb = dest_bb;
	      loc->e = e;
	      loc->si = si;
	      return;
	    }
	}

      /* Update the last node of the list and move to the next one.  */
      last_loc = loc;
      loc = loc->next;
    }

  /* If we didn't find an assertion already registered for
     NAME COMP_CODE VAL, add a new one at the end of the list of
     assertions associated with NAME.  */
  n = XNEW (struct assert_locus);
  n->bb = dest_bb;
  n->e = e;
  n->si = si;
  n->comp_code = comp_code;
  n->val = val;
  n->expr = expr;
  n->next = NULL;

  if (last_loc)
    last_loc->next = n;
  else
    asserts_for[SSA_NAME_VERSION (name)] = n;

  bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
}
/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
   Extract a suitable test code and value and store them into *CODE_P and
   *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.

   If no extraction was possible, return FALSE, otherwise return TRUE.

   If INVERT is true, then we invert the result stored into *CODE_P.  */

static bool
extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
					 tree cond_op0, tree cond_op1,
					 bool invert, enum tree_code *code_p,
					 tree *val_p)
{
  enum tree_code comp_code;
  tree val;

  /* Otherwise, we have a comparison of the form NAME COMP VAL
     or VAL COMP NAME.  */
  if (name == cond_op1)
    {
      /* If the predicate is of the form VAL COMP NAME, flip
	 COMP around because we need to register NAME as the
	 first operand in the predicate.  */
      comp_code = swap_tree_comparison (cond_code);
      val = cond_op0;
    }
  else if (name == cond_op0)
    {
      /* The comparison is of the form NAME COMP VAL, so the
	 comparison code remains unchanged.  */
      comp_code = cond_code;
      val = cond_op1;
    }
  else
    gcc_unreachable ();

  /* Invert the comparison code as necessary.  */
  if (invert)
    comp_code = invert_tree_comparison (comp_code, 0);

  /* VRP only handles integral and pointer types.  */
  if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
      && ! POINTER_TYPE_P (TREE_TYPE (val)))
    return false;

  /* Do not register always-false predicates.
     FIXME:  this works around a limitation in fold() when dealing with
     enumerations.  Given 'enum { N1, N2 } x;', fold will not
     fold 'if (x > N2)' to 'if (0)'.  */
  if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (val)))
    {
      tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
      tree max = TYPE_MAX_VALUE (TREE_TYPE (val));

      /* NAME > TYPE_MAX and NAME < TYPE_MIN can never hold.  */
      if (comp_code == GT_EXPR
	  && (!max
	      || compare_values (val, max) == 0))
	return false;

      if (comp_code == LT_EXPR
	  && (!min
	      || compare_values (val, min) == 0))
	return false;
    }
  *code_p = comp_code;
  *val_p = val;
  return true;
}
/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
   (otherwise return VAL).  VAL and MASK must be zero-extended for
   precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
   (to transform signed values into unsigned) and at the end xor
   SGNBIT back.  */

static wide_int
masked_increment (const wide_int &val_in, const wide_int &mask,
		  const wide_int &sgnbit, unsigned int prec)
{
  wide_int bit = wi::one (prec), res;
  unsigned int i;

  wide_int val = val_in ^ sgnbit;
  /* Try each set bit of MASK from least to most significant: round VAL
     up past BIT, clear everything below BIT, and keep only MASK bits;
     the first such result that exceeds VAL is the answer.  */
  for (i = 0; i < prec; i++, bit += bit)
    {
      res = mask;
      if ((res & bit) == 0)
	continue;
      res = bit - 1;
      res = (val + bit).and_not (res);
      res &= mask;
      if (wi::gtu_p (res, val))
	return res ^ sgnbit;
    }
  /* No larger value exists within MASK; return VAL unchanged (modulo
     the sign-bit transform).  */
  return val ^ sgnbit;
}
/* Helper for overflow_comparison_p

   OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
   OP1's defining statement to see if it ultimately has the form
   OP0 CODE (OP0 PLUS INTEGER_CST)

   If so, return TRUE indicating this is an overflow test and store into
   *NEW_CST an updated constant that can be used in a narrowed range test.

   REVERSED indicates if the comparison was originally:

   OP1 CODE' OP0.

   This affects how we build the updated constant.  */

static bool
overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
			 bool follow_assert_exprs, bool reversed, tree *new_cst)
{
  /* See if this is a relational operation between two SSA_NAMES with
     unsigned, overflow wrapping values.  If so, check it more deeply.  */
  if ((code == LT_EXPR || code == LE_EXPR
       || code == GE_EXPR || code == GT_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
    {
      gimple *op1_def = SSA_NAME_DEF_STMT (op1);

      /* If requested, follow any ASSERT_EXPRs backwards for OP1.  */
      if (follow_assert_exprs)
	{
	  while (gimple_assign_single_p (op1_def)
		 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
	    {
	      op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
	      if (TREE_CODE (op1) != SSA_NAME)
		break;
	      op1_def = SSA_NAME_DEF_STMT (op1);
	    }
	}

      /* Now look at the defining statement of OP1 to see if it adds
	 or subtracts a nonzero constant from another operand.  */
      if (op1_def
	  && is_gimple_assign (op1_def)
	  && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
	  && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
	  && !integer_zerop (gimple_assign_rhs2 (op1_def)))
	{
	  tree target = gimple_assign_rhs1 (op1_def);

	  /* If requested, follow ASSERT_EXPRs backwards for op0 looking
	     for one where TARGET appears on the RHS.  */
	  if (follow_assert_exprs)
	    {
	      /* Now see if that "other operand" is op0, following the chain
		 of ASSERT_EXPRs if necessary.  */
	      gimple *op0_def = SSA_NAME_DEF_STMT (op0);
	      while (op0 != target
		     && gimple_assign_single_p (op0_def)
		     && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
		{
		  op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
		  if (TREE_CODE (op0) != SSA_NAME)
		    break;
		  op0_def = SSA_NAME_DEF_STMT (op0);
		}
	    }

	  /* If we did not find our target SSA_NAME, then this is not
	     an overflow test.  */
	  if (op0 != target)
	    return false;

	  /* The narrowed constant is TYPE_MAX adjusted by the (wrapping)
	     increment; the direction depends on whether the original
	     comparison had its operands swapped.  */
	  tree type = TREE_TYPE (op0);
	  wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
	  tree inc = gimple_assign_rhs2 (op1_def);
	  if (reversed)
	    *new_cst = wide_int_to_tree (type, max + inc);
	  else
	    *new_cst = wide_int_to_tree (type, max - inc);
	  return true;
	}
    }
  return false;
}
5279 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
5280 OP1's defining statement to see if it ultimately has the form
5281 OP0 CODE (OP0 PLUS INTEGER_CST)
5283 If so, return TRUE indicating this is an overflow test and store into
5284 *NEW_CST an updated constant that can be used in a narrowed range test.
5286 These statements are left as-is in the IL to facilitate discovery of
5287 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
5288 the alternate range representation is often useful within VRP. */
5290 static bool
5291 overflow_comparison_p (tree_code code, tree name, tree val,
5292 bool use_equiv_p, tree *new_cst)
5294 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
5295 return true;
5296 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
5297 use_equiv_p, true, new_cst);
5301 /* Try to register an edge assertion for SSA name NAME on edge E for
5302 the condition COND contributing to the conditional jump pointed to by BSI.
5303 Invert the condition COND if INVERT is true. */
5305 static void
5306 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
5307 			    enum tree_code cond_code,
5308 			    tree cond_op0, tree cond_op1, bool invert)
5310   tree val;
5311   enum tree_code comp_code;
5313   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5314 						cond_op0,
5315 						cond_op1,
5316 						invert, &comp_code, &val))
5317     return;
5319   /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
5320      reachable from E.  */
5321   if (live_on_edge (e, name))
5323       tree x;
      /* If this is really an overflow test (NAME CMP NAME +- CST), also
	 register the cheaper equivalent range test computed by
	 overflow_comparison_p, in addition to the original one.  */
5324       if (overflow_comparison_p (comp_code, name, val, false, &x))
5326 	  enum tree_code new_code
5327 	    = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
5328 	       ? GT_EXPR : LE_EXPR);
5329 	  register_new_assert_for (name, name, new_code, x, NULL, e, bsi);
5331       register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
5334   /* In the case of NAME <= CST and NAME being defined as
5335      NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
5336      and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
5337      This catches range and anti-range tests.  */
5338   if ((comp_code == LE_EXPR
5339        || comp_code == GT_EXPR)
5340       && TREE_CODE (val) == INTEGER_CST
5341       && TYPE_UNSIGNED (TREE_TYPE (val)))
5343       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5344       tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
5346       /* Extract CST2 from the (optional) addition.  */
5347       if (is_gimple_assign (def_stmt)
5348 	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
5350 	  name2 = gimple_assign_rhs1 (def_stmt);
5351 	  cst2 = gimple_assign_rhs2 (def_stmt);
5352 	  if (TREE_CODE (name2) == SSA_NAME
5353 	      && TREE_CODE (cst2) == INTEGER_CST)
5354 	    def_stmt = SSA_NAME_DEF_STMT (name2);
5357       /* Extract NAME2 from the (optional) sign-changing cast.  */
5358       if (gimple_assign_cast_p (def_stmt))
5360 	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
5361 	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5362 	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
5363 		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
5364 	    name3 = gimple_assign_rhs1 (def_stmt);
5367       /* If name3 is used later, create an ASSERT_EXPR for it.  */
5368       if (name3 != NULL_TREE
5369 	  && TREE_CODE (name3) == SSA_NAME
5370 	  && (cst2 == NULL_TREE
5371 	      || TREE_CODE (cst2) == INTEGER_CST)
5372 	  && INTEGRAL_TYPE_P (TREE_TYPE (name3))
5373 	  && live_on_edge (e, name3))
5375 	  tree tmp;
5377 	  /* Build an expression for the range test.  */
5378 	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
5379 	  if (cst2 != NULL_TREE)
5380 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5382 	  if (dump_file)
5384 	      fprintf (dump_file, "Adding assert for ");
5385 	      print_generic_expr (dump_file, name3, 0);
5386 	      fprintf (dump_file, " from ");
5387 	      print_generic_expr (dump_file, tmp, 0);
5388 	      fprintf (dump_file, "\n");
5391 	  register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
5394       /* If name2 is used later, create an ASSERT_EXPR for it.  */
5395       if (name2 != NULL_TREE
5396 	  && TREE_CODE (name2) == SSA_NAME
5397 	  && TREE_CODE (cst2) == INTEGER_CST
5398 	  && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5399 	  && live_on_edge (e, name2))
5401 	  tree tmp;
5403 	  /* Build an expression for the range test.  */
5404 	  tmp = name2;
5405 	  if (TREE_TYPE (name) != TREE_TYPE (name2))
5406 	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
5407 	  if (cst2 != NULL_TREE)
5408 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5410 	  if (dump_file)
5412 	      fprintf (dump_file, "Adding assert for ");
5413 	      print_generic_expr (dump_file, name2, 0);
5414 	      fprintf (dump_file, " from ");
5415 	      print_generic_expr (dump_file, tmp, 0);
5416 	      fprintf (dump_file, "\n");
5419 	  register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
5423   /* In the case of post-in/decrement tests like if (i++) ... and uses
5424      of the in/decremented value on the edge the extra name we want to
5425      assert for is not on the def chain of the name compared.  Instead
5426      it is in the set of use stmts.
5427      Similar cases happen for conversions that were simplified through
5428      fold_{sign_changed,widened}_comparison.  */
5429   if ((comp_code == NE_EXPR
5430        || comp_code == EQ_EXPR)
5431       && TREE_CODE (val) == INTEGER_CST)
5433       imm_use_iterator ui;
5434       gimple *use_stmt;
5435       FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
5437 	  if (!is_gimple_assign (use_stmt))
5438 	    continue;
5440 	  /* Cut off to use-stmts that are dominating the predecessor.  */
5441 	  if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
5442 	    continue;
5444 	  tree name2 = gimple_assign_lhs (use_stmt);
5445 	  if (TREE_CODE (name2) != SSA_NAME
5446 	      || !live_on_edge (e, name2))
5447 	    continue;
5449 	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
5450 	  tree cst;
5451 	  if (code == PLUS_EXPR
5452 	      || code == MINUS_EXPR)
5454 	      cst = gimple_assign_rhs2 (use_stmt);
5455 	      if (TREE_CODE (cst) != INTEGER_CST)
5456 		continue;
5457 	      cst = int_const_binop (code, val, cst);
5459 	  else if (CONVERT_EXPR_CODE_P (code))
5461 	      /* For truncating conversions we cannot record
5462 		 an inequality.  */
5463 	      if (comp_code == NE_EXPR
5464 		  && (TYPE_PRECISION (TREE_TYPE (name2))
5465 		      < TYPE_PRECISION (TREE_TYPE (name))))
5466 		continue;
5467 	      cst = fold_convert (TREE_TYPE (name2), val);
5469 	  else
5470 	    continue;
5472 	  if (TREE_OVERFLOW_P (cst))
5473 	    cst = drop_tree_overflow (cst);
5474 	  register_new_assert_for (name2, name2, comp_code, cst,
5475 				   NULL, e, bsi);
  /* The remaining cases derive asserts from NAME's defining statement
     when NAME is compared against an integer constant.  */
5479   if (TREE_CODE_CLASS (comp_code) == tcc_comparison
5480       && TREE_CODE (val) == INTEGER_CST)
5482       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5483       tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
5484       tree val2 = NULL_TREE;
5485       unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
5486       wide_int mask = wi::zero (prec);
5487       unsigned int nprec = prec;
5488       enum tree_code rhs_code = ERROR_MARK;
5490       if (is_gimple_assign (def_stmt))
5491 	rhs_code = gimple_assign_rhs_code (def_stmt);
5493       /* In the case of NAME != CST1 where NAME = A +- CST2 we can
5494          assert that A != CST1 -+ CST2.  */
5495       if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
5496 	  && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
5498 	  tree op0 = gimple_assign_rhs1 (def_stmt);
5499 	  tree op1 = gimple_assign_rhs2 (def_stmt);
5500 	  if (TREE_CODE (op0) == SSA_NAME
5501 	      && TREE_CODE (op1) == INTEGER_CST
5502 	      && live_on_edge (e, op0))
5504 	      enum tree_code reverse_op = (rhs_code == PLUS_EXPR
5505 					   ? MINUS_EXPR : PLUS_EXPR);
5506 	      op1 = int_const_binop (reverse_op, val, op1);
5507 	      if (TREE_OVERFLOW (op1))
5508 		op1 = drop_tree_overflow (op1);
5509 	      register_new_assert_for (op0, op0, comp_code, op1, NULL, e, bsi);
5513       /* Add asserts for NAME cmp CST and NAME being defined
5514 	 as NAME = (int) NAME2.  */
5515       if (!TYPE_UNSIGNED (TREE_TYPE (val))
5516 	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
5517 	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
5518 	  && gimple_assign_cast_p (def_stmt))
5520 	  name2 = gimple_assign_rhs1 (def_stmt);
5521 	  if (CONVERT_EXPR_CODE_P (rhs_code)
5522 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5523 	      && TYPE_UNSIGNED (TREE_TYPE (name2))
5524 	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
5525 	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
5526 		  || !tree_int_cst_equal (val,
5527 					  TYPE_MIN_VALUE (TREE_TYPE (val))))
5528 	      && live_on_edge (e, name2))
5530 	      tree tmp, cst;
5531 	      enum tree_code new_comp_code = comp_code;
5533 	      cst = fold_convert (TREE_TYPE (name2),
5534 				  TYPE_MIN_VALUE (TREE_TYPE (val)));
5535 	      /* Build an expression for the range test.  */
5536 	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
5537 	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
5538 				 fold_convert (TREE_TYPE (name2), val));
5539 	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5541 		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
5542 		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
5543 				     build_int_cst (TREE_TYPE (name2), 1));
5546 	      if (dump_file)
5548 		  fprintf (dump_file, "Adding assert for ");
5549 		  print_generic_expr (dump_file, name2, 0);
5550 		  fprintf (dump_file, " from ");
5551 		  print_generic_expr (dump_file, tmp, 0);
5552 		  fprintf (dump_file, "\n");
5555 	      register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
5556 				       e, bsi);
5560       /* Add asserts for NAME cmp CST and NAME being defined as
5561 	 NAME = NAME2 >> CST2.
5563 	 Extract CST2 from the right shift.  */
5564       if (rhs_code == RSHIFT_EXPR)
5566 	  name2 = gimple_assign_rhs1 (def_stmt);
5567 	  cst2 = gimple_assign_rhs2 (def_stmt);
5568 	  if (TREE_CODE (name2) == SSA_NAME
5569 	      && tree_fits_uhwi_p (cst2)
5570 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5571 	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
5572 	      && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
5573 	      && live_on_edge (e, name2))
5575 	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
5576 	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
      /* Only usable when shifting VAL back left and right again round-trips,
	 i.e. no significant bits were lost by the original shift.  */
5579       if (val2 != NULL_TREE
5580 	  && TREE_CODE (val2) == INTEGER_CST
5581 	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
5582 					    TREE_TYPE (val),
5583 					    val2, cst2), val))
5585 	  enum tree_code new_comp_code = comp_code;
5586 	  tree tmp, new_val;
5588 	  tmp = name2;
5589 	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
5591 	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5593 		  tree type = build_nonstandard_integer_type (prec, 1);
5594 		  tmp = build1 (NOP_EXPR, type, name2);
5595 		  val2 = fold_convert (type, val2);
5597 	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
5598 	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
5599 	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
5601 	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5603 	      wide_int minval
5604 		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5605 	      new_val = val2;
5606 	      if (minval == new_val)
5607 		new_val = NULL_TREE;
5609 	  else
5611 	      wide_int maxval
5612 		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5613 	      mask |= val2;
5614 	      if (mask == maxval)
5615 		new_val = NULL_TREE;
5616 	      else
5617 		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
5620 	  if (new_val)
5622 	      if (dump_file)
5624 		  fprintf (dump_file, "Adding assert for ");
5625 		  print_generic_expr (dump_file, name2, 0);
5626 		  fprintf (dump_file, " from ");
5627 		  print_generic_expr (dump_file, tmp, 0);
5628 		  fprintf (dump_file, "\n");
5631 	      register_new_assert_for (name2, tmp, new_comp_code, new_val,
5632 				       NULL, e, bsi);
5636       /* Add asserts for NAME cmp CST and NAME being defined as
5637 	 NAME = NAME2 & CST2.
5639 	 Extract CST2 from the and.
5641 	 Also handle
5642 	 NAME = (unsigned) NAME2;
5643 	 casts where NAME's type is unsigned and has smaller precision
5644 	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
5645       names[0] = NULL_TREE;
5646       names[1] = NULL_TREE;
5647       cst2 = NULL_TREE;
5648       if (rhs_code == BIT_AND_EXPR
5649 	  || (CONVERT_EXPR_CODE_P (rhs_code)
5650 	      && INTEGRAL_TYPE_P (TREE_TYPE (val))
5651 	      && TYPE_UNSIGNED (TREE_TYPE (val))
5652 	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5653 		 > prec))
5655 	  name2 = gimple_assign_rhs1 (def_stmt);
5656 	  if (rhs_code == BIT_AND_EXPR)
5657 	    cst2 = gimple_assign_rhs2 (def_stmt);
5658 	  else
5660 	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
5661 	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
5663 	  if (TREE_CODE (name2) == SSA_NAME
5664 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5665 	      && TREE_CODE (cst2) == INTEGER_CST
5666 	      && !integer_zerop (cst2)
5667 	      && (nprec > 1
5668 		  || TYPE_UNSIGNED (TREE_TYPE (val))))
5670 	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
5671 	      if (gimple_assign_cast_p (def_stmt2))
5673 		  names[1] = gimple_assign_rhs1 (def_stmt2);
5674 		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
5675 		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
5676 		      || (TYPE_PRECISION (TREE_TYPE (name2))
5677 			  != TYPE_PRECISION (TREE_TYPE (names[1])))
5678 		      || !live_on_edge (e, names[1]))
5679 		    names[1] = NULL_TREE;
5681 	      if (live_on_edge (e, name2))
5682 		names[0] = name2;
      /* Compute the unsigned range [MINV, MAXV] implied for NAMES[] by
	 (X & CST2) COMP_CODE VAL, working in NPREC-bit arithmetic.  */
5685       if (names[0] || names[1])
5687 	  wide_int minv, maxv, valv, cst2v;
5688 	  wide_int tem, sgnbit;
5689 	  bool valid_p = false, valn, cst2n;
5690 	  enum tree_code ccode = comp_code;
5692 	  valv = wide_int::from (val, nprec, UNSIGNED);
5693 	  cst2v = wide_int::from (cst2, nprec, UNSIGNED);
5694 	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
5695 	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
5696 	  /* If CST2 doesn't have most significant bit set,
5697 	     but VAL is negative, we have comparison like
5698 	     if ((x & 0x123) > -4) (always true).  Just give up.  */
5699 	  if (!cst2n && valn)
5700 	    ccode = ERROR_MARK;
5701 	  if (cst2n)
5702 	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5703 	  else
5704 	    sgnbit = wi::zero (nprec);
5705 	  minv = valv & cst2v;
5706 	  switch (ccode)
5708 	    case EQ_EXPR:
5709 	      /* Minimum unsigned value for equality is VAL & CST2
5710 		 (should be equal to VAL, otherwise we probably should
5711 		 have folded the comparison into false) and
5712 		 maximum unsigned value is VAL | ~CST2.  */
5713 	      maxv = valv | ~cst2v;
5714 	      valid_p = true;
5715 	      break;
5717 	    case NE_EXPR:
5718 	      tem = valv | ~cst2v;
5719 	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
5720 	      if (valv == 0)
5722 		  cst2n = false;
5723 		  sgnbit = wi::zero (nprec);
5724 		  goto gt_expr;
5726 	      /* If (VAL | ~CST2) is all ones, handle it as
5727 		 (X & CST2) < VAL.  */
5728 	      if (tem == -1)
5730 		  cst2n = false;
5731 		  valn = false;
5732 		  sgnbit = wi::zero (nprec);
5733 		  goto lt_expr;
5735 	      if (!cst2n && wi::neg_p (cst2v))
5736 		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5737 	      if (sgnbit != 0)
5739 		  if (valv == sgnbit)
5741 		      cst2n = true;
5742 		      valn = true;
5743 		      goto gt_expr;
5745 		  if (tem == wi::mask (nprec - 1, false, nprec))
5747 		      cst2n = true;
5748 		      goto lt_expr;
5750 		  if (!cst2n)
5751 		    sgnbit = wi::zero (nprec);
5753 	      break;
5755 	    case GE_EXPR:
5756 	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5757 		 is VAL and maximum unsigned value is ~0.  For signed
5758 		 comparison, if CST2 doesn't have most significant bit
5759 		 set, handle it similarly.  If CST2 has MSB set,
5760 		 the minimum is the same, and maximum is ~0U/2.  */
5761 	      if (minv != valv)
5763 		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5764 		     VAL.  */
5765 		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
5766 		  if (minv == valv)
5767 		    break;
5769 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5770 	      valid_p = true;
5771 	      break;
5773 	    case GT_EXPR:
5774 	    gt_expr:
5775 	      /* Find out smallest MINV where MINV > VAL
5776 		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
5777 		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
5778 	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
5779 	      if (minv == valv)
5780 		break;
5781 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5782 	      valid_p = true;
5783 	      break;
5785 	    case LE_EXPR:
5786 	      /* Minimum unsigned value for <= is 0 and maximum
5787 		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5788 		 Otherwise, find smallest VAL2 where VAL2 > VAL
5789 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5790 		 as maximum.
5791 		 For signed comparison, if CST2 doesn't have most
5792 		 significant bit set, handle it similarly.  If CST2 has
5793 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
5794 	      if (minv == valv)
5795 		maxv = valv;
5796 	      else
5798 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5799 		  if (maxv == valv)
5800 		    break;
5801 		  maxv -= 1;
5803 	      maxv |= ~cst2v;
5804 	      minv = sgnbit;
5805 	      valid_p = true;
5806 	      break;
5808 	    case LT_EXPR:
5809 	    lt_expr:
5810 	      /* Minimum unsigned value for < is 0 and maximum
5811 		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5812 		 Otherwise, find smallest VAL2 where VAL2 > VAL
5813 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5814 		 as maximum.
5815 		 For signed comparison, if CST2 doesn't have most
5816 		 significant bit set, handle it similarly.  If CST2 has
5817 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
5818 	      if (minv == valv)
5820 		  if (valv == sgnbit)
5821 		    break;
5822 		  maxv = valv;
5824 	      else
5826 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5827 		  if (maxv == valv)
5828 		    break;
5830 		  maxv -= 1;
5831 	      maxv |= ~cst2v;
5832 	      minv = sgnbit;
5833 	      valid_p = true;
5834 	      break;
5836 	    default:
5837 	      break;
	  /* Reject an empty or full range ((MAXV - MINV) == -1 means the
	     range covers every value and asserts nothing).  */
5839 	  if (valid_p
5840 	      && (maxv - minv) != -1)
5842 	      tree tmp, new_val, type;
5843 	      int i;
5845 	      for (i = 0; i < 2; i++)
5846 		if (names[i])
5848 		    wide_int maxv2 = maxv;
5849 		    tmp = names[i];
5850 		    type = TREE_TYPE (names[i]);
5851 		    if (!TYPE_UNSIGNED (type))
5853 			type = build_nonstandard_integer_type (nprec, 1);
5854 			tmp = build1 (NOP_EXPR, type, names[i]);
5856 		    if (minv != 0)
5858 			tmp = build2 (PLUS_EXPR, type, tmp,
5859 				      wide_int_to_tree (type, -minv));
5860 			maxv2 = maxv - minv;
5862 		    new_val = wide_int_to_tree (type, maxv2);
5864 		    if (dump_file)
5866 			fprintf (dump_file, "Adding assert for ");
5867 			print_generic_expr (dump_file, names[i], 0);
5868 			fprintf (dump_file, " from ");
5869 			print_generic_expr (dump_file, tmp, 0);
5870 			fprintf (dump_file, "\n");
5873 		    register_new_assert_for (names[i], tmp, LE_EXPR,
5874 					     new_val, NULL, e, bsi);
5881 /* OP is an operand of a truth value expression which is known to have
5882 a particular value. Register any asserts for OP and for any
5883 operands in OP's defining statement.
5885 If CODE is EQ_EXPR, then we want to register OP is zero (false),
5886 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
5888 static void
5889 register_edge_assert_for_1 (tree op, enum tree_code code,
5890 edge e, gimple_stmt_iterator bsi)
5892 gimple *op_def;
5893 tree val;
5894 enum tree_code rhs_code;
5896 /* We only care about SSA_NAMEs. */
5897 if (TREE_CODE (op) != SSA_NAME)
5898 return;
5900 /* We know that OP will have a zero or nonzero value. If OP is used
5901 more than once go ahead and register an assert for OP. */
5902 if (live_on_edge (e, op))
5904 val = build_int_cst (TREE_TYPE (op), 0);
5905 register_new_assert_for (op, op, code, val, NULL, e, bsi);
5908 /* Now look at how OP is set. If it's set from a comparison,
5909 a truth operation or some bit operations, then we may be able
5910 to register information about the operands of that assignment. */
5911 op_def = SSA_NAME_DEF_STMT (op);
5912 if (gimple_code (op_def) != GIMPLE_ASSIGN)
5913 return;
5915 rhs_code = gimple_assign_rhs_code (op_def);
5917 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5919 bool invert = (code == EQ_EXPR ? true : false);
5920 tree op0 = gimple_assign_rhs1 (op_def);
5921 tree op1 = gimple_assign_rhs2 (op_def);
5923 if (TREE_CODE (op0) == SSA_NAME)
5924 register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, invert);
5925 if (TREE_CODE (op1) == SSA_NAME)
5926 register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, invert);
5928 else if ((code == NE_EXPR
5929 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5930 || (code == EQ_EXPR
5931 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5933 /* Recurse on each operand. */
5934 tree op0 = gimple_assign_rhs1 (op_def);
5935 tree op1 = gimple_assign_rhs2 (op_def);
5936 if (TREE_CODE (op0) == SSA_NAME
5937 && has_single_use (op0))
5938 register_edge_assert_for_1 (op0, code, e, bsi);
5939 if (TREE_CODE (op1) == SSA_NAME
5940 && has_single_use (op1))
5941 register_edge_assert_for_1 (op1, code, e, bsi);
5943 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5944 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5946 /* Recurse, flipping CODE. */
5947 code = invert_tree_comparison (code, false);
5948 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
5950 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5952 /* Recurse through the copy. */
5953 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
5955 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5957 /* Recurse through the type conversion, unless it is a narrowing
5958 conversion or conversion from non-integral type. */
5959 tree rhs = gimple_assign_rhs1 (op_def);
5960 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
5961 && (TYPE_PRECISION (TREE_TYPE (rhs))
5962 <= TYPE_PRECISION (TREE_TYPE (op))))
5963 register_edge_assert_for_1 (rhs, code, e, bsi);
5967 /* Try to register an edge assertion for SSA name NAME on edge E for
5968 the condition COND contributing to the conditional jump pointed to by
5969 SI. */
5971 static void
5972 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
5973 enum tree_code cond_code, tree cond_op0,
5974 tree cond_op1)
5976 tree val;
5977 enum tree_code comp_code;
5978 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
5980 /* Do not attempt to infer anything in names that flow through
5981 abnormal edges. */
5982 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
5983 return;
5985 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5986 cond_op0, cond_op1,
5987 is_else_edge,
5988 &comp_code, &val))
5989 return;
5991 /* Register ASSERT_EXPRs for name. */
5992 register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
5993 cond_op1, is_else_edge);
5996 /* If COND is effectively an equality test of an SSA_NAME against
5997 the value zero or one, then we may be able to assert values
5998 for SSA_NAMEs which flow into COND. */
6000 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
6001 statement of NAME we can assert both operands of the BIT_AND_EXPR
6002 have nonzero value. */
6003 if (((comp_code == EQ_EXPR && integer_onep (val))
6004 || (comp_code == NE_EXPR && integer_zerop (val))))
6006 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
6008 if (is_gimple_assign (def_stmt)
6009 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
6011 tree op0 = gimple_assign_rhs1 (def_stmt);
6012 tree op1 = gimple_assign_rhs2 (def_stmt);
6013 register_edge_assert_for_1 (op0, NE_EXPR, e, si);
6014 register_edge_assert_for_1 (op1, NE_EXPR, e, si);
6018 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
6019 statement of NAME we can assert both operands of the BIT_IOR_EXPR
6020 have zero value. */
6021 if (((comp_code == EQ_EXPR && integer_zerop (val))
6022 || (comp_code == NE_EXPR && integer_onep (val))))
6024 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
6026 /* For BIT_IOR_EXPR only if NAME == 0 both operands have
6027 necessarily zero value, or if type-precision is one. */
6028 if (is_gimple_assign (def_stmt)
6029 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
6030 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
6031 || comp_code == EQ_EXPR)))
6033 tree op0 = gimple_assign_rhs1 (def_stmt);
6034 tree op1 = gimple_assign_rhs2 (def_stmt);
6035 register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
6036 register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
6042 /* Determine whether the outgoing edges of BB should receive an
6043 ASSERT_EXPR for each of the operands of BB's LAST statement.
6044 The last statement of BB must be a COND_EXPR.
6046 If any of the sub-graphs rooted at BB have an interesting use of
6047 the predicate operands, an assert location node is added to the
6048 list of assertions for the corresponding operands. */
6050 static void
6051 find_conditional_asserts (basic_block bb, gcond *last)
6053 gimple_stmt_iterator bsi;
6054 tree op;
6055 edge_iterator ei;
6056 edge e;
6057 ssa_op_iter iter;
6059 bsi = gsi_for_stmt (last);
6061 /* Look for uses of the operands in each of the sub-graphs
6062 rooted at BB. We need to check each of the outgoing edges
6063 separately, so that we know what kind of ASSERT_EXPR to
6064 insert. */
6065 FOR_EACH_EDGE (e, ei, bb->succs)
6067 if (e->dest == bb)
6068 continue;
6070 /* Register the necessary assertions for each operand in the
6071 conditional predicate. */
6072 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
6073 register_edge_assert_for (op, e, bsi,
6074 gimple_cond_code (last),
6075 gimple_cond_lhs (last),
6076 gimple_cond_rhs (last));
/* A switch case label paired with the basic block it transfers
   control to; used by find_switch_asserts to sort labels by
   destination.  */
6080 struct case_info
6082   tree expr;		/* The CASE_LABEL_EXPR.  */
6083   basic_block bb;	/* Destination block of the label.  */
6086 /* Compare two case labels sorting first by the destination bb index
6087 and then by the case value. */
6089 static int
6090 compare_case_labels (const void *p1, const void *p2)
6092 const struct case_info *ci1 = (const struct case_info *) p1;
6093 const struct case_info *ci2 = (const struct case_info *) p2;
6094 int idx1 = ci1->bb->index;
6095 int idx2 = ci2->bb->index;
6097 if (idx1 < idx2)
6098 return -1;
6099 else if (idx1 == idx2)
6101 /* Make sure the default label is first in a group. */
6102 if (!CASE_LOW (ci1->expr))
6103 return -1;
6104 else if (!CASE_LOW (ci2->expr))
6105 return 1;
6106 else
6107 return tree_int_cst_compare (CASE_LOW (ci1->expr),
6108 CASE_LOW (ci2->expr));
6110 else
6111 return 1;
6114 /* Determine whether the outgoing edges of BB should receive an
6115 ASSERT_EXPR for each of the operands of BB's LAST statement.
6116 The last statement of BB must be a SWITCH_EXPR.
6118 If any of the sub-graphs rooted at BB have an interesting use of
6119 the predicate operands, an assert location node is added to the
6120 list of assertions for the corresponding operands. */
6122 static void
6123 find_switch_asserts (basic_block bb, gswitch *last)
6125   gimple_stmt_iterator bsi;
6126   tree op;
6127   edge e;
6128   struct case_info *ci;
6129   size_t n = gimple_switch_num_labels (last);
6130 #if GCC_VERSION >= 4000
6131   unsigned int idx;
6132 #else
6133   /* Work around GCC 3.4 bug (PR 37086).  */
6134   volatile unsigned int idx;
6135 #endif
6137   bsi = gsi_for_stmt (last);
6138   op = gimple_switch_index (last);
6139   if (TREE_CODE (op) != SSA_NAME)
6140     return;
6142   /* Build a vector of case labels sorted by destination label.  */
6143   ci = XNEWVEC (struct case_info, n);
6144   for (idx = 0; idx < n; ++idx)
6146       ci[idx].expr = gimple_switch_label (last, idx);
6147       ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
  /* Label 0 is the default label, so ci[0].bb before sorting gives the
     default destination.  */
6149   edge default_edge = find_edge (bb, ci[0].bb);
6150   qsort (ci, n, sizeof (struct case_info), compare_case_labels);
6152   for (idx = 0; idx < n; ++idx)
6154       tree min, max;
6155       tree cl = ci[idx].expr;
6156       basic_block cbb = ci[idx].bb;
6158       min = CASE_LOW (cl);
6159       max = CASE_HIGH (cl);
6161       /* If there are multiple case labels with the same destination
6162 	 we need to combine them to a single value range for the edge.  */
6163       if (idx + 1 < n && cbb == ci[idx + 1].bb)
6165 	  /* Skip labels until the last of the group.  */
6166 	  do {
6167 	    ++idx;
6168 	  } while (idx < n && cbb == ci[idx].bb);
6169 	  --idx;
6171 	  /* Pick up the maximum of the case label range.  */
6172 	  if (CASE_HIGH (ci[idx].expr))
6173 	    max = CASE_HIGH (ci[idx].expr);
6174 	  else
6175 	    max = CASE_LOW (ci[idx].expr);
6178       /* Can't extract a useful assertion out of a range that includes the
6179 	 default label.  */
6180       if (min == NULL_TREE)
6181 	continue;
6183       /* Find the edge to register the assert expr on.  */
6184       e = find_edge (bb, cbb);
6186       /* Register the necessary assertions for the operand in the
6187 	 SWITCH_EXPR.  */
6188       register_edge_assert_for (op, e, bsi,
6189 				max ? GE_EXPR : EQ_EXPR,
6190 				op, fold_convert (TREE_TYPE (op), min));
6191       if (max)
6192 	register_edge_assert_for (op, e, bsi, LE_EXPR, op,
6193 				  fold_convert (TREE_TYPE (op), max));
6196   XDELETEVEC (ci);
6198   if (!live_on_edge (default_edge, op))
6199     return;
6201   /* Now register along the default label assertions that correspond to the
6202      anti-range of each label.  */
6203   int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
6204   if (insertion_limit == 0)
6205     return;
6207   /* We can't do this if the default case shares a label with another case.  */
6208   tree default_cl = gimple_switch_default_label (last);
6209   for (idx = 1; idx < n; idx++)
6211       tree min, max;
6212       tree cl = gimple_switch_label (last, idx);
6213       if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
6214 	continue;
6216       min = CASE_LOW (cl);
6217       max = CASE_HIGH (cl);
6219       /* Combine contiguous case ranges to reduce the number of assertions
6220 	 to insert.  */
6221       for (idx = idx + 1; idx < n; idx++)
6223 	  tree next_min, next_max;
6224 	  tree next_cl = gimple_switch_label (last, idx);
6225 	  if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
6226 	    break;
6228 	  next_min = CASE_LOW (next_cl);
6229 	  next_max = CASE_HIGH (next_cl);
	  /* The next label extends this range only if its low bound is
	     exactly one above the current high bound.  */
6231 	  wide_int difference = wi::sub (next_min, max ? max : min);
6232 	  if (wi::eq_p (difference, 1))
6233 	    max = next_max ? next_max : next_min;
6234 	  else
6235 	    break;
      /* Step back so the outer loop re-examines the label that ended
	 the contiguous run.  */
6237       idx--;
6239       if (max == NULL_TREE)
6241 	  /* Register the assertion OP != MIN.  */
6242 	  min = fold_convert (TREE_TYPE (op), min);
6243 	  register_edge_assert_for (op, default_edge, bsi, NE_EXPR, op, min);
6245       else
6247 	  /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
6248 	     which will give OP the anti-range ~[MIN,MAX].  */
6249 	  tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
6250 	  min = fold_convert (TREE_TYPE (uop), min);
6251 	  max = fold_convert (TREE_TYPE (uop), max);
6253 	  tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
6254 	  tree rhs = int_const_binop (MINUS_EXPR, max, min);
6255 	  register_new_assert_for (op, lhs, GT_EXPR, rhs,
6256 				   NULL, default_edge, bsi);
6259       if (--insertion_limit == 0)
6260 	break;
6265 /* Traverse all the statements in block BB looking for statements that
6266 may generate useful assertions for the SSA names in their operand.
6267 If a statement produces a useful assertion A for name N_i, then the
6268 list of assertions already generated for N_i is scanned to
6269 determine if A is actually needed.
6271 If N_i already had the assertion A at a location dominating the
6272 current location, then nothing needs to be done. Otherwise, the
6273 new location for A is recorded instead.
6275 1- For every statement S in BB, all the variables used by S are
6276 added to bitmap FOUND_IN_SUBGRAPH.
6278 2- If statement S uses an operand N in a way that exposes a known
6279 value range for N, then if N was not already generated by an
6280 ASSERT_EXPR, create a new assert location for N. For instance,
6281 if N is a pointer and the statement dereferences it, we can
6282 assume that N is not NULL.
6284 3- COND_EXPRs are a special case of #2. We can derive range
6285 information from the predicate but need to insert different
6286 ASSERT_EXPRs for each of the sub-graphs rooted at the
6287 conditional block. If the last statement of BB is a conditional
6288 expression of the form 'X op Y', then
6290 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
6292 b) If the conditional is the only entry point to the sub-graph
6293 corresponding to the THEN_CLAUSE, recurse into it. On
6294 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
6295 an ASSERT_EXPR is added for the corresponding variable.
6297 c) Repeat step (b) on the ELSE_CLAUSE.
6299 d) Mark X and Y in FOUND_IN_SUBGRAPH.
6301 For instance,
6303 if (a == 9)
6304 b = a;
6305 else
6306 b = c + 1;
6308 In this case, an assertion on the THEN clause is useful to
6309 determine that 'a' is always 9 on that edge. However, an assertion
6310 on the ELSE clause would be unnecessary.
6312 4- If BB does not end in a conditional expression, then we recurse
6313 into BB's dominator children.
6315 At the end of the recursive traversal, every SSA name will have a
6316 list of locations where ASSERT_EXPRs should be added. When a new
6317 location for name N is found, it is registered by calling
6318 register_new_assert_for. That function keeps track of all the
6319 registered assertions to prevent adding unnecessary assertions.
6320 For instance, if a pointer P_4 is dereferenced more than once in a
6321 dominator tree, only the location dominating all the dereference of
6322 P_4 will receive an ASSERT_EXPR. */
6324 static void
6325 find_assert_locations_1 (basic_block bb, sbitmap live)
/* LIVE is a bitmap of SSA name versions live at the end of BB; it is
   updated in place as we walk the statements backwards.  */
6327   gimple *last;
6329   last = last_stmt (bb);
6331   /* If BB's last statement is a conditional statement involving integer
6332      operands, determine if we need to add ASSERT_EXPRs.  */
6333   if (last
6334       && gimple_code (last) == GIMPLE_COND
6335       && !fp_predicate (last)
6336       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
6337     find_conditional_asserts (bb, as_a <gcond *> (last));
6339   /* If BB's last statement is a switch statement involving integer
6340      operands, determine if we need to add ASSERT_EXPRs.  */
6341   if (last
6342       && gimple_code (last) == GIMPLE_SWITCH
6343       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
6344     find_switch_asserts (bb, as_a <gswitch *> (last));
6346   /* Traverse all the statements in BB marking used names and looking
6347      for statements that may infer assertions for their used operands.  */
6348   for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
6349        gsi_prev (&si))
6351       gimple *stmt;
6352       tree op;
6353       ssa_op_iter i;
6355       stmt = gsi_stmt (si);
6357       if (is_gimple_debug (stmt))
6358 	continue;
6360       /* See if we can derive an assertion for any of STMT's operands.  */
6361       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6363 	  tree value;
6364 	  enum tree_code comp_code;
6366 	  /* If op is not live beyond this stmt, do not bother to insert
6367 	     asserts for it.  */
6368 	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
6369 	    continue;
6371 	  /* If OP is used in such a way that we can infer a value
6372 	     range for it, and we don't find a previous assertion for
6373 	     it, create a new assertion location node for OP.  */
6374 	  if (infer_value_range (stmt, op, &comp_code, &value))
6376 	      /* If we are able to infer a nonzero value range for OP,
6377 		 then walk backwards through the use-def chain to see if OP
6378 		 was set via a typecast.
6380 		 If so, then we can also infer a nonzero value range
6381 		 for the operand of the NOP_EXPR.  */
6382 	      if (comp_code == NE_EXPR && integer_zerop (value))
6384 		  tree t = op;
6385 		  gimple *def_stmt = SSA_NAME_DEF_STMT (t);
		  /* Walk through any chain of pointer-to-pointer
		     conversions feeding OP.  */
6387 		  while (is_gimple_assign (def_stmt)
6388 			 && CONVERT_EXPR_CODE_P
6389 			      (gimple_assign_rhs_code (def_stmt))
6390 			 && TREE_CODE
6391 			      (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
6392 			 && POINTER_TYPE_P
6393 			      (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
6395 		      t = gimple_assign_rhs1 (def_stmt);
6396 		      def_stmt = SSA_NAME_DEF_STMT (t);
6398 		      /* Note we want to register the assert for the
6399 			 operand of the NOP_EXPR after SI, not after the
6400 			 conversion.  */
6401 		      if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
6402 			register_new_assert_for (t, t, comp_code, value,
6403 						 bb, NULL, si);
6407 	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
6411       /* Update live.  */
6412       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6413 	bitmap_set_bit (live, SSA_NAME_VERSION (op));
6414       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
6415 	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
6418   /* Traverse all PHI nodes in BB, updating live.  */
6419   for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
6420        gsi_next (&si))
6422       use_operand_p arg_p;
6423       ssa_op_iter i;
6424       gphi *phi = si.phi ();
6425       tree res = gimple_phi_result (phi);
6427       if (virtual_operand_p (res))
6428 	continue;
6430       FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
6432 	  tree arg = USE_FROM_PTR (arg_p);
6433 	  if (TREE_CODE (arg) == SSA_NAME)
6434 	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
6437       bitmap_clear_bit (live, SSA_NAME_VERSION (res));
6441 /* Do an RPO walk over the function computing SSA name liveness
6442 on-the-fly and deciding on assert expressions to insert. */
6444 static void
6445 find_assert_locations (void)
6447 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6448 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6449 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
6450 int rpo_cnt, i;
6452 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
6453 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
6454 for (i = 0; i < rpo_cnt; ++i)
6455 bb_rpo[rpo[i]] = i;
6457 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
6458 the order we compute liveness and insert asserts we otherwise
6459 fail to insert asserts into the loop latch. */
6460 loop_p loop;
6461 FOR_EACH_LOOP (loop, 0)
6463 i = loop->latch->index;
6464 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
6465 for (gphi_iterator gsi = gsi_start_phis (loop->header);
6466 !gsi_end_p (gsi); gsi_next (&gsi))
6468 gphi *phi = gsi.phi ();
6469 if (virtual_operand_p (gimple_phi_result (phi)))
6470 continue;
6471 tree arg = gimple_phi_arg_def (phi, j);
6472 if (TREE_CODE (arg) == SSA_NAME)
6474 if (live[i] == NULL)
6476 live[i] = sbitmap_alloc (num_ssa_names);
6477 bitmap_clear (live[i]);
6479 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
6484 for (i = rpo_cnt - 1; i >= 0; --i)
6486 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
6487 edge e;
6488 edge_iterator ei;
6490 if (!live[rpo[i]])
6492 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
6493 bitmap_clear (live[rpo[i]]);
6496 /* Process BB and update the live information with uses in
6497 this block. */
6498 find_assert_locations_1 (bb, live[rpo[i]]);
6500 /* Merge liveness into the predecessor blocks and free it. */
6501 if (!bitmap_empty_p (live[rpo[i]]))
6503 int pred_rpo = i;
6504 FOR_EACH_EDGE (e, ei, bb->preds)
6506 int pred = e->src->index;
6507 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
6508 continue;
6510 if (!live[pred])
6512 live[pred] = sbitmap_alloc (num_ssa_names);
6513 bitmap_clear (live[pred]);
6515 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
6517 if (bb_rpo[pred] < pred_rpo)
6518 pred_rpo = bb_rpo[pred];
6521 /* Record the RPO number of the last visited block that needs
6522 live information from this block. */
6523 last_rpo[rpo[i]] = pred_rpo;
6525 else
6527 sbitmap_free (live[rpo[i]]);
6528 live[rpo[i]] = NULL;
6531 /* We can free all successors live bitmaps if all their
6532 predecessors have been visited already. */
6533 FOR_EACH_EDGE (e, ei, bb->succs)
6534 if (last_rpo[e->dest->index] == i
6535 && live[e->dest->index])
6537 sbitmap_free (live[e->dest->index]);
6538 live[e->dest->index] = NULL;
6542 XDELETEVEC (rpo);
6543 XDELETEVEC (bb_rpo);
6544 XDELETEVEC (last_rpo);
6545 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
6546 if (live[i])
6547 sbitmap_free (live[i]);
6548 XDELETEVEC (live);
6551 /* Create an ASSERT_EXPR for NAME and insert it in the location
6552 indicated by LOC. Return true if we made any edge insertions. */
6554 static bool
6555 process_assert_insertions_for (tree name, assert_locus *loc)
6557 /* Build the comparison expression NAME_i COMP_CODE VAL. */
6558 gimple *stmt;
6559 tree cond;
6560 gimple *assert_stmt;
6561 edge_iterator ei;
6562 edge e;
6564 /* If we have X <=> X do not insert an assert expr for that. */
6565 if (loc->expr == loc->val)
6566 return false;
6568 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
6569 assert_stmt = build_assert_expr_for (cond, name);
6570 if (loc->e)
6572 /* We have been asked to insert the assertion on an edge. This
6573 is used only by COND_EXPR and SWITCH_EXPR assertions. */
6574 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
6575 || (gimple_code (gsi_stmt (loc->si))
6576 == GIMPLE_SWITCH));
6578 gsi_insert_on_edge (loc->e, assert_stmt);
6579 return true;
6582 /* If the stmt iterator points at the end then this is an insertion
6583 at the beginning of a block. */
6584 if (gsi_end_p (loc->si))
6586 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
6587 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
6588 return false;
6591 /* Otherwise, we can insert right after LOC->SI iff the
6592 statement must not be the last statement in the block. */
6593 stmt = gsi_stmt (loc->si);
6594 if (!stmt_ends_bb_p (stmt))
6596 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
6597 return false;
6600 /* If STMT must be the last statement in BB, we can only insert new
6601 assertions on the non-abnormal edge out of BB. Note that since
6602 STMT is not control flow, there may only be one non-abnormal/eh edge
6603 out of BB. */
6604 FOR_EACH_EDGE (e, ei, loc->bb->succs)
6605 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
6607 gsi_insert_on_edge (e, assert_stmt);
6608 return true;
6611 gcc_unreachable ();
6614 /* Qsort helper for sorting assert locations. */
6616 static int
6617 compare_assert_loc (const void *pa, const void *pb)
6619 assert_locus * const a = *(assert_locus * const *)pa;
6620 assert_locus * const b = *(assert_locus * const *)pb;
6621 if (! a->e && b->e)
6622 return 1;
6623 else if (a->e && ! b->e)
6624 return -1;
6626 /* Sort after destination index. */
6627 if (! a->e && ! b->e)
6629 else if (a->e->dest->index > b->e->dest->index)
6630 return 1;
6631 else if (a->e->dest->index < b->e->dest->index)
6632 return -1;
6634 /* Sort after comp_code. */
6635 if (a->comp_code > b->comp_code)
6636 return 1;
6637 else if (a->comp_code < b->comp_code)
6638 return -1;
6640 /* Break the tie using hashing and source/bb index. */
6641 hashval_t ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
6642 hashval_t hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
6643 if (ha == hb)
6644 return (a->e && b->e
6645 ? a->e->src->index - b->e->src->index
6646 : a->bb->index - b->bb->index);
6647 return ha - hb;
6650 /* Process all the insertions registered for every name N_i registered
6651 in NEED_ASSERT_FOR. The list of assertions to be inserted are
6652 found in ASSERTS_FOR[i]. */
6654 static void
6655 process_assert_insertions (void)
6657 unsigned i;
6658 bitmap_iterator bi;
6659 bool update_edges_p = false;
6660 int num_asserts = 0;
6662 if (dump_file && (dump_flags & TDF_DETAILS))
6663 dump_all_asserts (dump_file);
6665 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
6667 assert_locus *loc = asserts_for[i];
6668 gcc_assert (loc);
6670 auto_vec<assert_locus *, 16> asserts;
6671 for (; loc; loc = loc->next)
6672 asserts.safe_push (loc);
6673 asserts.qsort (compare_assert_loc);
6675 /* Push down common asserts to successors and remove redundant ones. */
6676 unsigned ecnt = 0;
6677 assert_locus *common = NULL;
6678 unsigned commonj = 0;
6679 for (unsigned j = 0; j < asserts.length (); ++j)
6681 loc = asserts[j];
6682 if (! loc->e)
6683 common = NULL;
6684 else if (! common
6685 || loc->e->dest != common->e->dest
6686 || loc->comp_code != common->comp_code
6687 || ! operand_equal_p (loc->val, common->val, 0)
6688 || ! operand_equal_p (loc->expr, common->expr, 0))
6690 commonj = j;
6691 common = loc;
6692 ecnt = 1;
6694 else if (loc->e == asserts[j-1]->e)
6696 /* Remove duplicate asserts. */
6697 if (commonj == j - 1)
6699 commonj = j;
6700 common = loc;
6702 free (asserts[j-1]);
6703 asserts[j-1] = NULL;
6705 else
6707 ecnt++;
6708 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
6710 /* We have the same assertion on all incoming edges of a BB.
6711 Insert it at the beginning of that block. */
6712 loc->bb = loc->e->dest;
6713 loc->e = NULL;
6714 loc->si = gsi_none ();
6715 common = NULL;
6716 /* Clear asserts commoned. */
6717 for (; commonj != j; ++commonj)
6718 if (asserts[commonj])
6720 free (asserts[commonj]);
6721 asserts[commonj] = NULL;
6727 for (unsigned j = 0; j < asserts.length (); ++j)
6729 loc = asserts[j];
6730 if (! loc)
6731 continue;
6732 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
6733 num_asserts++;
6734 free (loc);
6738 if (update_edges_p)
6739 gsi_commit_edge_inserts ();
6741 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
6742 num_asserts);
6746 /* Traverse the flowgraph looking for conditional jumps to insert range
6747 expressions. These range expressions are meant to provide information
6748 to optimizations that need to reason in terms of value ranges. They
6749 will not be expanded into RTL. For instance, given:
6751 x = ...
6752 y = ...
6753 if (x < y)
6754 y = x - 2;
6755 else
6756 x = y + 3;
6758 this pass will transform the code into:
6760 x = ...
6761 y = ...
6762 if (x < y)
6764 x = ASSERT_EXPR <x, x < y>
6765 y = x - 2
6767 else
6769 y = ASSERT_EXPR <y, x >= y>
6770 x = y + 3
6773 The idea is that once copy and constant propagation have run, other
6774 optimizations will be able to determine what ranges of values can 'x'
6775 take in different paths of the code, simply by checking the reaching
6776 definition of 'x'. */
6778 static void
6779 insert_range_assertions (void)
6781 need_assert_for = BITMAP_ALLOC (NULL);
6782 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
6784 calculate_dominance_info (CDI_DOMINATORS);
6786 find_assert_locations ();
6787 if (!bitmap_empty_p (need_assert_for))
6789 process_assert_insertions ();
6790 update_ssa (TODO_update_ssa_no_phi);
6793 if (dump_file && (dump_flags & TDF_DETAILS))
6795 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
6796 dump_function_to_file (current_function_decl, dump_file, dump_flags);
6799 free (asserts_for);
6800 BITMAP_FREE (need_assert_for);
6803 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
6804 and "struct" hacks. If VRP can determine that the
6805 array subscript is a constant, check if it is outside valid
6806 range. If the array subscript is a RANGE, warn if it is
6807 non-overlapping with valid range.
6808 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
6810 static void
6811 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
6813 value_range *vr = NULL;
6814 tree low_sub, up_sub;
6815 tree low_bound, up_bound, up_bound_p1;
6817 if (TREE_NO_WARNING (ref))
6818 return;
6820 low_sub = up_sub = TREE_OPERAND (ref, 1);
6821 up_bound = array_ref_up_bound (ref);
6823 /* Can not check flexible arrays. */
6824 if (!up_bound
6825 || TREE_CODE (up_bound) != INTEGER_CST)
6826 return;
6828 /* Accesses to trailing arrays via pointers may access storage
6829 beyond the types array bounds. */
6830 if (warn_array_bounds < 2
6831 && array_at_struct_end_p (ref))
6832 return;
6834 low_bound = array_ref_low_bound (ref);
6835 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
6836 build_int_cst (TREE_TYPE (up_bound), 1));
6838 /* Empty array. */
6839 if (tree_int_cst_equal (low_bound, up_bound_p1))
6841 warning_at (location, OPT_Warray_bounds,
6842 "array subscript is above array bounds");
6843 TREE_NO_WARNING (ref) = 1;
6846 if (TREE_CODE (low_sub) == SSA_NAME)
6848 vr = get_value_range (low_sub);
6849 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
6851 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
6852 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
6856 if (vr && vr->type == VR_ANTI_RANGE)
6858 if (TREE_CODE (up_sub) == INTEGER_CST
6859 && (ignore_off_by_one
6860 ? tree_int_cst_lt (up_bound, up_sub)
6861 : tree_int_cst_le (up_bound, up_sub))
6862 && TREE_CODE (low_sub) == INTEGER_CST
6863 && tree_int_cst_le (low_sub, low_bound))
6865 warning_at (location, OPT_Warray_bounds,
6866 "array subscript is outside array bounds");
6867 TREE_NO_WARNING (ref) = 1;
6870 else if (TREE_CODE (up_sub) == INTEGER_CST
6871 && (ignore_off_by_one
6872 ? !tree_int_cst_le (up_sub, up_bound_p1)
6873 : !tree_int_cst_le (up_sub, up_bound)))
6875 if (dump_file && (dump_flags & TDF_DETAILS))
6877 fprintf (dump_file, "Array bound warning for ");
6878 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6879 fprintf (dump_file, "\n");
6881 warning_at (location, OPT_Warray_bounds,
6882 "array subscript is above array bounds");
6883 TREE_NO_WARNING (ref) = 1;
6885 else if (TREE_CODE (low_sub) == INTEGER_CST
6886 && tree_int_cst_lt (low_sub, low_bound))
6888 if (dump_file && (dump_flags & TDF_DETAILS))
6890 fprintf (dump_file, "Array bound warning for ");
6891 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6892 fprintf (dump_file, "\n");
6894 warning_at (location, OPT_Warray_bounds,
6895 "array subscript is below array bounds");
6896 TREE_NO_WARNING (ref) = 1;
6900 /* Searches if the expr T, located at LOCATION computes
6901 address of an ARRAY_REF, and call check_array_ref on it. */
6903 static void
6904 search_for_addr_array (tree t, location_t location)
6906 /* Check each ARRAY_REFs in the reference chain. */
6909 if (TREE_CODE (t) == ARRAY_REF)
6910 check_array_ref (location, t, true /*ignore_off_by_one*/);
6912 t = TREE_OPERAND (t, 0);
6914 while (handled_component_p (t));
6916 if (TREE_CODE (t) == MEM_REF
6917 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
6918 && !TREE_NO_WARNING (t))
6920 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
6921 tree low_bound, up_bound, el_sz;
6922 offset_int idx;
6923 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
6924 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
6925 || !TYPE_DOMAIN (TREE_TYPE (tem)))
6926 return;
6928 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6929 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6930 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
6931 if (!low_bound
6932 || TREE_CODE (low_bound) != INTEGER_CST
6933 || !up_bound
6934 || TREE_CODE (up_bound) != INTEGER_CST
6935 || !el_sz
6936 || TREE_CODE (el_sz) != INTEGER_CST)
6937 return;
6939 idx = mem_ref_offset (t);
6940 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
6941 if (idx < 0)
6943 if (dump_file && (dump_flags & TDF_DETAILS))
6945 fprintf (dump_file, "Array bound warning for ");
6946 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6947 fprintf (dump_file, "\n");
6949 warning_at (location, OPT_Warray_bounds,
6950 "array subscript is below array bounds");
6951 TREE_NO_WARNING (t) = 1;
6953 else if (idx > (wi::to_offset (up_bound)
6954 - wi::to_offset (low_bound) + 1))
6956 if (dump_file && (dump_flags & TDF_DETAILS))
6958 fprintf (dump_file, "Array bound warning for ");
6959 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6960 fprintf (dump_file, "\n");
6962 warning_at (location, OPT_Warray_bounds,
6963 "array subscript is above array bounds");
6964 TREE_NO_WARNING (t) = 1;
6969 /* walk_tree() callback that checks if *TP is
6970 an ARRAY_REF inside an ADDR_EXPR (in which an array
6971 subscript one outside the valid range is allowed). Call
6972 check_array_ref for each ARRAY_REF found. The location is
6973 passed in DATA. */
6975 static tree
6976 check_array_bounds (tree *tp, int *walk_subtree, void *data)
6978 tree t = *tp;
6979 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6980 location_t location;
6982 if (EXPR_HAS_LOCATION (t))
6983 location = EXPR_LOCATION (t);
6984 else
6986 location_t *locp = (location_t *) wi->info;
6987 location = *locp;
6990 *walk_subtree = TRUE;
6992 if (TREE_CODE (t) == ARRAY_REF)
6993 check_array_ref (location, t, false /*ignore_off_by_one*/);
6995 else if (TREE_CODE (t) == ADDR_EXPR)
6997 search_for_addr_array (t, location);
6998 *walk_subtree = FALSE;
7001 return NULL_TREE;
7004 /* Walk over all statements of all reachable BBs and call check_array_bounds
7005 on them. */
7007 static void
7008 check_all_array_refs (void)
7010 basic_block bb;
7011 gimple_stmt_iterator si;
7013 FOR_EACH_BB_FN (bb, cfun)
7015 edge_iterator ei;
7016 edge e;
7017 bool executable = false;
7019 /* Skip blocks that were found to be unreachable. */
7020 FOR_EACH_EDGE (e, ei, bb->preds)
7021 executable |= !!(e->flags & EDGE_EXECUTABLE);
7022 if (!executable)
7023 continue;
7025 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
7027 gimple *stmt = gsi_stmt (si);
7028 struct walk_stmt_info wi;
7029 if (!gimple_has_location (stmt)
7030 || is_gimple_debug (stmt))
7031 continue;
7033 memset (&wi, 0, sizeof (wi));
7035 location_t loc = gimple_location (stmt);
7036 wi.info = &loc;
7038 walk_gimple_op (gsi_stmt (si),
7039 check_array_bounds,
7040 &wi);
7045 /* Return true if all imm uses of VAR are either in STMT, or
7046 feed (optionally through a chain of single imm uses) GIMPLE_COND
7047 in basic block COND_BB. */
7049 static bool
7050 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
7052 use_operand_p use_p, use2_p;
7053 imm_use_iterator iter;
7055 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
7056 if (USE_STMT (use_p) != stmt)
7058 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
7059 if (is_gimple_debug (use_stmt))
7060 continue;
7061 while (is_gimple_assign (use_stmt)
7062 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
7063 && single_imm_use (gimple_assign_lhs (use_stmt),
7064 &use2_p, &use_stmt2))
7065 use_stmt = use_stmt2;
7066 if (gimple_code (use_stmt) != GIMPLE_COND
7067 || gimple_bb (use_stmt) != cond_bb)
7068 return false;
7070 return true;
7073 /* Handle
7074 _4 = x_3 & 31;
7075 if (_4 != 0)
7076 goto <bb 6>;
7077 else
7078 goto <bb 7>;
7079 <bb 6>:
7080 __builtin_unreachable ();
7081 <bb 7>:
7082 x_5 = ASSERT_EXPR <x_3, ...>;
7083 If x_3 has no other immediate uses (checked by caller),
7084 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
7085 from the non-zero bitmask. */
7087 static void
7088 maybe_set_nonzero_bits (basic_block bb, tree var)
7090 edge e = single_pred_edge (bb);
7091 basic_block cond_bb = e->src;
7092 gimple *stmt = last_stmt (cond_bb);
7093 tree cst;
7095 if (stmt == NULL
7096 || gimple_code (stmt) != GIMPLE_COND
7097 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
7098 ? EQ_EXPR : NE_EXPR)
7099 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
7100 || !integer_zerop (gimple_cond_rhs (stmt)))
7101 return;
7103 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
7104 if (!is_gimple_assign (stmt)
7105 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
7106 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
7107 return;
7108 if (gimple_assign_rhs1 (stmt) != var)
7110 gimple *stmt2;
7112 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
7113 return;
7114 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
7115 if (!gimple_assign_cast_p (stmt2)
7116 || gimple_assign_rhs1 (stmt2) != var
7117 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
7118 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
7119 != TYPE_PRECISION (TREE_TYPE (var))))
7120 return;
7122 cst = gimple_assign_rhs2 (stmt);
7123 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
7126 /* Convert range assertion expressions into the implied copies and
7127 copy propagate away the copies. Doing the trivial copy propagation
7128 here avoids the need to run the full copy propagation pass after
7129 VRP.
7131 FIXME, this will eventually lead to copy propagation removing the
7132 names that had useful range information attached to them. For
7133 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
7134 then N_i will have the range [3, +INF].
7136 However, by converting the assertion into the implied copy
7137 operation N_i = N_j, we will then copy-propagate N_j into the uses
7138 of N_i and lose the range information. We may want to hold on to
7139 ASSERT_EXPRs a little while longer as the ranges could be used in
7140 things like jump threading.
7142 The problem with keeping ASSERT_EXPRs around is that passes after
7143 VRP need to handle them appropriately.
7145 Another approach would be to make the range information a first
7146 class property of the SSA_NAME so that it can be queried from
7147 any pass. This is made somewhat more complex by the need for
7148 multiple ranges to be associated with one SSA_NAME. */
7150 static void
7151 remove_range_assertions (void)
7153 basic_block bb;
7154 gimple_stmt_iterator si;
7155 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
7156 a basic block preceeded by GIMPLE_COND branching to it and
7157 __builtin_trap, -1 if not yet checked, 0 otherwise. */
7158 int is_unreachable;
7160 /* Note that the BSI iterator bump happens at the bottom of the
7161 loop and no bump is necessary if we're removing the statement
7162 referenced by the current BSI. */
7163 FOR_EACH_BB_FN (bb, cfun)
7164 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
7166 gimple *stmt = gsi_stmt (si);
7168 if (is_gimple_assign (stmt)
7169 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
7171 tree lhs = gimple_assign_lhs (stmt);
7172 tree rhs = gimple_assign_rhs1 (stmt);
7173 tree var;
7175 var = ASSERT_EXPR_VAR (rhs);
7177 if (TREE_CODE (var) == SSA_NAME
7178 && !POINTER_TYPE_P (TREE_TYPE (lhs))
7179 && SSA_NAME_RANGE_INFO (lhs))
7181 if (is_unreachable == -1)
7183 is_unreachable = 0;
7184 if (single_pred_p (bb)
7185 && assert_unreachable_fallthru_edge_p
7186 (single_pred_edge (bb)))
7187 is_unreachable = 1;
7189 /* Handle
7190 if (x_7 >= 10 && x_7 < 20)
7191 __builtin_unreachable ();
7192 x_8 = ASSERT_EXPR <x_7, ...>;
7193 if the only uses of x_7 are in the ASSERT_EXPR and
7194 in the condition. In that case, we can copy the
7195 range info from x_8 computed in this pass also
7196 for x_7. */
7197 if (is_unreachable
7198 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
7199 single_pred (bb)))
7201 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
7202 SSA_NAME_RANGE_INFO (lhs)->get_min (),
7203 SSA_NAME_RANGE_INFO (lhs)->get_max ());
7204 maybe_set_nonzero_bits (bb, var);
7208 /* Propagate the RHS into every use of the LHS. For SSA names
7209 also propagate abnormals as it merely restores the original
7210 IL in this case (an replace_uses_by would assert). */
7211 if (TREE_CODE (var) == SSA_NAME)
7213 imm_use_iterator iter;
7214 use_operand_p use_p;
7215 gimple *use_stmt;
7216 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
7217 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
7218 SET_USE (use_p, var);
7220 else
7221 replace_uses_by (lhs, var);
7223 /* And finally, remove the copy, it is not needed. */
7224 gsi_remove (&si, true);
7225 release_defs (stmt);
7227 else
7229 if (!is_gimple_debug (gsi_stmt (si)))
7230 is_unreachable = 0;
7231 gsi_next (&si);
7237 /* Return true if STMT is interesting for VRP. */
7239 static bool
7240 stmt_interesting_for_vrp (gimple *stmt)
7242 if (gimple_code (stmt) == GIMPLE_PHI)
7244 tree res = gimple_phi_result (stmt);
7245 return (!virtual_operand_p (res)
7246 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
7247 || POINTER_TYPE_P (TREE_TYPE (res))));
7249 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7251 tree lhs = gimple_get_lhs (stmt);
7253 /* In general, assignments with virtual operands are not useful
7254 for deriving ranges, with the obvious exception of calls to
7255 builtin functions. */
7256 if (lhs && TREE_CODE (lhs) == SSA_NAME
7257 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
7258 || POINTER_TYPE_P (TREE_TYPE (lhs)))
7259 && (is_gimple_call (stmt)
7260 || !gimple_vuse (stmt)))
7261 return true;
7262 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
7263 switch (gimple_call_internal_fn (stmt))
7265 case IFN_ADD_OVERFLOW:
7266 case IFN_SUB_OVERFLOW:
7267 case IFN_MUL_OVERFLOW:
7268 /* These internal calls return _Complex integer type,
7269 but are interesting to VRP nevertheless. */
7270 if (lhs && TREE_CODE (lhs) == SSA_NAME)
7271 return true;
7272 break;
7273 default:
7274 break;
7277 else if (gimple_code (stmt) == GIMPLE_COND
7278 || gimple_code (stmt) == GIMPLE_SWITCH)
7279 return true;
7281 return false;
7284 /* Initialize VRP lattice. */
7286 static void
7287 vrp_initialize_lattice ()
7289 values_propagated = false;
7290 num_vr_values = num_ssa_names;
7291 vr_value = XCNEWVEC (value_range *, num_vr_values);
7292 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
7293 bitmap_obstack_initialize (&vrp_equiv_obstack);
7296 /* Initialization required by ssa_propagate engine. */
7298 static void
7299 vrp_initialize ()
7301 basic_block bb;
7303 FOR_EACH_BB_FN (bb, cfun)
7305 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
7306 gsi_next (&si))
7308 gphi *phi = si.phi ();
7309 if (!stmt_interesting_for_vrp (phi))
7311 tree lhs = PHI_RESULT (phi);
7312 set_value_range_to_varying (get_value_range (lhs));
7313 prop_set_simulate_again (phi, false);
7315 else
7316 prop_set_simulate_again (phi, true);
7319 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
7320 gsi_next (&si))
7322 gimple *stmt = gsi_stmt (si);
7324 /* If the statement is a control insn, then we do not
7325 want to avoid simulating the statement once. Failure
7326 to do so means that those edges will never get added. */
7327 if (stmt_ends_bb_p (stmt))
7328 prop_set_simulate_again (stmt, true);
7329 else if (!stmt_interesting_for_vrp (stmt))
7331 set_defs_to_varying (stmt);
7332 prop_set_simulate_again (stmt, false);
7334 else
7335 prop_set_simulate_again (stmt, true);
7340 /* Return the singleton value-range for NAME or NAME. */
7342 static inline tree
7343 vrp_valueize (tree name)
7345 if (TREE_CODE (name) == SSA_NAME)
7347 value_range *vr = get_value_range (name);
7348 if (vr->type == VR_RANGE
7349 && (TREE_CODE (vr->min) == SSA_NAME
7350 || is_gimple_min_invariant (vr->min))
7351 && vrp_operand_equal_p (vr->min, vr->max))
7352 return vr->min;
7354 return name;
7357 /* Return the singleton value-range for NAME if that is a constant
7358 but signal to not follow SSA edges. */
7360 static inline tree
7361 vrp_valueize_1 (tree name)
7363 if (TREE_CODE (name) == SSA_NAME)
7365 /* If the definition may be simulated again we cannot follow
7366 this SSA edge as the SSA propagator does not necessarily
7367 re-visit the use. */
7368 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
7369 if (!gimple_nop_p (def_stmt)
7370 && prop_simulate_again_p (def_stmt))
7371 return NULL_TREE;
7372 value_range *vr = get_value_range (name);
7373 if (range_int_cst_singleton_p (vr))
7374 return vr->min;
7376 return name;
7379 /* Visit assignment STMT. If it produces an interesting range, record
7380 the range in VR and set LHS to OUTPUT_P. */
7382 static void
7383 vrp_visit_assignment_or_call (gimple *stmt, tree *output_p, value_range *vr)
7385 tree lhs;
7386 enum gimple_code code = gimple_code (stmt);
7387 lhs = gimple_get_lhs (stmt);
7388 *output_p = NULL_TREE;
7390 /* We only keep track of ranges in integral and pointer types. */
7391 if (TREE_CODE (lhs) == SSA_NAME
7392 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
7393 /* It is valid to have NULL MIN/MAX values on a type. See
7394 build_range_type. */
7395 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
7396 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
7397 || POINTER_TYPE_P (TREE_TYPE (lhs))))
7399 *output_p = lhs;
7401 /* Try folding the statement to a constant first. */
7402 tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
7403 vrp_valueize_1);
7404 if (tem)
7406 if (TREE_CODE (tem) == SSA_NAME
7407 && (SSA_NAME_IS_DEFAULT_DEF (tem)
7408 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (tem))))
7410 extract_range_from_ssa_name (vr, tem);
7411 return;
7413 else if (is_gimple_min_invariant (tem))
7415 set_value_range_to_value (vr, tem, NULL);
7416 return;
7419 /* Then dispatch to value-range extracting functions. */
7420 if (code == GIMPLE_CALL)
7421 extract_range_basic (vr, stmt);
7422 else
7423 extract_range_from_assignment (vr, as_a <gassign *> (stmt));
7427 /* Helper that gets the value range of the SSA_NAME with version I
7428 or a symbolic range containing the SSA_NAME only if the value range
7429 is varying or undefined. */
7431 static inline value_range
7432 get_vr_for_comparison (int i)
7434 value_range vr = *get_value_range (ssa_name (i));
7436 /* If name N_i does not have a valid range, use N_i as its own
7437 range. This allows us to compare against names that may
7438 have N_i in their ranges. */
7439 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
7441 vr.type = VR_RANGE;
7442 vr.min = ssa_name (i);
7443 vr.max = ssa_name (i);
7446 return vr;
7449 /* Compare all the value ranges for names equivalent to VAR with VAL
7450 using comparison code COMP. Return the same value returned by
7451 compare_range_with_value, including the setting of
7452 *STRICT_OVERFLOW_P. */
7454 static tree
7455 compare_name_with_value (enum tree_code comp, tree var, tree val,
7456 bool *strict_overflow_p, bool use_equiv_p)
7458 bitmap_iterator bi;
7459 unsigned i;
7460 bitmap e;
7461 tree retval, t;
7462 int used_strict_overflow;
7463 bool sop;
7464 value_range equiv_vr;
7466 /* Get the set of equivalences for VAR. */
7467 e = get_value_range (var)->equiv;
7469 /* Start at -1. Set it to 0 if we do a comparison without relying
7470 on overflow, or 1 if all comparisons rely on overflow. */
7471 used_strict_overflow = -1;
7473 /* Compare vars' value range with val. */
7474 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
7475 sop = false;
7476 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
7477 if (retval)
7478 used_strict_overflow = sop ? 1 : 0;
7480 /* If the equiv set is empty we have done all work we need to do. */
7481 if (e == NULL)
7483 if (retval
7484 && used_strict_overflow > 0)
7485 *strict_overflow_p = true;
7486 return retval;
7489 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
7491 tree name = ssa_name (i);
7492 if (! name)
7493 continue;
7495 if (! use_equiv_p
7496 && ! SSA_NAME_IS_DEFAULT_DEF (name)
7497 && prop_simulate_again_p (SSA_NAME_DEF_STMT (name)))
7498 continue;
7500 equiv_vr = get_vr_for_comparison (i);
7501 sop = false;
7502 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
7503 if (t)
7505 /* If we get different answers from different members
7506 of the equivalence set this check must be in a dead
7507 code region. Folding it to a trap representation
7508 would be correct here. For now just return don't-know. */
7509 if (retval != NULL
7510 && t != retval)
7512 retval = NULL_TREE;
7513 break;
7515 retval = t;
7517 if (!sop)
7518 used_strict_overflow = 0;
7519 else if (used_strict_overflow < 0)
7520 used_strict_overflow = 1;
7524 if (retval
7525 && used_strict_overflow > 0)
7526 *strict_overflow_p = true;
7528 return retval;
7532 /* Given a comparison code COMP and names N1 and N2, compare all the
7533 ranges equivalent to N1 against all the ranges equivalent to N2
7534 to determine the value of N1 COMP N2. Return the same value
7535 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
7536 whether we relied on an overflow infinity in the comparison. */
7539 static tree
7540 compare_names (enum tree_code comp, tree n1, tree n2,
7541 bool *strict_overflow_p)
7543 tree t, retval;
7544 bitmap e1, e2;
7545 bitmap_iterator bi1, bi2;
7546 unsigned i1, i2;
7547 int used_strict_overflow;
/* Lazily-created empty bitmaps, shared across calls, used as
   stand-ins when a name has no equivalence set.  They are never
   freed; the obstack lives for the whole compilation.  */
7548 static bitmap_obstack *s_obstack = NULL;
7549 static bitmap s_e1 = NULL, s_e2 = NULL;
7551 /* Compare the ranges of every name equivalent to N1 against the
7552 ranges of every name equivalent to N2. */
7553 e1 = get_value_range (n1)->equiv;
7554 e2 = get_value_range (n2)->equiv;
7556 /* Use the fake bitmaps if e1 or e2 are not available. */
7557 if (s_obstack == NULL)
7559 s_obstack = XNEW (bitmap_obstack);
7560 bitmap_obstack_initialize (s_obstack);
7561 s_e1 = BITMAP_ALLOC (s_obstack);
7562 s_e2 = BITMAP_ALLOC (s_obstack);
7564 if (e1 == NULL)
7565 e1 = s_e1;
7566 if (e2 == NULL)
7567 e2 = s_e2;
7569 /* Add N1 and N2 to their own set of equivalences to avoid
7570 duplicating the body of the loop just to check N1 and N2
7571 ranges. */
7572 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
7573 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
7575 /* If the equivalence sets have a common intersection, then the two
7576 names can be compared without checking their ranges. */
7577 if (bitmap_intersect_p (e1, e2))
/* N1 and N2 hold the same value, so reflexive comparisons
   (EQ/GE/LE) are true and the strict ones false.  The temporary
   bits added above must be cleared on every exit path since E1/E2
   may be the names' real equivalence bitmaps.  */
7579 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7580 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7582 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
7583 ? boolean_true_node
7584 : boolean_false_node;
7587 /* Start at -1. Set it to 0 if we do a comparison without relying
7588 on overflow, or 1 if all comparisons rely on overflow. */
7589 used_strict_overflow = -1;
7591 /* Otherwise, compare all the equivalent ranges. First, add N1 and
7592 N2 to their own set of equivalences to avoid duplicating the body
7593 of the loop just to check N1 and N2 ranges. */
7594 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
7596 if (! ssa_name (i1))
7597 continue;
7599 value_range vr1 = get_vr_for_comparison (i1);
7601 t = retval = NULL_TREE;
7602 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
7604 if (! ssa_name (i2))
7605 continue;
7607 bool sop = false;
7609 value_range vr2 = get_vr_for_comparison (i2);
7611 t = compare_ranges (comp, &vr1, &vr2, &sop);
7612 if (t)
7614 /* If we get different answers from different members
7615 of the equivalence set this check must be in a dead
7616 code region. Folding it to a trap representation
7617 would be correct here. For now just return don't-know. */
7618 if (retval != NULL
7619 && t != retval)
7621 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7622 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7623 return NULL_TREE;
7625 retval = t;
7627 if (!sop)
7628 used_strict_overflow = 0;
7629 else if (used_strict_overflow < 0)
7630 used_strict_overflow = 1;
/* A consistent answer across all of E2 for this member of E1
   decides the comparison.  */
7634 if (retval)
7636 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7637 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7638 if (used_strict_overflow > 0)
7639 *strict_overflow_p = true;
7640 return retval;
7644 /* None of the equivalent ranges are useful in computing this
7645 comparison. */
7646 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7647 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7648 return NULL_TREE;
7651 /* Helper function for vrp_evaluate_conditional_warnv & other
7652 optimizers. */
7654 static tree
7655 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
7656 tree op0, tree op1,
7657 bool * strict_overflow_p)
7659 value_range *vr0, *vr1;
7661 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
7662 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
7664 tree res = NULL_TREE;
7665 if (vr0 && vr1)
7666 res = compare_ranges (code, vr0, vr1, strict_overflow_p);
7667 if (!res && vr0)
7668 res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
7669 if (!res && vr1)
7670 res = (compare_range_with_value
7671 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
7672 return res;
7675 /* Helper function for vrp_evaluate_conditional_warnv. */
/* Evaluate OP0 CODE OP1 for vrp_evaluate_conditional and friends.
   First tries pure range information; only if that fails and
   USE_EQUIV_P is set are equivalence sets consulted.  When
   ONLY_RANGES is non-NULL, *ONLY_RANGES is set to false if the
   answer required equivalences.  Returns the folded boolean value or
   NULL_TREE.  */
7677 static tree
7678 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
7679 tree op1, bool use_equiv_p,
7680 bool *strict_overflow_p, bool *only_ranges)
7682 tree ret;
7683 if (only_ranges)
7684 *only_ranges = true;
7686 /* We only deal with integral and pointer types. */
7687 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
7688 && !POINTER_TYPE_P (TREE_TYPE (op0)))
7689 return NULL_TREE;
7691 /* If OP0 CODE OP1 is an overflow comparison, if it can be expressed
7692 as a simple equality test, then prefer that over its current form
7693 for evaluation.
7695 An overflow test which collapses to an equality test can always be
7696 expressed as a comparison of one argument against zero. Overflow
7697 occurs when the chosen argument is zero and does not occur if the
7698 chosen argument is not zero. */
7699 tree x;
7700 if (overflow_comparison_p (code, op0, op1, use_equiv_p, &x))
/* X is the argument the overflow test reduces to; max is the
   all-ones value of OP0's precision used to recognize the A-1
   forms below.  */
7702 wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED);
7703 /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0)
7704 B = A - 1; if (A > B) -> B = A - 1; if (A != 0)
7705 B = A + 1; if (B < A) -> B = A + 1; if (B == 0)
7706 B = A + 1; if (B > A) -> B = A + 1; if (B != 0) */
7707 if (integer_zerop (x))
7709 op1 = x;
7710 code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR;
7712 /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0)
7713 B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
7714 B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
7715 B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */
7716 else if (wi::eq_p (x, max - 1))
7718 op0 = op1;
7719 op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
7720 code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR;
/* Range-only evaluation first; it is the cheap path.  */
7724 if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
7725 (code, op0, op1, strict_overflow_p)))
7726 return ret;
7727 if (only_ranges)
7728 *only_ranges = false;
7729 /* Do not use compare_names during propagation, it's quadratic. */
7730 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME
7731 && use_equiv_p)
7732 return compare_names (code, op0, op1, strict_overflow_p);
7733 else if (TREE_CODE (op0) == SSA_NAME)
7734 return compare_name_with_value (code, op0, op1,
7735 strict_overflow_p, use_equiv_p);
7736 else if (TREE_CODE (op1) == SSA_NAME)
7737 return compare_name_with_value (swap_tree_comparison (code), op1, op0,
7738 strict_overflow_p, use_equiv_p);
7739 return NULL_TREE;
7742 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
7743 information. Return NULL if the conditional can not be evaluated.
7744 The ranges of all the names equivalent with the operands in COND
7745 will be used when trying to compute the value. If the result is
7746 based on undefined signed overflow, issue a warning if
7747 appropriate. */
7749 static tree
7750 vrp_evaluate_conditional (tree_code code, tree op0, tree op1, gimple *stmt)
7752 bool sop;
7753 tree ret;
7754 bool only_ranges;
7756 /* Some passes and foldings leak constants with overflow flag set
7757 into the IL. Avoid doing wrong things with these and bail out. */
7758 if ((TREE_CODE (op0) == INTEGER_CST
7759 && TREE_OVERFLOW (op0))
7760 || (TREE_CODE (op1) == INTEGER_CST
7761 && TREE_OVERFLOW (op1)))
7762 return NULL_TREE;
7764 sop = false;
7765 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
7766 &only_ranges);
/* If the answer relied on undefined signed overflow, optionally warn
   the user (-Wstrict-overflow) but still return the folded value.  */
7768 if (ret && sop)
7770 enum warn_strict_overflow_code wc;
7771 const char* warnmsg;
7773 if (is_gimple_min_invariant (ret))
7775 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
7776 warnmsg = G_("assuming signed overflow does not occur when "
7777 "simplifying conditional to constant");
7779 else
7781 wc = WARN_STRICT_OVERFLOW_COMPARISON;
7782 warnmsg = G_("assuming signed overflow does not occur when "
7783 "simplifying conditional");
7786 if (issue_strict_overflow_warning (wc))
7788 location_t location;
7790 if (!gimple_has_location (stmt))
7791 location = input_location;
7792 else
7793 location = gimple_location (stmt);
7794 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
/* -Wtype-limits: warn when the fold succeeded purely from ranges and
   OP0's range already covers its whole type, i.e. the comparison is
   decided by the type's limits, not by the program's values.  */
7798 if (warn_type_limits
7799 && ret && only_ranges
7800 && TREE_CODE_CLASS (code) == tcc_comparison
7801 && TREE_CODE (op0) == SSA_NAME)
7803 /* If the comparison is being folded and the operand on the LHS
7804 is being compared against a constant value that is outside of
7805 the natural range of OP0's type, then the predicate will
7806 always fold regardless of the value of OP0. If -Wtype-limits
7807 was specified, emit a warning. */
7808 tree type = TREE_TYPE (op0);
7809 value_range *vr0 = get_value_range (op0);
7811 if (vr0->type == VR_RANGE
7812 && INTEGRAL_TYPE_P (type)
7813 && vrp_val_is_min (vr0->min)
7814 && vrp_val_is_max (vr0->max)
7815 && is_gimple_min_invariant (op1))
7817 location_t location;
7819 if (!gimple_has_location (stmt))
7820 location = input_location;
7821 else
7822 location = gimple_location (stmt);
7824 warning_at (location, OPT_Wtype_limits,
7825 integer_zerop (ret)
7826 ? G_("comparison always false "
7827 "due to limited range of data type")
7828 : G_("comparison always true "
7829 "due to limited range of data type"));
7833 return ret;
7837 /* Visit conditional statement STMT. If we can determine which edge
7838 will be taken out of STMT's basic block, record it in
7839 *TAKEN_EDGE_P. Otherwise, set *TAKEN_EDGE_P to NULL. */
7841 static void
7842 vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
7844 tree val;
7845 bool sop;
7847 *taken_edge_p = NULL;
/* Dump the predicate together with the known range of each SSA
   operand it uses.  */
7849 if (dump_file && (dump_flags & TDF_DETAILS))
7851 tree use;
7852 ssa_op_iter i;
7854 fprintf (dump_file, "\nVisiting conditional with predicate: ");
7855 print_gimple_stmt (dump_file, stmt, 0, 0);
7856 fprintf (dump_file, "\nWith known ranges\n");
7858 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
7860 fprintf (dump_file, "\t");
7861 print_generic_expr (dump_file, use, 0);
7862 fprintf (dump_file, ": ");
7863 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
7866 fprintf (dump_file, "\n");
7869 /* Compute the value of the predicate COND by checking the known
7870 ranges of each of its operands.
7872 Note that we cannot evaluate all the equivalent ranges here
7873 because those ranges may not yet be final and with the current
7874 propagation strategy, we cannot determine when the value ranges
7875 of the names in the equivalence set have changed.
7877 For instance, given the following code fragment
7879 i_5 = PHI <8, i_13>
7881 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
7882 if (i_14 == 1)
7885 Assume that on the first visit to i_14, i_5 has the temporary
7886 range [8, 8] because the second argument to the PHI function is
7887 not yet executable. We derive the range ~[0, 0] for i_14 and the
7888 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
7889 the first time, since i_14 is equivalent to the range [8, 8], we
7890 determine that the predicate is always false.
7892 On the next round of propagation, i_13 is determined to be
7893 VARYING, which causes i_5 to drop down to VARYING. So, another
7894 visit to i_14 is scheduled. In this second visit, we compute the
7895 exact same range and equivalence set for i_14, namely ~[0, 0] and
7896 { i_5 }. But we did not have the previous range for i_5
7897 registered, so vrp_visit_assignment thinks that the range for
7898 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
7899 is not visited again, which stops propagation from visiting
7900 statements in the THEN clause of that if().
7902 To properly fix this we would need to keep the previous range
7903 value for the names in the equivalence set. This way we would've
7904 discovered that from one visit to the other i_5 changed from
7905 range [8, 8] to VR_VARYING.
7907 However, fixing this apparent limitation may not be worth the
7908 additional checking. Testing on several code bases (GCC, DLV,
7909 MICO, TRAMP3D and SPEC2000) showed that doing this results in
7910 4 more predicates folded in SPEC. */
7911 sop = false;
7913 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
7914 gimple_cond_lhs (stmt),
7915 gimple_cond_rhs (stmt),
7916 false, &sop, NULL);
/* Only record a taken edge when the result does not depend on
   undefined signed overflow.  */
7917 if (val)
7919 if (!sop)
7920 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
7921 else
7923 if (dump_file && (dump_flags & TDF_DETAILS))
7924 fprintf (dump_file,
7925 "\nIgnoring predicate evaluation because "
7926 "it assumes that signed overflow is undefined");
7927 val = NULL_TREE;
7931 if (dump_file && (dump_flags & TDF_DETAILS))
7933 fprintf (dump_file, "\nPredicate evaluates to: ");
7934 if (val == NULL_TREE)
7935 fprintf (dump_file, "DON'T KNOW\n");
7936 else
7937 print_generic_stmt (dump_file, val, 0);
7941 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
7942 that includes the value VAL. The search is restricted to the range
7943 [START_IDX, n - 1] where n is the size of VEC.
7945 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
7946 returned.
7948 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
7949 it is placed in IDX and false is returned.
7951 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
7952 returned. */
7954 static bool
7955 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
7957 size_t n = gimple_switch_num_labels (stmt);
7958 size_t low, high;
7960 /* Find case label for minimum of the value range or the next one.
7961 At each iteration we are searching in [low, high - 1]. */
7963 for (low = start_idx, high = n; high != low; )
7965 tree t;
7966 int cmp;
7967 /* Note that i != high, so we never ask for n. */
7968 size_t i = (high + low) / 2;
7969 t = gimple_switch_label (stmt, i);
7971 /* Cache the result of comparing CASE_LOW and val. */
7972 cmp = tree_int_cst_compare (CASE_LOW (t), val);
7974 if (cmp == 0)
7976 /* Ranges cannot be empty. */
7977 *idx = i;
7978 return true;
7980 else if (cmp > 0)
7981 high = i;
7982 else
7984 low = i + 1;
7985 if (CASE_HIGH (t) != NULL
7986 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
7988 *idx = i;
7989 return true;
7994 *idx = high;
7995 return false;
7998 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
7999 for values between MIN and MAX. The first index is placed in MIN_IDX. The
8000 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
8001 then MAX_IDX < MIN_IDX.
8002 Returns true if the default label is not needed. */
8004 static bool
8005 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
8006 size_t *max_idx)
8008 size_t i, j;
/* Locate the first label at or above MIN, then, starting there, the
   last label at or below MAX.  A false return from
   find_case_label_index means that bound itself falls through to the
   default label.  */
8009 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
8010 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
8012 if (i == j
8013 && min_take_default
8014 && max_take_default)
8016 /* Only the default case label reached.
8017 Return an empty range. */
8018 *min_idx = 1;
8019 *max_idx = 0;
8020 return false;
8022 else
8024 bool take_default = min_take_default || max_take_default;
8025 tree low, high;
8026 size_t k;
/* When MAX itself hit no label, J indexes the first label above
   MAX; step back to the last label within [MIN, MAX].  */
8028 if (max_take_default)
8029 j--;
8031 /* If the case label range is continuous, we do not need
8032 the default case label. Verify that. */
8033 high = CASE_LOW (gimple_switch_label (stmt, i));
8034 if (CASE_HIGH (gimple_switch_label (stmt, i)))
8035 high = CASE_HIGH (gimple_switch_label (stmt, i));
8036 for (k = i + 1; k <= j; ++k)
8038 low = CASE_LOW (gimple_switch_label (stmt, k));
/* A gap wider than one between consecutive labels means some
   value in [MIN, MAX] reaches the default label.  */
8039 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
8041 take_default = true;
8042 break;
8044 high = low;
8045 if (CASE_HIGH (gimple_switch_label (stmt, k)))
8046 high = CASE_HIGH (gimple_switch_label (stmt, k));
8049 *min_idx = i;
8050 *max_idx = j;
8051 return !take_default;
8055 /* Searches the case label vector VEC for the ranges of CASE_LABELs that are
8056 used in range VR. The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
8057 MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
8058 Returns true if the default label is not needed. */
8060 static bool
8061 find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
8062 size_t *max_idx1, size_t *min_idx2,
8063 size_t *max_idx2)
8065 size_t i, j, k, l;
8066 unsigned int n = gimple_switch_num_labels (stmt);
8067 bool take_default;
8068 tree case_low, case_high;
8069 tree min = vr->min, max = vr->max;
8071 gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
8073 take_default = !find_case_label_range (stmt, min, max, &i, &j);
8075 /* Set second range to empty. */
8076 *min_idx2 = 1;
8077 *max_idx2 = 0;
/* For a plain range, the labels [i, j] found above are the answer
   and the second index pair stays empty.  */
8079 if (vr->type == VR_RANGE)
8081 *min_idx1 = i;
8082 *max_idx1 = j;
8083 return !take_default;
/* Anti-range ~[MIN, MAX]: start from "all labels" and carve out the
   labels fully covered by [MIN, MAX].  */
8086 /* Set first range to all case labels. */
8087 *min_idx1 = 1;
8088 *max_idx1 = n - 1;
8090 if (i > j)
8091 return false;
8093 /* Make sure all the values of case labels [i , j] are contained in
8094 range [MIN, MAX]. */
8095 case_low = CASE_LOW (gimple_switch_label (stmt, i));
8096 case_high = CASE_HIGH (gimple_switch_label (stmt, j));
8097 if (tree_int_cst_compare (case_low, min) < 0)
8098 i += 1;
8099 if (case_high != NULL_TREE
8100 && tree_int_cst_compare (max, case_high) < 0)
8101 j -= 1;
8103 if (i > j)
8104 return false;
8106 /* If the range spans case labels [i, j], the corresponding anti-range spans
8107 the labels [1, i - 1] and [j + 1, n - 1]. */
8108 k = j + 1;
8109 l = n - 1;
/* Normalize either sub-range to the canonical empty pair (1, 0)
   when it contains no labels.  */
8110 if (k > l)
8112 k = 1;
8113 l = 0;
8116 j = i - 1;
8117 i = 1;
8118 if (i > j)
8120 i = k;
8121 j = l;
8122 k = 1;
8123 l = 0;
8126 *min_idx1 = i;
8127 *max_idx1 = j;
8128 *min_idx2 = k;
8129 *max_idx2 = l;
8130 return false;
8133 /* Visit switch statement STMT. If we can determine which edge
8134 will be taken out of STMT's basic block, record it in
8135 *TAKEN_EDGE_P. Otherwise, *TAKEN_EDGE_P set to NULL. */
8137 static void
8138 vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
8140 tree op, val;
8141 value_range *vr;
8142 size_t i = 0, j = 0, k, l;
8143 bool take_default;
8145 *taken_edge_p = NULL;
8146 op = gimple_switch_index (stmt);
8147 if (TREE_CODE (op) != SSA_NAME)
8148 return;
8150 vr = get_value_range (op);
8151 if (dump_file && (dump_flags & TDF_DETAILS))
8153 fprintf (dump_file, "\nVisiting switch expression with operand ");
8154 print_generic_expr (dump_file, op, 0);
8155 fprintf (dump_file, " with known range ");
8156 dump_value_range (dump_file, vr);
8157 fprintf (dump_file, "\n");
/* Only constant (non-symbolic) ranges and anti-ranges can be matched
   against the case label constants.  */
8160 if ((vr->type != VR_RANGE
8161 && vr->type != VR_ANTI_RANGE)
8162 || symbolic_range_p (vr))
8163 return;
8165 /* Find the single edge that is taken from the switch expression. */
8166 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
8168 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
8169 label */
8170 if (j < i)
8172 gcc_assert (take_default);
8173 val = gimple_switch_default_label (stmt);
8175 else
8177 /* Check if labels with index i to j and maybe the default label
8178 are all reaching the same label. */
8180 val = gimple_switch_label (stmt, i);
8181 if (take_default
8182 && CASE_LABEL (gimple_switch_default_label (stmt))
8183 != CASE_LABEL (val))
8185 if (dump_file && (dump_flags & TDF_DETAILS))
8186 fprintf (dump_file, " not a single destination for this "
8187 "range\n");
8188 return;
8190 for (++i; i <= j; ++i)
8192 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
8194 if (dump_file && (dump_flags & TDF_DETAILS))
8195 fprintf (dump_file, " not a single destination for this "
8196 "range\n");
8197 return;
/* The second index pair [k, l] is the extra label sub-range an
   anti-range produces; it must reach the same label too.  */
8200 for (; k <= l; ++k)
8202 if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
8204 if (dump_file && (dump_flags & TDF_DETAILS))
8205 fprintf (dump_file, " not a single destination for this "
8206 "range\n");
8207 return;
/* All reachable labels agree: record the unique outgoing edge.  */
8212 *taken_edge_p = find_edge (gimple_bb (stmt),
8213 label_to_block (CASE_LABEL (val)));
8215 if (dump_file && (dump_flags & TDF_DETAILS))
8217 fprintf (dump_file, " will take edge to ");
8218 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
8223 /* Evaluate statement STMT. If the statement produces a useful range,
8224 set VR and corresponding OUTPUT_P.
8226 If STMT is a conditional branch and we can determine its truth
8227 value, the taken edge is recorded in *TAKEN_EDGE_P. */
8229 static void
8230 extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
8231 tree *output_p, value_range *vr)
8234 if (dump_file && (dump_flags & TDF_DETAILS))
8236 fprintf (dump_file, "\nVisiting statement:\n");
8237 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
8240 if (!stmt_interesting_for_vrp (stmt))
8241 gcc_assert (stmt_ends_bb_p (stmt));
8242 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
8243 vrp_visit_assignment_or_call (stmt, output_p, vr);
8244 else if (gimple_code (stmt) == GIMPLE_COND)
8245 vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
8246 else if (gimple_code (stmt) == GIMPLE_SWITCH)
8247 vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
8250 /* Evaluate statement STMT. If the statement produces a useful range,
8251 return SSA_PROP_INTERESTING and record the SSA name with the
8252 interesting range into *OUTPUT_P.
8254 If STMT is a conditional branch and we can determine its truth
8255 value, the taken edge is recorded in *TAKEN_EDGE_P.
8257 If STMT produces a varying value, return SSA_PROP_VARYING. */
8259 static enum ssa_prop_result
8260 vrp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
8262 value_range vr = VR_INITIALIZER;
8263 tree lhs = gimple_get_lhs (stmt);
8264 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
/* A non-NULL *OUTPUT_P means STMT produced a candidate range for
   that name; feed it into the lattice and classify the result.  */
8266 if (*output_p)
8268 if (update_value_range (*output_p, &vr))
8270 if (dump_file && (dump_flags & TDF_DETAILS))
8272 fprintf (dump_file, "Found new range for ");
8273 print_generic_expr (dump_file, *output_p, 0);
8274 fprintf (dump_file, ": ");
8275 dump_value_range (dump_file, &vr);
8276 fprintf (dump_file, "\n");
8279 if (vr.type == VR_VARYING)
8280 return SSA_PROP_VARYING;
8282 return SSA_PROP_INTERESTING;
8284 return SSA_PROP_NOT_INTERESTING;
8287 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
8288 switch (gimple_call_internal_fn (stmt))
8290 case IFN_ADD_OVERFLOW:
8291 case IFN_SUB_OVERFLOW:
8292 case IFN_MUL_OVERFLOW:
8293 /* These internal calls return _Complex integer type,
8294 which VRP does not track, but the immediate uses
8295 thereof might be interesting. */
8296 if (lhs && TREE_CODE (lhs) == SSA_NAME)
8298 imm_use_iterator iter;
8299 use_operand_p use_p;
8300 enum ssa_prop_result res = SSA_PROP_VARYING;
8302 set_value_range_to_varying (get_value_range (lhs));
/* Scan the REALPART_EXPR/IMAGPART_EXPR uses of the complex
   result; those extract the value and the overflow flag and
   do have trackable integral ranges.  */
8304 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
8306 gimple *use_stmt = USE_STMT (use_p);
8307 if (!is_gimple_assign (use_stmt))
8308 continue;
8309 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
8310 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
8311 continue;
8312 tree rhs1 = gimple_assign_rhs1 (use_stmt);
8313 tree use_lhs = gimple_assign_lhs (use_stmt);
8314 if (TREE_CODE (rhs1) != rhs_code
8315 || TREE_OPERAND (rhs1, 0) != lhs
8316 || TREE_CODE (use_lhs) != SSA_NAME
8317 || !stmt_interesting_for_vrp (use_stmt)
8318 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
8319 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
8320 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
8321 continue;
8323 /* If there is a change in the value range for any of the
8324 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
8325 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
8326 or IMAGPART_EXPR immediate uses, but none of them have
8327 a change in their value ranges, return
8328 SSA_PROP_NOT_INTERESTING. If there are no
8329 {REAL,IMAG}PART_EXPR uses at all,
8330 return SSA_PROP_VARYING. */
8331 value_range new_vr = VR_INITIALIZER;
8332 extract_range_basic (&new_vr, use_stmt);
8333 value_range *old_vr = get_value_range (use_lhs);
8334 if (old_vr->type != new_vr.type
8335 || !vrp_operand_equal_p (old_vr->min, new_vr.min)
8336 || !vrp_operand_equal_p (old_vr->max, new_vr.max)
8337 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
8338 res = SSA_PROP_INTERESTING;
8339 else
8340 res = SSA_PROP_NOT_INTERESTING;
8341 BITMAP_FREE (new_vr.equiv);
8342 if (res == SSA_PROP_INTERESTING)
8344 *output_p = lhs;
8345 return res;
8349 return res;
8351 break;
8352 default:
8353 break;
8356 /* All other statements produce nothing of interest for VRP, so mark
8357 their outputs varying and prevent further simulation. */
8358 set_defs_to_varying (stmt);
8360 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
8363 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
8364 { VR1TYPE, VR1MIN, VR1MAX } and store the result
8365 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
8366 possible such range. The resulting range is not canonicalized. */
8368 static void
8369 union_ranges (enum value_range_type *vr0type,
8370 tree *vr0min, tree *vr0max,
8371 enum value_range_type vr1type,
8372 tree vr1min, tree vr1max)
8374 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
8375 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
8377 /* [] is vr0, () is vr1 in the following classification comments. */
8378 if (mineq && maxeq)
8380 /* [( )] */
8381 if (*vr0type == vr1type)
8382 /* Nothing to do for equal ranges. */
8384 else if ((*vr0type == VR_RANGE
8385 && vr1type == VR_ANTI_RANGE)
8386 || (*vr0type == VR_ANTI_RANGE
8387 && vr1type == VR_RANGE))
8389 /* For anti-range with range union the result is varying. */
8390 goto give_up;
8392 else
8393 gcc_unreachable ();
8395 else if (operand_less_p (*vr0max, vr1min) == 1
8396 || operand_less_p (vr1max, *vr0min) == 1)
8398 /* [ ] ( ) or ( ) [ ]
8399 If the ranges have an empty intersection, result of the union
8400 operation is the anti-range or if both are anti-ranges
8401 it covers all. */
8402 if (*vr0type == VR_ANTI_RANGE
8403 && vr1type == VR_ANTI_RANGE)
8404 goto give_up;
8405 else if (*vr0type == VR_ANTI_RANGE
8406 && vr1type == VR_RANGE)
8408 else if (*vr0type == VR_RANGE
8409 && vr1type == VR_ANTI_RANGE)
8411 *vr0type = vr1type;
8412 *vr0min = vr1min;
8413 *vr0max = vr1max;
8415 else if (*vr0type == VR_RANGE
8416 && vr1type == VR_RANGE)
8418 /* The result is the convex hull of both ranges. */
8419 if (operand_less_p (*vr0max, vr1min) == 1)
8421 /* If the result can be an anti-range, create one. */
8422 if (TREE_CODE (*vr0max) == INTEGER_CST
8423 && TREE_CODE (vr1min) == INTEGER_CST
8424 && vrp_val_is_min (*vr0min)
8425 && vrp_val_is_max (vr1max))
8427 tree min = int_const_binop (PLUS_EXPR,
8428 *vr0max,
8429 build_int_cst (TREE_TYPE (*vr0max), 1));
8430 tree max = int_const_binop (MINUS_EXPR,
8431 vr1min,
8432 build_int_cst (TREE_TYPE (vr1min), 1));
8433 if (!operand_less_p (max, min))
8435 *vr0type = VR_ANTI_RANGE;
8436 *vr0min = min;
8437 *vr0max = max;
8439 else
8440 *vr0max = vr1max;
8442 else
8443 *vr0max = vr1max;
8445 else
8447 /* If the result can be an anti-range, create one. */
8448 if (TREE_CODE (vr1max) == INTEGER_CST
8449 && TREE_CODE (*vr0min) == INTEGER_CST
8450 && vrp_val_is_min (vr1min)
8451 && vrp_val_is_max (*vr0max))
8453 tree min = int_const_binop (PLUS_EXPR,
8454 vr1max,
8455 build_int_cst (TREE_TYPE (vr1max), 1));
8456 tree max = int_const_binop (MINUS_EXPR,
8457 *vr0min,
8458 build_int_cst (TREE_TYPE (*vr0min), 1));
8459 if (!operand_less_p (max, min))
8461 *vr0type = VR_ANTI_RANGE;
8462 *vr0min = min;
8463 *vr0max = max;
8465 else
8466 *vr0min = vr1min;
8468 else
8469 *vr0min = vr1min;
8472 else
8473 gcc_unreachable ();
8475 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
8476 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
8478 /* [ ( ) ] or [( ) ] or [ ( )] */
8479 if (*vr0type == VR_RANGE
8480 && vr1type == VR_RANGE)
8482 else if (*vr0type == VR_ANTI_RANGE
8483 && vr1type == VR_ANTI_RANGE)
8485 *vr0type = vr1type;
8486 *vr0min = vr1min;
8487 *vr0max = vr1max;
8489 else if (*vr0type == VR_ANTI_RANGE
8490 && vr1type == VR_RANGE)
8492 /* Arbitrarily choose the right or left gap. */
8493 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
8494 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8495 build_int_cst (TREE_TYPE (vr1min), 1));
8496 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
8497 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8498 build_int_cst (TREE_TYPE (vr1max), 1));
8499 else
8500 goto give_up;
8502 else if (*vr0type == VR_RANGE
8503 && vr1type == VR_ANTI_RANGE)
8504 /* The result covers everything. */
8505 goto give_up;
8506 else
8507 gcc_unreachable ();
8509 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
8510 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
8512 /* ( [ ] ) or ([ ] ) or ( [ ]) */
8513 if (*vr0type == VR_RANGE
8514 && vr1type == VR_RANGE)
8516 *vr0type = vr1type;
8517 *vr0min = vr1min;
8518 *vr0max = vr1max;
8520 else if (*vr0type == VR_ANTI_RANGE
8521 && vr1type == VR_ANTI_RANGE)
8523 else if (*vr0type == VR_RANGE
8524 && vr1type == VR_ANTI_RANGE)
8526 *vr0type = VR_ANTI_RANGE;
8527 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
8529 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8530 build_int_cst (TREE_TYPE (*vr0min), 1));
8531 *vr0min = vr1min;
8533 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
8535 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8536 build_int_cst (TREE_TYPE (*vr0max), 1));
8537 *vr0max = vr1max;
8539 else
8540 goto give_up;
8542 else if (*vr0type == VR_ANTI_RANGE
8543 && vr1type == VR_RANGE)
8544 /* The result covers everything. */
8545 goto give_up;
8546 else
8547 gcc_unreachable ();
8549 else if ((operand_less_p (vr1min, *vr0max) == 1
8550 || operand_equal_p (vr1min, *vr0max, 0))
8551 && operand_less_p (*vr0min, vr1min) == 1
8552 && operand_less_p (*vr0max, vr1max) == 1)
8554 /* [ ( ] ) or [ ]( ) */
8555 if (*vr0type == VR_RANGE
8556 && vr1type == VR_RANGE)
8557 *vr0max = vr1max;
8558 else if (*vr0type == VR_ANTI_RANGE
8559 && vr1type == VR_ANTI_RANGE)
8560 *vr0min = vr1min;
8561 else if (*vr0type == VR_ANTI_RANGE
8562 && vr1type == VR_RANGE)
8564 if (TREE_CODE (vr1min) == INTEGER_CST)
8565 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8566 build_int_cst (TREE_TYPE (vr1min), 1));
8567 else
8568 goto give_up;
8570 else if (*vr0type == VR_RANGE
8571 && vr1type == VR_ANTI_RANGE)
8573 if (TREE_CODE (*vr0max) == INTEGER_CST)
8575 *vr0type = vr1type;
8576 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8577 build_int_cst (TREE_TYPE (*vr0max), 1));
8578 *vr0max = vr1max;
8580 else
8581 goto give_up;
8583 else
8584 gcc_unreachable ();
8586 else if ((operand_less_p (*vr0min, vr1max) == 1
8587 || operand_equal_p (*vr0min, vr1max, 0))
8588 && operand_less_p (vr1min, *vr0min) == 1
8589 && operand_less_p (vr1max, *vr0max) == 1)
8591 /* ( [ ) ] or ( )[ ] */
8592 if (*vr0type == VR_RANGE
8593 && vr1type == VR_RANGE)
8594 *vr0min = vr1min;
8595 else if (*vr0type == VR_ANTI_RANGE
8596 && vr1type == VR_ANTI_RANGE)
8597 *vr0max = vr1max;
8598 else if (*vr0type == VR_ANTI_RANGE
8599 && vr1type == VR_RANGE)
8601 if (TREE_CODE (vr1max) == INTEGER_CST)
8602 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8603 build_int_cst (TREE_TYPE (vr1max), 1));
8604 else
8605 goto give_up;
8607 else if (*vr0type == VR_RANGE
8608 && vr1type == VR_ANTI_RANGE)
8610 if (TREE_CODE (*vr0min) == INTEGER_CST)
8612 *vr0type = vr1type;
8613 *vr0min = vr1min;
8614 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8615 build_int_cst (TREE_TYPE (*vr0min), 1));
8617 else
8618 goto give_up;
8620 else
8621 gcc_unreachable ();
8623 else
8624 goto give_up;
8626 return;
8628 give_up:
8629 *vr0type = VR_VARYING;
8630 *vr0min = NULL_TREE;
8631 *vr0max = NULL_TREE;
8634 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
8635 { VR1TYPE, VR1MIN, VR1MAX } and store the result
8636 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
8637 possible such range. The resulting range is not canonicalized. */
8639 static void
8640 intersect_ranges (enum value_range_type *vr0type,
8641 tree *vr0min, tree *vr0max,
8642 enum value_range_type vr1type,
8643 tree vr1min, tree vr1max)
8645 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
8646 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
8648 /* [] is vr0, () is vr1 in the following classification comments. */
8649 if (mineq && maxeq)
8651 /* [( )] */
8652 if (*vr0type == vr1type)
8653 /* Nothing to do for equal ranges. */
8655 else if ((*vr0type == VR_RANGE
8656 && vr1type == VR_ANTI_RANGE)
8657 || (*vr0type == VR_ANTI_RANGE
8658 && vr1type == VR_RANGE))
8660 /* For anti-range with range intersection the result is empty. */
8661 *vr0type = VR_UNDEFINED;
8662 *vr0min = NULL_TREE;
8663 *vr0max = NULL_TREE;
8665 else
8666 gcc_unreachable ();
8668 else if (operand_less_p (*vr0max, vr1min) == 1
8669 || operand_less_p (vr1max, *vr0min) == 1)
8671 /* [ ] ( ) or ( ) [ ]
8672 If the ranges have an empty intersection, the result of the
8673 intersect operation is the range for intersecting an
8674 anti-range with a range or empty when intersecting two ranges. */
8675 if (*vr0type == VR_RANGE
8676 && vr1type == VR_ANTI_RANGE)
8678 else if (*vr0type == VR_ANTI_RANGE
8679 && vr1type == VR_RANGE)
8681 *vr0type = vr1type;
8682 *vr0min = vr1min;
8683 *vr0max = vr1max;
8685 else if (*vr0type == VR_RANGE
8686 && vr1type == VR_RANGE)
8688 *vr0type = VR_UNDEFINED;
8689 *vr0min = NULL_TREE;
8690 *vr0max = NULL_TREE;
8692 else if (*vr0type == VR_ANTI_RANGE
8693 && vr1type == VR_ANTI_RANGE)
8695 /* If the anti-ranges are adjacent to each other merge them. */
8696 if (TREE_CODE (*vr0max) == INTEGER_CST
8697 && TREE_CODE (vr1min) == INTEGER_CST
8698 && operand_less_p (*vr0max, vr1min) == 1
8699 && integer_onep (int_const_binop (MINUS_EXPR,
8700 vr1min, *vr0max)))
8701 *vr0max = vr1max;
8702 else if (TREE_CODE (vr1max) == INTEGER_CST
8703 && TREE_CODE (*vr0min) == INTEGER_CST
8704 && operand_less_p (vr1max, *vr0min) == 1
8705 && integer_onep (int_const_binop (MINUS_EXPR,
8706 *vr0min, vr1max)))
8707 *vr0min = vr1min;
8708 /* Else arbitrarily take VR0. */
8711 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
8712 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
8714 /* [ ( ) ] or [( ) ] or [ ( )] */
8715 if (*vr0type == VR_RANGE
8716 && vr1type == VR_RANGE)
8718 /* If both are ranges the result is the inner one. */
8719 *vr0type = vr1type;
8720 *vr0min = vr1min;
8721 *vr0max = vr1max;
8723 else if (*vr0type == VR_RANGE
8724 && vr1type == VR_ANTI_RANGE)
8726 /* Choose the right gap if the left one is empty. */
8727 if (mineq)
8729 if (TREE_CODE (vr1max) == INTEGER_CST)
8730 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8731 build_int_cst (TREE_TYPE (vr1max), 1));
8732 else
8733 *vr0min = vr1max;
8735 /* Choose the left gap if the right one is empty. */
8736 else if (maxeq)
8738 if (TREE_CODE (vr1min) == INTEGER_CST)
8739 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8740 build_int_cst (TREE_TYPE (vr1min), 1));
8741 else
8742 *vr0max = vr1min;
8744 /* Choose the anti-range if the range is effectively varying. */
8745 else if (vrp_val_is_min (*vr0min)
8746 && vrp_val_is_max (*vr0max))
8748 *vr0type = vr1type;
8749 *vr0min = vr1min;
8750 *vr0max = vr1max;
8752 /* Else choose the range. */
8754 else if (*vr0type == VR_ANTI_RANGE
8755 && vr1type == VR_ANTI_RANGE)
8756 /* If both are anti-ranges the result is the outer one. */
8758 else if (*vr0type == VR_ANTI_RANGE
8759 && vr1type == VR_RANGE)
8761 /* The intersection is empty. */
8762 *vr0type = VR_UNDEFINED;
8763 *vr0min = NULL_TREE;
8764 *vr0max = NULL_TREE;
8766 else
8767 gcc_unreachable ();
8769 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
8770 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
8772 /* ( [ ] ) or ([ ] ) or ( [ ]) */
8773 if (*vr0type == VR_RANGE
8774 && vr1type == VR_RANGE)
8775 /* Choose the inner range. */
8777 else if (*vr0type == VR_ANTI_RANGE
8778 && vr1type == VR_RANGE)
8780 /* Choose the right gap if the left is empty. */
8781 if (mineq)
8783 *vr0type = VR_RANGE;
8784 if (TREE_CODE (*vr0max) == INTEGER_CST)
8785 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8786 build_int_cst (TREE_TYPE (*vr0max), 1));
8787 else
8788 *vr0min = *vr0max;
8789 *vr0max = vr1max;
8791 /* Choose the left gap if the right is empty. */
8792 else if (maxeq)
8794 *vr0type = VR_RANGE;
8795 if (TREE_CODE (*vr0min) == INTEGER_CST)
8796 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8797 build_int_cst (TREE_TYPE (*vr0min), 1));
8798 else
8799 *vr0max = *vr0min;
8800 *vr0min = vr1min;
8802 /* Choose the anti-range if the range is effectively varying. */
8803 else if (vrp_val_is_min (vr1min)
8804 && vrp_val_is_max (vr1max))
8806 /* Choose the anti-range if it is ~[0,0], that range is special
8807 enough to special case when vr1's range is relatively wide. */
8808 else if (*vr0min == *vr0max
8809 && integer_zerop (*vr0min)
8810 && (TYPE_PRECISION (TREE_TYPE (*vr0min))
8811 == TYPE_PRECISION (ptr_type_node))
8812 && TREE_CODE (vr1max) == INTEGER_CST
8813 && TREE_CODE (vr1min) == INTEGER_CST
8814 && (wi::clz (wi::sub (vr1max, vr1min))
8815 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
8817 /* Else choose the range. */
8818 else
8820 *vr0type = vr1type;
8821 *vr0min = vr1min;
8822 *vr0max = vr1max;
8825 else if (*vr0type == VR_ANTI_RANGE
8826 && vr1type == VR_ANTI_RANGE)
8828 /* If both are anti-ranges the result is the outer one. */
8829 *vr0type = vr1type;
8830 *vr0min = vr1min;
8831 *vr0max = vr1max;
8833 else if (vr1type == VR_ANTI_RANGE
8834 && *vr0type == VR_RANGE)
8836 /* The intersection is empty. */
8837 *vr0type = VR_UNDEFINED;
8838 *vr0min = NULL_TREE;
8839 *vr0max = NULL_TREE;
8841 else
8842 gcc_unreachable ();
8844 else if ((operand_less_p (vr1min, *vr0max) == 1
8845 || operand_equal_p (vr1min, *vr0max, 0))
8846 && operand_less_p (*vr0min, vr1min) == 1)
8848 /* [ ( ] ) or [ ]( ) */
8849 if (*vr0type == VR_ANTI_RANGE
8850 && vr1type == VR_ANTI_RANGE)
8851 *vr0max = vr1max;
8852 else if (*vr0type == VR_RANGE
8853 && vr1type == VR_RANGE)
8854 *vr0min = vr1min;
8855 else if (*vr0type == VR_RANGE
8856 && vr1type == VR_ANTI_RANGE)
8858 if (TREE_CODE (vr1min) == INTEGER_CST)
8859 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8860 build_int_cst (TREE_TYPE (vr1min), 1));
8861 else
8862 *vr0max = vr1min;
8864 else if (*vr0type == VR_ANTI_RANGE
8865 && vr1type == VR_RANGE)
8867 *vr0type = VR_RANGE;
8868 if (TREE_CODE (*vr0max) == INTEGER_CST)
8869 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8870 build_int_cst (TREE_TYPE (*vr0max), 1));
8871 else
8872 *vr0min = *vr0max;
8873 *vr0max = vr1max;
8875 else
8876 gcc_unreachable ();
8878 else if ((operand_less_p (*vr0min, vr1max) == 1
8879 || operand_equal_p (*vr0min, vr1max, 0))
8880 && operand_less_p (vr1min, *vr0min) == 1)
8882 /* ( [ ) ] or ( )[ ] */
8883 if (*vr0type == VR_ANTI_RANGE
8884 && vr1type == VR_ANTI_RANGE)
8885 *vr0min = vr1min;
8886 else if (*vr0type == VR_RANGE
8887 && vr1type == VR_RANGE)
8888 *vr0max = vr1max;
8889 else if (*vr0type == VR_RANGE
8890 && vr1type == VR_ANTI_RANGE)
8892 if (TREE_CODE (vr1max) == INTEGER_CST)
8893 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8894 build_int_cst (TREE_TYPE (vr1max), 1));
8895 else
8896 *vr0min = vr1max;
8898 else if (*vr0type == VR_ANTI_RANGE
8899 && vr1type == VR_RANGE)
8901 *vr0type = VR_RANGE;
8902 if (TREE_CODE (*vr0min) == INTEGER_CST)
8903 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8904 build_int_cst (TREE_TYPE (*vr0min), 1));
8905 else
8906 *vr0max = *vr0min;
8907 *vr0min = vr1min;
8909 else
8910 gcc_unreachable ();
8913 /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as
8914 result for the intersection. That's always a conservative
8915 correct estimate unless VR1 is a constant singleton range
8916 in which case we choose that. */
8917 if (vr1type == VR_RANGE
8918 && is_gimple_min_invariant (vr1min)
8919 && vrp_operand_equal_p (vr1min, vr1max))
8921 *vr0type = vr1type;
8922 *vr0min = vr1min;
8923 *vr0max = vr1max;
8926 return;
8930 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
8931 in *VR0. This may not be the smallest possible such range. */
8933 static void
8934 vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
8936 value_range saved;
8938 /* If either range is VR_VARYING the other one wins. */
8939 if (vr1->type == VR_VARYING)
8940 return;
8941 if (vr0->type == VR_VARYING)
8943 copy_value_range (vr0, vr1);
8944 return;
8947 /* When either range is VR_UNDEFINED the resulting range is
8948 VR_UNDEFINED, too. */
8949 if (vr0->type == VR_UNDEFINED)
8950 return;
8951 if (vr1->type == VR_UNDEFINED)
8953 set_value_range_to_undefined (vr0);
8954 return;
8957 /* Save the original vr0 so we can return it as conservative intersection
8958 result when our worker turns things to varying. */
8959 saved = *vr0;
8960 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
8961 vr1->type, vr1->min, vr1->max);
8962 /* Make sure to canonicalize the result though as the inversion of a
8963 VR_RANGE can still be a VR_RANGE. */
8964 set_and_canonicalize_value_range (vr0, vr0->type,
8965 vr0->min, vr0->max, vr0->equiv);
8966 /* If that failed, use the saved original VR0. */
8967 if (vr0->type == VR_VARYING)
8969 *vr0 = saved;
8970 return;
8972 /* If the result is VR_UNDEFINED there is no need to mess with
8973 the equivalencies. */
8974 if (vr0->type == VR_UNDEFINED)
8975 return;
8977 /* The resulting set of equivalences for range intersection is the union of
8978 the two sets. */
8979 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8980 bitmap_ior_into (vr0->equiv, vr1->equiv);
8981 else if (vr1->equiv && !vr0->equiv)
8983 vr0->equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
8984 bitmap_copy (vr0->equiv, vr1->equiv);
8988 void
8989 vrp_intersect_ranges (value_range *vr0, value_range *vr1)
8991 if (dump_file && (dump_flags & TDF_DETAILS))
8993 fprintf (dump_file, "Intersecting\n ");
8994 dump_value_range (dump_file, vr0);
8995 fprintf (dump_file, "\nand\n ");
8996 dump_value_range (dump_file, vr1);
8997 fprintf (dump_file, "\n");
8999 vrp_intersect_ranges_1 (vr0, vr1);
9000 if (dump_file && (dump_flags & TDF_DETAILS))
9002 fprintf (dump_file, "to\n ");
9003 dump_value_range (dump_file, vr0);
9004 fprintf (dump_file, "\n");
9008 /* Meet operation for value ranges. Given two value ranges VR0 and
9009 VR1, store in VR0 a range that contains both VR0 and VR1. This
9010 may not be the smallest possible such range. */
9012 static void
9013 vrp_meet_1 (value_range *vr0, const value_range *vr1)
9015 value_range saved;
9017 if (vr0->type == VR_UNDEFINED)
9019 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
9020 return;
9023 if (vr1->type == VR_UNDEFINED)
9025 /* VR0 already has the resulting range. */
9026 return;
9029 if (vr0->type == VR_VARYING)
9031 /* Nothing to do. VR0 already has the resulting range. */
9032 return;
9035 if (vr1->type == VR_VARYING)
9037 set_value_range_to_varying (vr0);
9038 return;
9041 saved = *vr0;
9042 union_ranges (&vr0->type, &vr0->min, &vr0->max,
9043 vr1->type, vr1->min, vr1->max);
9044 if (vr0->type == VR_VARYING)
9046 /* Failed to find an efficient meet. Before giving up and setting
9047 the result to VARYING, see if we can at least derive a useful
9048 anti-range. FIXME, all this nonsense about distinguishing
9049 anti-ranges from ranges is necessary because of the odd
9050 semantics of range_includes_zero_p and friends. */
9051 if (((saved.type == VR_RANGE
9052 && range_includes_zero_p (saved.min, saved.max) == 0)
9053 || (saved.type == VR_ANTI_RANGE
9054 && range_includes_zero_p (saved.min, saved.max) == 1))
9055 && ((vr1->type == VR_RANGE
9056 && range_includes_zero_p (vr1->min, vr1->max) == 0)
9057 || (vr1->type == VR_ANTI_RANGE
9058 && range_includes_zero_p (vr1->min, vr1->max) == 1)))
9060 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
9062 /* Since this meet operation did not result from the meeting of
9063 two equivalent names, VR0 cannot have any equivalences. */
9064 if (vr0->equiv)
9065 bitmap_clear (vr0->equiv);
9066 return;
9069 set_value_range_to_varying (vr0);
9070 return;
9072 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
9073 vr0->equiv);
9074 if (vr0->type == VR_VARYING)
9075 return;
9077 /* The resulting set of equivalences is always the intersection of
9078 the two sets. */
9079 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
9080 bitmap_and_into (vr0->equiv, vr1->equiv);
9081 else if (vr0->equiv && !vr1->equiv)
9082 bitmap_clear (vr0->equiv);
9085 void
9086 vrp_meet (value_range *vr0, const value_range *vr1)
9088 if (dump_file && (dump_flags & TDF_DETAILS))
9090 fprintf (dump_file, "Meeting\n ");
9091 dump_value_range (dump_file, vr0);
9092 fprintf (dump_file, "\nand\n ");
9093 dump_value_range (dump_file, vr1);
9094 fprintf (dump_file, "\n");
9096 vrp_meet_1 (vr0, vr1);
9097 if (dump_file && (dump_flags & TDF_DETAILS))
9099 fprintf (dump_file, "to\n ");
9100 dump_value_range (dump_file, vr0);
9101 fprintf (dump_file, "\n");
9106 /* Visit all arguments for PHI node PHI that flow through executable
9107 edges. If a valid value range can be derived from all the incoming
9108 value ranges, set a new range in VR_RESULT. */
9110 static void
9111 extract_range_from_phi_node (gphi *phi, value_range *vr_result)
9113 size_t i;
9114 tree lhs = PHI_RESULT (phi);
/* lhs_vr is the range recorded for LHS on the previous propagation
   visit; it is compared against the freshly-met result below to damp
   oscillation across iterations. */
9115 value_range *lhs_vr = get_value_range (lhs);
9116 bool first = true;
9117 int edges, old_edges;
9118 struct loop *l;
9120 if (dump_file && (dump_flags & TDF_DETAILS))
9122 fprintf (dump_file, "\nVisiting PHI node: ");
9123 print_gimple_stmt (dump_file, phi, 0, dump_flags);
/* Set when some argument is defined by a statement that may be
   simulated again and reaches us over a DFS back edge; only then is
   it worthwhile (and safe) to keep iterating below. */
9126 bool may_simulate_backedge_again = false;
9127 edges = 0;
/* Meet the ranges of all arguments on executable edges into
   *VR_RESULT. */
9128 for (i = 0; i < gimple_phi_num_args (phi); i++)
9130 edge e = gimple_phi_arg_edge (phi, i);
9132 if (dump_file && (dump_flags & TDF_DETAILS))
9134 fprintf (dump_file,
9135 " Argument #%d (%d -> %d %sexecutable)\n",
9136 (int) i, e->src->index, e->dest->index,
9137 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
9140 if (e->flags & EDGE_EXECUTABLE)
9142 tree arg = PHI_ARG_DEF (phi, i);
9143 value_range vr_arg;
9145 ++edges;
9147 if (TREE_CODE (arg) == SSA_NAME)
9149 /* See if we are eventually going to change one of the args. */
9150 gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
9151 if (! gimple_nop_p (def_stmt)
9152 && prop_simulate_again_p (def_stmt)
9153 && e->flags & EDGE_DFS_BACK)
9154 may_simulate_backedge_again = true;
9156 vr_arg = *(get_value_range (arg));
9157 /* Do not allow equivalences or symbolic ranges to leak in from
9158 backedges. That creates invalid equivalencies.
9159 See PR53465 and PR54767. */
9160 if (e->flags & EDGE_DFS_BACK)
9162 if (vr_arg.type == VR_RANGE
9163 || vr_arg.type == VR_ANTI_RANGE)
9165 vr_arg.equiv = NULL;
9166 if (symbolic_range_p (&vr_arg))
9168 vr_arg.type = VR_VARYING;
9169 vr_arg.min = NULL_TREE;
9170 vr_arg.max = NULL_TREE;
9174 else
9176 /* If the non-backedge arguments range is VR_VARYING then
9177 we can still try recording a simple equivalence. */
9178 if (vr_arg.type == VR_VARYING)
9180 vr_arg.type = VR_RANGE;
9181 vr_arg.min = arg;
9182 vr_arg.max = arg;
9183 vr_arg.equiv = NULL;
9187 else
/* Constant argument: use the singleton range [arg, arg], stripping
   any TREE_OVERFLOW marker first. */
9189 if (TREE_OVERFLOW_P (arg))
9190 arg = drop_tree_overflow (arg);
9192 vr_arg.type = VR_RANGE;
9193 vr_arg.min = arg;
9194 vr_arg.max = arg;
9195 vr_arg.equiv = NULL;
9198 if (dump_file && (dump_flags & TDF_DETAILS))
9200 fprintf (dump_file, "\t");
9201 print_generic_expr (dump_file, arg, dump_flags);
9202 fprintf (dump_file, ": ");
9203 dump_value_range (dump_file, &vr_arg);
9204 fprintf (dump_file, "\n");
9207 if (first)
9208 copy_value_range (vr_result, &vr_arg);
9209 else
9210 vrp_meet (vr_result, &vr_arg);
9211 first = false;
/* Once the meet degrades to VARYING no further argument can
   improve it; stop early. */
9213 if (vr_result->type == VR_VARYING)
9214 break;
9218 if (vr_result->type == VR_VARYING)
9219 goto varying;
9220 else if (vr_result->type == VR_UNDEFINED)
9221 goto update_range;
9223 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
9224 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
9226 /* To prevent infinite iterations in the algorithm, derive ranges
9227 when the new value is slightly bigger or smaller than the
9228 previous one. We don't do this if we have seen a new executable
9229 edge; this helps us avoid an overflow infinity for conditionals
9230 which are not in a loop. If the old value-range was VR_UNDEFINED
9231 use the updated range and iterate one more time. If we will not
9232 simulate this PHI again via the backedge allow us to iterate. */
9233 if (edges > 0
9234 && gimple_phi_num_args (phi) > 1
9235 && edges == old_edges
9236 && lhs_vr->type != VR_UNDEFINED
9237 && may_simulate_backedge_again)
9239 /* Compare old and new ranges, fall back to varying if the
9240 values are not comparable. */
9241 int cmp_min = compare_values (lhs_vr->min, vr_result->min);
9242 if (cmp_min == -2)
9243 goto varying;
9244 int cmp_max = compare_values (lhs_vr->max, vr_result->max);
9245 if (cmp_max == -2)
9246 goto varying;
9248 /* For non VR_RANGE or for pointers fall back to varying if
9249 the range changed. */
9250 if ((lhs_vr->type != VR_RANGE || vr_result->type != VR_RANGE
9251 || POINTER_TYPE_P (TREE_TYPE (lhs)))
9252 && (cmp_min != 0 || cmp_max != 0))
9253 goto varying;
9255 /* If the new minimum is larger than the previous one
9256 retain the old value. If the new minimum value is smaller
9257 than the previous one and not -INF go all the way to -INF + 1.
9258 In the first case, to avoid infinite bouncing between different
9259 minimums, and in the other case to avoid iterating millions of
9260 times to reach -INF. Going to -INF + 1 also lets the following
9261 iteration compute whether there will be any overflow, at the
9262 expense of one additional iteration. */
9263 if (cmp_min < 0)
9264 vr_result->min = lhs_vr->min;
9265 else if (cmp_min > 0
9266 && !vrp_val_is_min (vr_result->min))
9267 vr_result->min
9268 = int_const_binop (PLUS_EXPR,
9269 vrp_val_min (TREE_TYPE (vr_result->min)),
9270 build_int_cst (TREE_TYPE (vr_result->min), 1));
9272 /* Similarly for the maximum value. */
9273 if (cmp_max > 0)
9274 vr_result->max = lhs_vr->max;
9275 else if (cmp_max < 0
9276 && !vrp_val_is_max (vr_result->max))
9277 vr_result->max
9278 = int_const_binop (MINUS_EXPR,
9279 vrp_val_max (TREE_TYPE (vr_result->min)),
9280 build_int_cst (TREE_TYPE (vr_result->min), 1));
9282 /* If we dropped either bound to +-INF then if this is a loop
9283 PHI node SCEV may know more about its value-range. */
9284 if (cmp_min > 0 || cmp_min < 0
9285 || cmp_max < 0 || cmp_max > 0)
9286 goto scev_check;
9288 goto infinite_check;
9291 goto update_range;
9293 varying:
9294 set_value_range_to_varying (vr_result);
9296 scev_check:
9297 /* If this is a loop PHI node SCEV may know more about its value-range.
9298 scev_check can be reached from two paths, one is a fall through from above
9299 "varying" label, the other is direct goto from code block which tries to
9300 avoid infinite simulation. */
9301 if ((l = loop_containing_stmt (phi))
9302 && l->header == gimple_bb (phi))
9303 adjust_range_with_scev (vr_result, l, phi, lhs);
9305 infinite_check:
9306 /* If we will end up with a (-INF, +INF) range, set it to
9307 VARYING. Same if the previous max value was invalid for
9308 the type and we end up with vr_result.min > vr_result.max. */
9309 if ((vr_result->type == VR_RANGE || vr_result->type == VR_ANTI_RANGE)
9310 && !((vrp_val_is_max (vr_result->max) && vrp_val_is_min (vr_result->min))
9311 || compare_values (vr_result->min, vr_result->max) > 0))
9313 else
9314 set_value_range_to_varying (vr_result);
9316 /* If the new range is different than the previous value, keep
9317 iterating. */
9318 update_range:
9319 return;
9322 /* Visit all arguments for PHI node PHI that flow through executable
9323 edges. If a valid value range can be derived from all the incoming
9324 value ranges, set a new range for the LHS of PHI. */
9326 static enum ssa_prop_result
9327 vrp_visit_phi_node (gphi *phi)
9329 tree lhs = PHI_RESULT (phi);
9330 value_range vr_result = VR_INITIALIZER;
9331 extract_range_from_phi_node (phi, &vr_result);
9332 if (update_value_range (lhs, &vr_result))
9334 if (dump_file && (dump_flags & TDF_DETAILS))
9336 fprintf (dump_file, "Found new range for ");
9337 print_generic_expr (dump_file, lhs, 0);
9338 fprintf (dump_file, ": ");
9339 dump_value_range (dump_file, &vr_result);
9340 fprintf (dump_file, "\n");
9343 if (vr_result.type == VR_VARYING)
9344 return SSA_PROP_VARYING;
9346 return SSA_PROP_INTERESTING;
9349 /* Nothing changed, don't add outgoing edges. */
9350 return SSA_PROP_NOT_INTERESTING;
9353 /* Simplify boolean operations if the source is known
9354 to be already a boolean. */
9355 static bool
9356 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9358 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9359 tree lhs, op0, op1;
9360 bool need_conversion;
9362 /* We handle only !=/== case here. */
9363 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
9365 op0 = gimple_assign_rhs1 (stmt);
9366 if (!op_with_boolean_value_range_p (op0))
9367 return false;
9369 op1 = gimple_assign_rhs2 (stmt);
9370 if (!op_with_boolean_value_range_p (op1))
9371 return false;
9373 /* Reduce number of cases to handle to NE_EXPR. As there is no
9374 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
9375 if (rhs_code == EQ_EXPR)
9377 if (TREE_CODE (op1) == INTEGER_CST)
9378 op1 = int_const_binop (BIT_XOR_EXPR, op1,
9379 build_int_cst (TREE_TYPE (op1), 1));
9380 else
9381 return false;
9384 lhs = gimple_assign_lhs (stmt);
9385 need_conversion
9386 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
9388 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
9389 if (need_conversion
9390 && !TYPE_UNSIGNED (TREE_TYPE (op0))
9391 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
9392 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
9393 return false;
9395 /* For A != 0 we can substitute A itself. */
9396 if (integer_zerop (op1))
9397 gimple_assign_set_rhs_with_ops (gsi,
9398 need_conversion
9399 ? NOP_EXPR : TREE_CODE (op0), op0);
9400 /* For A != B we substitute A ^ B. Either with conversion. */
9401 else if (need_conversion)
9403 tree tem = make_ssa_name (TREE_TYPE (op0));
9404 gassign *newop
9405 = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
9406 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
9407 if (INTEGRAL_TYPE_P (TREE_TYPE (tem))
9408 && TYPE_PRECISION (TREE_TYPE (tem)) > 1)
9409 set_range_info (tem, VR_RANGE,
9410 wi::zero (TYPE_PRECISION (TREE_TYPE (tem))),
9411 wi::one (TYPE_PRECISION (TREE_TYPE (tem))));
9412 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
9414 /* Or without. */
9415 else
9416 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
9417 update_stmt (gsi_stmt (*gsi));
9418 fold_stmt (gsi, follow_single_use_edges);
9420 return true;
9423 /* Simplify a division or modulo operator to a right shift or bitwise and
9424 if the first operand is unsigned or is greater than zero and the second
9425 operand is an exact power of two. For TRUNC_MOD_EXPR op0 % op1 with
9426 constant op1 (op1min = op1) or with op1 in [op1min, op1max] range,
9427 optimize it into just op0 if op0's range is known to be a subset of
9428 [-op1min + 1, op1min - 1] for signed and [0, op1min - 1] for unsigned
9429 modulo. */
9431 static bool
9432 simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9434 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9435 tree val = NULL;
9436 tree op0 = gimple_assign_rhs1 (stmt);
9437 tree op1 = gimple_assign_rhs2 (stmt);
9438 tree op0min = NULL_TREE, op0max = NULL_TREE;
9439 tree op1min = op1;
9440 value_range *vr = NULL;
9442 if (TREE_CODE (op0) == INTEGER_CST)
9444 op0min = op0;
9445 op0max = op0;
9447 else
9449 vr = get_value_range (op0);
9450 if (range_int_cst_p (vr))
9452 op0min = vr->min;
9453 op0max = vr->max;
9457 if (rhs_code == TRUNC_MOD_EXPR
9458 && TREE_CODE (op1) == SSA_NAME)
9460 value_range *vr1 = get_value_range (op1);
9461 if (range_int_cst_p (vr1))
9462 op1min = vr1->min;
9464 if (rhs_code == TRUNC_MOD_EXPR
9465 && TREE_CODE (op1min) == INTEGER_CST
9466 && tree_int_cst_sgn (op1min) == 1
9467 && op0max
9468 && tree_int_cst_lt (op0max, op1min))
9470 if (TYPE_UNSIGNED (TREE_TYPE (op0))
9471 || tree_int_cst_sgn (op0min) >= 0
9472 || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1min), op1min),
9473 op0min))
9475 /* If op0 already has the range op0 % op1 has,
9476 then TRUNC_MOD_EXPR won't change anything. */
9477 gimple_assign_set_rhs_from_tree (gsi, op0);
9478 return true;
9482 if (TREE_CODE (op0) != SSA_NAME)
9483 return false;
9485 if (!integer_pow2p (op1))
9487 /* X % -Y can be only optimized into X % Y either if
9488 X is not INT_MIN, or Y is not -1. Fold it now, as after
9489 remove_range_assertions the range info might be not available
9490 anymore. */
9491 if (rhs_code == TRUNC_MOD_EXPR
9492 && fold_stmt (gsi, follow_single_use_edges))
9493 return true;
9494 return false;
9497 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
9498 val = integer_one_node;
9499 else
9501 bool sop = false;
9503 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
9505 if (val
9506 && sop
9507 && integer_onep (val)
9508 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9510 location_t location;
9512 if (!gimple_has_location (stmt))
9513 location = input_location;
9514 else
9515 location = gimple_location (stmt);
9516 warning_at (location, OPT_Wstrict_overflow,
9517 "assuming signed overflow does not occur when "
9518 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
9522 if (val && integer_onep (val))
9524 tree t;
9526 if (rhs_code == TRUNC_DIV_EXPR)
9528 t = build_int_cst (integer_type_node, tree_log2 (op1));
9529 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
9530 gimple_assign_set_rhs1 (stmt, op0);
9531 gimple_assign_set_rhs2 (stmt, t);
9533 else
9535 t = build_int_cst (TREE_TYPE (op1), 1);
9536 t = int_const_binop (MINUS_EXPR, op1, t);
9537 t = fold_convert (TREE_TYPE (op0), t);
9539 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
9540 gimple_assign_set_rhs1 (stmt, op0);
9541 gimple_assign_set_rhs2 (stmt, t);
9544 update_stmt (stmt);
9545 fold_stmt (gsi, follow_single_use_edges);
9546 return true;
9549 return false;
9552 /* Simplify a min or max if the ranges of the two operands are
9553 disjoint. Return true if we do simplify. */
9555 static bool
9556 simplify_min_or_max_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9558 tree op0 = gimple_assign_rhs1 (stmt);
9559 tree op1 = gimple_assign_rhs2 (stmt);
9560 bool sop = false;
9561 tree val;
9563 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9564 (LE_EXPR, op0, op1, &sop));
9565 if (!val)
9567 sop = false;
9568 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9569 (LT_EXPR, op0, op1, &sop));
9572 if (val)
9574 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9576 location_t location;
9578 if (!gimple_has_location (stmt))
9579 location = input_location;
9580 else
9581 location = gimple_location (stmt);
9582 warning_at (location, OPT_Wstrict_overflow,
9583 "assuming signed overflow does not occur when "
9584 "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
9587 /* VAL == TRUE -> OP0 < or <= op1
9588 VAL == FALSE -> OP0 > or >= op1. */
9589 tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
9590 == integer_zerop (val)) ? op0 : op1;
9591 gimple_assign_set_rhs_from_tree (gsi, res);
9592 return true;
9595 return false;
9598 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
9599 ABS_EXPR. If the operand is <= 0, then simplify the
9600 ABS_EXPR into a NEGATE_EXPR. */
9602 static bool
9603 simplify_abs_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9605 tree op = gimple_assign_rhs1 (stmt);
9606 value_range *vr = get_value_range (op);
9608 if (vr)
9610 tree val = NULL;
9611 bool sop = false;
9613 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
9614 if (!val)
9616 /* The range is neither <= 0 nor > 0. Now see if it is
9617 either < 0 or >= 0. */
9618 sop = false;
9619 val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
9620 &sop);
9623 if (val)
9625 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9627 location_t location;
9629 if (!gimple_has_location (stmt))
9630 location = input_location;
9631 else
9632 location = gimple_location (stmt);
9633 warning_at (location, OPT_Wstrict_overflow,
9634 "assuming signed overflow does not occur when "
9635 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
9638 gimple_assign_set_rhs1 (stmt, op);
9639 if (integer_zerop (val))
9640 gimple_assign_set_rhs_code (stmt, SSA_NAME);
9641 else
9642 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
9643 update_stmt (stmt);
9644 fold_stmt (gsi, follow_single_use_edges);
9645 return true;
9649 return false;
9652 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
9653 If all the bits that are being cleared by & are already
9654 known to be zero from VR, or all the bits that are being
9655 set by | are already known to be one from VR, the bit
9656 operation is redundant. */
9658 static bool
9659 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9661 tree op0 = gimple_assign_rhs1 (stmt);
9662 tree op1 = gimple_assign_rhs2 (stmt);
9663 tree op = NULL_TREE;
9664 value_range vr0 = VR_INITIALIZER;
9665 value_range vr1 = VR_INITIALIZER;
9666 wide_int may_be_nonzero0, may_be_nonzero1;
9667 wide_int must_be_nonzero0, must_be_nonzero1;
9668 wide_int mask;
9670 if (TREE_CODE (op0) == SSA_NAME)
9671 vr0 = *(get_value_range (op0));
9672 else if (is_gimple_min_invariant (op0))
9673 set_value_range_to_value (&vr0, op0, NULL);
9674 else
9675 return false;
9677 if (TREE_CODE (op1) == SSA_NAME)
9678 vr1 = *(get_value_range (op1));
9679 else if (is_gimple_min_invariant (op1))
9680 set_value_range_to_value (&vr1, op1, NULL);
9681 else
9682 return false;
9684 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
9685 &must_be_nonzero0))
9686 return false;
9687 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
9688 &must_be_nonzero1))
9689 return false;
9691 switch (gimple_assign_rhs_code (stmt))
9693 case BIT_AND_EXPR:
9694 mask = may_be_nonzero0.and_not (must_be_nonzero1);
9695 if (mask == 0)
9697 op = op0;
9698 break;
9700 mask = may_be_nonzero1.and_not (must_be_nonzero0);
9701 if (mask == 0)
9703 op = op1;
9704 break;
9706 break;
9707 case BIT_IOR_EXPR:
9708 mask = may_be_nonzero0.and_not (must_be_nonzero1);
9709 if (mask == 0)
9711 op = op1;
9712 break;
9714 mask = may_be_nonzero1.and_not (must_be_nonzero0);
9715 if (mask == 0)
9717 op = op0;
9718 break;
9720 break;
9721 default:
9722 gcc_unreachable ();
9725 if (op == NULL_TREE)
9726 return false;
9728 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
9729 update_stmt (gsi_stmt (*gsi));
9730 return true;
/* We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
   a known value range VR.

   If there is one and only one value which will satisfy the
   conditional, then return that value.  Else return NULL.

   If signed overflow must be undefined for the value to satisfy
   the conditional, then set *STRICT_OVERFLOW_P to true.  */

static tree
test_for_singularity (enum tree_code cond_code, tree op0,
		      tree op1, value_range *vr,
		      bool *strict_overflow_p)
{
  tree min = NULL;
  tree max = NULL;

  /* Extract minimum/maximum values which satisfy the conditional as it was
     written.  */
  if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      /* This should not be negative infinity; there is no overflow
	 here.  */
      min = TYPE_MIN_VALUE (TREE_TYPE (op0));

      max = op1;
      /* For x < MAX the largest satisfying value is MAX - 1; skip the
	 adjustment when MAX is an overflow infinity.  */
      if (cond_code == LT_EXPR && !is_overflow_infinity (max))
	{
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
	  /* The synthesized bound should not itself trigger warnings.  */
	  if (EXPR_P (max))
	    TREE_NO_WARNING (max) = 1;
	}
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      /* This should not be positive infinity; there is no overflow
	 here.  */
      max = TYPE_MAX_VALUE (TREE_TYPE (op0));

      min = op1;
      /* For x > MIN the smallest satisfying value is MIN + 1; skip the
	 adjustment when MIN is an overflow infinity.  */
      if (cond_code == GT_EXPR && !is_overflow_infinity (min))
	{
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
	  /* The synthesized bound should not itself trigger warnings.  */
	  if (EXPR_P (min))
	    TREE_NO_WARNING (min) = 1;
	}
    }

  /* Now refine the minimum and maximum values using any
     value range information we have for op0.  */
  if (min && max)
    {
      if (compare_values (vr->min, min) == 1)
	min = vr->min;
      if (compare_values (vr->max, max) == -1)
	max = vr->max;

      /* If the new min/max values have converged to a single value,
	 then there is only one value which can satisfy the condition,
	 return that value.  */
      if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
	{
	  /* The singularity relies on a range bound that only exists
	     under undefined signed overflow; record that so callers can
	     warn / bail out appropriately.  */
	  if ((cond_code == LE_EXPR || cond_code == LT_EXPR)
	      && is_overflow_infinity (vr->max))
	    *strict_overflow_p = true;
	  if ((cond_code == GE_EXPR || cond_code == GT_EXPR)
	      && is_overflow_infinity (vr->min))
	    *strict_overflow_p = true;

	  return min;
	}
    }
  return NULL;
}
/* Return whether the value range *VR fits in an integer type specified
   by DEST_PRECISION and DEST_SGN.  */

static bool
range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
{
  tree src_type;
  unsigned src_precision;
  widest_int tem;
  signop src_sgn;

  /* We can only handle integral and pointer types.  */
  src_type = TREE_TYPE (vr->min);
  if (!INTEGRAL_TYPE_P (src_type)
      && !POINTER_TYPE_P (src_type))
    return false;

  /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
     and so is an identity transform.  */
  src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
  src_sgn = TYPE_SIGN (src_type);
  if ((src_precision < dest_precision
       && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
      || (src_precision == dest_precision && src_sgn == dest_sgn))
    return true;

  /* Now we can only handle ranges with constant bounds.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* For sign changes, the MSB of the wide_int has to be clear.
     An unsigned value with its MSB set cannot be represented by
     a signed wide_int, while a negative value cannot be represented
     by an unsigned wide_int.  */
  if (src_sgn != dest_sgn
      && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
    return false;

  /* Then we can perform the conversion on both ends and compare
     the result for equality.  */
  tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
  if (tem != wi::to_widest (vr->min))
    return false;
  tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
  if (tem != wi::to_widest (vr->max))
    return false;

  return true;
}
/* Simplify a conditional using a relational operator to an equality
   test if the range information indicates only one value can satisfy
   the original conditional.  Returns true if STMT was changed.  */

static bool
simplify_cond_using_ranges (gcond *stmt)
{
  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);
  enum tree_code cond_code = gimple_cond_code (stmt);

  if (cond_code != NE_EXPR
      && cond_code != EQ_EXPR
      && TREE_CODE (op0) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && is_gimple_min_invariant (op1))
    {
      value_range *vr = get_value_range (op0);

      /* If we have range information for OP0, then we might be
	 able to simplify this conditional.  */
      if (vr->type == VR_RANGE)
	{
	  enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
	  bool sop = false;
	  tree new_tree = test_for_singularity (cond_code, op0, op1, vr, &sop);

	  /* Only use the singularity if it does not depend on undefined
	     signed overflow, or overflow is undefined for OP0's type.  */
	  if (new_tree
	      && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, " into ");
		}

	      /* Exactly one value satisfies the condition, so rewrite
		 "op0 REL op1" as "op0 == new_tree".  */
	      gimple_cond_set_code (stmt, EQ_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, "\n");
		}

	      if (sop && issue_strict_overflow_warning (wc))
		{
		  location_t location = input_location;
		  if (gimple_has_location (stmt))
		    location = gimple_location (stmt);

		  warning_at (location, OPT_Wstrict_overflow,
			      "assuming signed overflow does not occur when "
			      "simplifying conditional");
		}

	      return true;
	    }

	  /* Try again after inverting the condition.  We only deal
	     with integral types here, so no need to worry about
	     issues with inverting FP comparisons.  */
	  sop = false;
	  new_tree = test_for_singularity
		       (invert_tree_comparison (cond_code, false),
			op0, op1, vr, &sop);

	  if (new_tree
	      && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, " into ");
		}

	      /* The inverted condition has a single satisfying value, so
		 the original is equivalent to "op0 != new_tree".  */
	      gimple_cond_set_code (stmt, NE_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, "\n");
		}

	      if (sop && issue_strict_overflow_warning (wc))
		{
		  location_t location = input_location;
		  if (gimple_has_location (stmt))
		    location = gimple_location (stmt);

		  warning_at (location, OPT_Wstrict_overflow,
			      "assuming signed overflow does not occur when "
			      "simplifying conditional");
		}

	      return true;
	    }
	}
    }

  /* If we have a comparison of an SSA_NAME (OP0) against a constant,
     see if OP0 was set by a type conversion where the source of
     the conversion is another SSA_NAME with a range that fits
     into the range of OP0's type.

     If so, the conversion is redundant as the earlier SSA_NAME can be
     used for the comparison directly if we just massage the constant in the
     comparison.  */
  if (TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == INTEGER_CST)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
      tree innerop;

      if (!is_gimple_assign (def_stmt)
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
	return false;

      innerop = gimple_assign_rhs1 (def_stmt);

      if (TREE_CODE (innerop) == SSA_NAME
	  && !POINTER_TYPE_P (TREE_TYPE (innerop))
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
	  && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
	{
	  value_range *vr = get_value_range (innerop);

	  if (range_int_cst_p (vr)
	      && range_fits_type_p (vr,
				    TYPE_PRECISION (TREE_TYPE (op0)),
				    TYPE_SIGN (TREE_TYPE (op0)))
	      && int_fits_type_p (op1, TREE_TYPE (innerop))
	      /* The range must not have overflowed, or if it did overflow
		 we must not be wrapping/trapping overflow and optimizing
		 with strict overflow semantics.  */
	      && ((!is_negative_overflow_infinity (vr->min)
		   && !is_positive_overflow_infinity (vr->max))
		  || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
	    {
	      /* If the range overflowed and the user has asked for warnings
		 when strict overflow semantics were used to optimize code,
		 issue an appropriate warning.  */
	      if (cond_code != EQ_EXPR && cond_code != NE_EXPR
		  && (is_negative_overflow_infinity (vr->min)
		      || is_positive_overflow_infinity (vr->max))
		  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
		{
		  location_t location;

		  if (!gimple_has_location (stmt))
		    location = input_location;
		  else
		    location = gimple_location (stmt);
		  warning_at (location, OPT_Wstrict_overflow,
			      "assuming signed overflow does not occur when "
			      "simplifying conditional");
		}

	      /* Compare the inner operand against the constant re-expressed
		 in the inner type; the conversion defining OP0 is dead.  */
	      tree newconst = fold_convert (TREE_TYPE (innerop), op1);
	      gimple_cond_set_lhs (stmt, innerop);
	      gimple_cond_set_rhs (stmt, newconst);
	      return true;
	    }
	}
    }

  return false;
}
/* Simplify a switch statement using the value range of the switch
   argument.  Truncates case-label ranges that only partially overlap
   the operand's range and queues unreachable case edges for removal;
   the switch itself is updated later from TO_UPDATE_SWITCH_STMTS.
   Always returns false (the statement is not changed in place).  */

static bool
simplify_switch_using_ranges (gswitch *stmt)
{
  tree op = gimple_switch_index (stmt);
  value_range *vr = NULL;
  bool take_default;
  edge e;
  edge_iterator ei;
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;
  size_t k = 1, l = 0;

  if (TREE_CODE (op) == SSA_NAME)
    {
      vr = get_value_range (op);

      /* We can only handle integer ranges.  */
      if ((vr->type != VR_RANGE
	   && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
	{
	  /* Empty label range [1, 0] — only the default is reachable.  */
	  i = 1;
	  j = 0;
	}
      else
	{
	  /* Exactly one label matches the constant.  */
	  j = i;
	}
    }
  else
    return false;

  n = gimple_switch_num_labels (stmt);

  /* We can truncate the case label ranges that partially overlap with OP's
     value range.  */
  size_t min_idx = 1, max_idx = 0;
  if (vr != NULL)
    find_case_label_range (stmt, vr->min, vr->max, &min_idx, &max_idx);
  if (min_idx <= max_idx)
    {
      tree min_label = gimple_switch_label (stmt, min_idx);
      tree max_label = gimple_switch_label (stmt, max_idx);

      /* Avoid changing the type of the case labels when truncating.  */
      tree case_label_type = TREE_TYPE (CASE_LOW (min_label));
      tree vr_min = fold_convert (case_label_type, vr->min);
      tree vr_max = fold_convert (case_label_type, vr->max);

      if (vr->type == VR_RANGE)
	{
	  /* If OP's value range is [2,8] and the low label range is
	     0 ... 3, truncate the label's range to 2 .. 3.  */
	  if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
	      && CASE_HIGH (min_label) != NULL_TREE
	      && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
	    CASE_LOW (min_label) = vr_min;

	  /* If OP's value range is [2,8] and the high label range is
	     7 ... 10, truncate the label's range to 7 .. 8.  */
	  if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
	      && CASE_HIGH (max_label) != NULL_TREE
	      && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
	    CASE_HIGH (max_label) = vr_max;
	}
      else if (vr->type == VR_ANTI_RANGE)
	{
	  tree one_cst = build_one_cst (case_label_type);

	  if (min_label == max_label)
	    {
	      /* If OP's value range is ~[7,8] and the label's range is
		 7 ... 10, truncate the label's range to 9 ... 10.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) == 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) > 0)
		CASE_LOW (min_label)
		  = int_const_binop (PLUS_EXPR, vr_max, one_cst);

	      /* If OP's value range is ~[7,8] and the label's range is
		 5 ... 8, truncate the label's range to 5 ... 6.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) == 0)
		CASE_HIGH (min_label)
		  = int_const_binop (MINUS_EXPR, vr_min, one_cst);
	    }
	  else
	    {
	      /* If OP's value range is ~[2,8] and the low label range is
		 0 ... 3, truncate the label's range to 0 ... 1.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
		CASE_HIGH (min_label)
		  = int_const_binop (MINUS_EXPR, vr_min, one_cst);

	      /* If OP's value range is ~[2,8] and the high label range is
		 7 ... 10, truncate the label's range to 9 ... 10.  */
	      if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
		  && CASE_HIGH (max_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
		CASE_LOW (max_label)
		  = int_const_binop (PLUS_EXPR, vr_max, one_cst);
	    }
	}

      /* Canonicalize singleton case ranges.  */
      if (tree_int_cst_equal (CASE_LOW (min_label), CASE_HIGH (min_label)))
	CASE_HIGH (min_label) = NULL_TREE;
      if (tree_int_cst_equal (CASE_LOW (max_label), CASE_HIGH (max_label)))
	CASE_HIGH (max_label) = NULL_TREE;
    }

  /* We can also eliminate case labels that lie completely outside OP's value
     range.  */

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  */
  vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);

  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);

  for (; k <= l; ++k, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);

  /* Mark needed edges.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (stmt),
		     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
	{
	  /* Edge is kept; clear the marker.  */
	  e->aux = NULL;
	  continue;
	}
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "removing unreachable case label\n");
	}
      to_remove_edges.safe_push (e);
      e->flags &= ~EDGE_EXECUTABLE;
    }

  /* And queue an update for the stmt.  */
  su.stmt = stmt;
  su.vec = vec2;
  to_update_switch_stmts.safe_push (su);
  return false;
}
/* Simplify an integral conversion from an SSA name in STMT: if STMT
   converts MIDDLEOP which itself was converted from INNEROP, and the
   inner->final conversion produces the same values as inner->middle->final
   over INNEROP's value range, drop the intermediate conversion.
   Returns true if STMT was changed.  */

static bool
simplify_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
{
  tree innerop, middleop, finaltype;
  gimple *def_stmt;
  signop inner_sgn, middle_sgn, final_sgn;
  unsigned inner_prec, middle_prec, final_prec;
  widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;

  finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
  if (!INTEGRAL_TYPE_P (finaltype))
    return false;
  middleop = gimple_assign_rhs1 (stmt);
  def_stmt = SSA_NAME_DEF_STMT (middleop);
  if (!is_gimple_assign (def_stmt)
      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
    return false;
  innerop = gimple_assign_rhs1 (def_stmt);
  if (TREE_CODE (innerop) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
    return false;

  /* Get the value-range of the inner operand.  Use get_range_info in
     case innerop was created during substitute-and-fold.  */
  wide_int imin, imax;
  if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))
      || get_range_info (innerop, &imin, &imax) != VR_RANGE)
    return false;
  innermin = widest_int::from (imin, TYPE_SIGN (TREE_TYPE (innerop)));
  innermax = widest_int::from (imax, TYPE_SIGN (TREE_TYPE (innerop)));

  /* Simulate the conversion chain to check if the result is equal if
     the middle conversion is removed.  */
  inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
  middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
  final_prec = TYPE_PRECISION (finaltype);

  /* If the first conversion is not injective, the second must not
     be widening.  */
  if (wi::gtu_p (innermax - innermin,
		 wi::mask <widest_int> (middle_prec, false))
      && middle_prec < final_prec)
    return false;
  /* We also want a medium value so that we can track the effect that
     narrowing conversions with sign change have.  */
  inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
  if (inner_sgn == UNSIGNED)
    innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
  else
    innermed = 0;
  /* Only use the medium value if it actually lies strictly inside the
     range; otherwise fall back to the minimum.  */
  if (wi::cmp (innermin, innermed, inner_sgn) >= 0
      || wi::cmp (innermed, innermax, inner_sgn) >= 0)
    innermed = innermin;

  middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
  middlemin = wi::ext (innermin, middle_prec, middle_sgn);
  middlemed = wi::ext (innermed, middle_prec, middle_sgn);
  middlemax = wi::ext (innermax, middle_prec, middle_sgn);

  /* Require that the final conversion applied to both the original
     and the intermediate range produces the same result.  */
  final_sgn = TYPE_SIGN (finaltype);
  if (wi::ext (middlemin, final_prec, final_sgn)
      != wi::ext (innermin, final_prec, final_sgn)
      || wi::ext (middlemed, final_prec, final_sgn)
      != wi::ext (innermed, final_prec, final_sgn)
      || wi::ext (middlemax, final_prec, final_sgn)
      != wi::ext (innermax, final_prec, final_sgn))
    return false;

  /* The middle conversion is redundant: convert directly from INNEROP.  */
  gimple_assign_set_rhs1 (stmt, innerop);
  fold_stmt (gsi, follow_single_use_edges);
  return true;
}
/* Simplify a conversion from integral SSA name to float in STMT.
   If the operand's value range fits a (possibly narrower) signed mode
   that the target can convert to FLTMODE, insert a truncation or
   sign-change to that mode first.  Returns true if STMT was changed.  */

static bool
simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
					gimple *stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  value_range *vr = get_value_range (rhs1);
  machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
  machine_mode mode;
  tree tem;
  gassign *conv;

  /* We can only handle constant ranges.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* First check if we can use a signed type in place of an unsigned.  */
  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
      && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
	  != CODE_FOR_nothing)
      && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
    mode = TYPE_MODE (TREE_TYPE (rhs1));
  /* If we can do the conversion in the current input mode do nothing.  */
  else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
			TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
    return false;
  /* Otherwise search for a mode we can use, starting from the narrowest
     integer mode available.  */
  else
    {
      mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
      do
	{
	  /* If we cannot do a signed conversion to float from mode
	     or if the value-range does not fit in the signed type
	     try with a wider mode.  */
	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
	      && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
	    break;

	  mode = GET_MODE_WIDER_MODE (mode);
	  /* But do not widen the input.  Instead leave that to the
	     optabs expansion code.  */
	  if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
	    return false;
	}
      while (mode != VOIDmode);
      if (mode == VOIDmode)
	return false;
    }

  /* It works, insert a truncation or sign-change before the
     float conversion.  */
  tem = make_ssa_name (build_nonstandard_integer_type
			 (GET_MODE_PRECISION (mode), 0));
  conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
  gimple_assign_set_rhs1 (stmt, tem);
  fold_stmt (gsi, follow_single_use_edges);

  return true;
}
/* Simplify an internal fn call using ranges if possible.  Handles the
   UBSAN_CHECK_{ADD,SUB,MUL} and {ADD,SUB,MUL}_OVERFLOW internal calls:
   when check_for_binary_op_overflow can decide the overflow outcome,
   replace the call with plain arithmetic (plus a COMPLEX_EXPR result
   for the *_OVERFLOW forms).  Returns true if STMT was replaced.  */

static bool
simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
{
  enum tree_code subcode;
  bool is_ubsan = false;
  bool ovf = false;
  switch (gimple_call_internal_fn (stmt))
    {
    case IFN_UBSAN_CHECK_ADD:
      subcode = PLUS_EXPR;
      is_ubsan = true;
      break;
    case IFN_UBSAN_CHECK_SUB:
      subcode = MINUS_EXPR;
      is_ubsan = true;
      break;
    case IFN_UBSAN_CHECK_MUL:
      subcode = MULT_EXPR;
      is_ubsan = true;
      break;
    case IFN_ADD_OVERFLOW:
      subcode = PLUS_EXPR;
      break;
    case IFN_SUB_OVERFLOW:
      subcode = MINUS_EXPR;
      break;
    case IFN_MUL_OVERFLOW:
      subcode = MULT_EXPR;
      break;
    default:
      return false;
    }

  tree op0 = gimple_call_arg (stmt, 0);
  tree op1 = gimple_call_arg (stmt, 1);
  tree type;
  if (is_ubsan)
    {
      type = TREE_TYPE (op0);
      if (VECTOR_TYPE_P (type))
	return false;
    }
  else if (gimple_call_lhs (stmt) == NULL_TREE)
    return false;
  else
    /* For *_OVERFLOW the lhs is a complex value; operate on its
       element type.  */
    type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
  /* Give up when the overflow outcome cannot be decided, or when a
     ubsan check is known to overflow (the check must then stay).  */
  if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
      || (is_ubsan && ovf))
    return false;

  gimple *g;
  location_t loc = gimple_location (stmt);
  if (is_ubsan)
    g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
  else
    {
      /* Perform the arithmetic in an unsigned type when it might
	 overflow or the operand types do not match, to avoid
	 introducing undefined signed overflow.  */
      int prec = TYPE_PRECISION (type);
      tree utype = type;
      if (ovf
	  || !useless_type_conversion_p (type, TREE_TYPE (op0))
	  || !useless_type_conversion_p (type, TREE_TYPE (op1)))
	utype = build_nonstandard_integer_type (prec, 1);
      if (TREE_CODE (op0) == INTEGER_CST)
	op0 = fold_convert (utype, op0);
      else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
	{
	  g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
	  gimple_set_location (g, loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  op0 = gimple_assign_lhs (g);
	}
      if (TREE_CODE (op1) == INTEGER_CST)
	op1 = fold_convert (utype, op1);
      else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
	{
	  g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
	  gimple_set_location (g, loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  op1 = gimple_assign_lhs (g);
	}
      g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      if (utype != type)
	{
	  g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
				   gimple_assign_lhs (g));
	  gimple_set_location (g, loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	}
      /* Build the (result, overflow-flag) complex the callers of
	 *_OVERFLOW expect.  */
      g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
			       gimple_assign_lhs (g),
			       build_int_cst (type, ovf));
    }
  gimple_set_location (g, loc);
  gsi_replace (gsi, g, false);
  return true;
}
10466 /* Return true if VAR is a two-valued variable. Set a and b with the
10467 two-values when it is true. Return false otherwise. */
10469 static bool
10470 two_valued_val_range_p (tree var, tree *a, tree *b)
10472 value_range *vr = get_value_range (var);
10473 if ((vr->type != VR_RANGE
10474 && vr->type != VR_ANTI_RANGE)
10475 || TREE_CODE (vr->min) != INTEGER_CST
10476 || TREE_CODE (vr->max) != INTEGER_CST)
10477 return false;
10479 if (vr->type == VR_RANGE
10480 && wi::sub (vr->max, vr->min) == 1)
10482 *a = vr->min;
10483 *b = vr->max;
10484 return true;
10487 /* ~[TYPE_MIN + 1, TYPE_MAX - 1] */
10488 if (vr->type == VR_ANTI_RANGE
10489 && wi::sub (vr->min, vrp_val_min (TREE_TYPE (var))) == 1
10490 && wi::sub (vrp_val_max (TREE_TYPE (var)), vr->max) == 1)
10492 *a = vrp_val_min (TREE_TYPE (var));
10493 *b = vrp_val_max (TREE_TYPE (var));
10494 return true;
10497 return false;
/* Simplify STMT using ranges if possible.  Dispatches on the statement
   kind and rhs code to the specialized simplifiers above.  Returns true
   if the statement was simplified.  */

static bool
simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  if (is_gimple_assign (stmt))
    {
      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
      tree rhs1 = gimple_assign_rhs1 (stmt);
      tree rhs2 = gimple_assign_rhs2 (stmt);
      tree lhs = gimple_assign_lhs (stmt);
      tree val1 = NULL_TREE, val2 = NULL_TREE;
      use_operand_p use_p;
      gimple *use_stmt;

      /* Convert:
	 LHS = CST BINOP VAR
	 Where VAR is two-valued and LHS is used in GIMPLE_COND only
	 to
	 LHS = VAR == VAL1 ? (CST BINOP VAL1) : (CST BINOP VAL2)

	 Also handles:
	 LHS = VAR BINOP CST
	 Where VAR is two-valued and LHS is used in GIMPLE_COND only
	 to
	 LHS = VAR == VAL1 ? (VAL1 BINOP CST) : (VAL2 BINOP CST) */

      if (TREE_CODE_CLASS (rhs_code) == tcc_binary
	  && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	  && ((TREE_CODE (rhs1) == INTEGER_CST
	       && TREE_CODE (rhs2) == SSA_NAME)
	      || (TREE_CODE (rhs2) == INTEGER_CST
		  && TREE_CODE (rhs1) == SSA_NAME))
	  && single_imm_use (lhs, &use_p, &use_stmt)
	  && gimple_code (use_stmt) == GIMPLE_COND)
	{
	  tree new_rhs1 = NULL_TREE;
	  tree new_rhs2 = NULL_TREE;
	  tree cmp_var = NULL_TREE;

	  if (TREE_CODE (rhs2) == SSA_NAME
	      && two_valued_val_range_p (rhs2, &val1, &val2))
	    {
	      /* Optimize RHS1 OP [VAL1, VAL2].  */
	      new_rhs1 = int_const_binop (rhs_code, rhs1, val1);
	      new_rhs2 = int_const_binop (rhs_code, rhs1, val2);
	      cmp_var = rhs2;
	    }
	  else if (TREE_CODE (rhs1) == SSA_NAME
		   && two_valued_val_range_p (rhs1, &val1, &val2))
	    {
	      /* Optimize [VAL1, VAL2] OP RHS2.  */
	      new_rhs1 = int_const_binop (rhs_code, val1, rhs2);
	      new_rhs2 = int_const_binop (rhs_code, val2, rhs2);
	      cmp_var = rhs1;
	    }

	  /* If we could not find two-vals or the optimization is invalid as
	     in divide by zero, new_rhs1 / new_rhs2 will be NULL_TREE.  */
	  if (new_rhs1 && new_rhs2)
	    {
	      tree cond = build2 (EQ_EXPR, boolean_type_node, cmp_var, val1);
	      gimple_assign_set_rhs_with_ops (gsi,
					      COND_EXPR, cond,
					      new_rhs1,
					      new_rhs2);
	      update_stmt (gsi_stmt (*gsi));
	      fold_stmt (gsi, follow_single_use_edges);
	      return true;
	    }
	}

      switch (rhs_code)
	{
	case EQ_EXPR:
	case NE_EXPR:
	  /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
	     if the RHS is zero or one, and the LHS are known to be boolean
	     values.  */
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_truth_ops_using_ranges (gsi, stmt);
	  break;

	/* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
	   and BIT_AND_EXPR respectively if the first operand is greater
	   than zero and the second operand is an exact power of two.
	   Also optimize TRUNC_MOD_EXPR away if the second operand is
	   constant and the first operand already has the right value
	   range.  */
	case TRUNC_DIV_EXPR:
	case TRUNC_MOD_EXPR:
	  if ((TREE_CODE (rhs1) == SSA_NAME
	       || TREE_CODE (rhs1) == INTEGER_CST)
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_div_or_mod_using_ranges (gsi, stmt);
	  break;

	/* Transform ABS (X) into X or -X as appropriate.  */
	case ABS_EXPR:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_abs_using_ranges (gsi, stmt);
	  break;

	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	  /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
	     if all the bits being cleared are already cleared or
	     all the bits being set are already set.  */
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_bit_ops_using_ranges (gsi, stmt);
	  break;

	CASE_CONVERT:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_conversion_using_ranges (gsi, stmt);
	  break;

	case FLOAT_EXPR:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_float_conversion_using_ranges (gsi, stmt);
	  break;

	case MIN_EXPR:
	case MAX_EXPR:
	  return simplify_min_or_max_using_ranges (gsi, stmt);

	default:
	  break;
	}
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    return simplify_cond_using_ranges (as_a <gcond *> (stmt));
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
  else if (is_gimple_call (stmt)
	   && gimple_call_internal_p (stmt))
    return simplify_internal_call_using_ranges (gsi, stmt);

  return false;
}
/* If the statement pointed by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  Handles both
   comparison assignments and GIMPLE_COND statements.  */

static bool
fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple *stmt = gsi_stmt (*si);

  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
				      gimple_assign_rhs1 (stmt),
				      gimple_assign_rhs2 (stmt),
				      stmt);
    }
  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
				    gimple_cond_lhs (cond_stmt),
				    gimple_cond_rhs (cond_stmt),
				    stmt);
  else
    return false;

  if (val)
    {
      /* For an assignment the computed boolean must be converted to
	 the type of the lhs expression.  */
      if (assignment_p)
	val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
	{
	  fprintf (dump_file, "Folding predicate ");
	  print_gimple_expr (dump_file, stmt, 0, 0);
	  fprintf (dump_file, " to ");
	  print_generic_expr (dump_file, val, 0);
	  fprintf (dump_file, "\n");
	}

      if (is_gimple_assign (stmt))
	gimple_assign_set_rhs_from_tree (si, val);
      else
	{
	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
	  gcond *cond_stmt = as_a <gcond *> (stmt);
	  if (integer_zerop (val))
	    gimple_cond_make_false (cond_stmt);
	  else if (integer_onep (val))
	    gimple_cond_make_true (cond_stmt);
	  else
	    gcc_unreachable ();
	}

      return true;
    }

  return false;
}
10708 /* Callback for substitute_and_fold folding the stmt at *SI. */
10710 static bool
10711 vrp_fold_stmt (gimple_stmt_iterator *si)
10713 if (fold_predicate_in (si))
10714 return true;
10716 return simplify_stmt_using_ranges (si);
/* Unwindable const/copy equivalences.  NOTE(review): appears to be
   managed by the jump-threading driver code outside this excerpt —
   confirm where it is allocated and unwound.  */
const_and_copies *equiv_stack;
/* A trivial wrapper so that we can present the generic jump threading
   code with a simple API for simplifying statements.  STMT is the
   statement we want to simplify, WITHIN_STMT provides the location
   for any overflow warnings.  Returns the simplified value (a constant,
   a CASE_LABEL_EXPR, or a singleton range value) or NULL_TREE.  */

static tree
simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
    class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED)
{
  if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    return vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
				     gimple_cond_lhs (cond_stmt),
				     gimple_cond_rhs (cond_stmt),
				     within_stmt);

  /* We simplify a switch statement by trying to determine which case label
     will be taken.  If we are successful then we return the corresponding
     CASE_LABEL_EXPR.  */
  if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
    {
      tree op = gimple_switch_index (switch_stmt);
      if (TREE_CODE (op) != SSA_NAME)
	return NULL_TREE;

      value_range *vr = get_value_range (op);
      if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return NULL_TREE;

      if (vr->type == VR_RANGE)
	{
	  size_t i, j;
	  /* Get the range of labels that contain a part of the operand's
	     value range.  */
	  find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);

	  /* Is there only one such label?  */
	  if (i == j)
	    {
	      tree label = gimple_switch_label (switch_stmt, i);

	      /* The i'th label will be taken only if the value range of the
		 operand is entirely within the bounds of this label.  */
	      if (CASE_HIGH (label) != NULL_TREE
		  ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
		     && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
		  : (tree_int_cst_equal (CASE_LOW (label), vr->min)
		     && tree_int_cst_equal (vr->min, vr->max)))
		return label;
	    }

	  /* If there are no such labels then the default label will be
	     taken.  */
	  if (i > j)
	    return gimple_switch_label (switch_stmt, 0);
	}

      if (vr->type == VR_ANTI_RANGE)
	{
	  unsigned n = gimple_switch_num_labels (switch_stmt);
	  tree min_label = gimple_switch_label (switch_stmt, 1);
	  tree max_label = gimple_switch_label (switch_stmt, n - 1);

	  /* The default label will be taken only if the anti-range of the
	     operand is entirely outside the bounds of all the (non-default)
	     case labels.  */
	  if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
	      && (CASE_HIGH (max_label) != NULL_TREE
		  ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
		  : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
	    return gimple_switch_label (switch_stmt, 0);
	}

      return NULL_TREE;
    }

  if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
    {
      value_range new_vr = VR_INITIALIZER;
      tree lhs = gimple_assign_lhs (assign_stmt);

      if (TREE_CODE (lhs) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      || POINTER_TYPE_P (TREE_TYPE (lhs))))
	{
	  /* If the rhs evaluates to a single constant, return it.  */
	  extract_range_from_assignment (&new_vr, assign_stmt);
	  if (range_int_cst_singleton_p (&new_vr))
	    return new_vr.min;
	}
    }

  return NULL_TREE;
}
10816 /* Blocks which have more than one predecessor and more than
10817 one successor present jump threading opportunities, i.e.,
10818 when the block is reached from a specific predecessor, we
10819 may be able to determine which of the outgoing edges will
10820 be traversed. When this optimization applies, we are able
10821 to avoid conditionals at runtime and we may expose secondary
10822 optimization opportunities.
10824 This routine is effectively a driver for the generic jump
10825 threading code. It basically just presents the generic code
10826 with edges that may be suitable for jump threading.
10828 Unlike DOM, we do not iterate VRP if jump threading was successful.
10829 While iterating may expose new opportunities for VRP, it is expected
10830 those opportunities would be very limited and the compile time cost
10831 to expose those opportunities would be significant.
10833 As jump threading opportunities are discovered, they are registered
10834 for later realization. */
10836 static void
10837 identify_jump_threads (void)
10839 basic_block bb;
10840 gcond *dummy;
10841 int i;
10842 edge e;
10844 /* Ugh. When substituting values earlier in this pass we can
10845 wipe the dominance information. So rebuild the dominator
10846 information as we need it within the jump threading code. */
10847 calculate_dominance_info (CDI_DOMINATORS);
10849 /* We do not allow VRP information to be used for jump threading
10850 across a back edge in the CFG. Otherwise it becomes too
10851 difficult to avoid eliminating loop exit tests. Of course
10852 EDGE_DFS_BACK is not accurate at this time so we have to
10853 recompute it. */
10854 mark_dfs_back_edges ();
10856 /* Do not thread across edges we are about to remove. Just marking
10857 them as EDGE_IGNORE will do. */
10858 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10859 e->flags |= EDGE_IGNORE;
10861 /* Allocate our unwinder stack to unwind any temporary equivalences
10862 that might be recorded. */
10863 equiv_stack = new const_and_copies ();
10865 /* To avoid lots of silly node creation, we create a single
10866 conditional and just modify it in-place when attempting to
10867 thread jumps. */
10868 dummy = gimple_build_cond (EQ_EXPR,
10869 integer_zero_node, integer_zero_node,
10870 NULL, NULL);
10872 /* Walk through all the blocks finding those which present a
10873 potential jump threading opportunity. We could set this up
10874 as a dominator walker and record data during the walk, but
10875 I doubt it's worth the effort for the classes of jump
10876 threading opportunities we are trying to identify at this
10877 point in compilation. */
10878 FOR_EACH_BB_FN (bb, cfun)
10880 gimple *last;
10882 /* If the generic jump threading code does not find this block
10883 interesting, then there is nothing to do. */
10884 if (! potentially_threadable_block (bb))
10885 continue;
10887 last = last_stmt (bb);
10889 /* We're basically looking for a switch or any kind of conditional with
10890 integral or pointer type arguments. Note the type of the second
10891 argument will be the same as the first argument, so no need to
10892 check it explicitly.
10894 We also handle the case where there are no statements in the
10895 block. This come up with forwarder blocks that are not
10896 optimized away because they lead to a loop header. But we do
10897 want to thread through them as we can sometimes thread to the
10898 loop exit which is obviously profitable. */
10899 if (!last
10900 || gimple_code (last) == GIMPLE_SWITCH
10901 || (gimple_code (last) == GIMPLE_COND
10902 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
10903 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
10904 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
10905 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
10906 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
10908 edge_iterator ei;
10910 /* We've got a block with multiple predecessors and multiple
10911 successors which also ends in a suitable conditional or
10912 switch statement. For each predecessor, see if we can thread
10913 it to a specific successor. */
10914 FOR_EACH_EDGE (e, ei, bb->preds)
10916 /* Do not thread across edges marked to ignoreor abnormal
10917 edges in the CFG. */
10918 if (e->flags & (EDGE_IGNORE | EDGE_COMPLEX))
10919 continue;
10921 thread_across_edge (dummy, e, true, equiv_stack, NULL,
10922 simplify_stmt_for_jump_threading);
10927 /* Clear EDGE_IGNORE. */
10928 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10929 e->flags &= ~EDGE_IGNORE;
10931 /* We do not actually update the CFG or SSA graphs at this point as
10932 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
10933 handle ASSERT_EXPRs gracefully. */
10936 /* We identified all the jump threading opportunities earlier, but could
10937 not transform the CFG at that time. This routine transforms the
10938 CFG and arranges for the dominator tree to be rebuilt if necessary.
10940 Note the SSA graph update will occur during the normal TODO
10941 processing by the pass manager. */
10942 static void
10943 finalize_jump_threads (void)
10945 thread_through_all_blocks (false);
10946 delete equiv_stack;
10949 /* Free VRP lattice. */
10951 static void
10952 vrp_free_lattice ()
10954 /* Free allocated memory. */
10955 free (vr_value);
10956 free (vr_phi_edge_counts);
10957 bitmap_obstack_release (&vrp_equiv_obstack);
10958 vrp_value_range_pool.release ();
10960 /* So that we can distinguish between VRP data being available
10961 and not available. */
10962 vr_value = NULL;
10963 vr_phi_edge_counts = NULL;
10966 /* Traverse all the blocks folding conditionals with known ranges. */
10968 static void
10969 vrp_finalize (bool warn_array_bounds_p)
10971 size_t i;
10973 values_propagated = true;
10975 if (dump_file)
10977 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
10978 dump_all_value_ranges (dump_file);
10979 fprintf (dump_file, "\n");
10982 /* Set value range to non pointer SSA_NAMEs. */
10983 for (i = 0; i < num_vr_values; i++)
10984 if (vr_value[i])
10986 tree name = ssa_name (i);
10988 if (!name
10989 || (vr_value[i]->type == VR_VARYING)
10990 || (vr_value[i]->type == VR_UNDEFINED)
10991 || (TREE_CODE (vr_value[i]->min) != INTEGER_CST)
10992 || (TREE_CODE (vr_value[i]->max) != INTEGER_CST))
10993 continue;
10995 if (POINTER_TYPE_P (TREE_TYPE (name))
10996 && ((vr_value[i]->type == VR_RANGE
10997 && range_includes_zero_p (vr_value[i]->min,
10998 vr_value[i]->max) == 0)
10999 || (vr_value[i]->type == VR_ANTI_RANGE
11000 && range_includes_zero_p (vr_value[i]->min,
11001 vr_value[i]->max) == 1)))
11002 set_ptr_nonnull (name);
11003 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
11004 set_range_info (name, vr_value[i]->type, vr_value[i]->min,
11005 vr_value[i]->max);
11008 substitute_and_fold (op_with_constant_singleton_value_range, vrp_fold_stmt);
11010 if (warn_array_bounds && warn_array_bounds_p)
11011 check_all_array_refs ();
11013 /* We must identify jump threading opportunities before we release
11014 the datastructures built by VRP. */
11015 identify_jump_threads ();
11018 /* evrp_dom_walker visits the basic blocks in the dominance order and set
11019 the Value Ranges (VR) for SSA_NAMEs in the scope. Use this VR to
11020 discover more VRs. */
11022 class evrp_dom_walker : public dom_walker
11024 public:
11025 evrp_dom_walker ()
11026 : dom_walker (CDI_DOMINATORS), stack (10)
11028 need_eh_cleanup = BITMAP_ALLOC (NULL);
11030 ~evrp_dom_walker ()
11032 BITMAP_FREE (need_eh_cleanup);
11034 virtual edge before_dom_children (basic_block);
11035 virtual void after_dom_children (basic_block);
11036 void push_value_range (tree var, value_range *vr);
11037 value_range *pop_value_range (tree var);
11038 value_range *try_find_new_range (tree op, tree_code code, tree limit);
11040 /* Cond_stack holds the old VR. */
11041 auto_vec<std::pair <tree, value_range*> > stack;
11042 bitmap need_eh_cleanup;
11043 auto_vec<gimple *> stmts_to_fixup;
11044 auto_vec<gimple *> stmts_to_remove;
11047 /* Find new range for OP such that (OP CODE LIMIT) is true. */
11049 value_range *
11050 evrp_dom_walker::try_find_new_range (tree op, tree_code code, tree limit)
11052 value_range vr = VR_INITIALIZER;
11053 value_range *old_vr = get_value_range (op);
11055 /* Discover VR when condition is true. */
11056 extract_range_for_var_from_comparison_expr (op, code, op,
11057 limit, &vr);
11058 if (old_vr->type == VR_RANGE || old_vr->type == VR_ANTI_RANGE)
11059 vrp_intersect_ranges (&vr, old_vr);
11060 /* If we found any usable VR, set the VR to ssa_name and create a
11061 PUSH old value in the stack with the old VR. */
11062 if (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE)
11064 if (old_vr->type == vr.type
11065 && vrp_operand_equal_p (old_vr->min, vr.min)
11066 && vrp_operand_equal_p (old_vr->max, vr.max))
11067 return NULL;
11068 value_range *new_vr = vrp_value_range_pool.allocate ();
11069 *new_vr = vr;
11070 return new_vr;
11072 return NULL;
11075 /* See if there is any new scope is entered with new VR and set that VR to
11076 ssa_name before visiting the statements in the scope. */
11078 edge
11079 evrp_dom_walker::before_dom_children (basic_block bb)
11081 tree op0 = NULL_TREE;
11082 edge_iterator ei;
11083 edge e;
11085 if (dump_file && (dump_flags & TDF_DETAILS))
11086 fprintf (dump_file, "Visiting BB%d\n", bb->index);
11088 stack.safe_push (std::make_pair (NULL_TREE, (value_range *)NULL));
11090 edge pred_e = NULL;
11091 FOR_EACH_EDGE (e, ei, bb->preds)
11093 /* Ignore simple backedges from this to allow recording conditions
11094 in loop headers. */
11095 if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
11096 continue;
11097 if (! pred_e)
11098 pred_e = e;
11099 else
11101 pred_e = NULL;
11102 break;
11105 if (pred_e)
11107 gimple *stmt = last_stmt (pred_e->src);
11108 if (stmt
11109 && gimple_code (stmt) == GIMPLE_COND
11110 && (op0 = gimple_cond_lhs (stmt))
11111 && TREE_CODE (op0) == SSA_NAME
11112 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))
11113 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))))
11115 if (dump_file && (dump_flags & TDF_DETAILS))
11117 fprintf (dump_file, "Visiting controlling predicate ");
11118 print_gimple_stmt (dump_file, stmt, 0, 0);
11120 /* Entering a new scope. Try to see if we can find a VR
11121 here. */
11122 tree op1 = gimple_cond_rhs (stmt);
11123 tree_code code = gimple_cond_code (stmt);
11125 if (TREE_OVERFLOW_P (op1))
11126 op1 = drop_tree_overflow (op1);
11128 /* If condition is false, invert the cond. */
11129 if (pred_e->flags & EDGE_FALSE_VALUE)
11130 code = invert_tree_comparison (gimple_cond_code (stmt),
11131 HONOR_NANS (op0));
11132 /* Add VR when (OP0 CODE OP1) condition is true. */
11133 value_range *op0_range = try_find_new_range (op0, code, op1);
11135 /* Register ranges for y in x < y where
11136 y might have ranges that are useful. */
11137 tree limit;
11138 tree_code new_code;
11139 if (TREE_CODE (op1) == SSA_NAME
11140 && extract_code_and_val_from_cond_with_ops (op1, code,
11141 op0, op1,
11142 false,
11143 &new_code, &limit))
11145 /* Add VR when (OP1 NEW_CODE LIMIT) condition is true. */
11146 value_range *op1_range = try_find_new_range (op1, new_code, limit);
11147 if (op1_range)
11148 push_value_range (op1, op1_range);
11151 if (op0_range)
11152 push_value_range (op0, op0_range);
11156 /* Visit PHI stmts and discover any new VRs possible. */
11157 bool has_unvisited_preds = false;
11158 FOR_EACH_EDGE (e, ei, bb->preds)
11159 if (e->flags & EDGE_EXECUTABLE
11160 && !(e->src->flags & BB_VISITED))
11162 has_unvisited_preds = true;
11163 break;
11166 for (gphi_iterator gpi = gsi_start_phis (bb);
11167 !gsi_end_p (gpi); gsi_next (&gpi))
11169 gphi *phi = gpi.phi ();
11170 tree lhs = PHI_RESULT (phi);
11171 if (virtual_operand_p (lhs))
11172 continue;
11173 value_range vr_result = VR_INITIALIZER;
11174 bool interesting = stmt_interesting_for_vrp (phi);
11175 if (interesting && dump_file && (dump_flags & TDF_DETAILS))
11177 fprintf (dump_file, "Visiting PHI node ");
11178 print_gimple_stmt (dump_file, phi, 0, 0);
11180 if (!has_unvisited_preds
11181 && interesting)
11182 extract_range_from_phi_node (phi, &vr_result);
11183 else
11185 set_value_range_to_varying (&vr_result);
11186 /* When we have an unvisited executable predecessor we can't
11187 use PHI arg ranges which may be still UNDEFINED but have
11188 to use VARYING for them. But we can still resort to
11189 SCEV for loop header PHIs. */
11190 struct loop *l;
11191 if (interesting
11192 && (l = loop_containing_stmt (phi))
11193 && l->header == gimple_bb (phi))
11194 adjust_range_with_scev (&vr_result, l, phi, lhs);
11196 update_value_range (lhs, &vr_result);
11198 /* Mark PHIs whose lhs we fully propagate for removal. */
11199 tree val = op_with_constant_singleton_value_range (lhs);
11200 if (val && may_propagate_copy (lhs, val))
11202 stmts_to_remove.safe_push (phi);
11203 continue;
11206 /* Set the SSA with the value range. */
11207 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
11209 if ((vr_result.type == VR_RANGE
11210 || vr_result.type == VR_ANTI_RANGE)
11211 && (TREE_CODE (vr_result.min) == INTEGER_CST)
11212 && (TREE_CODE (vr_result.max) == INTEGER_CST))
11213 set_range_info (lhs,
11214 vr_result.type, vr_result.min, vr_result.max);
11216 else if (POINTER_TYPE_P (TREE_TYPE (lhs))
11217 && ((vr_result.type == VR_RANGE
11218 && range_includes_zero_p (vr_result.min,
11219 vr_result.max) == 0)
11220 || (vr_result.type == VR_ANTI_RANGE
11221 && range_includes_zero_p (vr_result.min,
11222 vr_result.max) == 1)))
11223 set_ptr_nonnull (lhs);
11226 edge taken_edge = NULL;
11228 /* Visit all other stmts and discover any new VRs possible. */
11229 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
11230 !gsi_end_p (gsi); gsi_next (&gsi))
11232 gimple *stmt = gsi_stmt (gsi);
11233 tree output = NULL_TREE;
11234 gimple *old_stmt = stmt;
11235 bool was_noreturn = (is_gimple_call (stmt)
11236 && gimple_call_noreturn_p (stmt));
11238 if (dump_file && (dump_flags & TDF_DETAILS))
11240 fprintf (dump_file, "Visiting stmt ");
11241 print_gimple_stmt (dump_file, stmt, 0, 0);
11244 if (gcond *cond = dyn_cast <gcond *> (stmt))
11246 vrp_visit_cond_stmt (cond, &taken_edge);
11247 if (taken_edge)
11249 if (taken_edge->flags & EDGE_TRUE_VALUE)
11250 gimple_cond_make_true (cond);
11251 else if (taken_edge->flags & EDGE_FALSE_VALUE)
11252 gimple_cond_make_false (cond);
11253 else
11254 gcc_unreachable ();
11255 update_stmt (stmt);
11258 else if (stmt_interesting_for_vrp (stmt))
11260 edge taken_edge;
11261 value_range vr = VR_INITIALIZER;
11262 extract_range_from_stmt (stmt, &taken_edge, &output, &vr);
11263 if (output
11264 && (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE))
11266 update_value_range (output, &vr);
11267 vr = *get_value_range (output);
11269 /* Mark stmts whose output we fully propagate for removal. */
11270 tree val;
11271 if ((val = op_with_constant_singleton_value_range (output))
11272 && may_propagate_copy (output, val)
11273 && !stmt_could_throw_p (stmt)
11274 && !gimple_has_side_effects (stmt))
11276 stmts_to_remove.safe_push (stmt);
11277 continue;
11280 /* Set the SSA with the value range. */
11281 if (INTEGRAL_TYPE_P (TREE_TYPE (output)))
11283 if ((vr.type == VR_RANGE
11284 || vr.type == VR_ANTI_RANGE)
11285 && (TREE_CODE (vr.min) == INTEGER_CST)
11286 && (TREE_CODE (vr.max) == INTEGER_CST))
11287 set_range_info (output, vr.type, vr.min, vr.max);
11289 else if (POINTER_TYPE_P (TREE_TYPE (output))
11290 && ((vr.type == VR_RANGE
11291 && range_includes_zero_p (vr.min,
11292 vr.max) == 0)
11293 || (vr.type == VR_ANTI_RANGE
11294 && range_includes_zero_p (vr.min,
11295 vr.max) == 1)))
11296 set_ptr_nonnull (output);
11298 else
11299 set_defs_to_varying (stmt);
11301 else
11302 set_defs_to_varying (stmt);
11304 /* See if we can derive a range for any of STMT's operands. */
11305 tree op;
11306 ssa_op_iter i;
11307 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
11309 tree value;
11310 enum tree_code comp_code;
11312 /* If OP is used in such a way that we can infer a value
11313 range for it, and we don't find a previous assertion for
11314 it, create a new assertion location node for OP. */
11315 if (infer_value_range (stmt, op, &comp_code, &value))
11317 /* If we are able to infer a nonzero value range for OP,
11318 then walk backwards through the use-def chain to see if OP
11319 was set via a typecast.
11320 If so, then we can also infer a nonzero value range
11321 for the operand of the NOP_EXPR. */
11322 if (comp_code == NE_EXPR && integer_zerop (value))
11324 tree t = op;
11325 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
11326 while (is_gimple_assign (def_stmt)
11327 && CONVERT_EXPR_CODE_P
11328 (gimple_assign_rhs_code (def_stmt))
11329 && TREE_CODE
11330 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
11331 && POINTER_TYPE_P
11332 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
11334 t = gimple_assign_rhs1 (def_stmt);
11335 def_stmt = SSA_NAME_DEF_STMT (t);
11337 /* Add VR when (T COMP_CODE value) condition is
11338 true. */
11339 value_range *op_range
11340 = try_find_new_range (t, comp_code, value);
11341 if (op_range)
11342 push_value_range (t, op_range);
11345 /* Add VR when (OP COMP_CODE value) condition is true. */
11346 value_range *op_range = try_find_new_range (op,
11347 comp_code, value);
11348 if (op_range)
11349 push_value_range (op, op_range);
11353 /* Try folding stmts with the VR discovered. */
11354 bool did_replace
11355 = replace_uses_in (stmt, op_with_constant_singleton_value_range);
11356 if (fold_stmt (&gsi, follow_single_use_edges)
11357 || did_replace)
11359 stmt = gsi_stmt (gsi);
11360 update_stmt (stmt);
11361 did_replace = true;
11364 if (did_replace)
11366 /* If we cleaned up EH information from the statement,
11367 remove EH edges. */
11368 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
11369 bitmap_set_bit (need_eh_cleanup, bb->index);
11371 /* If we turned a not noreturn call into a noreturn one
11372 schedule it for fixup. */
11373 if (!was_noreturn
11374 && is_gimple_call (stmt)
11375 && gimple_call_noreturn_p (stmt))
11376 stmts_to_fixup.safe_push (stmt);
11378 if (gimple_assign_single_p (stmt))
11380 tree rhs = gimple_assign_rhs1 (stmt);
11381 if (TREE_CODE (rhs) == ADDR_EXPR)
11382 recompute_tree_invariant_for_addr_expr (rhs);
11387 /* Visit BB successor PHI nodes and replace PHI args. */
11388 FOR_EACH_EDGE (e, ei, bb->succs)
11390 for (gphi_iterator gpi = gsi_start_phis (e->dest);
11391 !gsi_end_p (gpi); gsi_next (&gpi))
11393 gphi *phi = gpi.phi ();
11394 use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
11395 tree arg = USE_FROM_PTR (use_p);
11396 if (TREE_CODE (arg) != SSA_NAME
11397 || virtual_operand_p (arg))
11398 continue;
11399 tree val = op_with_constant_singleton_value_range (arg);
11400 if (val && may_propagate_copy (arg, val))
11401 propagate_value (use_p, val);
11405 bb->flags |= BB_VISITED;
11407 return taken_edge;
11410 /* Restore/pop VRs valid only for BB when we leave BB. */
11412 void
11413 evrp_dom_walker::after_dom_children (basic_block bb ATTRIBUTE_UNUSED)
11415 gcc_checking_assert (!stack.is_empty ());
11416 while (stack.last ().first != NULL_TREE)
11417 pop_value_range (stack.last ().first);
11418 stack.pop ();
11421 /* Push the Value Range of VAR to the stack and update it with new VR. */
11423 void
11424 evrp_dom_walker::push_value_range (tree var, value_range *vr)
11426 if (SSA_NAME_VERSION (var) >= num_vr_values)
11427 return;
11428 if (dump_file && (dump_flags & TDF_DETAILS))
11430 fprintf (dump_file, "pushing new range for ");
11431 print_generic_expr (dump_file, var, 0);
11432 fprintf (dump_file, ": ");
11433 dump_value_range (dump_file, vr);
11434 fprintf (dump_file, "\n");
11436 stack.safe_push (std::make_pair (var, get_value_range (var)));
11437 vr_value[SSA_NAME_VERSION (var)] = vr;
11440 /* Pop the Value Range from the vrp_stack and update VAR with it. */
11442 value_range *
11443 evrp_dom_walker::pop_value_range (tree var)
11445 value_range *vr = stack.last ().second;
11446 gcc_checking_assert (var == stack.last ().first);
11447 if (dump_file && (dump_flags & TDF_DETAILS))
11449 fprintf (dump_file, "popping range for ");
11450 print_generic_expr (dump_file, var, 0);
11451 fprintf (dump_file, ", restoring ");
11452 dump_value_range (dump_file, vr);
11453 fprintf (dump_file, "\n");
11455 vr_value[SSA_NAME_VERSION (var)] = vr;
11456 stack.pop ();
11457 return vr;
11461 /* Main entry point for the early vrp pass which is a simplified non-iterative
11462 version of vrp where basic blocks are visited in dominance order. Value
11463 ranges discovered in early vrp will also be used by ipa-vrp. */
11465 static unsigned int
11466 execute_early_vrp ()
11468 edge e;
11469 edge_iterator ei;
11470 basic_block bb;
11472 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
11473 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
11474 scev_initialize ();
11475 calculate_dominance_info (CDI_DOMINATORS);
11476 FOR_EACH_BB_FN (bb, cfun)
11478 bb->flags &= ~BB_VISITED;
11479 FOR_EACH_EDGE (e, ei, bb->preds)
11480 e->flags |= EDGE_EXECUTABLE;
11482 vrp_initialize_lattice ();
11484 /* Walk stmts in dominance order and propagate VRP. */
11485 evrp_dom_walker walker;
11486 walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
11488 if (dump_file)
11490 fprintf (dump_file, "\nValue ranges after Early VRP:\n\n");
11491 dump_all_value_ranges (dump_file);
11492 fprintf (dump_file, "\n");
11495 /* Remove stmts in reverse order to make debug stmt creation possible. */
11496 while (! walker.stmts_to_remove.is_empty ())
11498 gimple *stmt = walker.stmts_to_remove.pop ();
11499 if (dump_file && dump_flags & TDF_DETAILS)
11501 fprintf (dump_file, "Removing dead stmt ");
11502 print_gimple_stmt (dump_file, stmt, 0, 0);
11503 fprintf (dump_file, "\n");
11505 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
11506 if (gimple_code (stmt) == GIMPLE_PHI)
11507 remove_phi_node (&gsi, true);
11508 else
11510 unlink_stmt_vdef (stmt);
11511 gsi_remove (&gsi, true);
11512 release_defs (stmt);
11516 if (!bitmap_empty_p (walker.need_eh_cleanup))
11517 gimple_purge_all_dead_eh_edges (walker.need_eh_cleanup);
11519 /* Fixup stmts that became noreturn calls. This may require splitting
11520 blocks and thus isn't possible during the dominator walk. Do this
11521 in reverse order so we don't inadvertedly remove a stmt we want to
11522 fixup by visiting a dominating now noreturn call first. */
11523 while (!walker.stmts_to_fixup.is_empty ())
11525 gimple *stmt = walker.stmts_to_fixup.pop ();
11526 fixup_noreturn_call (stmt);
11529 vrp_free_lattice ();
11530 scev_finalize ();
11531 loop_optimizer_finalize ();
11532 return 0;
11536 /* Main entry point to VRP (Value Range Propagation). This pass is
11537 loosely based on J. R. C. Patterson, ``Accurate Static Branch
11538 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
11539 Programming Language Design and Implementation, pp. 67-78, 1995.
11540 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
11542 This is essentially an SSA-CCP pass modified to deal with ranges
11543 instead of constants.
11545 While propagating ranges, we may find that two or more SSA name
11546 have equivalent, though distinct ranges. For instance,
11548 1 x_9 = p_3->a;
11549 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
11550 3 if (p_4 == q_2)
11551 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
11552 5 endif
11553 6 if (q_2)
11555 In the code above, pointer p_5 has range [q_2, q_2], but from the
11556 code we can also determine that p_5 cannot be NULL and, if q_2 had
11557 a non-varying range, p_5's range should also be compatible with it.
11559 These equivalences are created by two expressions: ASSERT_EXPR and
11560 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
11561 result of another assertion, then we can use the fact that p_5 and
11562 p_4 are equivalent when evaluating p_5's range.
11564 Together with value ranges, we also propagate these equivalences
11565 between names so that we can take advantage of information from
11566 multiple ranges when doing final replacement. Note that this
11567 equivalency relation is transitive but not symmetric.
11569 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
11570 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
11571 in contexts where that assertion does not hold (e.g., in line 6).
11573 TODO, the main difference between this pass and Patterson's is that
11574 we do not propagate edge probabilities. We only compute whether
11575 edges can be taken or not. That is, instead of having a spectrum
11576 of jump probabilities between 0 and 1, we only deal with 0, 1 and
11577 DON'T KNOW. In the future, it may be worthwhile to propagate
11578 probabilities to aid branch prediction. */
11580 static unsigned int
11581 execute_vrp (bool warn_array_bounds_p)
11583 int i;
11584 edge e;
11585 switch_update *su;
11587 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
11588 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
11589 scev_initialize ();
11591 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
11592 Inserting assertions may split edges which will invalidate
11593 EDGE_DFS_BACK. */
11594 insert_range_assertions ();
11596 to_remove_edges.create (10);
11597 to_update_switch_stmts.create (5);
11598 threadedge_initialize_values ();
11600 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
11601 mark_dfs_back_edges ();
11603 vrp_initialize_lattice ();
11604 vrp_initialize ();
11605 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
11606 vrp_finalize (warn_array_bounds_p);
11607 vrp_free_lattice ();
11609 free_numbers_of_iterations_estimates (cfun);
11611 /* ASSERT_EXPRs must be removed before finalizing jump threads
11612 as finalizing jump threads calls the CFG cleanup code which
11613 does not properly handle ASSERT_EXPRs. */
11614 remove_range_assertions ();
11616 /* If we exposed any new variables, go ahead and put them into
11617 SSA form now, before we handle jump threading. This simplifies
11618 interactions between rewriting of _DECL nodes into SSA form
11619 and rewriting SSA_NAME nodes into SSA form after block
11620 duplication and CFG manipulation. */
11621 update_ssa (TODO_update_ssa);
11623 finalize_jump_threads ();
11625 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
11626 CFG in a broken state and requires a cfg_cleanup run. */
11627 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
11628 remove_edge (e);
11629 /* Update SWITCH_EXPR case label vector. */
11630 FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
11632 size_t j;
11633 size_t n = TREE_VEC_LENGTH (su->vec);
11634 tree label;
11635 gimple_switch_set_num_labels (su->stmt, n);
11636 for (j = 0; j < n; j++)
11637 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
11638 /* As we may have replaced the default label with a regular one
11639 make sure to make it a real default label again. This ensures
11640 optimal expansion. */
11641 label = gimple_switch_label (su->stmt, 0);
11642 CASE_LOW (label) = NULL_TREE;
11643 CASE_HIGH (label) = NULL_TREE;
11646 if (to_remove_edges.length () > 0)
11648 free_dominance_info (CDI_DOMINATORS);
11649 loops_state_set (LOOPS_NEED_FIXUP);
11652 to_remove_edges.release ();
11653 to_update_switch_stmts.release ();
11654 threadedge_finalize_values ();
11656 scev_finalize ();
11657 loop_optimizer_finalize ();
11658 return 0;
11661 namespace {
11663 const pass_data pass_data_vrp =
11665 GIMPLE_PASS, /* type */
11666 "vrp", /* name */
11667 OPTGROUP_NONE, /* optinfo_flags */
11668 TV_TREE_VRP, /* tv_id */
11669 PROP_ssa, /* properties_required */
11670 0, /* properties_provided */
11671 0, /* properties_destroyed */
11672 0, /* todo_flags_start */
11673 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
11676 class pass_vrp : public gimple_opt_pass
11678 public:
11679 pass_vrp (gcc::context *ctxt)
11680 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
11683 /* opt_pass methods: */
11684 opt_pass * clone () { return new pass_vrp (m_ctxt); }
11685 void set_pass_param (unsigned int n, bool param)
11687 gcc_assert (n == 0);
11688 warn_array_bounds_p = param;
11690 virtual bool gate (function *) { return flag_tree_vrp != 0; }
11691 virtual unsigned int execute (function *)
11692 { return execute_vrp (warn_array_bounds_p); }
11694 private:
11695 bool warn_array_bounds_p;
11696 }; // class pass_vrp
11698 } // anon namespace
11700 gimple_opt_pass *
11701 make_pass_vrp (gcc::context *ctxt)
11703 return new pass_vrp (ctxt);
11706 namespace {
11708 const pass_data pass_data_early_vrp =
11710 GIMPLE_PASS, /* type */
11711 "evrp", /* name */
11712 OPTGROUP_NONE, /* optinfo_flags */
11713 TV_TREE_EARLY_VRP, /* tv_id */
11714 PROP_ssa, /* properties_required */
11715 0, /* properties_provided */
11716 0, /* properties_destroyed */
11717 0, /* todo_flags_start */
11718 ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
11721 class pass_early_vrp : public gimple_opt_pass
11723 public:
11724 pass_early_vrp (gcc::context *ctxt)
11725 : gimple_opt_pass (pass_data_early_vrp, ctxt)
11728 /* opt_pass methods: */
11729 opt_pass * clone () { return new pass_early_vrp (m_ctxt); }
11730 virtual bool gate (function *)
11732 return flag_tree_vrp != 0;
11734 virtual unsigned int execute (function *)
11735 { return execute_early_vrp (); }
11737 }; // class pass_vrp
11738 } // anon namespace
11740 gimple_opt_pass *
11741 make_pass_early_vrp (gcc::context *ctxt)
11743 return new pass_early_vrp (ctxt);