[AArch64] PR target/79913: VEC_SELECT bugs in aarch64 patterns
[official-gcc.git] / gcc / tree-vrp.c
blob6420041fd1381568df8713cbfe7770ee6e83e99d
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2017 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimple-walk.h"
44 #include "tree-cfg.h"
45 #include "tree-ssa-loop-manip.h"
46 #include "tree-ssa-loop-niter.h"
47 #include "tree-ssa-loop.h"
48 #include "tree-into-ssa.h"
49 #include "tree-ssa.h"
50 #include "intl.h"
51 #include "cfgloop.h"
52 #include "tree-scalar-evolution.h"
53 #include "tree-ssa-propagate.h"
54 #include "tree-chrec.h"
55 #include "tree-ssa-threadupdate.h"
56 #include "tree-ssa-scopedtables.h"
57 #include "tree-ssa-threadedge.h"
58 #include "omp-general.h"
59 #include "target.h"
60 #include "case-cfn-macros.h"
61 #include "params.h"
62 #include "alloc-pool.h"
63 #include "domwalk.h"
64 #include "tree-cfgcleanup.h"
66 #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
68 /* Allocation pools for tree-vrp allocations. */
69 static object_allocator<value_range> vrp_value_range_pool ("Tree VRP value ranges");
70 static bitmap_obstack vrp_equiv_obstack;
72 /* Set of SSA names found live during the RPO traversal of the function
73 for still active basic-blocks. */
74 static sbitmap *live;
76 /* Return true if the SSA name NAME is live on the edge E. */
78 static bool
79 live_on_edge (edge e, tree name)
81 return (live[e->dest->index]
82 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
85 /* Local functions. */
86 static int compare_values (tree val1, tree val2);
87 static int compare_values_warnv (tree val1, tree val2, bool *);
88 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
89 tree, tree, bool, bool *,
90 bool *);
92 /* Location information for ASSERT_EXPRs. Each instance of this
93 structure describes an ASSERT_EXPR for an SSA name. Since a single
94 SSA name may have more than one assertion associated with it, these
95 locations are kept in a linked list attached to the corresponding
96 SSA name. */
97 struct assert_locus
99 /* Basic block where the assertion would be inserted. */
100 basic_block bb;
102 /* Some assertions need to be inserted on an edge (e.g., assertions
103 generated by COND_EXPRs). In those cases, BB will be NULL. */
104 edge e;
106 /* Pointer to the statement that generated this assertion. */
107 gimple_stmt_iterator si;
109 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
110 enum tree_code comp_code;
112 /* Value being compared against. */
113 tree val;
115 /* Expression to compare. */
116 tree expr;
118 /* Next node in the linked list. */
119 assert_locus *next;
122 /* If bit I is present, it means that SSA name N_i has a list of
123 assertions that should be inserted in the IL. */
124 static bitmap need_assert_for;
126 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
127 holds a list of ASSERT_LOCUS_T nodes that describe where
128 ASSERT_EXPRs for SSA name N_I should be inserted. */
129 static assert_locus **asserts_for;
131 /* Value range array. After propagation, VR_VALUE[I] holds the range
132 of values that SSA name N_I may take. */
133 static unsigned num_vr_values;
134 static value_range **vr_value;
135 static bool values_propagated;
137 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
138 number of executable edges we saw the last time we visited the
139 node. */
140 static int *vr_phi_edge_counts;
142 struct switch_update {
143 gswitch *stmt;
144 tree vec;
147 static vec<edge> to_remove_edges;
148 static vec<switch_update> to_update_switch_stmts;
151 /* Return the maximum value for TYPE. */
153 static inline tree
154 vrp_val_max (const_tree type)
156 if (!INTEGRAL_TYPE_P (type))
157 return NULL_TREE;
159 return TYPE_MAX_VALUE (type);
162 /* Return the minimum value for TYPE. */
164 static inline tree
165 vrp_val_min (const_tree type)
167 if (!INTEGRAL_TYPE_P (type))
168 return NULL_TREE;
170 return TYPE_MIN_VALUE (type);
173 /* Return whether VAL is equal to the maximum value of its type. This
174 will be true for a positive overflow infinity. We can't do a
175 simple equality comparison with TYPE_MAX_VALUE because C typedefs
176 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
177 to the integer constant with the same value in the type. */
179 static inline bool
180 vrp_val_is_max (const_tree val)
182 tree type_max = vrp_val_max (TREE_TYPE (val));
183 return (val == type_max
184 || (type_max != NULL_TREE
185 && operand_equal_p (val, type_max, 0)));
188 /* Return whether VAL is equal to the minimum value of its type. This
189 will be true for a negative overflow infinity. */
191 static inline bool
192 vrp_val_is_min (const_tree val)
194 tree type_min = vrp_val_min (TREE_TYPE (val));
195 return (val == type_min
196 || (type_min != NULL_TREE
197 && operand_equal_p (val, type_min, 0)));
201 /* Return whether TYPE should use an overflow infinity distinct from
202 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
203 represent a signed overflow during VRP computations. An infinity
204 is distinct from a half-range, which will go from some number to
205 TYPE_{MIN,MAX}_VALUE. */
207 static inline bool
208 needs_overflow_infinity (const_tree type)
210 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
213 /* Return whether TYPE can support our overflow infinity
214 representation: we use the TREE_OVERFLOW flag, which only exists
215 for constants. If TYPE doesn't support this, we don't optimize
216 cases which would require signed overflow--we drop them to
217 VARYING. */
219 static inline bool
220 supports_overflow_infinity (const_tree type)
222 tree min = vrp_val_min (type), max = vrp_val_max (type);
223 gcc_checking_assert (needs_overflow_infinity (type));
224 return (min != NULL_TREE
225 && CONSTANT_CLASS_P (min)
226 && max != NULL_TREE
227 && CONSTANT_CLASS_P (max));
230 /* VAL is the maximum or minimum value of a type. Return a
231 corresponding overflow infinity. */
233 static inline tree
234 make_overflow_infinity (tree val)
236 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
237 val = copy_node (val);
238 TREE_OVERFLOW (val) = 1;
239 return val;
242 /* Return a negative overflow infinity for TYPE. */
244 static inline tree
245 negative_overflow_infinity (tree type)
247 gcc_checking_assert (supports_overflow_infinity (type));
248 return make_overflow_infinity (vrp_val_min (type));
251 /* Return a positive overflow infinity for TYPE. */
253 static inline tree
254 positive_overflow_infinity (tree type)
256 gcc_checking_assert (supports_overflow_infinity (type));
257 return make_overflow_infinity (vrp_val_max (type));
260 /* Return whether VAL is a negative overflow infinity. */
262 static inline bool
263 is_negative_overflow_infinity (const_tree val)
265 return (TREE_OVERFLOW_P (val)
266 && needs_overflow_infinity (TREE_TYPE (val))
267 && vrp_val_is_min (val));
270 /* Return whether VAL is a positive overflow infinity. */
272 static inline bool
273 is_positive_overflow_infinity (const_tree val)
275 return (TREE_OVERFLOW_P (val)
276 && needs_overflow_infinity (TREE_TYPE (val))
277 && vrp_val_is_max (val));
280 /* Return whether VAL is a positive or negative overflow infinity. */
282 static inline bool
283 is_overflow_infinity (const_tree val)
285 return (TREE_OVERFLOW_P (val)
286 && needs_overflow_infinity (TREE_TYPE (val))
287 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
290 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
292 static inline bool
293 stmt_overflow_infinity (gimple *stmt)
295 if (is_gimple_assign (stmt)
296 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
297 GIMPLE_SINGLE_RHS)
298 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
299 return false;
302 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
303 the same value with TREE_OVERFLOW clear. This can be used to avoid
304 confusing a regular value with an overflow value. */
306 static inline tree
307 avoid_overflow_infinity (tree val)
309 if (!is_overflow_infinity (val))
310 return val;
312 if (vrp_val_is_max (val))
313 return vrp_val_max (TREE_TYPE (val));
314 else
316 gcc_checking_assert (vrp_val_is_min (val));
317 return vrp_val_min (TREE_TYPE (val));
322 /* Set value range VR to VR_UNDEFINED. */
324 static inline void
325 set_value_range_to_undefined (value_range *vr)
327 vr->type = VR_UNDEFINED;
328 vr->min = vr->max = NULL_TREE;
329 if (vr->equiv)
330 bitmap_clear (vr->equiv);
334 /* Set value range VR to VR_VARYING. */
336 static inline void
337 set_value_range_to_varying (value_range *vr)
339 vr->type = VR_VARYING;
340 vr->min = vr->max = NULL_TREE;
341 if (vr->equiv)
342 bitmap_clear (vr->equiv);
346 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
348 static void
349 set_value_range (value_range *vr, enum value_range_type t, tree min,
350 tree max, bitmap equiv)
352 /* Check the validity of the range. */
353 if (flag_checking
354 && (t == VR_RANGE || t == VR_ANTI_RANGE))
356 int cmp;
358 gcc_assert (min && max);
360 gcc_assert ((!TREE_OVERFLOW_P (min) || is_overflow_infinity (min))
361 && (!TREE_OVERFLOW_P (max) || is_overflow_infinity (max)));
363 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
364 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
366 cmp = compare_values (min, max);
367 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
370 if (flag_checking
371 && (t == VR_UNDEFINED || t == VR_VARYING))
373 gcc_assert (min == NULL_TREE && max == NULL_TREE);
374 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
377 vr->type = t;
378 vr->min = min;
379 vr->max = max;
381 /* Since updating the equivalence set involves deep copying the
382 bitmaps, only do it if absolutely necessary. */
383 if (vr->equiv == NULL
384 && equiv != NULL)
385 vr->equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
387 if (equiv != vr->equiv)
389 if (equiv && !bitmap_empty_p (equiv))
390 bitmap_copy (vr->equiv, equiv);
391 else
392 bitmap_clear (vr->equiv);
397 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
398 This means adjusting T, MIN and MAX representing the case of a
399 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
400 as anti-rage ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
401 In corner cases where MAX+1 or MIN-1 wraps this will fall back
402 to varying.
403 This routine exists to ease canonicalization in the case where we
404 extract ranges from var + CST op limit. */
406 static void
407 set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
408 tree min, tree max, bitmap equiv)
410 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
411 if (t == VR_UNDEFINED)
413 set_value_range_to_undefined (vr);
414 return;
416 else if (t == VR_VARYING)
418 set_value_range_to_varying (vr);
419 return;
422 /* Nothing to canonicalize for symbolic ranges. */
423 if (TREE_CODE (min) != INTEGER_CST
424 || TREE_CODE (max) != INTEGER_CST)
426 set_value_range (vr, t, min, max, equiv);
427 return;
430 /* Wrong order for min and max, to swap them and the VR type we need
431 to adjust them. */
432 if (tree_int_cst_lt (max, min))
434 tree one, tmp;
436 /* For one bit precision if max < min, then the swapped
437 range covers all values, so for VR_RANGE it is varying and
438 for VR_ANTI_RANGE empty range, so drop to varying as well. */
439 if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
441 set_value_range_to_varying (vr);
442 return;
445 one = build_int_cst (TREE_TYPE (min), 1);
446 tmp = int_const_binop (PLUS_EXPR, max, one);
447 max = int_const_binop (MINUS_EXPR, min, one);
448 min = tmp;
450 /* There's one corner case, if we had [C+1, C] before we now have
451 that again. But this represents an empty value range, so drop
452 to varying in this case. */
453 if (tree_int_cst_lt (max, min))
455 set_value_range_to_varying (vr);
456 return;
459 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
462 /* Anti-ranges that can be represented as ranges should be so. */
463 if (t == VR_ANTI_RANGE)
465 bool is_min = vrp_val_is_min (min);
466 bool is_max = vrp_val_is_max (max);
468 if (is_min && is_max)
470 /* We cannot deal with empty ranges, drop to varying.
471 ??? This could be VR_UNDEFINED instead. */
472 set_value_range_to_varying (vr);
473 return;
475 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
476 && (is_min || is_max))
478 /* Non-empty boolean ranges can always be represented
479 as a singleton range. */
480 if (is_min)
481 min = max = vrp_val_max (TREE_TYPE (min));
482 else
483 min = max = vrp_val_min (TREE_TYPE (min));
484 t = VR_RANGE;
486 else if (is_min
487 /* As a special exception preserve non-null ranges. */
488 && !(TYPE_UNSIGNED (TREE_TYPE (min))
489 && integer_zerop (max)))
491 tree one = build_int_cst (TREE_TYPE (max), 1);
492 min = int_const_binop (PLUS_EXPR, max, one);
493 max = vrp_val_max (TREE_TYPE (max));
494 t = VR_RANGE;
496 else if (is_max)
498 tree one = build_int_cst (TREE_TYPE (min), 1);
499 max = int_const_binop (MINUS_EXPR, min, one);
500 min = vrp_val_min (TREE_TYPE (min));
501 t = VR_RANGE;
505 /* Do not drop [-INF(OVF), +INF(OVF)] to varying. (OVF) has to be sticky
506 to make sure VRP iteration terminates, otherwise we can get into
507 oscillations. */
509 set_value_range (vr, t, min, max, equiv);
512 /* Copy value range FROM into value range TO. */
514 static inline void
515 copy_value_range (value_range *to, value_range *from)
517 set_value_range (to, from->type, from->min, from->max, from->equiv);
520 /* Set value range VR to a single value. This function is only called
521 with values we get from statements, and exists to clear the
522 TREE_OVERFLOW flag so that we don't think we have an overflow
523 infinity when we shouldn't. */
525 static inline void
526 set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
528 gcc_assert (is_gimple_min_invariant (val));
529 if (TREE_OVERFLOW_P (val))
530 val = drop_tree_overflow (val);
531 set_value_range (vr, VR_RANGE, val, val, equiv);
534 /* Set value range VR to a non-negative range of type TYPE.
535 OVERFLOW_INFINITY indicates whether to use an overflow infinity
536 rather than TYPE_MAX_VALUE; this should be true if we determine
537 that the range is nonnegative based on the assumption that signed
538 overflow does not occur. */
540 static inline void
541 set_value_range_to_nonnegative (value_range *vr, tree type,
542 bool overflow_infinity)
544 tree zero;
546 if (overflow_infinity && !supports_overflow_infinity (type))
548 set_value_range_to_varying (vr);
549 return;
552 zero = build_int_cst (type, 0);
553 set_value_range (vr, VR_RANGE, zero,
554 (overflow_infinity
555 ? positive_overflow_infinity (type)
556 : TYPE_MAX_VALUE (type)),
557 vr->equiv);
560 /* Set value range VR to a non-NULL range of type TYPE. */
562 static inline void
563 set_value_range_to_nonnull (value_range *vr, tree type)
565 tree zero = build_int_cst (type, 0);
566 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
570 /* Set value range VR to a NULL range of type TYPE. */
572 static inline void
573 set_value_range_to_null (value_range *vr, tree type)
575 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
579 /* Set value range VR to a range of a truthvalue of type TYPE. */
581 static inline void
582 set_value_range_to_truthvalue (value_range *vr, tree type)
584 if (TYPE_PRECISION (type) == 1)
585 set_value_range_to_varying (vr);
586 else
587 set_value_range (vr, VR_RANGE,
588 build_int_cst (type, 0), build_int_cst (type, 1),
589 vr->equiv);
593 /* If abs (min) < abs (max), set VR to [-max, max], if
594 abs (min) >= abs (max), set VR to [-min, min]. */
596 static void
597 abs_extent_range (value_range *vr, tree min, tree max)
599 int cmp;
601 gcc_assert (TREE_CODE (min) == INTEGER_CST);
602 gcc_assert (TREE_CODE (max) == INTEGER_CST);
603 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
604 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
605 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
606 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
607 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
609 set_value_range_to_varying (vr);
610 return;
612 cmp = compare_values (min, max);
613 if (cmp == -1)
614 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
615 else if (cmp == 0 || cmp == 1)
617 max = min;
618 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
620 else
622 set_value_range_to_varying (vr);
623 return;
625 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
629 /* Return value range information for VAR.
631 If we have no values ranges recorded (ie, VRP is not running), then
632 return NULL. Otherwise create an empty range if none existed for VAR. */
634 static value_range *
635 get_value_range (const_tree var)
637 static const value_range vr_const_varying
638 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
639 value_range *vr;
640 tree sym;
641 unsigned ver = SSA_NAME_VERSION (var);
643 /* If we have no recorded ranges, then return NULL. */
644 if (! vr_value)
645 return NULL;
647 /* If we query the range for a new SSA name return an unmodifiable VARYING.
648 We should get here at most from the substitute-and-fold stage which
649 will never try to change values. */
650 if (ver >= num_vr_values)
651 return CONST_CAST (value_range *, &vr_const_varying);
653 vr = vr_value[ver];
654 if (vr)
655 return vr;
657 /* After propagation finished do not allocate new value-ranges. */
658 if (values_propagated)
659 return CONST_CAST (value_range *, &vr_const_varying);
661 /* Create a default value range. */
662 vr_value[ver] = vr = vrp_value_range_pool.allocate ();
663 memset (vr, 0, sizeof (*vr));
665 /* Defer allocating the equivalence set. */
666 vr->equiv = NULL;
668 /* If VAR is a default definition of a parameter, the variable can
669 take any value in VAR's type. */
670 if (SSA_NAME_IS_DEFAULT_DEF (var))
672 sym = SSA_NAME_VAR (var);
673 if (TREE_CODE (sym) == PARM_DECL)
675 /* Try to use the "nonnull" attribute to create ~[0, 0]
676 anti-ranges for pointers. Note that this is only valid with
677 default definitions of PARM_DECLs. */
678 if (POINTER_TYPE_P (TREE_TYPE (sym))
679 && (nonnull_arg_p (sym)
680 || get_ptr_nonnull (var)))
681 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
682 else if (INTEGRAL_TYPE_P (TREE_TYPE (sym)))
684 wide_int min, max;
685 value_range_type rtype = get_range_info (var, &min, &max);
686 if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
687 set_value_range (vr, rtype,
688 wide_int_to_tree (TREE_TYPE (var), min),
689 wide_int_to_tree (TREE_TYPE (var), max),
690 NULL);
691 else
692 set_value_range_to_varying (vr);
694 else
695 set_value_range_to_varying (vr);
697 else if (TREE_CODE (sym) == RESULT_DECL
698 && DECL_BY_REFERENCE (sym))
699 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
702 return vr;
705 /* Set value-ranges of all SSA names defined by STMT to varying. */
707 static void
708 set_defs_to_varying (gimple *stmt)
710 ssa_op_iter i;
711 tree def;
712 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
714 value_range *vr = get_value_range (def);
715 /* Avoid writing to vr_const_varying get_value_range may return. */
716 if (vr->type != VR_VARYING)
717 set_value_range_to_varying (vr);
722 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
724 static inline bool
725 vrp_operand_equal_p (const_tree val1, const_tree val2)
727 if (val1 == val2)
728 return true;
729 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
730 return false;
731 return is_overflow_infinity (val1) == is_overflow_infinity (val2);
734 /* Return true, if the bitmaps B1 and B2 are equal. */
736 static inline bool
737 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
739 return (b1 == b2
740 || ((!b1 || bitmap_empty_p (b1))
741 && (!b2 || bitmap_empty_p (b2)))
742 || (b1 && b2
743 && bitmap_equal_p (b1, b2)));
746 /* Update the value range and equivalence set for variable VAR to
747 NEW_VR. Return true if NEW_VR is different from VAR's previous
748 value.
750 NOTE: This function assumes that NEW_VR is a temporary value range
751 object created for the sole purpose of updating VAR's range. The
752 storage used by the equivalence set from NEW_VR will be freed by
753 this function. Do not call update_value_range when NEW_VR
754 is the range object associated with another SSA name. */
756 static inline bool
757 update_value_range (const_tree var, value_range *new_vr)
759 value_range *old_vr;
760 bool is_new;
762 /* If there is a value-range on the SSA name from earlier analysis
763 factor that in. */
764 if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
766 wide_int min, max;
767 value_range_type rtype = get_range_info (var, &min, &max);
768 if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
770 tree nr_min, nr_max;
771 /* Range info on SSA names doesn't carry overflow information
772 so make sure to preserve the overflow bit on the lattice. */
773 if (rtype == VR_RANGE
774 && needs_overflow_infinity (TREE_TYPE (var))
775 && (new_vr->type == VR_VARYING
776 || (new_vr->type == VR_RANGE
777 && is_negative_overflow_infinity (new_vr->min)))
778 && wi::eq_p (vrp_val_min (TREE_TYPE (var)), min))
779 nr_min = negative_overflow_infinity (TREE_TYPE (var));
780 else
781 nr_min = wide_int_to_tree (TREE_TYPE (var), min);
782 if (rtype == VR_RANGE
783 && needs_overflow_infinity (TREE_TYPE (var))
784 && (new_vr->type == VR_VARYING
785 || (new_vr->type == VR_RANGE
786 && is_positive_overflow_infinity (new_vr->max)))
787 && wi::eq_p (vrp_val_max (TREE_TYPE (var)), max))
788 nr_max = positive_overflow_infinity (TREE_TYPE (var));
789 else
790 nr_max = wide_int_to_tree (TREE_TYPE (var), max);
791 value_range nr = VR_INITIALIZER;
792 set_and_canonicalize_value_range (&nr, rtype, nr_min, nr_max, NULL);
793 vrp_intersect_ranges (new_vr, &nr);
797 /* Update the value range, if necessary. */
798 old_vr = get_value_range (var);
799 is_new = old_vr->type != new_vr->type
800 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
801 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
802 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
804 if (is_new)
806 /* Do not allow transitions up the lattice. The following
807 is slightly more awkward than just new_vr->type < old_vr->type
808 because VR_RANGE and VR_ANTI_RANGE need to be considered
809 the same. We may not have is_new when transitioning to
810 UNDEFINED. If old_vr->type is VARYING, we shouldn't be
811 called. */
812 if (new_vr->type == VR_UNDEFINED)
814 BITMAP_FREE (new_vr->equiv);
815 set_value_range_to_varying (old_vr);
816 set_value_range_to_varying (new_vr);
817 return true;
819 else
820 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
821 new_vr->equiv);
824 BITMAP_FREE (new_vr->equiv);
826 return is_new;
830 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
831 point where equivalence processing can be turned on/off. */
833 static void
834 add_equivalence (bitmap *equiv, const_tree var)
836 unsigned ver = SSA_NAME_VERSION (var);
837 value_range *vr = get_value_range (var);
839 if (*equiv == NULL)
840 *equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
841 bitmap_set_bit (*equiv, ver);
842 if (vr && vr->equiv)
843 bitmap_ior_into (*equiv, vr->equiv);
847 /* Return true if VR is ~[0, 0]. */
849 static inline bool
850 range_is_nonnull (value_range *vr)
852 return vr->type == VR_ANTI_RANGE
853 && integer_zerop (vr->min)
854 && integer_zerop (vr->max);
858 /* Return true if VR is [0, 0]. */
860 static inline bool
861 range_is_null (value_range *vr)
863 return vr->type == VR_RANGE
864 && integer_zerop (vr->min)
865 && integer_zerop (vr->max);
868 /* Return true if max and min of VR are INTEGER_CST. It's not necessary
869 a singleton. */
871 static inline bool
872 range_int_cst_p (value_range *vr)
874 return (vr->type == VR_RANGE
875 && TREE_CODE (vr->max) == INTEGER_CST
876 && TREE_CODE (vr->min) == INTEGER_CST);
879 /* Return true if VR is a INTEGER_CST singleton. */
881 static inline bool
882 range_int_cst_singleton_p (value_range *vr)
884 return (range_int_cst_p (vr)
885 && !is_overflow_infinity (vr->min)
886 && !is_overflow_infinity (vr->max)
887 && tree_int_cst_equal (vr->min, vr->max));
890 /* Return true if value range VR involves at least one symbol. */
892 static inline bool
893 symbolic_range_p (value_range *vr)
895 return (!is_gimple_min_invariant (vr->min)
896 || !is_gimple_min_invariant (vr->max));
899 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
900 otherwise. We only handle additive operations and set NEG to true if the
901 symbol is negated and INV to the invariant part, if any. */
903 static tree
904 get_single_symbol (tree t, bool *neg, tree *inv)
906 bool neg_;
907 tree inv_;
909 *inv = NULL_TREE;
910 *neg = false;
912 if (TREE_CODE (t) == PLUS_EXPR
913 || TREE_CODE (t) == POINTER_PLUS_EXPR
914 || TREE_CODE (t) == MINUS_EXPR)
916 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
918 neg_ = (TREE_CODE (t) == MINUS_EXPR);
919 inv_ = TREE_OPERAND (t, 0);
920 t = TREE_OPERAND (t, 1);
922 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
924 neg_ = false;
925 inv_ = TREE_OPERAND (t, 1);
926 t = TREE_OPERAND (t, 0);
928 else
929 return NULL_TREE;
931 else
933 neg_ = false;
934 inv_ = NULL_TREE;
937 if (TREE_CODE (t) == NEGATE_EXPR)
939 t = TREE_OPERAND (t, 0);
940 neg_ = !neg_;
943 if (TREE_CODE (t) != SSA_NAME)
944 return NULL_TREE;
946 *neg = neg_;
947 *inv = inv_;
948 return t;
951 /* The reverse operation: build a symbolic expression with TYPE
952 from symbol SYM, negated according to NEG, and invariant INV. */
954 static tree
955 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
957 const bool pointer_p = POINTER_TYPE_P (type);
958 tree t = sym;
960 if (neg)
961 t = build1 (NEGATE_EXPR, type, t);
963 if (integer_zerop (inv))
964 return t;
966 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
969 /* Return true if value range VR involves exactly one symbol SYM. */
971 static bool
972 symbolic_range_based_on_p (value_range *vr, const_tree sym)
974 bool neg, min_has_symbol, max_has_symbol;
975 tree inv;
977 if (is_gimple_min_invariant (vr->min))
978 min_has_symbol = false;
979 else if (get_single_symbol (vr->min, &neg, &inv) == sym)
980 min_has_symbol = true;
981 else
982 return false;
984 if (is_gimple_min_invariant (vr->max))
985 max_has_symbol = false;
986 else if (get_single_symbol (vr->max, &neg, &inv) == sym)
987 max_has_symbol = true;
988 else
989 return false;
991 return (min_has_symbol || max_has_symbol);
994 /* Return true if value range VR uses an overflow infinity. */
996 static inline bool
997 overflow_infinity_range_p (value_range *vr)
999 return (vr->type == VR_RANGE
1000 && (is_overflow_infinity (vr->min)
1001 || is_overflow_infinity (vr->max)));
1004 /* Return false if we can not make a valid comparison based on VR;
1005 this will be the case if it uses an overflow infinity and overflow
1006 is not undefined (i.e., -fno-strict-overflow is in effect).
1007 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
1008 uses an overflow infinity. */
1010 static bool
1011 usable_range_p (value_range *vr, bool *strict_overflow_p)
1013 gcc_assert (vr->type == VR_RANGE);
1014 if (is_overflow_infinity (vr->min))
1016 *strict_overflow_p = true;
1017 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
1018 return false;
1020 if (is_overflow_infinity (vr->max))
1022 *strict_overflow_p = true;
1023 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
1024 return false;
1026 return true;
1029 /* Return true if the result of assignment STMT is know to be non-zero.
1030 If the return value is based on the assumption that signed overflow is
1031 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1032 *STRICT_OVERFLOW_P.*/
1034 static bool
1035 gimple_assign_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
1037 enum tree_code code = gimple_assign_rhs_code (stmt);
1038 switch (get_gimple_rhs_class (code))
1040 case GIMPLE_UNARY_RHS:
1041 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1042 gimple_expr_type (stmt),
1043 gimple_assign_rhs1 (stmt),
1044 strict_overflow_p);
1045 case GIMPLE_BINARY_RHS:
1046 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1047 gimple_expr_type (stmt),
1048 gimple_assign_rhs1 (stmt),
1049 gimple_assign_rhs2 (stmt),
1050 strict_overflow_p);
1051 case GIMPLE_TERNARY_RHS:
1052 return false;
1053 case GIMPLE_SINGLE_RHS:
1054 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
1055 strict_overflow_p);
1056 case GIMPLE_INVALID_RHS:
1057 gcc_unreachable ();
1058 default:
1059 gcc_unreachable ();
1063 /* Return true if STMT is known to compute a non-zero value.
1064 If the return value is based on the assumption that signed overflow is
1065 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1066 *STRICT_OVERFLOW_P.*/
1068 static bool
1069 gimple_stmt_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
1071 switch (gimple_code (stmt))
1073 case GIMPLE_ASSIGN:
1074 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
1075 case GIMPLE_CALL:
1077 tree fndecl = gimple_call_fndecl (stmt);
1078 if (!fndecl) return false;
1079 if (flag_delete_null_pointer_checks && !flag_check_new
1080 && DECL_IS_OPERATOR_NEW (fndecl)
1081 && !TREE_NOTHROW (fndecl))
1082 return true;
1083 /* References are always non-NULL. */
1084 if (flag_delete_null_pointer_checks
1085 && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE)
1086 return true;
1087 if (flag_delete_null_pointer_checks &&
1088 lookup_attribute ("returns_nonnull",
1089 TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
1090 return true;
1092 gcall *call_stmt = as_a<gcall *> (stmt);
1093 unsigned rf = gimple_call_return_flags (call_stmt);
1094 if (rf & ERF_RETURNS_ARG)
1096 unsigned argnum = rf & ERF_RETURN_ARG_MASK;
1097 if (argnum < gimple_call_num_args (call_stmt))
1099 tree arg = gimple_call_arg (call_stmt, argnum);
1100 if (SSA_VAR_P (arg)
1101 && infer_nonnull_range_by_attribute (stmt, arg))
1102 return true;
1105 return gimple_alloca_call_p (stmt);
1107 default:
1108 gcc_unreachable ();
1112 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1113 obtained so far. */
1115 static bool
1116 vrp_stmt_computes_nonzero (gimple *stmt, bool *strict_overflow_p)
1118 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1119 return true;
1121 /* If we have an expression of the form &X->a, then the expression
1122 is nonnull if X is nonnull. */
1123 if (is_gimple_assign (stmt)
1124 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1126 tree expr = gimple_assign_rhs1 (stmt);
1127 tree base = get_base_address (TREE_OPERAND (expr, 0));
1129 if (base != NULL_TREE
1130 && TREE_CODE (base) == MEM_REF
1131 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1133 value_range *vr = get_value_range (TREE_OPERAND (base, 0));
1134 if (range_is_nonnull (vr))
1135 return true;
1139 return false;
1142 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1143 a gimple invariant, or SSA_NAME +- CST. */
1145 static bool
1146 valid_value_p (tree expr)
1148 if (TREE_CODE (expr) == SSA_NAME)
1149 return true;
1151 if (TREE_CODE (expr) == PLUS_EXPR
1152 || TREE_CODE (expr) == MINUS_EXPR)
1153 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1154 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1156 return is_gimple_min_invariant (expr);
1159 /* Return
1160 1 if VAL < VAL2
1161 0 if !(VAL < VAL2)
1162 -2 if those are incomparable. */
1163 static inline int
1164 operand_less_p (tree val, tree val2)
1166 /* LT is folded faster than GE and others. Inline the common case. */
1167 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1169 if (! is_positive_overflow_infinity (val2))
1170 return tree_int_cst_lt (val, val2);
1172 else
1174 tree tcmp;
1176 fold_defer_overflow_warnings ();
1178 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1180 fold_undefer_and_ignore_overflow_warnings ();
1182 if (!tcmp
1183 || TREE_CODE (tcmp) != INTEGER_CST)
1184 return -2;
1186 if (!integer_zerop (tcmp))
1187 return 1;
1190 /* val >= val2, not considering overflow infinity. */
1191 if (is_negative_overflow_infinity (val))
1192 return is_negative_overflow_infinity (val2) ? 0 : 1;
1193 else if (is_positive_overflow_infinity (val2))
1194 return is_positive_overflow_infinity (val) ? 0 : 1;
1196 return 0;
1199 /* Compare two values VAL1 and VAL2. Return
1201 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1202 -1 if VAL1 < VAL2,
1203 0 if VAL1 == VAL2,
1204 +1 if VAL1 > VAL2, and
1205 +2 if VAL1 != VAL2
1207 This is similar to tree_int_cst_compare but supports pointer values
1208 and values that cannot be compared at compile time.
1210 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1211 true if the return value is only valid if we assume that signed
1212 overflow is undefined. */
1214 static int
1215 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1217 if (val1 == val2)
1218 return 0;
1220 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1221 both integers. */
1222 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1223 == POINTER_TYPE_P (TREE_TYPE (val2)));
1225 /* Convert the two values into the same type. This is needed because
1226 sizetype causes sign extension even for unsigned types. */
1227 val2 = fold_convert (TREE_TYPE (val1), val2);
1228 STRIP_USELESS_TYPE_CONVERSION (val2);
1230 const bool overflow_undefined
1231 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
1232 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
1233 tree inv1, inv2;
1234 bool neg1, neg2;
1235 tree sym1 = get_single_symbol (val1, &neg1, &inv1);
1236 tree sym2 = get_single_symbol (val2, &neg2, &inv2);
1238 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
1239 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
1240 if (sym1 && sym2)
1242 /* Both values must use the same name with the same sign. */
1243 if (sym1 != sym2 || neg1 != neg2)
1244 return -2;
1246 /* [-]NAME + CST == [-]NAME + CST. */
1247 if (inv1 == inv2)
1248 return 0;
1250 /* If overflow is defined we cannot simplify more. */
1251 if (!overflow_undefined)
1252 return -2;
1254 if (strict_overflow_p != NULL
1255 && (!inv1 || !TREE_NO_WARNING (val1))
1256 && (!inv2 || !TREE_NO_WARNING (val2)))
1257 *strict_overflow_p = true;
1259 if (!inv1)
1260 inv1 = build_int_cst (TREE_TYPE (val1), 0);
1261 if (!inv2)
1262 inv2 = build_int_cst (TREE_TYPE (val2), 0);
1264 return compare_values_warnv (inv1, inv2, strict_overflow_p);
1267 const bool cst1 = is_gimple_min_invariant (val1);
1268 const bool cst2 = is_gimple_min_invariant (val2);
1270 /* If one is of the form '[-]NAME + CST' and the other is constant, then
1271 it might be possible to say something depending on the constants. */
1272 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
1274 if (!overflow_undefined)
1275 return -2;
1277 if (strict_overflow_p != NULL
1278 && (!sym1 || !TREE_NO_WARNING (val1))
1279 && (!sym2 || !TREE_NO_WARNING (val2)))
1280 *strict_overflow_p = true;
1282 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
1283 tree cst = cst1 ? val1 : val2;
1284 tree inv = cst1 ? inv2 : inv1;
1286 /* Compute the difference between the constants. If it overflows or
1287 underflows, this means that we can trivially compare the NAME with
1288 it and, consequently, the two values with each other. */
1289 wide_int diff = wi::sub (cst, inv);
1290 if (wi::cmp (0, inv, sgn) != wi::cmp (diff, cst, sgn))
1292 const int res = wi::cmp (cst, inv, sgn);
1293 return cst1 ? res : -res;
1296 return -2;
1299 /* We cannot say anything more for non-constants. */
1300 if (!cst1 || !cst2)
1301 return -2;
1303 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1305 /* We cannot compare overflowed values, except for overflow
1306 infinities. */
1307 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1309 if (strict_overflow_p != NULL)
1310 *strict_overflow_p = true;
1311 if (is_negative_overflow_infinity (val1))
1312 return is_negative_overflow_infinity (val2) ? 0 : -1;
1313 else if (is_negative_overflow_infinity (val2))
1314 return 1;
1315 else if (is_positive_overflow_infinity (val1))
1316 return is_positive_overflow_infinity (val2) ? 0 : 1;
1317 else if (is_positive_overflow_infinity (val2))
1318 return -1;
1319 return -2;
1322 return tree_int_cst_compare (val1, val2);
1324 else
1326 tree t;
1328 /* First see if VAL1 and VAL2 are not the same. */
1329 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1330 return 0;
1332 /* If VAL1 is a lower address than VAL2, return -1. */
1333 if (operand_less_p (val1, val2) == 1)
1334 return -1;
1336 /* If VAL1 is a higher address than VAL2, return +1. */
1337 if (operand_less_p (val2, val1) == 1)
1338 return 1;
1340 /* If VAL1 is different than VAL2, return +2.
1341 For integer constants we either have already returned -1 or 1
1342 or they are equivalent. We still might succeed in proving
1343 something about non-trivial operands. */
1344 if (TREE_CODE (val1) != INTEGER_CST
1345 || TREE_CODE (val2) != INTEGER_CST)
1347 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1348 if (t && integer_onep (t))
1349 return 2;
1352 return -2;
1356 /* Compare values like compare_values_warnv, but treat comparisons of
1357 nonconstants which rely on undefined overflow as incomparable. */
1359 static int
1360 compare_values (tree val1, tree val2)
1362 bool sop;
1363 int ret;
1365 sop = false;
1366 ret = compare_values_warnv (val1, val2, &sop);
1367 if (sop
1368 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1369 ret = -2;
1370 return ret;
1374 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1375 0 if VAL is not inside [MIN, MAX],
1376 -2 if we cannot tell either way.
1378 Benchmark compile/20001226-1.c compilation time after changing this
1379 function. */
1381 static inline int
1382 value_inside_range (tree val, tree min, tree max)
1384 int cmp1, cmp2;
1386 cmp1 = operand_less_p (val, min);
1387 if (cmp1 == -2)
1388 return -2;
1389 if (cmp1 == 1)
1390 return 0;
1392 cmp2 = operand_less_p (max, val);
1393 if (cmp2 == -2)
1394 return -2;
1396 return !cmp2;
1400 /* Return true if value ranges VR0 and VR1 have a non-empty
1401 intersection.
1403 Benchmark compile/20001226-1.c compilation time after changing this
1404 function.
1407 static inline bool
1408 value_ranges_intersect_p (value_range *vr0, value_range *vr1)
1410 /* The value ranges do not intersect if the maximum of the first range is
1411 less than the minimum of the second range or vice versa.
1412 When those relations are unknown, we can't do any better. */
1413 if (operand_less_p (vr0->max, vr1->min) != 0)
1414 return false;
1415 if (operand_less_p (vr1->max, vr0->min) != 0)
1416 return false;
1417 return true;
1421 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1422 include the value zero, -2 if we cannot tell. */
1424 static inline int
1425 range_includes_zero_p (tree min, tree max)
1427 tree zero = build_int_cst (TREE_TYPE (min), 0);
1428 return value_inside_range (zero, min, max);
1431 /* Return true if *VR is know to only contain nonnegative values. */
1433 static inline bool
1434 value_range_nonnegative_p (value_range *vr)
1436 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1437 which would return a useful value should be encoded as a
1438 VR_RANGE. */
1439 if (vr->type == VR_RANGE)
1441 int result = compare_values (vr->min, integer_zero_node);
1442 return (result == 0 || result == 1);
1445 return false;
1448 /* If *VR has a value rante that is a single constant value return that,
1449 otherwise return NULL_TREE. */
1451 static tree
1452 value_range_constant_singleton (value_range *vr)
1454 if (vr->type == VR_RANGE
1455 && vrp_operand_equal_p (vr->min, vr->max)
1456 && is_gimple_min_invariant (vr->min))
1457 return vr->min;
1459 return NULL_TREE;
1462 /* If OP has a value range with a single constant value return that,
1463 otherwise return NULL_TREE. This returns OP itself if OP is a
1464 constant. */
1466 static tree
1467 op_with_constant_singleton_value_range (tree op)
1469 if (is_gimple_min_invariant (op))
1470 return op;
1472 if (TREE_CODE (op) != SSA_NAME)
1473 return NULL_TREE;
1475 return value_range_constant_singleton (get_value_range (op));
1478 /* Return true if op is in a boolean [0, 1] value-range. */
1480 static bool
1481 op_with_boolean_value_range_p (tree op)
1483 value_range *vr;
1485 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1486 return true;
1488 if (integer_zerop (op)
1489 || integer_onep (op))
1490 return true;
1492 if (TREE_CODE (op) != SSA_NAME)
1493 return false;
1495 vr = get_value_range (op);
1496 return (vr->type == VR_RANGE
1497 && integer_zerop (vr->min)
1498 && integer_onep (vr->max));
1501 /* Extract value range information for VAR when (OP COND_CODE LIMIT) is
1502 true and store it in *VR_P. */
1504 static void
1505 extract_range_for_var_from_comparison_expr (tree var, enum tree_code cond_code,
1506 tree op, tree limit,
1507 value_range *vr_p)
1509 tree min, max, type;
1510 value_range *limit_vr;
1511 limit = avoid_overflow_infinity (limit);
1512 type = TREE_TYPE (var);
1513 gcc_assert (limit != var);
1515 /* For pointer arithmetic, we only keep track of pointer equality
1516 and inequality. */
1517 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1519 set_value_range_to_varying (vr_p);
1520 return;
1523 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1524 try to use LIMIT's range to avoid creating symbolic ranges
1525 unnecessarily. */
1526 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1528 /* LIMIT's range is only interesting if it has any useful information. */
1529 if (! limit_vr
1530 || limit_vr->type == VR_UNDEFINED
1531 || limit_vr->type == VR_VARYING
1532 || (symbolic_range_p (limit_vr)
1533 && ! (limit_vr->type == VR_RANGE
1534 && (limit_vr->min == limit_vr->max
1535 || operand_equal_p (limit_vr->min, limit_vr->max, 0)))))
1536 limit_vr = NULL;
1538 /* Initially, the new range has the same set of equivalences of
1539 VAR's range. This will be revised before returning the final
1540 value. Since assertions may be chained via mutually exclusive
1541 predicates, we will need to trim the set of equivalences before
1542 we are done. */
1543 gcc_assert (vr_p->equiv == NULL);
1544 add_equivalence (&vr_p->equiv, var);
1546 /* Extract a new range based on the asserted comparison for VAR and
1547 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1548 will only use it for equality comparisons (EQ_EXPR). For any
1549 other kind of assertion, we cannot derive a range from LIMIT's
1550 anti-range that can be used to describe the new range. For
1551 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1552 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1553 no single range for x_2 that could describe LE_EXPR, so we might
1554 as well build the range [b_4, +INF] for it.
1555 One special case we handle is extracting a range from a
1556 range test encoded as (unsigned)var + CST <= limit. */
1557 if (TREE_CODE (op) == NOP_EXPR
1558 || TREE_CODE (op) == PLUS_EXPR)
1560 if (TREE_CODE (op) == PLUS_EXPR)
1562 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (op, 1)),
1563 TREE_OPERAND (op, 1));
1564 max = int_const_binop (PLUS_EXPR, limit, min);
1565 op = TREE_OPERAND (op, 0);
1567 else
1569 min = build_int_cst (TREE_TYPE (var), 0);
1570 max = limit;
1573 /* Make sure to not set TREE_OVERFLOW on the final type
1574 conversion. We are willingly interpreting large positive
1575 unsigned values as negative signed values here. */
1576 min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
1577 max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
1579 /* We can transform a max, min range to an anti-range or
1580 vice-versa. Use set_and_canonicalize_value_range which does
1581 this for us. */
1582 if (cond_code == LE_EXPR)
1583 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1584 min, max, vr_p->equiv);
1585 else if (cond_code == GT_EXPR)
1586 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1587 min, max, vr_p->equiv);
1588 else
1589 gcc_unreachable ();
1591 else if (cond_code == EQ_EXPR)
1593 enum value_range_type range_type;
1595 if (limit_vr)
1597 range_type = limit_vr->type;
1598 min = limit_vr->min;
1599 max = limit_vr->max;
1601 else
1603 range_type = VR_RANGE;
1604 min = limit;
1605 max = limit;
1608 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1610 /* When asserting the equality VAR == LIMIT and LIMIT is another
1611 SSA name, the new range will also inherit the equivalence set
1612 from LIMIT. */
1613 if (TREE_CODE (limit) == SSA_NAME)
1614 add_equivalence (&vr_p->equiv, limit);
1616 else if (cond_code == NE_EXPR)
1618 /* As described above, when LIMIT's range is an anti-range and
1619 this assertion is an inequality (NE_EXPR), then we cannot
1620 derive anything from the anti-range. For instance, if
1621 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1622 not imply that VAR's range is [0, 0]. So, in the case of
1623 anti-ranges, we just assert the inequality using LIMIT and
1624 not its anti-range.
1626 If LIMIT_VR is a range, we can only use it to build a new
1627 anti-range if LIMIT_VR is a single-valued range. For
1628 instance, if LIMIT_VR is [0, 1], the predicate
1629 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1630 Rather, it means that for value 0 VAR should be ~[0, 0]
1631 and for value 1, VAR should be ~[1, 1]. We cannot
1632 represent these ranges.
1634 The only situation in which we can build a valid
1635 anti-range is when LIMIT_VR is a single-valued range
1636 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1637 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1638 if (limit_vr
1639 && limit_vr->type == VR_RANGE
1640 && compare_values (limit_vr->min, limit_vr->max) == 0)
1642 min = limit_vr->min;
1643 max = limit_vr->max;
1645 else
1647 /* In any other case, we cannot use LIMIT's range to build a
1648 valid anti-range. */
1649 min = max = limit;
1652 /* If MIN and MAX cover the whole range for their type, then
1653 just use the original LIMIT. */
1654 if (INTEGRAL_TYPE_P (type)
1655 && vrp_val_is_min (min)
1656 && vrp_val_is_max (max))
1657 min = max = limit;
1659 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1660 min, max, vr_p->equiv);
1662 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1664 min = TYPE_MIN_VALUE (type);
1666 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1667 max = limit;
1668 else
1670 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1671 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1672 LT_EXPR. */
1673 max = limit_vr->max;
1676 /* If the maximum value forces us to be out of bounds, simply punt.
1677 It would be pointless to try and do anything more since this
1678 all should be optimized away above us. */
1679 if ((cond_code == LT_EXPR
1680 && compare_values (max, min) == 0)
1681 || is_overflow_infinity (max))
1682 set_value_range_to_varying (vr_p);
1683 else
1685 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1686 if (cond_code == LT_EXPR)
1688 if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1689 && !TYPE_UNSIGNED (TREE_TYPE (max)))
1690 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1691 build_int_cst (TREE_TYPE (max), -1));
1692 else
1693 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1694 build_int_cst (TREE_TYPE (max), 1));
1695 if (EXPR_P (max))
1696 TREE_NO_WARNING (max) = 1;
1699 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1702 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1704 max = TYPE_MAX_VALUE (type);
1706 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1707 min = limit;
1708 else
1710 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1711 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1712 GT_EXPR. */
1713 min = limit_vr->min;
1716 /* If the minimum value forces us to be out of bounds, simply punt.
1717 It would be pointless to try and do anything more since this
1718 all should be optimized away above us. */
1719 if ((cond_code == GT_EXPR
1720 && compare_values (min, max) == 0)
1721 || is_overflow_infinity (min))
1722 set_value_range_to_varying (vr_p);
1723 else
1725 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1726 if (cond_code == GT_EXPR)
1728 if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1729 && !TYPE_UNSIGNED (TREE_TYPE (min)))
1730 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1731 build_int_cst (TREE_TYPE (min), -1));
1732 else
1733 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1734 build_int_cst (TREE_TYPE (min), 1));
1735 if (EXPR_P (min))
1736 TREE_NO_WARNING (min) = 1;
1739 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1742 else
1743 gcc_unreachable ();
1745 /* Finally intersect the new range with what we already know about var. */
1746 vrp_intersect_ranges (vr_p, get_value_range (var));
1749 /* Extract value range information from an ASSERT_EXPR EXPR and store
1750 it in *VR_P. */
1752 static void
1753 extract_range_from_assert (value_range *vr_p, tree expr)
1755 tree var = ASSERT_EXPR_VAR (expr);
1756 tree cond = ASSERT_EXPR_COND (expr);
1757 tree limit, op;
1758 enum tree_code cond_code;
1759 gcc_assert (COMPARISON_CLASS_P (cond));
1761 /* Find VAR in the ASSERT_EXPR conditional. */
1762 if (var == TREE_OPERAND (cond, 0)
1763 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1764 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1766 /* If the predicate is of the form VAR COMP LIMIT, then we just
1767 take LIMIT from the RHS and use the same comparison code. */
1768 cond_code = TREE_CODE (cond);
1769 limit = TREE_OPERAND (cond, 1);
1770 op = TREE_OPERAND (cond, 0);
1772 else
1774 /* If the predicate is of the form LIMIT COMP VAR, then we need
1775 to flip around the comparison code to create the proper range
1776 for VAR. */
1777 cond_code = swap_tree_comparison (TREE_CODE (cond));
1778 limit = TREE_OPERAND (cond, 0);
1779 op = TREE_OPERAND (cond, 1);
1781 extract_range_for_var_from_comparison_expr (var, cond_code, op,
1782 limit, vr_p);
1785 /* Extract range information from SSA name VAR and store it in VR. If
1786 VAR has an interesting range, use it. Otherwise, create the
1787 range [VAR, VAR] and return it. This is useful in situations where
1788 we may have conditionals testing values of VARYING names. For
1789 instance,
1791 x_3 = y_5;
1792 if (x_3 > y_5)
1795 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1796 always false. */
1798 static void
1799 extract_range_from_ssa_name (value_range *vr, tree var)
1801 value_range *var_vr = get_value_range (var);
1803 if (var_vr->type != VR_VARYING)
1804 copy_value_range (vr, var_vr);
1805 else
1806 set_value_range (vr, VR_RANGE, var, var, NULL);
1808 add_equivalence (&vr->equiv, var);
1812 /* Wrapper around int_const_binop. If the operation overflows and we
1813 are not using wrapping arithmetic, then adjust the result to be
1814 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1815 NULL_TREE if we need to use an overflow infinity representation but
1816 the type does not support it. */
1818 static tree
1819 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1821 tree res;
1823 res = int_const_binop (code, val1, val2);
1825 /* If we are using unsigned arithmetic, operate symbolically
1826 on -INF and +INF as int_const_binop only handles signed overflow. */
1827 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1829 int checkz = compare_values (res, val1);
1830 bool overflow = false;
1832 /* Ensure that res = val1 [+*] val2 >= val1
1833 or that res = val1 - val2 <= val1. */
1834 if ((code == PLUS_EXPR
1835 && !(checkz == 1 || checkz == 0))
1836 || (code == MINUS_EXPR
1837 && !(checkz == 0 || checkz == -1)))
1839 overflow = true;
1841 /* Checking for multiplication overflow is done by dividing the
1842 output of the multiplication by the first input of the
1843 multiplication. If the result of that division operation is
1844 not equal to the second input of the multiplication, then the
1845 multiplication overflowed. */
1846 else if (code == MULT_EXPR && !integer_zerop (val1))
1848 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1849 res,
1850 val1);
1851 int check = compare_values (tmp, val2);
1853 if (check != 0)
1854 overflow = true;
1857 if (overflow)
1859 res = copy_node (res);
1860 TREE_OVERFLOW (res) = 1;
1864 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1865 /* If the singed operation wraps then int_const_binop has done
1866 everything we want. */
1868 /* Signed division of -1/0 overflows and by the time it gets here
1869 returns NULL_TREE. */
1870 else if (!res)
1871 return NULL_TREE;
1872 else if ((TREE_OVERFLOW (res)
1873 && !TREE_OVERFLOW (val1)
1874 && !TREE_OVERFLOW (val2))
1875 || is_overflow_infinity (val1)
1876 || is_overflow_infinity (val2))
1878 /* If the operation overflowed but neither VAL1 nor VAL2 are
1879 overflown, return -INF or +INF depending on the operation
1880 and the combination of signs of the operands. */
1881 int sgn1 = tree_int_cst_sgn (val1);
1882 int sgn2 = tree_int_cst_sgn (val2);
1884 if (needs_overflow_infinity (TREE_TYPE (res))
1885 && !supports_overflow_infinity (TREE_TYPE (res)))
1886 return NULL_TREE;
1888 /* We have to punt on adding infinities of different signs,
1889 since we can't tell what the sign of the result should be.
1890 Likewise for subtracting infinities of the same sign. */
1891 if (((code == PLUS_EXPR && sgn1 != sgn2)
1892 || (code == MINUS_EXPR && sgn1 == sgn2))
1893 && is_overflow_infinity (val1)
1894 && is_overflow_infinity (val2))
1895 return NULL_TREE;
1897 /* Don't try to handle division or shifting of infinities. */
1898 if ((code == TRUNC_DIV_EXPR
1899 || code == FLOOR_DIV_EXPR
1900 || code == CEIL_DIV_EXPR
1901 || code == EXACT_DIV_EXPR
1902 || code == ROUND_DIV_EXPR
1903 || code == RSHIFT_EXPR)
1904 && (is_overflow_infinity (val1)
1905 || is_overflow_infinity (val2)))
1906 return NULL_TREE;
1908 /* Notice that we only need to handle the restricted set of
1909 operations handled by extract_range_from_binary_expr.
1910 Among them, only multiplication, addition and subtraction
1911 can yield overflow without overflown operands because we
1912 are working with integral types only... except in the
1913 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1914 for division too. */
1916 /* For multiplication, the sign of the overflow is given
1917 by the comparison of the signs of the operands. */
1918 if ((code == MULT_EXPR && sgn1 == sgn2)
1919 /* For addition, the operands must be of the same sign
1920 to yield an overflow. Its sign is therefore that
1921 of one of the operands, for example the first. For
1922 infinite operands X + -INF is negative, not positive. */
1923 || (code == PLUS_EXPR
1924 && (sgn1 >= 0
1925 ? !is_negative_overflow_infinity (val2)
1926 : is_positive_overflow_infinity (val2)))
1927 /* For subtraction, non-infinite operands must be of
1928 different signs to yield an overflow. Its sign is
1929 therefore that of the first operand or the opposite of
1930 that of the second operand. A first operand of 0 counts
1931 as positive here, for the corner case 0 - (-INF), which
1932 overflows, but must yield +INF. For infinite operands 0
1933 - INF is negative, not positive. */
1934 || (code == MINUS_EXPR
1935 && (sgn1 >= 0
1936 ? !is_positive_overflow_infinity (val2)
1937 : is_negative_overflow_infinity (val2)))
1938 /* We only get in here with positive shift count, so the
1939 overflow direction is the same as the sign of val1.
1940 Actually rshift does not overflow at all, but we only
1941 handle the case of shifting overflowed -INF and +INF. */
1942 || (code == RSHIFT_EXPR
1943 && sgn1 >= 0)
1944 /* For division, the only case is -INF / -1 = +INF. */
1945 || code == TRUNC_DIV_EXPR
1946 || code == FLOOR_DIV_EXPR
1947 || code == CEIL_DIV_EXPR
1948 || code == EXACT_DIV_EXPR
1949 || code == ROUND_DIV_EXPR)
1950 return (needs_overflow_infinity (TREE_TYPE (res))
1951 ? positive_overflow_infinity (TREE_TYPE (res))
1952 : TYPE_MAX_VALUE (TREE_TYPE (res)));
1953 else
1954 return (needs_overflow_infinity (TREE_TYPE (res))
1955 ? negative_overflow_infinity (TREE_TYPE (res))
1956 : TYPE_MIN_VALUE (TREE_TYPE (res)));
1959 return res;
1963 /* For range VR compute two wide_int bitmasks. In *MAY_BE_NONZERO
1964 bitmask if some bit is unset, it means for all numbers in the range
1965 the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO
1966 bitmask if some bit is set, it means for all numbers in the range
1967 the bit is 1, otherwise it might be 0 or 1. */
1969 static bool
1970 zero_nonzero_bits_from_vr (const tree expr_type,
1971 value_range *vr,
1972 wide_int *may_be_nonzero,
1973 wide_int *must_be_nonzero)
1975 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
1976 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
1977 if (!range_int_cst_p (vr)
1978 || is_overflow_infinity (vr->min)
1979 || is_overflow_infinity (vr->max))
1980 return false;
1982 if (range_int_cst_singleton_p (vr))
1984 *may_be_nonzero = vr->min;
1985 *must_be_nonzero = *may_be_nonzero;
1987 else if (tree_int_cst_sgn (vr->min) >= 0
1988 || tree_int_cst_sgn (vr->max) < 0)
1990 wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
1991 *may_be_nonzero = wi::bit_or (vr->min, vr->max);
1992 *must_be_nonzero = wi::bit_and (vr->min, vr->max);
1993 if (xor_mask != 0)
1995 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
1996 may_be_nonzero->get_precision ());
1997 *may_be_nonzero = *may_be_nonzero | mask;
1998 *must_be_nonzero = must_be_nonzero->and_not (mask);
2002 return true;
2005 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
2006 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
2007 false otherwise. If *AR can be represented with a single range
2008 *VR1 will be VR_UNDEFINED. */
2010 static bool
2011 ranges_from_anti_range (value_range *ar,
2012 value_range *vr0, value_range *vr1)
2014 tree type = TREE_TYPE (ar->min);
2016 vr0->type = VR_UNDEFINED;
2017 vr1->type = VR_UNDEFINED;
2019 if (ar->type != VR_ANTI_RANGE
2020 || TREE_CODE (ar->min) != INTEGER_CST
2021 || TREE_CODE (ar->max) != INTEGER_CST
2022 || !vrp_val_min (type)
2023 || !vrp_val_max (type))
2024 return false;
2026 if (!vrp_val_is_min (ar->min))
2028 vr0->type = VR_RANGE;
2029 vr0->min = vrp_val_min (type);
2030 vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1));
2032 if (!vrp_val_is_max (ar->max))
2034 vr1->type = VR_RANGE;
2035 vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1));
2036 vr1->max = vrp_val_max (type);
2038 if (vr0->type == VR_UNDEFINED)
2040 *vr0 = *vr1;
2041 vr1->type = VR_UNDEFINED;
2044 return vr0->type != VR_UNDEFINED;
2047 /* Helper to extract a value-range *VR for a multiplicative operation
2048 *VR0 CODE *VR1. */
2050 static void
2051 extract_range_from_multiplicative_op_1 (value_range *vr,
2052 enum tree_code code,
2053 value_range *vr0, value_range *vr1)
2055 enum value_range_type type;
2056 tree val[4];
2057 size_t i;
2058 tree min, max;
2059 bool sop;
2060 int cmp;
2062 /* Multiplications, divisions and shifts are a bit tricky to handle,
2063 depending on the mix of signs we have in the two ranges, we
2064 need to operate on different values to get the minimum and
2065 maximum values for the new range. One approach is to figure
2066 out all the variations of range combinations and do the
2067 operations.
2069 However, this involves several calls to compare_values and it
2070 is pretty convoluted. It's simpler to do the 4 operations
2071 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP
2072 MAX1) and then figure the smallest and largest values to form
2073 the new range. */
2074 gcc_assert (code == MULT_EXPR
2075 || code == TRUNC_DIV_EXPR
2076 || code == FLOOR_DIV_EXPR
2077 || code == CEIL_DIV_EXPR
2078 || code == EXACT_DIV_EXPR
2079 || code == ROUND_DIV_EXPR
2080 || code == RSHIFT_EXPR
2081 || code == LSHIFT_EXPR);
2082 gcc_assert ((vr0->type == VR_RANGE
2083 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2084 && vr0->type == vr1->type);
2086 type = vr0->type;
2088 /* Compute the 4 cross operations. */
2089 sop = false;
2090 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2091 if (val[0] == NULL_TREE)
2092 sop = true;
2094 if (vr1->max == vr1->min)
2095 val[1] = NULL_TREE;
2096 else
2098 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2099 if (val[1] == NULL_TREE)
2100 sop = true;
2103 if (vr0->max == vr0->min)
2104 val[2] = NULL_TREE;
2105 else
2107 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2108 if (val[2] == NULL_TREE)
2109 sop = true;
2112 if (vr0->min == vr0->max || vr1->min == vr1->max)
2113 val[3] = NULL_TREE;
2114 else
2116 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2117 if (val[3] == NULL_TREE)
2118 sop = true;
2121 if (sop)
2123 set_value_range_to_varying (vr);
2124 return;
2127 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2128 of VAL[i]. */
2129 min = val[0];
2130 max = val[0];
2131 for (i = 1; i < 4; i++)
2133 if (!is_gimple_min_invariant (min)
2134 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2135 || !is_gimple_min_invariant (max)
2136 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2137 break;
2139 if (val[i])
2141 if (!is_gimple_min_invariant (val[i])
2142 || (TREE_OVERFLOW (val[i])
2143 && !is_overflow_infinity (val[i])))
2145 /* If we found an overflowed value, set MIN and MAX
2146 to it so that we set the resulting range to
2147 VARYING. */
2148 min = max = val[i];
2149 break;
2152 if (compare_values (val[i], min) == -1)
2153 min = val[i];
2155 if (compare_values (val[i], max) == 1)
2156 max = val[i];
2160 /* If either MIN or MAX overflowed, then set the resulting range to
2161 VARYING. But we do accept an overflow infinity
2162 representation. */
2163 if (min == NULL_TREE
2164 || !is_gimple_min_invariant (min)
2165 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2166 || max == NULL_TREE
2167 || !is_gimple_min_invariant (max)
2168 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2170 set_value_range_to_varying (vr);
2171 return;
2174 /* We punt if:
2175 1) [-INF, +INF]
2176 2) [-INF, +-INF(OVF)]
2177 3) [+-INF(OVF), +INF]
2178 4) [+-INF(OVF), +-INF(OVF)]
2179 We learn nothing when we have INF and INF(OVF) on both sides.
2180 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2181 overflow. */
2182 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2183 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2185 set_value_range_to_varying (vr);
2186 return;
2189 cmp = compare_values (min, max);
2190 if (cmp == -2 || cmp == 1)
2192 /* If the new range has its limits swapped around (MIN > MAX),
2193 then the operation caused one of them to wrap around, mark
2194 the new range VARYING. */
2195 set_value_range_to_varying (vr);
2197 else
2198 set_value_range (vr, type, min, max, NULL);
2201 /* Extract range information from a binary operation CODE based on
2202 the ranges of each of its operands *VR0 and *VR1 with resulting
2203 type EXPR_TYPE. The resulting range is stored in *VR. */
2205 static void
2206 extract_range_from_binary_expr_1 (value_range *vr,
2207 enum tree_code code, tree expr_type,
2208 value_range *vr0_, value_range *vr1_)
2210 value_range vr0 = *vr0_, vr1 = *vr1_;
2211 value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
2212 enum value_range_type type;
2213 tree min = NULL_TREE, max = NULL_TREE;
2214 int cmp;
2216 if (!INTEGRAL_TYPE_P (expr_type)
2217 && !POINTER_TYPE_P (expr_type))
2219 set_value_range_to_varying (vr);
2220 return;
2223 /* Not all binary expressions can be applied to ranges in a
2224 meaningful way. Handle only arithmetic operations. */
2225 if (code != PLUS_EXPR
2226 && code != MINUS_EXPR
2227 && code != POINTER_PLUS_EXPR
2228 && code != MULT_EXPR
2229 && code != TRUNC_DIV_EXPR
2230 && code != FLOOR_DIV_EXPR
2231 && code != CEIL_DIV_EXPR
2232 && code != EXACT_DIV_EXPR
2233 && code != ROUND_DIV_EXPR
2234 && code != TRUNC_MOD_EXPR
2235 && code != RSHIFT_EXPR
2236 && code != LSHIFT_EXPR
2237 && code != MIN_EXPR
2238 && code != MAX_EXPR
2239 && code != BIT_AND_EXPR
2240 && code != BIT_IOR_EXPR
2241 && code != BIT_XOR_EXPR)
2243 set_value_range_to_varying (vr);
2244 return;
2247 /* If both ranges are UNDEFINED, so is the result. */
2248 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
2250 set_value_range_to_undefined (vr);
2251 return;
2253 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2254 code. At some point we may want to special-case operations that
2255 have UNDEFINED result for all or some value-ranges of the not UNDEFINED
2256 operand. */
2257 else if (vr0.type == VR_UNDEFINED)
2258 set_value_range_to_varying (&vr0);
2259 else if (vr1.type == VR_UNDEFINED)
2260 set_value_range_to_varying (&vr1);
2262 /* We get imprecise results from ranges_from_anti_range when
2263 code is EXACT_DIV_EXPR. We could mask out bits in the resulting
2264 range, but then we also need to hack up vrp_meet. It's just
2265 easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR. */
2266 if (code == EXACT_DIV_EXPR
2267 && vr0.type == VR_ANTI_RANGE
2268 && vr0.min == vr0.max
2269 && integer_zerop (vr0.min))
2271 set_value_range_to_nonnull (vr, expr_type);
2272 return;
2275 /* Now canonicalize anti-ranges to ranges when they are not symbolic
2276 and express ~[] op X as ([]' op X) U ([]'' op X). */
2277 if (vr0.type == VR_ANTI_RANGE
2278 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2280 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
2281 if (vrtem1.type != VR_UNDEFINED)
2283 value_range vrres = VR_INITIALIZER;
2284 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2285 &vrtem1, vr1_);
2286 vrp_meet (vr, &vrres);
2288 return;
2290 /* Likewise for X op ~[]. */
2291 if (vr1.type == VR_ANTI_RANGE
2292 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
2294 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
2295 if (vrtem1.type != VR_UNDEFINED)
2297 value_range vrres = VR_INITIALIZER;
2298 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2299 vr0_, &vrtem1);
2300 vrp_meet (vr, &vrres);
2302 return;
2305 /* The type of the resulting value range defaults to VR0.TYPE. */
2306 type = vr0.type;
2308 /* Refuse to operate on VARYING ranges, ranges of different kinds
2309 and symbolic ranges. As an exception, we allow BIT_{AND,IOR}
2310 because we may be able to derive a useful range even if one of
2311 the operands is VR_VARYING or symbolic range. Similarly for
2312 divisions, MIN/MAX and PLUS/MINUS.
2314 TODO, we may be able to derive anti-ranges in some cases. */
2315 if (code != BIT_AND_EXPR
2316 && code != BIT_IOR_EXPR
2317 && code != TRUNC_DIV_EXPR
2318 && code != FLOOR_DIV_EXPR
2319 && code != CEIL_DIV_EXPR
2320 && code != EXACT_DIV_EXPR
2321 && code != ROUND_DIV_EXPR
2322 && code != TRUNC_MOD_EXPR
2323 && code != MIN_EXPR
2324 && code != MAX_EXPR
2325 && code != PLUS_EXPR
2326 && code != MINUS_EXPR
2327 && code != RSHIFT_EXPR
2328 && (vr0.type == VR_VARYING
2329 || vr1.type == VR_VARYING
2330 || vr0.type != vr1.type
2331 || symbolic_range_p (&vr0)
2332 || symbolic_range_p (&vr1)))
2334 set_value_range_to_varying (vr);
2335 return;
2338 /* Now evaluate the expression to determine the new range. */
2339 if (POINTER_TYPE_P (expr_type))
2341 if (code == MIN_EXPR || code == MAX_EXPR)
2343 /* For MIN/MAX expressions with pointers, we only care about
2344 nullness, if both are non null, then the result is nonnull.
2345 If both are null, then the result is null. Otherwise they
2346 are varying. */
2347 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2348 set_value_range_to_nonnull (vr, expr_type);
2349 else if (range_is_null (&vr0) && range_is_null (&vr1))
2350 set_value_range_to_null (vr, expr_type);
2351 else
2352 set_value_range_to_varying (vr);
2354 else if (code == POINTER_PLUS_EXPR)
2356 /* For pointer types, we are really only interested in asserting
2357 whether the expression evaluates to non-NULL. */
2358 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2359 set_value_range_to_nonnull (vr, expr_type);
2360 else if (range_is_null (&vr0) && range_is_null (&vr1))
2361 set_value_range_to_null (vr, expr_type);
2362 else
2363 set_value_range_to_varying (vr);
2365 else if (code == BIT_AND_EXPR)
2367 /* For pointer types, we are really only interested in asserting
2368 whether the expression evaluates to non-NULL. */
2369 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2370 set_value_range_to_nonnull (vr, expr_type);
2371 else if (range_is_null (&vr0) || range_is_null (&vr1))
2372 set_value_range_to_null (vr, expr_type);
2373 else
2374 set_value_range_to_varying (vr);
2376 else
2377 set_value_range_to_varying (vr);
2379 return;
2382 /* For integer ranges, apply the operation to each end of the
2383 range and see what we end up with. */
2384 if (code == PLUS_EXPR || code == MINUS_EXPR)
2386 const bool minus_p = (code == MINUS_EXPR);
2387 tree min_op0 = vr0.min;
2388 tree min_op1 = minus_p ? vr1.max : vr1.min;
2389 tree max_op0 = vr0.max;
2390 tree max_op1 = minus_p ? vr1.min : vr1.max;
2391 tree sym_min_op0 = NULL_TREE;
2392 tree sym_min_op1 = NULL_TREE;
2393 tree sym_max_op0 = NULL_TREE;
2394 tree sym_max_op1 = NULL_TREE;
2395 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
2397 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
2398 single-symbolic ranges, try to compute the precise resulting range,
2399 but only if we know that this resulting range will also be constant
2400 or single-symbolic. */
2401 if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
2402 && (TREE_CODE (min_op0) == INTEGER_CST
2403 || (sym_min_op0
2404 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
2405 && (TREE_CODE (min_op1) == INTEGER_CST
2406 || (sym_min_op1
2407 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
2408 && (!(sym_min_op0 && sym_min_op1)
2409 || (sym_min_op0 == sym_min_op1
2410 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
2411 && (TREE_CODE (max_op0) == INTEGER_CST
2412 || (sym_max_op0
2413 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
2414 && (TREE_CODE (max_op1) == INTEGER_CST
2415 || (sym_max_op1
2416 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
2417 && (!(sym_max_op0 && sym_max_op1)
2418 || (sym_max_op0 == sym_max_op1
2419 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
2421 const signop sgn = TYPE_SIGN (expr_type);
2422 const unsigned int prec = TYPE_PRECISION (expr_type);
2423 wide_int type_min, type_max, wmin, wmax;
2424 int min_ovf = 0;
2425 int max_ovf = 0;
2427 /* Get the lower and upper bounds of the type. */
2428 if (TYPE_OVERFLOW_WRAPS (expr_type))
2430 type_min = wi::min_value (prec, sgn);
2431 type_max = wi::max_value (prec, sgn);
2433 else
2435 type_min = vrp_val_min (expr_type);
2436 type_max = vrp_val_max (expr_type);
2439 /* Combine the lower bounds, if any. */
2440 if (min_op0 && min_op1)
2442 if (minus_p)
2444 wmin = wi::sub (min_op0, min_op1);
2446 /* Check for overflow. */
2447 if (wi::cmp (0, min_op1, sgn)
2448 != wi::cmp (wmin, min_op0, sgn))
2449 min_ovf = wi::cmp (min_op0, min_op1, sgn);
2451 else
2453 wmin = wi::add (min_op0, min_op1);
2455 /* Check for overflow. */
2456 if (wi::cmp (min_op1, 0, sgn)
2457 != wi::cmp (wmin, min_op0, sgn))
2458 min_ovf = wi::cmp (min_op0, wmin, sgn);
2461 else if (min_op0)
2462 wmin = min_op0;
2463 else if (min_op1)
2464 wmin = minus_p ? wi::neg (min_op1) : min_op1;
2465 else
2466 wmin = wi::shwi (0, prec);
2468 /* Combine the upper bounds, if any. */
2469 if (max_op0 && max_op1)
2471 if (minus_p)
2473 wmax = wi::sub (max_op0, max_op1);
2475 /* Check for overflow. */
2476 if (wi::cmp (0, max_op1, sgn)
2477 != wi::cmp (wmax, max_op0, sgn))
2478 max_ovf = wi::cmp (max_op0, max_op1, sgn);
2480 else
2482 wmax = wi::add (max_op0, max_op1);
2484 if (wi::cmp (max_op1, 0, sgn)
2485 != wi::cmp (wmax, max_op0, sgn))
2486 max_ovf = wi::cmp (max_op0, wmax, sgn);
2489 else if (max_op0)
2490 wmax = max_op0;
2491 else if (max_op1)
2492 wmax = minus_p ? wi::neg (max_op1) : max_op1;
2493 else
2494 wmax = wi::shwi (0, prec);
2496 /* Check for type overflow. */
2497 if (min_ovf == 0)
2499 if (wi::cmp (wmin, type_min, sgn) == -1)
2500 min_ovf = -1;
2501 else if (wi::cmp (wmin, type_max, sgn) == 1)
2502 min_ovf = 1;
2504 if (max_ovf == 0)
2506 if (wi::cmp (wmax, type_min, sgn) == -1)
2507 max_ovf = -1;
2508 else if (wi::cmp (wmax, type_max, sgn) == 1)
2509 max_ovf = 1;
2512 /* If we have overflow for the constant part and the resulting
2513 range will be symbolic, drop to VR_VARYING. */
2514 if ((min_ovf && sym_min_op0 != sym_min_op1)
2515 || (max_ovf && sym_max_op0 != sym_max_op1))
2517 set_value_range_to_varying (vr);
2518 return;
2521 if (TYPE_OVERFLOW_WRAPS (expr_type))
2523 /* If overflow wraps, truncate the values and adjust the
2524 range kind and bounds appropriately. */
2525 wide_int tmin = wide_int::from (wmin, prec, sgn);
2526 wide_int tmax = wide_int::from (wmax, prec, sgn);
2527 if (min_ovf == max_ovf)
2529 /* No overflow or both overflow or underflow. The
2530 range kind stays VR_RANGE. */
2531 min = wide_int_to_tree (expr_type, tmin);
2532 max = wide_int_to_tree (expr_type, tmax);
2534 else if ((min_ovf == -1 && max_ovf == 0)
2535 || (max_ovf == 1 && min_ovf == 0))
2537 /* Min underflow or max overflow. The range kind
2538 changes to VR_ANTI_RANGE. */
2539 bool covers = false;
2540 wide_int tem = tmin;
2541 type = VR_ANTI_RANGE;
2542 tmin = tmax + 1;
2543 if (wi::cmp (tmin, tmax, sgn) < 0)
2544 covers = true;
2545 tmax = tem - 1;
2546 if (wi::cmp (tmax, tem, sgn) > 0)
2547 covers = true;
2548 /* If the anti-range would cover nothing, drop to varying.
2549 Likewise if the anti-range bounds are outside of the
2550 types values. */
2551 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
2553 set_value_range_to_varying (vr);
2554 return;
2556 min = wide_int_to_tree (expr_type, tmin);
2557 max = wide_int_to_tree (expr_type, tmax);
2559 else
2561 /* Other underflow and/or overflow, drop to VR_VARYING. */
2562 set_value_range_to_varying (vr);
2563 return;
2566 else
2568 /* If overflow does not wrap, saturate to the types min/max
2569 value. */
2570 if (min_ovf == -1)
2572 if (needs_overflow_infinity (expr_type)
2573 && supports_overflow_infinity (expr_type))
2574 min = negative_overflow_infinity (expr_type);
2575 else
2576 min = wide_int_to_tree (expr_type, type_min);
2578 else if (min_ovf == 1)
2580 if (needs_overflow_infinity (expr_type)
2581 && supports_overflow_infinity (expr_type))
2582 min = positive_overflow_infinity (expr_type);
2583 else
2584 min = wide_int_to_tree (expr_type, type_max);
2586 else
2587 min = wide_int_to_tree (expr_type, wmin);
2589 if (max_ovf == -1)
2591 if (needs_overflow_infinity (expr_type)
2592 && supports_overflow_infinity (expr_type))
2593 max = negative_overflow_infinity (expr_type);
2594 else
2595 max = wide_int_to_tree (expr_type, type_min);
2597 else if (max_ovf == 1)
2599 if (needs_overflow_infinity (expr_type)
2600 && supports_overflow_infinity (expr_type))
2601 max = positive_overflow_infinity (expr_type);
2602 else
2603 max = wide_int_to_tree (expr_type, type_max);
2605 else
2606 max = wide_int_to_tree (expr_type, wmax);
2609 if (needs_overflow_infinity (expr_type)
2610 && supports_overflow_infinity (expr_type))
2612 if ((min_op0 && is_negative_overflow_infinity (min_op0))
2613 || (min_op1
2614 && (minus_p
2615 ? is_positive_overflow_infinity (min_op1)
2616 : is_negative_overflow_infinity (min_op1))))
2617 min = negative_overflow_infinity (expr_type);
2618 if ((max_op0 && is_positive_overflow_infinity (max_op0))
2619 || (max_op1
2620 && (minus_p
2621 ? is_negative_overflow_infinity (max_op1)
2622 : is_positive_overflow_infinity (max_op1))))
2623 max = positive_overflow_infinity (expr_type);
2626 /* If the result lower bound is constant, we're done;
2627 otherwise, build the symbolic lower bound. */
2628 if (sym_min_op0 == sym_min_op1)
2630 else if (sym_min_op0)
2631 min = build_symbolic_expr (expr_type, sym_min_op0,
2632 neg_min_op0, min);
2633 else if (sym_min_op1)
2635 /* We may not negate if that might introduce
2636 undefined overflow. */
2637 if (! minus_p
2638 || neg_min_op1
2639 || TYPE_OVERFLOW_WRAPS (expr_type))
2640 min = build_symbolic_expr (expr_type, sym_min_op1,
2641 neg_min_op1 ^ minus_p, min);
2642 else
2643 min = NULL_TREE;
2646 /* Likewise for the upper bound. */
2647 if (sym_max_op0 == sym_max_op1)
2649 else if (sym_max_op0)
2650 max = build_symbolic_expr (expr_type, sym_max_op0,
2651 neg_max_op0, max);
2652 else if (sym_max_op1)
2654 /* We may not negate if that might introduce
2655 undefined overflow. */
2656 if (! minus_p
2657 || neg_max_op1
2658 || TYPE_OVERFLOW_WRAPS (expr_type))
2659 max = build_symbolic_expr (expr_type, sym_max_op1,
2660 neg_max_op1 ^ minus_p, max);
2661 else
2662 max = NULL_TREE;
2665 else
2667 /* For other cases, for example if we have a PLUS_EXPR with two
2668 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
2669 to compute a precise range for such a case.
2670 ??? General even mixed range kind operations can be expressed
2671 by for example transforming ~[3, 5] + [1, 2] to range-only
2672 operations and a union primitive:
2673 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
2674 [-INF+1, 4] U [6, +INF(OVF)]
2675 though usually the union is not exactly representable with
2676 a single range or anti-range as the above is
2677 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
2678 but one could use a scheme similar to equivalences for this. */
2679 set_value_range_to_varying (vr);
2680 return;
2683 else if (code == MIN_EXPR
2684 || code == MAX_EXPR)
2686 if (vr0.type == VR_RANGE
2687 && !symbolic_range_p (&vr0))
2689 type = VR_RANGE;
2690 if (vr1.type == VR_RANGE
2691 && !symbolic_range_p (&vr1))
2693 /* For operations that make the resulting range directly
2694 proportional to the original ranges, apply the operation to
2695 the same end of each range. */
2696 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2697 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2699 else if (code == MIN_EXPR)
2701 min = vrp_val_min (expr_type);
2702 max = vr0.max;
2704 else if (code == MAX_EXPR)
2706 min = vr0.min;
2707 max = vrp_val_max (expr_type);
2710 else if (vr1.type == VR_RANGE
2711 && !symbolic_range_p (&vr1))
2713 type = VR_RANGE;
2714 if (code == MIN_EXPR)
2716 min = vrp_val_min (expr_type);
2717 max = vr1.max;
2719 else if (code == MAX_EXPR)
2721 min = vr1.min;
2722 max = vrp_val_max (expr_type);
2725 else
2727 set_value_range_to_varying (vr);
2728 return;
2731 else if (code == MULT_EXPR)
2733 /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
2734 drop to varying. This test requires 2*prec bits if both
2735 operands are signed and 2*prec + 2 bits if either is not. */
2737 signop sign = TYPE_SIGN (expr_type);
2738 unsigned int prec = TYPE_PRECISION (expr_type);
2740 if (range_int_cst_p (&vr0)
2741 && range_int_cst_p (&vr1)
2742 && TYPE_OVERFLOW_WRAPS (expr_type))
2744 typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
2745 typedef generic_wide_int
2746 <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
2747 vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
2748 vrp_int size = sizem1 + 1;
2750 /* Extend the values using the sign of the result to PREC2.
2751 From here on out, everthing is just signed math no matter
2752 what the input types were. */
2753 vrp_int min0 = vrp_int_cst (vr0.min);
2754 vrp_int max0 = vrp_int_cst (vr0.max);
2755 vrp_int min1 = vrp_int_cst (vr1.min);
2756 vrp_int max1 = vrp_int_cst (vr1.max);
2757 /* Canonicalize the intervals. */
2758 if (sign == UNSIGNED)
2760 if (wi::ltu_p (size, min0 + max0))
2762 min0 -= size;
2763 max0 -= size;
2766 if (wi::ltu_p (size, min1 + max1))
2768 min1 -= size;
2769 max1 -= size;
2773 vrp_int prod0 = min0 * min1;
2774 vrp_int prod1 = min0 * max1;
2775 vrp_int prod2 = max0 * min1;
2776 vrp_int prod3 = max0 * max1;
2778 /* Sort the 4 products so that min is in prod0 and max is in
2779 prod3. */
2780 /* min0min1 > max0max1 */
2781 if (prod0 > prod3)
2782 std::swap (prod0, prod3);
2784 /* min0max1 > max0min1 */
2785 if (prod1 > prod2)
2786 std::swap (prod1, prod2);
2788 if (prod0 > prod1)
2789 std::swap (prod0, prod1);
2791 if (prod2 > prod3)
2792 std::swap (prod2, prod3);
2794 /* diff = max - min. */
2795 prod2 = prod3 - prod0;
2796 if (wi::geu_p (prod2, sizem1))
2798 /* the range covers all values. */
2799 set_value_range_to_varying (vr);
2800 return;
2803 /* The following should handle the wrapping and selecting
2804 VR_ANTI_RANGE for us. */
2805 min = wide_int_to_tree (expr_type, prod0);
2806 max = wide_int_to_tree (expr_type, prod3);
2807 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
2808 return;
2811 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2812 drop to VR_VARYING. It would take more effort to compute a
2813 precise range for such a case. For example, if we have
2814 op0 == 65536 and op1 == 65536 with their ranges both being
2815 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2816 we cannot claim that the product is in ~[0,0]. Note that we
2817 are guaranteed to have vr0.type == vr1.type at this
2818 point. */
2819 if (vr0.type == VR_ANTI_RANGE
2820 && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2822 set_value_range_to_varying (vr);
2823 return;
2826 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2827 return;
2829 else if (code == RSHIFT_EXPR
2830 || code == LSHIFT_EXPR)
2832 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2833 then drop to VR_VARYING. Outside of this range we get undefined
2834 behavior from the shift operation. We cannot even trust
2835 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2836 shifts, and the operation at the tree level may be widened. */
2837 if (range_int_cst_p (&vr1)
2838 && compare_tree_int (vr1.min, 0) >= 0
2839 && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
2841 if (code == RSHIFT_EXPR)
2843 /* Even if vr0 is VARYING or otherwise not usable, we can derive
2844 useful ranges just from the shift count. E.g.
2845 x >> 63 for signed 64-bit x is always [-1, 0]. */
2846 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2848 vr0.type = type = VR_RANGE;
2849 vr0.min = vrp_val_min (expr_type);
2850 vr0.max = vrp_val_max (expr_type);
2852 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2853 return;
2855 /* We can map lshifts by constants to MULT_EXPR handling. */
2856 else if (code == LSHIFT_EXPR
2857 && range_int_cst_singleton_p (&vr1))
2859 bool saved_flag_wrapv;
2860 value_range vr1p = VR_INITIALIZER;
2861 vr1p.type = VR_RANGE;
2862 vr1p.min = (wide_int_to_tree
2863 (expr_type,
2864 wi::set_bit_in_zero (tree_to_shwi (vr1.min),
2865 TYPE_PRECISION (expr_type))));
2866 vr1p.max = vr1p.min;
2867 /* We have to use a wrapping multiply though as signed overflow
2868 on lshifts is implementation defined in C89. */
2869 saved_flag_wrapv = flag_wrapv;
2870 flag_wrapv = 1;
2871 extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
2872 &vr0, &vr1p);
2873 flag_wrapv = saved_flag_wrapv;
2874 return;
2876 else if (code == LSHIFT_EXPR
2877 && range_int_cst_p (&vr0))
2879 int prec = TYPE_PRECISION (expr_type);
2880 int overflow_pos = prec;
2881 int bound_shift;
2882 wide_int low_bound, high_bound;
2883 bool uns = TYPE_UNSIGNED (expr_type);
2884 bool in_bounds = false;
2886 if (!uns)
2887 overflow_pos -= 1;
2889 bound_shift = overflow_pos - tree_to_shwi (vr1.max);
2890 /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2891 overflow. However, for that to happen, vr1.max needs to be
2892 zero, which means vr1 is a singleton range of zero, which
2893 means it should be handled by the previous LSHIFT_EXPR
2894 if-clause. */
2895 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2896 wide_int complement = ~(bound - 1);
2898 if (uns)
2900 low_bound = bound;
2901 high_bound = complement;
2902 if (wi::ltu_p (vr0.max, low_bound))
2904 /* [5, 6] << [1, 2] == [10, 24]. */
2905 /* We're shifting out only zeroes, the value increases
2906 monotonically. */
2907 in_bounds = true;
2909 else if (wi::ltu_p (high_bound, vr0.min))
2911 /* [0xffffff00, 0xffffffff] << [1, 2]
2912 == [0xfffffc00, 0xfffffffe]. */
2913 /* We're shifting out only ones, the value decreases
2914 monotonically. */
2915 in_bounds = true;
2918 else
2920 /* [-1, 1] << [1, 2] == [-4, 4]. */
2921 low_bound = complement;
2922 high_bound = bound;
2923 if (wi::lts_p (vr0.max, high_bound)
2924 && wi::lts_p (low_bound, vr0.min))
2926 /* For non-negative numbers, we're shifting out only
2927 zeroes, the value increases monotonically.
2928 For negative numbers, we're shifting out only ones, the
2929 value decreases monotomically. */
2930 in_bounds = true;
2934 if (in_bounds)
2936 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2937 return;
2941 set_value_range_to_varying (vr);
2942 return;
2944 else if (code == TRUNC_DIV_EXPR
2945 || code == FLOOR_DIV_EXPR
2946 || code == CEIL_DIV_EXPR
2947 || code == EXACT_DIV_EXPR
2948 || code == ROUND_DIV_EXPR)
2950 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2952 /* For division, if op1 has VR_RANGE but op0 does not, something
2953 can be deduced just from that range. Say [min, max] / [4, max]
2954 gives [min / 4, max / 4] range. */
2955 if (vr1.type == VR_RANGE
2956 && !symbolic_range_p (&vr1)
2957 && range_includes_zero_p (vr1.min, vr1.max) == 0)
2959 vr0.type = type = VR_RANGE;
2960 vr0.min = vrp_val_min (expr_type);
2961 vr0.max = vrp_val_max (expr_type);
2963 else
2965 set_value_range_to_varying (vr);
2966 return;
2970 /* For divisions, if flag_non_call_exceptions is true, we must
2971 not eliminate a division by zero. */
2972 if (cfun->can_throw_non_call_exceptions
2973 && (vr1.type != VR_RANGE
2974 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2976 set_value_range_to_varying (vr);
2977 return;
2980 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2981 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2982 include 0. */
2983 if (vr0.type == VR_RANGE
2984 && (vr1.type != VR_RANGE
2985 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2987 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2988 int cmp;
2990 min = NULL_TREE;
2991 max = NULL_TREE;
2992 if (TYPE_UNSIGNED (expr_type)
2993 || value_range_nonnegative_p (&vr1))
2995 /* For unsigned division or when divisor is known
2996 to be non-negative, the range has to cover
2997 all numbers from 0 to max for positive max
2998 and all numbers from min to 0 for negative min. */
2999 cmp = compare_values (vr0.max, zero);
3000 if (cmp == -1)
3002 /* When vr0.max < 0, vr1.min != 0 and value
3003 ranges for dividend and divisor are available. */
3004 if (vr1.type == VR_RANGE
3005 && !symbolic_range_p (&vr0)
3006 && !symbolic_range_p (&vr1)
3007 && compare_values (vr1.min, zero) != 0)
3008 max = int_const_binop (code, vr0.max, vr1.min);
3009 else
3010 max = zero;
3012 else if (cmp == 0 || cmp == 1)
3013 max = vr0.max;
3014 else
3015 type = VR_VARYING;
3016 cmp = compare_values (vr0.min, zero);
3017 if (cmp == 1)
3019 /* For unsigned division when value ranges for dividend
3020 and divisor are available. */
3021 if (vr1.type == VR_RANGE
3022 && !symbolic_range_p (&vr0)
3023 && !symbolic_range_p (&vr1)
3024 && compare_values (vr1.max, zero) != 0)
3025 min = int_const_binop (code, vr0.min, vr1.max);
3026 else
3027 min = zero;
3029 else if (cmp == 0 || cmp == -1)
3030 min = vr0.min;
3031 else
3032 type = VR_VARYING;
3034 else
3036 /* Otherwise the range is -max .. max or min .. -min
3037 depending on which bound is bigger in absolute value,
3038 as the division can change the sign. */
3039 abs_extent_range (vr, vr0.min, vr0.max);
3040 return;
3042 if (type == VR_VARYING)
3044 set_value_range_to_varying (vr);
3045 return;
3048 else if (!symbolic_range_p (&vr0) && !symbolic_range_p (&vr1))
3050 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
3051 return;
3054 else if (code == TRUNC_MOD_EXPR)
3056 if (range_is_null (&vr1))
3058 set_value_range_to_undefined (vr);
3059 return;
3061 /* ABS (A % B) < ABS (B) and either
3062 0 <= A % B <= A or A <= A % B <= 0. */
3063 type = VR_RANGE;
3064 signop sgn = TYPE_SIGN (expr_type);
3065 unsigned int prec = TYPE_PRECISION (expr_type);
3066 wide_int wmin, wmax, tmp;
3067 wide_int zero = wi::zero (prec);
3068 wide_int one = wi::one (prec);
3069 if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
3071 wmax = wi::sub (vr1.max, one);
3072 if (sgn == SIGNED)
3074 tmp = wi::sub (wi::minus_one (prec), vr1.min);
3075 wmax = wi::smax (wmax, tmp);
3078 else
3080 wmax = wi::max_value (prec, sgn);
3081 /* X % INT_MIN may be INT_MAX. */
3082 if (sgn == UNSIGNED)
3083 wmax = wmax - one;
3086 if (sgn == UNSIGNED)
3087 wmin = zero;
3088 else
3090 wmin = -wmax;
3091 if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
3093 tmp = vr0.min;
3094 if (wi::gts_p (tmp, zero))
3095 tmp = zero;
3096 wmin = wi::smax (wmin, tmp);
3100 if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
3102 tmp = vr0.max;
3103 if (sgn == SIGNED && wi::neg_p (tmp))
3104 tmp = zero;
3105 wmax = wi::min (wmax, tmp, sgn);
3108 min = wide_int_to_tree (expr_type, wmin);
3109 max = wide_int_to_tree (expr_type, wmax);
3111 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
3113 bool int_cst_range0, int_cst_range1;
3114 wide_int may_be_nonzero0, may_be_nonzero1;
3115 wide_int must_be_nonzero0, must_be_nonzero1;
3117 int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
3118 &may_be_nonzero0,
3119 &must_be_nonzero0);
3120 int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
3121 &may_be_nonzero1,
3122 &must_be_nonzero1);
3124 type = VR_RANGE;
3125 if (code == BIT_AND_EXPR)
3127 min = wide_int_to_tree (expr_type,
3128 must_be_nonzero0 & must_be_nonzero1);
3129 wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
3130 /* If both input ranges contain only negative values we can
3131 truncate the result range maximum to the minimum of the
3132 input range maxima. */
3133 if (int_cst_range0 && int_cst_range1
3134 && tree_int_cst_sgn (vr0.max) < 0
3135 && tree_int_cst_sgn (vr1.max) < 0)
3137 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
3138 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
3140 /* If either input range contains only non-negative values
3141 we can truncate the result range maximum to the respective
3142 maximum of the input range. */
3143 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
3144 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
3145 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
3146 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
3147 max = wide_int_to_tree (expr_type, wmax);
3148 cmp = compare_values (min, max);
3149 /* PR68217: In case of signed & sign-bit-CST should
3150 result in [-INF, 0] instead of [-INF, INF]. */
3151 if (cmp == -2 || cmp == 1)
3153 wide_int sign_bit
3154 = wi::set_bit_in_zero (TYPE_PRECISION (expr_type) - 1,
3155 TYPE_PRECISION (expr_type));
3156 if (!TYPE_UNSIGNED (expr_type)
3157 && ((value_range_constant_singleton (&vr0)
3158 && !wi::cmps (vr0.min, sign_bit))
3159 || (value_range_constant_singleton (&vr1)
3160 && !wi::cmps (vr1.min, sign_bit))))
3162 min = TYPE_MIN_VALUE (expr_type);
3163 max = build_int_cst (expr_type, 0);
3167 else if (code == BIT_IOR_EXPR)
3169 max = wide_int_to_tree (expr_type,
3170 may_be_nonzero0 | may_be_nonzero1);
3171 wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
3172 /* If the input ranges contain only positive values we can
3173 truncate the minimum of the result range to the maximum
3174 of the input range minima. */
3175 if (int_cst_range0 && int_cst_range1
3176 && tree_int_cst_sgn (vr0.min) >= 0
3177 && tree_int_cst_sgn (vr1.min) >= 0)
3179 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
3180 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
3182 /* If either input range contains only negative values
3183 we can truncate the minimum of the result range to the
3184 respective minimum range. */
3185 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
3186 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
3187 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
3188 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
3189 min = wide_int_to_tree (expr_type, wmin);
3191 else if (code == BIT_XOR_EXPR)
3193 wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
3194 | ~(may_be_nonzero0 | may_be_nonzero1));
3195 wide_int result_one_bits
3196 = (must_be_nonzero0.and_not (may_be_nonzero1)
3197 | must_be_nonzero1.and_not (may_be_nonzero0));
3198 max = wide_int_to_tree (expr_type, ~result_zero_bits);
3199 min = wide_int_to_tree (expr_type, result_one_bits);
3200 /* If the range has all positive or all negative values the
3201 result is better than VARYING. */
3202 if (tree_int_cst_sgn (min) < 0
3203 || tree_int_cst_sgn (max) >= 0)
3205 else
3206 max = min = NULL_TREE;
3209 else
3210 gcc_unreachable ();
3212 /* If either MIN or MAX overflowed, then set the resulting range to
3213 VARYING. But we do accept an overflow infinity representation. */
3214 if (min == NULL_TREE
3215 || (TREE_OVERFLOW_P (min) && !is_overflow_infinity (min))
3216 || max == NULL_TREE
3217 || (TREE_OVERFLOW_P (max) && !is_overflow_infinity (max)))
3219 set_value_range_to_varying (vr);
3220 return;
3223 /* We punt if:
3224 1) [-INF, +INF]
3225 2) [-INF, +-INF(OVF)]
3226 3) [+-INF(OVF), +INF]
3227 4) [+-INF(OVF), +-INF(OVF)]
3228 We learn nothing when we have INF and INF(OVF) on both sides.
3229 Note that we do accept [-INF, -INF] and [+INF, +INF] without
3230 overflow. */
3231 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
3232 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
3234 set_value_range_to_varying (vr);
3235 return;
3238 cmp = compare_values (min, max);
3239 if (cmp == -2 || cmp == 1)
3241 /* If the new range has its limits swapped around (MIN > MAX),
3242 then the operation caused one of them to wrap around, mark
3243 the new range VARYING. */
3244 set_value_range_to_varying (vr);
3246 else
3247 set_value_range (vr, type, min, max, NULL);
3250 /* Extract range information from a binary expression OP0 CODE OP1 based on
3251 the ranges of each of its operands with resulting type EXPR_TYPE.
3252 The resulting range is stored in *VR. */
3254 static void
3255 extract_range_from_binary_expr (value_range *vr,
3256 enum tree_code code,
3257 tree expr_type, tree op0, tree op1)
3259 value_range vr0 = VR_INITIALIZER;
3260 value_range vr1 = VR_INITIALIZER;
3262 /* Get value ranges for each operand. For constant operands, create
3263 a new value range with the operand to simplify processing. */
3264 if (TREE_CODE (op0) == SSA_NAME)
3265 vr0 = *(get_value_range (op0));
3266 else if (is_gimple_min_invariant (op0))
3267 set_value_range_to_value (&vr0, op0, NULL);
3268 else
3269 set_value_range_to_varying (&vr0);
3271 if (TREE_CODE (op1) == SSA_NAME)
3272 vr1 = *(get_value_range (op1));
3273 else if (is_gimple_min_invariant (op1))
3274 set_value_range_to_value (&vr1, op1, NULL);
3275 else
3276 set_value_range_to_varying (&vr1);
3278 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
3280 /* Try harder for PLUS and MINUS if the range of one operand is symbolic
3281 and based on the other operand, for example if it was deduced from a
3282 symbolic comparison. When a bound of the range of the first operand
3283 is invariant, we set the corresponding bound of the new range to INF
3284 in order to avoid recursing on the range of the second operand. */
3285 if (vr->type == VR_VARYING
3286 && (code == PLUS_EXPR || code == MINUS_EXPR)
3287 && TREE_CODE (op1) == SSA_NAME
3288 && vr0.type == VR_RANGE
3289 && symbolic_range_based_on_p (&vr0, op1))
3291 const bool minus_p = (code == MINUS_EXPR);
3292 value_range n_vr1 = VR_INITIALIZER;
3294 /* Try with VR0 and [-INF, OP1]. */
3295 if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
3296 set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);
3298 /* Try with VR0 and [OP1, +INF]. */
3299 else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
3300 set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);
3302 /* Try with VR0 and [OP1, OP1]. */
3303 else
3304 set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);
3306 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
3309 if (vr->type == VR_VARYING
3310 && (code == PLUS_EXPR || code == MINUS_EXPR)
3311 && TREE_CODE (op0) == SSA_NAME
3312 && vr1.type == VR_RANGE
3313 && symbolic_range_based_on_p (&vr1, op0))
3315 const bool minus_p = (code == MINUS_EXPR);
3316 value_range n_vr0 = VR_INITIALIZER;
3318 /* Try with [-INF, OP0] and VR1. */
3319 if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
3320 set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);
3322 /* Try with [OP0, +INF] and VR1. */
3323 else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
3324 set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);
3326 /* Try with [OP0, OP0] and VR1. */
3327 else
3328 set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);
3330 extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
3333 /* If we didn't derive a range for MINUS_EXPR, and
3334 op1's range is ~[op0,op0] or vice-versa, then we
3335 can derive a non-null range. This happens often for
3336 pointer subtraction. */
3337 if (vr->type == VR_VARYING
3338 && code == MINUS_EXPR
3339 && TREE_CODE (op0) == SSA_NAME
3340 && ((vr0.type == VR_ANTI_RANGE
3341 && vr0.min == op1
3342 && vr0.min == vr0.max)
3343 || (vr1.type == VR_ANTI_RANGE
3344 && vr1.min == op0
3345 && vr1.min == vr1.max)))
3346 set_value_range_to_nonnull (vr, TREE_TYPE (op0));
3349 /* Extract range information from a unary operation CODE based on
3350 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
3351 The resulting range is stored in *VR. */
3353 void
3354 extract_range_from_unary_expr (value_range *vr,
3355 enum tree_code code, tree type,
3356 value_range *vr0_, tree op0_type)
3358 value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
3360 /* VRP only operates on integral and pointer types. */
3361 if (!(INTEGRAL_TYPE_P (op0_type)
3362 || POINTER_TYPE_P (op0_type))
3363 || !(INTEGRAL_TYPE_P (type)
3364 || POINTER_TYPE_P (type)))
3366 set_value_range_to_varying (vr);
3367 return;
3370 /* If VR0 is UNDEFINED, so is the result. */
3371 if (vr0.type == VR_UNDEFINED)
3373 set_value_range_to_undefined (vr);
3374 return;
3377 /* Handle operations that we express in terms of others. */
3378 if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
3380 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */
3381 copy_value_range (vr, &vr0);
3382 return;
3384 else if (code == NEGATE_EXPR)
3386 /* -X is simply 0 - X, so re-use existing code that also handles
3387 anti-ranges fine. */
3388 value_range zero = VR_INITIALIZER;
3389 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3390 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3391 return;
3393 else if (code == BIT_NOT_EXPR)
3395 /* ~X is simply -1 - X, so re-use existing code that also handles
3396 anti-ranges fine. */
3397 value_range minusone = VR_INITIALIZER;
3398 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3399 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3400 type, &minusone, &vr0);
3401 return;
3404 /* Now canonicalize anti-ranges to ranges when they are not symbolic
3405 and express op ~[] as (op []') U (op []''). */
3406 if (vr0.type == VR_ANTI_RANGE
3407 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
3409 extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
3410 if (vrtem1.type != VR_UNDEFINED)
3412 value_range vrres = VR_INITIALIZER;
3413 extract_range_from_unary_expr (&vrres, code, type,
3414 &vrtem1, op0_type);
3415 vrp_meet (vr, &vrres);
3417 return;
3420 if (CONVERT_EXPR_CODE_P (code))
3422 tree inner_type = op0_type;
3423 tree outer_type = type;
3425 /* If the expression evaluates to a pointer, we are only interested in
3426 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
3427 if (POINTER_TYPE_P (type))
3429 if (range_is_nonnull (&vr0))
3430 set_value_range_to_nonnull (vr, type);
3431 else if (range_is_null (&vr0))
3432 set_value_range_to_null (vr, type);
3433 else
3434 set_value_range_to_varying (vr);
3435 return;
3438 /* If VR0 is varying and we increase the type precision, assume
3439 a full range for the following transformation. */
3440 if (vr0.type == VR_VARYING
3441 && INTEGRAL_TYPE_P (inner_type)
3442 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
3444 vr0.type = VR_RANGE;
3445 vr0.min = TYPE_MIN_VALUE (inner_type);
3446 vr0.max = TYPE_MAX_VALUE (inner_type);
3449 /* If VR0 is a constant range or anti-range and the conversion is
3450 not truncating we can convert the min and max values and
3451 canonicalize the resulting range. Otherwise we can do the
3452 conversion if the size of the range is less than what the
3453 precision of the target type can represent and the range is
3454 not an anti-range. */
3455 if ((vr0.type == VR_RANGE
3456 || vr0.type == VR_ANTI_RANGE)
3457 && TREE_CODE (vr0.min) == INTEGER_CST
3458 && TREE_CODE (vr0.max) == INTEGER_CST
3459 && (!is_overflow_infinity (vr0.min)
3460 || (vr0.type == VR_RANGE
3461 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3462 && needs_overflow_infinity (outer_type)
3463 && supports_overflow_infinity (outer_type)))
3464 && (!is_overflow_infinity (vr0.max)
3465 || (vr0.type == VR_RANGE
3466 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3467 && needs_overflow_infinity (outer_type)
3468 && supports_overflow_infinity (outer_type)))
3469 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
3470 || (vr0.type == VR_RANGE
3471 && integer_zerop (int_const_binop (RSHIFT_EXPR,
3472 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
3473 size_int (TYPE_PRECISION (outer_type)))))))
3475 tree new_min, new_max;
3476 if (is_overflow_infinity (vr0.min))
3477 new_min = negative_overflow_infinity (outer_type);
3478 else
3479 new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
3480 0, false);
3481 if (is_overflow_infinity (vr0.max))
3482 new_max = positive_overflow_infinity (outer_type);
3483 else
3484 new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
3485 0, false);
3486 set_and_canonicalize_value_range (vr, vr0.type,
3487 new_min, new_max, NULL);
3488 return;
3491 set_value_range_to_varying (vr);
3492 return;
3494 else if (code == ABS_EXPR)
3496 tree min, max;
3497 int cmp;
3499 /* Pass through vr0 in the easy cases. */
3500 if (TYPE_UNSIGNED (type)
3501 || value_range_nonnegative_p (&vr0))
3503 copy_value_range (vr, &vr0);
3504 return;
3507 /* For the remaining varying or symbolic ranges we can't do anything
3508 useful. */
3509 if (vr0.type == VR_VARYING
3510 || symbolic_range_p (&vr0))
3512 set_value_range_to_varying (vr);
3513 return;
3516 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3517 useful range. */
3518 if (!TYPE_OVERFLOW_UNDEFINED (type)
3519 && ((vr0.type == VR_RANGE
3520 && vrp_val_is_min (vr0.min))
3521 || (vr0.type == VR_ANTI_RANGE
3522 && !vrp_val_is_min (vr0.min))))
3524 set_value_range_to_varying (vr);
3525 return;
3528 /* ABS_EXPR may flip the range around, if the original range
3529 included negative values. */
3530 if (is_overflow_infinity (vr0.min))
3531 min = positive_overflow_infinity (type);
3532 else if (!vrp_val_is_min (vr0.min))
3533 min = fold_unary_to_constant (code, type, vr0.min);
3534 else if (!needs_overflow_infinity (type))
3535 min = TYPE_MAX_VALUE (type);
3536 else if (supports_overflow_infinity (type))
3537 min = positive_overflow_infinity (type);
3538 else
3540 set_value_range_to_varying (vr);
3541 return;
3544 if (is_overflow_infinity (vr0.max))
3545 max = positive_overflow_infinity (type);
3546 else if (!vrp_val_is_min (vr0.max))
3547 max = fold_unary_to_constant (code, type, vr0.max);
3548 else if (!needs_overflow_infinity (type))
3549 max = TYPE_MAX_VALUE (type);
3550 else if (supports_overflow_infinity (type)
3551 /* We shouldn't generate [+INF, +INF] as set_value_range
3552 doesn't like this and ICEs. */
3553 && !is_positive_overflow_infinity (min))
3554 max = positive_overflow_infinity (type);
3555 else
3557 set_value_range_to_varying (vr);
3558 return;
3561 cmp = compare_values (min, max);
3563 /* If a VR_ANTI_RANGEs contains zero, then we have
3564 ~[-INF, min(MIN, MAX)]. */
3565 if (vr0.type == VR_ANTI_RANGE)
3567 if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3569 /* Take the lower of the two values. */
3570 if (cmp != 1)
3571 max = min;
3573 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3574 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3575 flag_wrapv is set and the original anti-range doesn't include
3576 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3577 if (TYPE_OVERFLOW_WRAPS (type))
3579 tree type_min_value = TYPE_MIN_VALUE (type);
3581 min = (vr0.min != type_min_value
3582 ? int_const_binop (PLUS_EXPR, type_min_value,
3583 build_int_cst (TREE_TYPE (type_min_value), 1))
3584 : type_min_value);
3586 else
3588 if (overflow_infinity_range_p (&vr0))
3589 min = negative_overflow_infinity (type);
3590 else
3591 min = TYPE_MIN_VALUE (type);
3594 else
3596 /* All else has failed, so create the range [0, INF], even for
3597 flag_wrapv since TYPE_MIN_VALUE is in the original
3598 anti-range. */
3599 vr0.type = VR_RANGE;
3600 min = build_int_cst (type, 0);
3601 if (needs_overflow_infinity (type))
3603 if (supports_overflow_infinity (type))
3604 max = positive_overflow_infinity (type);
3605 else
3607 set_value_range_to_varying (vr);
3608 return;
3611 else
3612 max = TYPE_MAX_VALUE (type);
3616 /* If the range contains zero then we know that the minimum value in the
3617 range will be zero. */
3618 else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3620 if (cmp == 1)
3621 max = min;
3622 min = build_int_cst (type, 0);
3624 else
3626 /* If the range was reversed, swap MIN and MAX. */
3627 if (cmp == 1)
3628 std::swap (min, max);
3631 cmp = compare_values (min, max);
3632 if (cmp == -2 || cmp == 1)
3634 /* If the new range has its limits swapped around (MIN > MAX),
3635 then the operation caused one of them to wrap around, mark
3636 the new range VARYING. */
3637 set_value_range_to_varying (vr);
3639 else
3640 set_value_range (vr, vr0.type, min, max, NULL);
3641 return;
3644 /* For unhandled operations fall back to varying. */
3645 set_value_range_to_varying (vr);
3646 return;
3650 /* Extract range information from a unary expression CODE OP0 based on
3651 the range of its operand with resulting type TYPE.
3652 The resulting range is stored in *VR. */
3654 static void
3655 extract_range_from_unary_expr (value_range *vr, enum tree_code code,
3656 tree type, tree op0)
3658 value_range vr0 = VR_INITIALIZER;
3660 /* Get value ranges for the operand. For constant operands, create
3661 a new value range with the operand to simplify processing. */
3662 if (TREE_CODE (op0) == SSA_NAME)
3663 vr0 = *(get_value_range (op0));
3664 else if (is_gimple_min_invariant (op0))
3665 set_value_range_to_value (&vr0, op0, NULL);
3666 else
3667 set_value_range_to_varying (&vr0);
3669 extract_range_from_unary_expr (vr, code, type, &vr0, TREE_TYPE (op0));
3673 /* Extract range information from a conditional expression STMT based on
3674 the ranges of each of its operands and the expression code. */
3676 static void
3677 extract_range_from_cond_expr (value_range *vr, gassign *stmt)
3679 tree op0, op1;
3680 value_range vr0 = VR_INITIALIZER;
3681 value_range vr1 = VR_INITIALIZER;
3683 /* Get value ranges for each operand. For constant operands, create
3684 a new value range with the operand to simplify processing. */
3685 op0 = gimple_assign_rhs2 (stmt);
3686 if (TREE_CODE (op0) == SSA_NAME)
3687 vr0 = *(get_value_range (op0));
3688 else if (is_gimple_min_invariant (op0))
3689 set_value_range_to_value (&vr0, op0, NULL);
3690 else
3691 set_value_range_to_varying (&vr0);
3693 op1 = gimple_assign_rhs3 (stmt);
3694 if (TREE_CODE (op1) == SSA_NAME)
3695 vr1 = *(get_value_range (op1));
3696 else if (is_gimple_min_invariant (op1))
3697 set_value_range_to_value (&vr1, op1, NULL);
3698 else
3699 set_value_range_to_varying (&vr1);
3701 /* The resulting value range is the union of the operand ranges */
3702 copy_value_range (vr, &vr0);
3703 vrp_meet (vr, &vr1);
3707 /* Extract range information from a comparison expression EXPR based
3708 on the range of its operand and the expression code. */
3710 static void
3711 extract_range_from_comparison (value_range *vr, enum tree_code code,
3712 tree type, tree op0, tree op1)
3714 bool sop = false;
3715 tree val;
3717 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3718 NULL);
3720 /* A disadvantage of using a special infinity as an overflow
3721 representation is that we lose the ability to record overflow
3722 when we don't have an infinity. So we have to ignore a result
3723 which relies on overflow. */
3725 if (val && !is_overflow_infinity (val) && !sop)
3727 /* Since this expression was found on the RHS of an assignment,
3728 its type may be different from _Bool. Convert VAL to EXPR's
3729 type. */
3730 val = fold_convert (type, val);
3731 if (is_gimple_min_invariant (val))
3732 set_value_range_to_value (vr, val, vr->equiv);
3733 else
3734 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3736 else
3737 /* The result of a comparison is always true or false. */
3738 set_value_range_to_truthvalue (vr, type);
3741 /* Helper function for simplify_internal_call_using_ranges and
3742 extract_range_basic. Return true if OP0 SUBCODE OP1 for
3743 SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
3744 always overflow. Set *OVF to true if it is known to always
3745 overflow. */
3747 static bool
3748 check_for_binary_op_overflow (enum tree_code subcode, tree type,
3749 tree op0, tree op1, bool *ovf)
3751 value_range vr0 = VR_INITIALIZER;
3752 value_range vr1 = VR_INITIALIZER;
3753 if (TREE_CODE (op0) == SSA_NAME)
3754 vr0 = *get_value_range (op0);
3755 else if (TREE_CODE (op0) == INTEGER_CST)
3756 set_value_range_to_value (&vr0, op0, NULL);
3757 else
3758 set_value_range_to_varying (&vr0);
3760 if (TREE_CODE (op1) == SSA_NAME)
3761 vr1 = *get_value_range (op1);
3762 else if (TREE_CODE (op1) == INTEGER_CST)
3763 set_value_range_to_value (&vr1, op1, NULL);
3764 else
3765 set_value_range_to_varying (&vr1);
3767 if (!range_int_cst_p (&vr0)
3768 || TREE_OVERFLOW (vr0.min)
3769 || TREE_OVERFLOW (vr0.max))
3771 vr0.min = vrp_val_min (TREE_TYPE (op0));
3772 vr0.max = vrp_val_max (TREE_TYPE (op0));
3774 if (!range_int_cst_p (&vr1)
3775 || TREE_OVERFLOW (vr1.min)
3776 || TREE_OVERFLOW (vr1.max))
3778 vr1.min = vrp_val_min (TREE_TYPE (op1));
3779 vr1.max = vrp_val_max (TREE_TYPE (op1));
3781 *ovf = arith_overflowed_p (subcode, type, vr0.min,
3782 subcode == MINUS_EXPR ? vr1.max : vr1.min);
3783 if (arith_overflowed_p (subcode, type, vr0.max,
3784 subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
3785 return false;
3786 if (subcode == MULT_EXPR)
3788 if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
3789 || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
3790 return false;
3792 if (*ovf)
3794 /* So far we found that there is an overflow on the boundaries.
3795 That doesn't prove that there is an overflow even for all values
3796 in between the boundaries. For that compute widest_int range
3797 of the result and see if it doesn't overlap the range of
3798 type. */
3799 widest_int wmin, wmax;
3800 widest_int w[4];
3801 int i;
3802 w[0] = wi::to_widest (vr0.min);
3803 w[1] = wi::to_widest (vr0.max);
3804 w[2] = wi::to_widest (vr1.min);
3805 w[3] = wi::to_widest (vr1.max);
3806 for (i = 0; i < 4; i++)
3808 widest_int wt;
3809 switch (subcode)
3811 case PLUS_EXPR:
3812 wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
3813 break;
3814 case MINUS_EXPR:
3815 wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
3816 break;
3817 case MULT_EXPR:
3818 wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
3819 break;
3820 default:
3821 gcc_unreachable ();
3823 if (i == 0)
3825 wmin = wt;
3826 wmax = wt;
3828 else
3830 wmin = wi::smin (wmin, wt);
3831 wmax = wi::smax (wmax, wt);
3834 /* The result of op0 CODE op1 is known to be in range
3835 [wmin, wmax]. */
3836 widest_int wtmin = wi::to_widest (vrp_val_min (type));
3837 widest_int wtmax = wi::to_widest (vrp_val_max (type));
3838 /* If all values in [wmin, wmax] are smaller than
3839 [wtmin, wtmax] or all are larger than [wtmin, wtmax],
3840 the arithmetic operation will always overflow. */
3841 if (wmax < wtmin || wmin > wtmax)
3842 return true;
3843 return false;
3845 return true;
3848 /* Try to derive a nonnegative or nonzero range out of STMT relying
3849 primarily on generic routines in fold in conjunction with range data.
3850 Store the result in *VR */
3852 static void
3853 extract_range_basic (value_range *vr, gimple *stmt)
3855 bool sop = false;
3856 tree type = gimple_expr_type (stmt);
3858 if (is_gimple_call (stmt))
3860 tree arg;
3861 int mini, maxi, zerov = 0, prec;
3862 enum tree_code subcode = ERROR_MARK;
3863 combined_fn cfn = gimple_call_combined_fn (stmt);
3865 switch (cfn)
3867 case CFN_BUILT_IN_CONSTANT_P:
3868 /* If the call is __builtin_constant_p and the argument is a
3869 function parameter resolve it to false. This avoids bogus
3870 array bound warnings.
3871 ??? We could do this as early as inlining is finished. */
3872 arg = gimple_call_arg (stmt, 0);
3873 if (TREE_CODE (arg) == SSA_NAME
3874 && SSA_NAME_IS_DEFAULT_DEF (arg)
3875 && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL
3876 && cfun->after_inlining)
3878 set_value_range_to_null (vr, type);
3879 return;
3881 break;
3882 /* Both __builtin_ffs* and __builtin_popcount return
3883 [0, prec]. */
3884 CASE_CFN_FFS:
3885 CASE_CFN_POPCOUNT:
3886 arg = gimple_call_arg (stmt, 0);
3887 prec = TYPE_PRECISION (TREE_TYPE (arg));
3888 mini = 0;
3889 maxi = prec;
3890 if (TREE_CODE (arg) == SSA_NAME)
3892 value_range *vr0 = get_value_range (arg);
3893 /* If arg is non-zero, then ffs or popcount
3894 are non-zero. */
3895 if (((vr0->type == VR_RANGE
3896 && range_includes_zero_p (vr0->min, vr0->max) == 0)
3897 || (vr0->type == VR_ANTI_RANGE
3898 && range_includes_zero_p (vr0->min, vr0->max) == 1))
3899 && !is_overflow_infinity (vr0->min)
3900 && !is_overflow_infinity (vr0->max))
3901 mini = 1;
3902 /* If some high bits are known to be zero,
3903 we can decrease the maximum. */
3904 if (vr0->type == VR_RANGE
3905 && TREE_CODE (vr0->max) == INTEGER_CST
3906 && !operand_less_p (vr0->min,
3907 build_zero_cst (TREE_TYPE (vr0->min)))
3908 && !is_overflow_infinity (vr0->max))
3909 maxi = tree_floor_log2 (vr0->max) + 1;
3911 goto bitop_builtin;
3912 /* __builtin_parity* returns [0, 1]. */
3913 CASE_CFN_PARITY:
3914 mini = 0;
3915 maxi = 1;
3916 goto bitop_builtin;
3917 /* __builtin_c[lt]z* return [0, prec-1], except for
3918 when the argument is 0, but that is undefined behavior.
3919 On many targets where the CLZ RTL or optab value is defined
3920 for 0 the value is prec, so include that in the range
3921 by default. */
3922 CASE_CFN_CLZ:
3923 arg = gimple_call_arg (stmt, 0);
3924 prec = TYPE_PRECISION (TREE_TYPE (arg));
3925 mini = 0;
3926 maxi = prec;
3927 if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
3928 != CODE_FOR_nothing
3929 && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3930 zerov)
3931 /* Handle only the single common value. */
3932 && zerov != prec)
3933 /* Magic value to give up, unless vr0 proves
3934 arg is non-zero. */
3935 mini = -2;
3936 if (TREE_CODE (arg) == SSA_NAME)
3938 value_range *vr0 = get_value_range (arg);
3939 /* From clz of VR_RANGE minimum we can compute
3940 result maximum. */
3941 if (vr0->type == VR_RANGE
3942 && TREE_CODE (vr0->min) == INTEGER_CST
3943 && !is_overflow_infinity (vr0->min))
3945 maxi = prec - 1 - tree_floor_log2 (vr0->min);
3946 if (maxi != prec)
3947 mini = 0;
3949 else if (vr0->type == VR_ANTI_RANGE
3950 && integer_zerop (vr0->min)
3951 && !is_overflow_infinity (vr0->min))
3953 maxi = prec - 1;
3954 mini = 0;
3956 if (mini == -2)
3957 break;
3958 /* From clz of VR_RANGE maximum we can compute
3959 result minimum. */
3960 if (vr0->type == VR_RANGE
3961 && TREE_CODE (vr0->max) == INTEGER_CST
3962 && !is_overflow_infinity (vr0->max))
3964 mini = prec - 1 - tree_floor_log2 (vr0->max);
3965 if (mini == prec)
3966 break;
3969 if (mini == -2)
3970 break;
3971 goto bitop_builtin;
3972 /* __builtin_ctz* return [0, prec-1], except for
3973 when the argument is 0, but that is undefined behavior.
3974 If there is a ctz optab for this mode and
3975 CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
3976 otherwise just assume 0 won't be seen. */
3977 CASE_CFN_CTZ:
3978 arg = gimple_call_arg (stmt, 0);
3979 prec = TYPE_PRECISION (TREE_TYPE (arg));
3980 mini = 0;
3981 maxi = prec - 1;
3982 if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
3983 != CODE_FOR_nothing
3984 && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3985 zerov))
3987 /* Handle only the two common values. */
3988 if (zerov == -1)
3989 mini = -1;
3990 else if (zerov == prec)
3991 maxi = prec;
3992 else
3993 /* Magic value to give up, unless vr0 proves
3994 arg is non-zero. */
3995 mini = -2;
3997 if (TREE_CODE (arg) == SSA_NAME)
3999 value_range *vr0 = get_value_range (arg);
4000 /* If arg is non-zero, then use [0, prec - 1]. */
4001 if (((vr0->type == VR_RANGE
4002 && integer_nonzerop (vr0->min))
4003 || (vr0->type == VR_ANTI_RANGE
4004 && integer_zerop (vr0->min)))
4005 && !is_overflow_infinity (vr0->min))
4007 mini = 0;
4008 maxi = prec - 1;
4010 /* If some high bits are known to be zero,
4011 we can decrease the result maximum. */
4012 if (vr0->type == VR_RANGE
4013 && TREE_CODE (vr0->max) == INTEGER_CST
4014 && !is_overflow_infinity (vr0->max))
4016 maxi = tree_floor_log2 (vr0->max);
4017 /* For vr0 [0, 0] give up. */
4018 if (maxi == -1)
4019 break;
4022 if (mini == -2)
4023 break;
4024 goto bitop_builtin;
4025 /* __builtin_clrsb* returns [0, prec-1]. */
4026 CASE_CFN_CLRSB:
4027 arg = gimple_call_arg (stmt, 0);
4028 prec = TYPE_PRECISION (TREE_TYPE (arg));
4029 mini = 0;
4030 maxi = prec - 1;
4031 goto bitop_builtin;
4032 bitop_builtin:
4033 set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
4034 build_int_cst (type, maxi), NULL);
4035 return;
4036 case CFN_UBSAN_CHECK_ADD:
4037 subcode = PLUS_EXPR;
4038 break;
4039 case CFN_UBSAN_CHECK_SUB:
4040 subcode = MINUS_EXPR;
4041 break;
4042 case CFN_UBSAN_CHECK_MUL:
4043 subcode = MULT_EXPR;
4044 break;
4045 case CFN_GOACC_DIM_SIZE:
4046 case CFN_GOACC_DIM_POS:
4047 /* Optimizing these two internal functions helps the loop
4048 optimizer eliminate outer comparisons. Size is [1,N]
4049 and pos is [0,N-1]. */
4051 bool is_pos = cfn == CFN_GOACC_DIM_POS;
4052 int axis = oacc_get_ifn_dim_arg (stmt);
4053 int size = oacc_get_fn_dim_size (current_function_decl, axis);
4055 if (!size)
4056 /* If it's dynamic, the backend might know a hardware
4057 limitation. */
4058 size = targetm.goacc.dim_limit (axis);
4060 tree type = TREE_TYPE (gimple_call_lhs (stmt));
4061 set_value_range (vr, VR_RANGE,
4062 build_int_cst (type, is_pos ? 0 : 1),
4063 size ? build_int_cst (type, size - is_pos)
4064 : vrp_val_max (type), NULL);
4066 return;
4067 case CFN_BUILT_IN_STRLEN:
4068 if (tree lhs = gimple_call_lhs (stmt))
4069 if (ptrdiff_type_node
4070 && (TYPE_PRECISION (ptrdiff_type_node)
4071 == TYPE_PRECISION (TREE_TYPE (lhs))))
4073 tree type = TREE_TYPE (lhs);
4074 tree max = vrp_val_max (ptrdiff_type_node);
4075 wide_int wmax = wi::to_wide (max, TYPE_PRECISION (TREE_TYPE (max)));
4076 tree range_min = build_zero_cst (type);
4077 tree range_max = wide_int_to_tree (type, wmax - 1);
4078 set_value_range (vr, VR_RANGE, range_min, range_max, NULL);
4079 return;
4081 break;
4082 default:
4083 break;
4085 if (subcode != ERROR_MARK)
4087 bool saved_flag_wrapv = flag_wrapv;
4088 /* Pretend the arithmetics is wrapping. If there is
4089 any overflow, we'll complain, but will actually do
4090 wrapping operation. */
4091 flag_wrapv = 1;
4092 extract_range_from_binary_expr (vr, subcode, type,
4093 gimple_call_arg (stmt, 0),
4094 gimple_call_arg (stmt, 1));
4095 flag_wrapv = saved_flag_wrapv;
4097 /* If for both arguments vrp_valueize returned non-NULL,
4098 this should have been already folded and if not, it
4099 wasn't folded because of overflow. Avoid removing the
4100 UBSAN_CHECK_* calls in that case. */
4101 if (vr->type == VR_RANGE
4102 && (vr->min == vr->max
4103 || operand_equal_p (vr->min, vr->max, 0)))
4104 set_value_range_to_varying (vr);
4105 return;
4108 /* Handle extraction of the two results (result of arithmetics and
4109 a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW
4110 internal function. */
4111 else if (is_gimple_assign (stmt)
4112 && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
4113 || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
4114 && INTEGRAL_TYPE_P (type))
4116 enum tree_code code = gimple_assign_rhs_code (stmt);
4117 tree op = gimple_assign_rhs1 (stmt);
4118 if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
4120 gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
4121 if (is_gimple_call (g) && gimple_call_internal_p (g))
4123 enum tree_code subcode = ERROR_MARK;
4124 switch (gimple_call_internal_fn (g))
4126 case IFN_ADD_OVERFLOW:
4127 subcode = PLUS_EXPR;
4128 break;
4129 case IFN_SUB_OVERFLOW:
4130 subcode = MINUS_EXPR;
4131 break;
4132 case IFN_MUL_OVERFLOW:
4133 subcode = MULT_EXPR;
4134 break;
4135 default:
4136 break;
4138 if (subcode != ERROR_MARK)
4140 tree op0 = gimple_call_arg (g, 0);
4141 tree op1 = gimple_call_arg (g, 1);
4142 if (code == IMAGPART_EXPR)
4144 bool ovf = false;
4145 if (check_for_binary_op_overflow (subcode, type,
4146 op0, op1, &ovf))
4147 set_value_range_to_value (vr,
4148 build_int_cst (type, ovf),
4149 NULL);
4150 else if (TYPE_PRECISION (type) == 1
4151 && !TYPE_UNSIGNED (type))
4152 set_value_range_to_varying (vr);
4153 else
4154 set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
4155 build_int_cst (type, 1), NULL);
4157 else if (types_compatible_p (type, TREE_TYPE (op0))
4158 && types_compatible_p (type, TREE_TYPE (op1)))
4160 bool saved_flag_wrapv = flag_wrapv;
4161 /* Pretend the arithmetics is wrapping. If there is
4162 any overflow, IMAGPART_EXPR will be set. */
4163 flag_wrapv = 1;
4164 extract_range_from_binary_expr (vr, subcode, type,
4165 op0, op1);
4166 flag_wrapv = saved_flag_wrapv;
4168 else
4170 value_range vr0 = VR_INITIALIZER;
4171 value_range vr1 = VR_INITIALIZER;
4172 bool saved_flag_wrapv = flag_wrapv;
4173 /* Pretend the arithmetics is wrapping. If there is
4174 any overflow, IMAGPART_EXPR will be set. */
4175 flag_wrapv = 1;
4176 extract_range_from_unary_expr (&vr0, NOP_EXPR,
4177 type, op0);
4178 extract_range_from_unary_expr (&vr1, NOP_EXPR,
4179 type, op1);
4180 extract_range_from_binary_expr_1 (vr, subcode, type,
4181 &vr0, &vr1);
4182 flag_wrapv = saved_flag_wrapv;
4184 return;
4189 if (INTEGRAL_TYPE_P (type)
4190 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
4191 set_value_range_to_nonnegative (vr, type,
4192 sop || stmt_overflow_infinity (stmt));
4193 else if (vrp_stmt_computes_nonzero (stmt, &sop)
4194 && !sop)
4195 set_value_range_to_nonnull (vr, type);
4196 else
4197 set_value_range_to_varying (vr);
4201 /* Try to compute a useful range out of assignment STMT and store it
4202 in *VR. */
4204 static void
4205 extract_range_from_assignment (value_range *vr, gassign *stmt)
4207 enum tree_code code = gimple_assign_rhs_code (stmt);
4209 if (code == ASSERT_EXPR)
4210 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
4211 else if (code == SSA_NAME)
4212 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
4213 else if (TREE_CODE_CLASS (code) == tcc_binary)
4214 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
4215 gimple_expr_type (stmt),
4216 gimple_assign_rhs1 (stmt),
4217 gimple_assign_rhs2 (stmt));
4218 else if (TREE_CODE_CLASS (code) == tcc_unary)
4219 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
4220 gimple_expr_type (stmt),
4221 gimple_assign_rhs1 (stmt));
4222 else if (code == COND_EXPR)
4223 extract_range_from_cond_expr (vr, stmt);
4224 else if (TREE_CODE_CLASS (code) == tcc_comparison)
4225 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
4226 gimple_expr_type (stmt),
4227 gimple_assign_rhs1 (stmt),
4228 gimple_assign_rhs2 (stmt));
4229 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
4230 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
4231 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
4232 else
4233 set_value_range_to_varying (vr);
4235 if (vr->type == VR_VARYING)
4236 extract_range_basic (vr, stmt);
4239 /* Given a range VR, a LOOP and a variable VAR, determine whether it
4240 would be profitable to adjust VR using scalar evolution information
4241 for VAR. If so, update VR with the new limits. */
4243 static void
4244 adjust_range_with_scev (value_range *vr, struct loop *loop,
4245 gimple *stmt, tree var)
4247 tree init, step, chrec, tmin, tmax, min, max, type, tem;
4248 enum ev_direction dir;
4250 /* TODO. Don't adjust anti-ranges. An anti-range may provide
4251 better opportunities than a regular range, but I'm not sure. */
4252 if (vr->type == VR_ANTI_RANGE)
4253 return;
4255 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
4257 /* Like in PR19590, scev can return a constant function. */
4258 if (is_gimple_min_invariant (chrec))
4260 set_value_range_to_value (vr, chrec, vr->equiv);
4261 return;
4264 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
4265 return;
4267 init = initial_condition_in_loop_num (chrec, loop->num);
4268 tem = op_with_constant_singleton_value_range (init);
4269 if (tem)
4270 init = tem;
4271 step = evolution_part_in_loop_num (chrec, loop->num);
4272 tem = op_with_constant_singleton_value_range (step);
4273 if (tem)
4274 step = tem;
4276 /* If STEP is symbolic, we can't know whether INIT will be the
4277 minimum or maximum value in the range. Also, unless INIT is
4278 a simple expression, compare_values and possibly other functions
4279 in tree-vrp won't be able to handle it. */
4280 if (step == NULL_TREE
4281 || !is_gimple_min_invariant (step)
4282 || !valid_value_p (init))
4283 return;
4285 dir = scev_direction (chrec);
4286 if (/* Do not adjust ranges if we do not know whether the iv increases
4287 or decreases, ... */
4288 dir == EV_DIR_UNKNOWN
4289 /* ... or if it may wrap. */
4290 || scev_probably_wraps_p (NULL_TREE, init, step, stmt,
4291 get_chrec_loop (chrec), true))
4292 return;
4294 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
4295 negative_overflow_infinity and positive_overflow_infinity,
4296 because we have concluded that the loop probably does not
4297 wrap. */
4299 type = TREE_TYPE (var);
4300 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
4301 tmin = lower_bound_in_type (type, type);
4302 else
4303 tmin = TYPE_MIN_VALUE (type);
4304 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
4305 tmax = upper_bound_in_type (type, type);
4306 else
4307 tmax = TYPE_MAX_VALUE (type);
4309 /* Try to use estimated number of iterations for the loop to constrain the
4310 final value in the evolution. */
4311 if (TREE_CODE (step) == INTEGER_CST
4312 && is_gimple_val (init)
4313 && (TREE_CODE (init) != SSA_NAME
4314 || get_value_range (init)->type == VR_RANGE))
4316 widest_int nit;
4318 /* We are only entering here for loop header PHI nodes, so using
4319 the number of latch executions is the correct thing to use. */
4320 if (max_loop_iterations (loop, &nit))
4322 value_range maxvr = VR_INITIALIZER;
4323 signop sgn = TYPE_SIGN (TREE_TYPE (step));
4324 bool overflow;
4326 widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
4327 &overflow);
4328 /* If the multiplication overflowed we can't do a meaningful
4329 adjustment. Likewise if the result doesn't fit in the type
4330 of the induction variable. For a signed type we have to
4331 check whether the result has the expected signedness which
4332 is that of the step as number of iterations is unsigned. */
4333 if (!overflow
4334 && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
4335 && (sgn == UNSIGNED
4336 || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
4338 tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
4339 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
4340 TREE_TYPE (init), init, tem);
4341 /* Likewise if the addition did. */
4342 if (maxvr.type == VR_RANGE)
4344 value_range initvr = VR_INITIALIZER;
4346 if (TREE_CODE (init) == SSA_NAME)
4347 initvr = *(get_value_range (init));
4348 else if (is_gimple_min_invariant (init))
4349 set_value_range_to_value (&initvr, init, NULL);
4350 else
4351 return;
4353 /* Check if init + nit * step overflows. Though we checked
4354 scev {init, step}_loop doesn't wrap, it is not enough
4355 because the loop may exit immediately. Overflow could
4356 happen in the plus expression in this case. */
4357 if ((dir == EV_DIR_DECREASES
4358 && (is_negative_overflow_infinity (maxvr.min)
4359 || compare_values (maxvr.min, initvr.min) != -1))
4360 || (dir == EV_DIR_GROWS
4361 && (is_positive_overflow_infinity (maxvr.max)
4362 || compare_values (maxvr.max, initvr.max) != 1)))
4363 return;
4365 tmin = maxvr.min;
4366 tmax = maxvr.max;
4372 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4374 min = tmin;
4375 max = tmax;
4377 /* For VARYING or UNDEFINED ranges, just about anything we get
4378 from scalar evolutions should be better. */
4380 if (dir == EV_DIR_DECREASES)
4381 max = init;
4382 else
4383 min = init;
4385 else if (vr->type == VR_RANGE)
4387 min = vr->min;
4388 max = vr->max;
4390 if (dir == EV_DIR_DECREASES)
4392 /* INIT is the maximum value. If INIT is lower than VR->MAX
4393 but no smaller than VR->MIN, set VR->MAX to INIT. */
4394 if (compare_values (init, max) == -1)
4395 max = init;
4397 /* According to the loop information, the variable does not
4398 overflow. If we think it does, probably because of an
4399 overflow due to arithmetic on a different INF value,
4400 reset now. */
4401 if (is_negative_overflow_infinity (min)
4402 || compare_values (min, tmin) == -1)
4403 min = tmin;
4406 else
4408 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
4409 if (compare_values (init, min) == 1)
4410 min = init;
4412 if (is_positive_overflow_infinity (max)
4413 || compare_values (tmax, max) == -1)
4414 max = tmax;
4417 else
4418 return;
4420 /* If we just created an invalid range with the minimum
4421 greater than the maximum, we fail conservatively.
4422 This should happen only in unreachable
4423 parts of code, or for invalid programs. */
4424 if (compare_values (min, max) == 1
4425 || (is_negative_overflow_infinity (min)
4426 && is_positive_overflow_infinity (max)))
4427 return;
4429 /* Even for valid range info, sometimes overflow flag will leak in.
4430 As GIMPLE IL should have no constants with TREE_OVERFLOW set, we
4431 drop them except for +-overflow_infinity which still need special
4432 handling in vrp pass. */
4433 if (TREE_OVERFLOW_P (min)
4434 && ! is_negative_overflow_infinity (min))
4435 min = drop_tree_overflow (min);
4436 if (TREE_OVERFLOW_P (max)
4437 && ! is_positive_overflow_infinity (max))
4438 max = drop_tree_overflow (max);
4440 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
4444 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
4446 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
4447 all the values in the ranges.
4449 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
4451 - Return NULL_TREE if it is not always possible to determine the
4452 value of the comparison.
4454 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
4455 overflow infinity was used in the test. */
4458 static tree
4459 compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
4460 bool *strict_overflow_p)
4462 /* VARYING or UNDEFINED ranges cannot be compared. */
4463 if (vr0->type == VR_VARYING
4464 || vr0->type == VR_UNDEFINED
4465 || vr1->type == VR_VARYING
4466 || vr1->type == VR_UNDEFINED)
4467 return NULL_TREE;
4469 /* Anti-ranges need to be handled separately. */
4470 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
4472 /* If both are anti-ranges, then we cannot compute any
4473 comparison. */
4474 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
4475 return NULL_TREE;
4477 /* These comparisons are never statically computable. */
4478 if (comp == GT_EXPR
4479 || comp == GE_EXPR
4480 || comp == LT_EXPR
4481 || comp == LE_EXPR)
4482 return NULL_TREE;
4484 /* Equality can be computed only between a range and an
4485 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
4486 if (vr0->type == VR_RANGE)
4488 /* To simplify processing, make VR0 the anti-range. */
4489 value_range *tmp = vr0;
4490 vr0 = vr1;
4491 vr1 = tmp;
4494 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
4496 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
4497 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
4498 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4500 return NULL_TREE;
4503 if (!usable_range_p (vr0, strict_overflow_p)
4504 || !usable_range_p (vr1, strict_overflow_p))
4505 return NULL_TREE;
4507 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
4508 operands around and change the comparison code. */
4509 if (comp == GT_EXPR || comp == GE_EXPR)
4511 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
4512 std::swap (vr0, vr1);
4515 if (comp == EQ_EXPR)
4517 /* Equality may only be computed if both ranges represent
4518 exactly one value. */
4519 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
4520 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
4522 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
4523 strict_overflow_p);
4524 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
4525 strict_overflow_p);
4526 if (cmp_min == 0 && cmp_max == 0)
4527 return boolean_true_node;
4528 else if (cmp_min != -2 && cmp_max != -2)
4529 return boolean_false_node;
4531 /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */
4532 else if (compare_values_warnv (vr0->min, vr1->max,
4533 strict_overflow_p) == 1
4534 || compare_values_warnv (vr1->min, vr0->max,
4535 strict_overflow_p) == 1)
4536 return boolean_false_node;
4538 return NULL_TREE;
4540 else if (comp == NE_EXPR)
4542 int cmp1, cmp2;
4544 /* If VR0 is completely to the left or completely to the right
4545 of VR1, they are always different. Notice that we need to
4546 make sure that both comparisons yield similar results to
4547 avoid comparing values that cannot be compared at
4548 compile-time. */
4549 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4550 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4551 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
4552 return boolean_true_node;
4554 /* If VR0 and VR1 represent a single value and are identical,
4555 return false. */
4556 else if (compare_values_warnv (vr0->min, vr0->max,
4557 strict_overflow_p) == 0
4558 && compare_values_warnv (vr1->min, vr1->max,
4559 strict_overflow_p) == 0
4560 && compare_values_warnv (vr0->min, vr1->min,
4561 strict_overflow_p) == 0
4562 && compare_values_warnv (vr0->max, vr1->max,
4563 strict_overflow_p) == 0)
4564 return boolean_false_node;
4566 /* Otherwise, they may or may not be different. */
4567 else
4568 return NULL_TREE;
4570 else if (comp == LT_EXPR || comp == LE_EXPR)
4572 int tst;
4574 /* If VR0 is to the left of VR1, return true. */
4575 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4576 if ((comp == LT_EXPR && tst == -1)
4577 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4579 if (overflow_infinity_range_p (vr0)
4580 || overflow_infinity_range_p (vr1))
4581 *strict_overflow_p = true;
4582 return boolean_true_node;
4585 /* If VR0 is to the right of VR1, return false. */
4586 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4587 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4588 || (comp == LE_EXPR && tst == 1))
4590 if (overflow_infinity_range_p (vr0)
4591 || overflow_infinity_range_p (vr1))
4592 *strict_overflow_p = true;
4593 return boolean_false_node;
4596 /* Otherwise, we don't know. */
4597 return NULL_TREE;
4600 gcc_unreachable ();
4604 /* Given a value range VR, a value VAL and a comparison code COMP, return
4605 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
4606 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
4607 always returns false. Return NULL_TREE if it is not always
4608 possible to determine the value of the comparison. Also set
4609 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
4610 infinity was used in the test. */
4612 static tree
4613 compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
4614 bool *strict_overflow_p)
4616 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4617 return NULL_TREE;
4619 /* Anti-ranges need to be handled separately. */
4620 if (vr->type == VR_ANTI_RANGE)
4622 /* For anti-ranges, the only predicates that we can compute at
4623 compile time are equality and inequality. */
4624 if (comp == GT_EXPR
4625 || comp == GE_EXPR
4626 || comp == LT_EXPR
4627 || comp == LE_EXPR)
4628 return NULL_TREE;
4630 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
4631 if (value_inside_range (val, vr->min, vr->max) == 1)
4632 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4634 return NULL_TREE;
4637 if (!usable_range_p (vr, strict_overflow_p))
4638 return NULL_TREE;
4640 if (comp == EQ_EXPR)
4642 /* EQ_EXPR may only be computed if VR represents exactly
4643 one value. */
4644 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
4646 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
4647 if (cmp == 0)
4648 return boolean_true_node;
4649 else if (cmp == -1 || cmp == 1 || cmp == 2)
4650 return boolean_false_node;
4652 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
4653 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
4654 return boolean_false_node;
4656 return NULL_TREE;
4658 else if (comp == NE_EXPR)
4660 /* If VAL is not inside VR, then they are always different. */
4661 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
4662 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
4663 return boolean_true_node;
4665 /* If VR represents exactly one value equal to VAL, then return
4666 false. */
4667 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
4668 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
4669 return boolean_false_node;
4671 /* Otherwise, they may or may not be different. */
4672 return NULL_TREE;
4674 else if (comp == LT_EXPR || comp == LE_EXPR)
4676 int tst;
4678 /* If VR is to the left of VAL, return true. */
4679 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4680 if ((comp == LT_EXPR && tst == -1)
4681 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4683 if (overflow_infinity_range_p (vr))
4684 *strict_overflow_p = true;
4685 return boolean_true_node;
4688 /* If VR is to the right of VAL, return false. */
4689 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4690 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4691 || (comp == LE_EXPR && tst == 1))
4693 if (overflow_infinity_range_p (vr))
4694 *strict_overflow_p = true;
4695 return boolean_false_node;
4698 /* Otherwise, we don't know. */
4699 return NULL_TREE;
4701 else if (comp == GT_EXPR || comp == GE_EXPR)
4703 int tst;
4705 /* If VR is to the right of VAL, return true. */
4706 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4707 if ((comp == GT_EXPR && tst == 1)
4708 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
4710 if (overflow_infinity_range_p (vr))
4711 *strict_overflow_p = true;
4712 return boolean_true_node;
4715 /* If VR is to the left of VAL, return false. */
4716 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4717 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
4718 || (comp == GE_EXPR && tst == -1))
4720 if (overflow_infinity_range_p (vr))
4721 *strict_overflow_p = true;
4722 return boolean_false_node;
4725 /* Otherwise, we don't know. */
4726 return NULL_TREE;
4729 gcc_unreachable ();
4733 /* Debugging dumps. */
4735 void dump_value_range (FILE *, const value_range *);
4736 void debug_value_range (value_range *);
4737 void dump_all_value_ranges (FILE *);
4738 void debug_all_value_ranges (void);
4739 void dump_vr_equiv (FILE *, bitmap);
4740 void debug_vr_equiv (bitmap);
4743 /* Dump value range VR to FILE. */
4745 void
4746 dump_value_range (FILE *file, const value_range *vr)
4748 if (vr == NULL)
4749 fprintf (file, "[]");
4750 else if (vr->type == VR_UNDEFINED)
4751 fprintf (file, "UNDEFINED");
4752 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4754 tree type = TREE_TYPE (vr->min);
4756 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
4758 if (is_negative_overflow_infinity (vr->min))
4759 fprintf (file, "-INF(OVF)");
4760 else if (INTEGRAL_TYPE_P (type)
4761 && !TYPE_UNSIGNED (type)
4762 && vrp_val_is_min (vr->min))
4763 fprintf (file, "-INF");
4764 else
4765 print_generic_expr (file, vr->min, 0);
4767 fprintf (file, ", ");
4769 if (is_positive_overflow_infinity (vr->max))
4770 fprintf (file, "+INF(OVF)");
4771 else if (INTEGRAL_TYPE_P (type)
4772 && vrp_val_is_max (vr->max))
4773 fprintf (file, "+INF");
4774 else
4775 print_generic_expr (file, vr->max, 0);
4777 fprintf (file, "]");
4779 if (vr->equiv)
4781 bitmap_iterator bi;
4782 unsigned i, c = 0;
4784 fprintf (file, " EQUIVALENCES: { ");
4786 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4788 print_generic_expr (file, ssa_name (i), 0);
4789 fprintf (file, " ");
4790 c++;
4793 fprintf (file, "} (%u elements)", c);
4796 else if (vr->type == VR_VARYING)
4797 fprintf (file, "VARYING");
4798 else
4799 fprintf (file, "INVALID RANGE");
4803 /* Dump value range VR to stderr. */
4805 DEBUG_FUNCTION void
4806 debug_value_range (value_range *vr)
4808 dump_value_range (stderr, vr);
4809 fprintf (stderr, "\n");
4813 /* Dump value ranges of all SSA_NAMEs to FILE. */
4815 void
4816 dump_all_value_ranges (FILE *file)
4818 size_t i;
4820 for (i = 0; i < num_vr_values; i++)
4822 if (vr_value[i])
4824 print_generic_expr (file, ssa_name (i), 0);
4825 fprintf (file, ": ");
4826 dump_value_range (file, vr_value[i]);
4827 fprintf (file, "\n");
4831 fprintf (file, "\n");
4835 /* Dump all value ranges to stderr. */
4837 DEBUG_FUNCTION void
4838 debug_all_value_ranges (void)
4840 dump_all_value_ranges (stderr);
4844 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4845 create a new SSA name N and return the assertion assignment
4846 'N = ASSERT_EXPR <V, V OP W>'. */
4848 static gimple *
4849 build_assert_expr_for (tree cond, tree v)
4851 tree a;
4852 gassign *assertion;
4854 gcc_assert (TREE_CODE (v) == SSA_NAME
4855 && COMPARISON_CLASS_P (cond));
4857 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
4858 assertion = gimple_build_assign (NULL_TREE, a);
4860 /* The new ASSERT_EXPR, creates a new SSA name that replaces the
4861 operand of the ASSERT_EXPR. Create it so the new name and the old one
4862 are registered in the replacement table so that we can fix the SSA web
4863 after adding all the ASSERT_EXPRs. */
4864 create_new_def_for (v, assertion, NULL);
4866 return assertion;
4870 /* Return false if EXPR is a predicate expression involving floating
4871 point values. */
4873 static inline bool
4874 fp_predicate (gimple *stmt)
4876 GIMPLE_CHECK (stmt, GIMPLE_COND);
4878 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4881 /* If the range of values taken by OP can be inferred after STMT executes,
4882 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4883 describes the inferred range. Return true if a range could be
4884 inferred. */
4886 static bool
4887 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
4889 *val_p = NULL_TREE;
4890 *comp_code_p = ERROR_MARK;
4892 /* Do not attempt to infer anything in names that flow through
4893 abnormal edges. */
4894 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4895 return false;
4897 /* If STMT is the last statement of a basic block with no normal
4898 successors, there is no point inferring anything about any of its
4899 operands. We would not be able to find a proper insertion point
4900 for the assertion, anyway. */
4901 if (stmt_ends_bb_p (stmt))
4903 edge_iterator ei;
4904 edge e;
4906 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
4907 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
4908 break;
4909 if (e == NULL)
4910 return false;
4913 if (infer_nonnull_range (stmt, op))
4915 *val_p = build_int_cst (TREE_TYPE (op), 0);
4916 *comp_code_p = NE_EXPR;
4917 return true;
4920 return false;
4924 void dump_asserts_for (FILE *, tree);
4925 void debug_asserts_for (tree);
4926 void dump_all_asserts (FILE *);
4927 void debug_all_asserts (void);
4929 /* Dump all the registered assertions for NAME to FILE. */
4931 void
4932 dump_asserts_for (FILE *file, tree name)
4934 assert_locus *loc;
4936 fprintf (file, "Assertions to be inserted for ");
4937 print_generic_expr (file, name, 0);
4938 fprintf (file, "\n");
4940 loc = asserts_for[SSA_NAME_VERSION (name)];
4941 while (loc)
4943 fprintf (file, "\t");
4944 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4945 fprintf (file, "\n\tBB #%d", loc->bb->index);
4946 if (loc->e)
4948 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4949 loc->e->dest->index);
4950 dump_edge_info (file, loc->e, dump_flags, 0);
4952 fprintf (file, "\n\tPREDICATE: ");
4953 print_generic_expr (file, loc->expr, 0);
4954 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
4955 print_generic_expr (file, loc->val, 0);
4956 fprintf (file, "\n\n");
4957 loc = loc->next;
4960 fprintf (file, "\n");
4964 /* Dump all the registered assertions for NAME to stderr. */
4966 DEBUG_FUNCTION void
4967 debug_asserts_for (tree name)
4969 dump_asserts_for (stderr, name);
4973 /* Dump all the registered assertions for all the names to FILE. */
4975 void
4976 dump_all_asserts (FILE *file)
4978 unsigned i;
4979 bitmap_iterator bi;
4981 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4982 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4983 dump_asserts_for (file, ssa_name (i));
4984 fprintf (file, "\n");
4988 /* Dump all the registered assertions for all the names to stderr. */
4990 DEBUG_FUNCTION void
4991 debug_all_asserts (void)
4993 dump_all_asserts (stderr);
4997 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4998 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4999 E->DEST, then register this location as a possible insertion point
5000 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
5002 BB, E and SI provide the exact insertion point for the new
5003 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
5004 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
5005 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
5006 must not be NULL. */
5008 static void
5009 register_new_assert_for (tree name, tree expr,
5010 enum tree_code comp_code,
5011 tree val,
5012 basic_block bb,
5013 edge e,
5014 gimple_stmt_iterator si)
5016 assert_locus *n, *loc, *last_loc;
5017 basic_block dest_bb;
5019 gcc_checking_assert (bb == NULL || e == NULL);
5021 if (e == NULL)
5022 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
5023 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
5025 /* Never build an assert comparing against an integer constant with
5026 TREE_OVERFLOW set. This confuses our undefined overflow warning
5027 machinery. */
5028 if (TREE_OVERFLOW_P (val))
5029 val = drop_tree_overflow (val);
5031 /* The new assertion A will be inserted at BB or E. We need to
5032 determine if the new location is dominated by a previously
5033 registered location for A. If we are doing an edge insertion,
5034 assume that A will be inserted at E->DEST. Note that this is not
5035 necessarily true.
5037 If E is a critical edge, it will be split. But even if E is
5038 split, the new block will dominate the same set of blocks that
5039 E->DEST dominates.
5041 The reverse, however, is not true, blocks dominated by E->DEST
5042 will not be dominated by the new block created to split E. So,
5043 if the insertion location is on a critical edge, we will not use
5044 the new location to move another assertion previously registered
5045 at a block dominated by E->DEST. */
5046 dest_bb = (bb) ? bb : e->dest;
5048 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
5049 VAL at a block dominating DEST_BB, then we don't need to insert a new
5050 one. Similarly, if the same assertion already exists at a block
5051 dominated by DEST_BB and the new location is not on a critical
5052 edge, then update the existing location for the assertion (i.e.,
5053 move the assertion up in the dominance tree).
5055 Note, this is implemented as a simple linked list because there
5056 should not be more than a handful of assertions registered per
5057 name. If this becomes a performance problem, a table hashed by
5058 COMP_CODE and VAL could be implemented. */
5059 loc = asserts_for[SSA_NAME_VERSION (name)];
5060 last_loc = loc;
5061 while (loc)
5063 if (loc->comp_code == comp_code
5064 && (loc->val == val
5065 || operand_equal_p (loc->val, val, 0))
5066 && (loc->expr == expr
5067 || operand_equal_p (loc->expr, expr, 0)))
5069 /* If E is not a critical edge and DEST_BB
5070 dominates the existing location for the assertion, move
5071 the assertion up in the dominance tree by updating its
5072 location information. */
5073 if ((e == NULL || !EDGE_CRITICAL_P (e))
5074 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
5076 loc->bb = dest_bb;
5077 loc->e = e;
5078 loc->si = si;
5079 return;
5083 /* Update the last node of the list and move to the next one. */
5084 last_loc = loc;
5085 loc = loc->next;
5088 /* If we didn't find an assertion already registered for
5089 NAME COMP_CODE VAL, add a new one at the end of the list of
5090 assertions associated with NAME. */
5091 n = XNEW (struct assert_locus);
5092 n->bb = dest_bb;
5093 n->e = e;
5094 n->si = si;
5095 n->comp_code = comp_code;
5096 n->val = val;
5097 n->expr = expr;
5098 n->next = NULL;
5100 if (last_loc)
5101 last_loc->next = n;
5102 else
5103 asserts_for[SSA_NAME_VERSION (name)] = n;
5105 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
5108 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
5109 Extract a suitable test code and value and store them into *CODE_P and
5110 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
5112 If no extraction was possible, return FALSE, otherwise return TRUE.
5114 If INVERT is true, then we invert the result stored into *CODE_P. */
5116 static bool
5117 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
5118 tree cond_op0, tree cond_op1,
5119 bool invert, enum tree_code *code_p,
5120 tree *val_p)
5122 enum tree_code comp_code;
5123 tree val;
5125 /* Otherwise, we have a comparison of the form NAME COMP VAL
5126 or VAL COMP NAME. */
5127 if (name == cond_op1)
5129 /* If the predicate is of the form VAL COMP NAME, flip
5130 COMP around because we need to register NAME as the
5131 first operand in the predicate. */
5132 comp_code = swap_tree_comparison (cond_code);
5133 val = cond_op0;
5135 else if (name == cond_op0)
5137 /* The comparison is of the form NAME COMP VAL, so the
5138 comparison code remains unchanged. */
5139 comp_code = cond_code;
5140 val = cond_op1;
5142 else
5143 gcc_unreachable ();
5145 /* Invert the comparison code as necessary. */
5146 if (invert)
5147 comp_code = invert_tree_comparison (comp_code, 0);
5149 /* VRP only handles integral and pointer types. */
5150 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
5151 && ! POINTER_TYPE_P (TREE_TYPE (val)))
5152 return false;
5154 /* Do not register always-false predicates.
5155 FIXME: this works around a limitation in fold() when dealing with
5156 enumerations. Given 'enum { N1, N2 } x;', fold will not
5157 fold 'if (x > N2)' to 'if (0)'. */
5158 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
5159 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
5161 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
5162 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
5164 if (comp_code == GT_EXPR
5165 && (!max
5166 || compare_values (val, max) == 0))
5167 return false;
5169 if (comp_code == LT_EXPR
5170 && (!min
5171 || compare_values (val, min) == 0))
5172 return false;
5174 *code_p = comp_code;
5175 *val_p = val;
5176 return true;
5179 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
5180 (otherwise return VAL). VAL and MASK must be zero-extended for
5181 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
5182 (to transform signed values into unsigned) and at the end xor
5183 SGNBIT back. */
5185 static wide_int
5186 masked_increment (const wide_int &val_in, const wide_int &mask,
5187 const wide_int &sgnbit, unsigned int prec)
5189 wide_int bit = wi::one (prec), res;
5190 unsigned int i;
5192 wide_int val = val_in ^ sgnbit;
5193 for (i = 0; i < prec; i++, bit += bit)
5195 res = mask;
5196 if ((res & bit) == 0)
5197 continue;
5198 res = bit - 1;
5199 res = (val + bit).and_not (res);
5200 res &= mask;
5201 if (wi::gtu_p (res, val))
5202 return res ^ sgnbit;
5204 return val ^ sgnbit;
5207 /* Helper for overflow_comparison_p
5209 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
5210 OP1's defining statement to see if it ultimately has the form
5211 OP0 CODE (OP0 PLUS INTEGER_CST)
5213 If so, return TRUE indicating this is an overflow test and store into
5214 *NEW_CST an updated constant that can be used in a narrowed range test.
5216 REVERSED indicates if the comparison was originally:
5218 OP1 CODE' OP0.
5220 This affects how we build the updated constant. */
5222 static bool
5223 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
5224 bool follow_assert_exprs, bool reversed, tree *new_cst)
5226 /* See if this is a relational operation between two SSA_NAMES with
5227 unsigned, overflow wrapping values. If so, check it more deeply. */
5228 if ((code == LT_EXPR || code == LE_EXPR
5229 || code == GE_EXPR || code == GT_EXPR)
5230 && TREE_CODE (op0) == SSA_NAME
5231 && TREE_CODE (op1) == SSA_NAME
5232 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
5233 && TYPE_UNSIGNED (TREE_TYPE (op0))
5234 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
5236 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
5238 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
5239 if (follow_assert_exprs)
5241 while (gimple_assign_single_p (op1_def)
5242 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
5244 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
5245 if (TREE_CODE (op1) != SSA_NAME)
5246 break;
5247 op1_def = SSA_NAME_DEF_STMT (op1);
5251 /* Now look at the defining statement of OP1 to see if it adds
5252 or subtracts a nonzero constant from another operand. */
5253 if (op1_def
5254 && is_gimple_assign (op1_def)
5255 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
5256 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
5257 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
5259 tree target = gimple_assign_rhs1 (op1_def);
5261 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
5262 for one where TARGET appears on the RHS. */
5263 if (follow_assert_exprs)
5265 /* Now see if that "other operand" is op0, following the chain
5266 of ASSERT_EXPRs if necessary. */
5267 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
5268 while (op0 != target
5269 && gimple_assign_single_p (op0_def)
5270 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
5272 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
5273 if (TREE_CODE (op0) != SSA_NAME)
5274 break;
5275 op0_def = SSA_NAME_DEF_STMT (op0);
5279 /* If we did not find our target SSA_NAME, then this is not
5280 an overflow test. */
5281 if (op0 != target)
5282 return false;
5284 tree type = TREE_TYPE (op0);
5285 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
5286 tree inc = gimple_assign_rhs2 (op1_def);
5287 if (reversed)
5288 *new_cst = wide_int_to_tree (type, max + inc);
5289 else
5290 *new_cst = wide_int_to_tree (type, max - inc);
5291 return true;
5294 return false;
5297 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
5298 OP1's defining statement to see if it ultimately has the form
5299 OP0 CODE (OP0 PLUS INTEGER_CST)
5301 If so, return TRUE indicating this is an overflow test and store into
5302 *NEW_CST an updated constant that can be used in a narrowed range test.
5304 These statements are left as-is in the IL to facilitate discovery of
5305 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
5306 the alternate range representation is often useful within VRP. */
5308 static bool
5309 overflow_comparison_p (tree_code code, tree name, tree val,
5310 bool use_equiv_p, tree *new_cst)
5312 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
5313 return true;
5314 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
5315 use_equiv_p, true, new_cst);
5319 /* Try to register an edge assertion for SSA name NAME on edge E for
5320 the condition COND contributing to the conditional jump pointed to by BSI.
5321 Invert the condition COND if INVERT is true. */
5323 static void
5324 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
5325 enum tree_code cond_code,
5326 tree cond_op0, tree cond_op1, bool invert)
5328 tree val;
5329 enum tree_code comp_code;
5331 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5332 cond_op0,
5333 cond_op1,
5334 invert, &comp_code, &val))
5335 return;
5337 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
5338 reachable from E. */
5339 if (live_on_edge (e, name))
5341 tree x;
5342 if (overflow_comparison_p (comp_code, name, val, false, &x))
5344 enum tree_code new_code
5345 = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
5346 ? GT_EXPR : LE_EXPR);
5347 register_new_assert_for (name, name, new_code, x, NULL, e, bsi);
5349 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
5352 /* In the case of NAME <= CST and NAME being defined as
5353 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
5354 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
5355 This catches range and anti-range tests. */
5356 if ((comp_code == LE_EXPR
5357 || comp_code == GT_EXPR)
5358 && TREE_CODE (val) == INTEGER_CST
5359 && TYPE_UNSIGNED (TREE_TYPE (val)))
5361 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5362 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
5364 /* Extract CST2 from the (optional) addition. */
5365 if (is_gimple_assign (def_stmt)
5366 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
5368 name2 = gimple_assign_rhs1 (def_stmt);
5369 cst2 = gimple_assign_rhs2 (def_stmt);
5370 if (TREE_CODE (name2) == SSA_NAME
5371 && TREE_CODE (cst2) == INTEGER_CST)
5372 def_stmt = SSA_NAME_DEF_STMT (name2);
5375 /* Extract NAME2 from the (optional) sign-changing cast. */
5376 if (gimple_assign_cast_p (def_stmt))
5378 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
5379 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5380 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
5381 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
5382 name3 = gimple_assign_rhs1 (def_stmt);
5385 /* If name3 is used later, create an ASSERT_EXPR for it. */
5386 if (name3 != NULL_TREE
5387 && TREE_CODE (name3) == SSA_NAME
5388 && (cst2 == NULL_TREE
5389 || TREE_CODE (cst2) == INTEGER_CST)
5390 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
5391 && live_on_edge (e, name3))
5393 tree tmp;
5395 /* Build an expression for the range test. */
5396 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
5397 if (cst2 != NULL_TREE)
5398 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5400 if (dump_file)
5402 fprintf (dump_file, "Adding assert for ");
5403 print_generic_expr (dump_file, name3, 0);
5404 fprintf (dump_file, " from ");
5405 print_generic_expr (dump_file, tmp, 0);
5406 fprintf (dump_file, "\n");
5409 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
5412 /* If name2 is used later, create an ASSERT_EXPR for it. */
5413 if (name2 != NULL_TREE
5414 && TREE_CODE (name2) == SSA_NAME
5415 && TREE_CODE (cst2) == INTEGER_CST
5416 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5417 && live_on_edge (e, name2))
5419 tree tmp;
5421 /* Build an expression for the range test. */
5422 tmp = name2;
5423 if (TREE_TYPE (name) != TREE_TYPE (name2))
5424 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
5425 if (cst2 != NULL_TREE)
5426 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5428 if (dump_file)
5430 fprintf (dump_file, "Adding assert for ");
5431 print_generic_expr (dump_file, name2, 0);
5432 fprintf (dump_file, " from ");
5433 print_generic_expr (dump_file, tmp, 0);
5434 fprintf (dump_file, "\n");
5437 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
5441 /* In the case of post-in/decrement tests like if (i++) ... and uses
5442 of the in/decremented value on the edge the extra name we want to
5443 assert for is not on the def chain of the name compared. Instead
5444 it is in the set of use stmts.
5445 Similar cases happen for conversions that were simplified through
5446 fold_{sign_changed,widened}_comparison. */
5447 if ((comp_code == NE_EXPR
5448 || comp_code == EQ_EXPR)
5449 && TREE_CODE (val) == INTEGER_CST)
5451 imm_use_iterator ui;
5452 gimple *use_stmt;
5453 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
5455 if (!is_gimple_assign (use_stmt))
5456 continue;
5458 /* Cut off to use-stmts that are dominating the predecessor. */
5459 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
5460 continue;
5462 tree name2 = gimple_assign_lhs (use_stmt);
5463 if (TREE_CODE (name2) != SSA_NAME
5464 || !live_on_edge (e, name2))
5465 continue;
5467 enum tree_code code = gimple_assign_rhs_code (use_stmt);
5468 tree cst;
5469 if (code == PLUS_EXPR
5470 || code == MINUS_EXPR)
5472 cst = gimple_assign_rhs2 (use_stmt);
5473 if (TREE_CODE (cst) != INTEGER_CST)
5474 continue;
5475 cst = int_const_binop (code, val, cst);
5477 else if (CONVERT_EXPR_CODE_P (code))
5479 /* For truncating conversions we cannot record
5480 an inequality. */
5481 if (comp_code == NE_EXPR
5482 && (TYPE_PRECISION (TREE_TYPE (name2))
5483 < TYPE_PRECISION (TREE_TYPE (name))))
5484 continue;
5485 cst = fold_convert (TREE_TYPE (name2), val);
5487 else
5488 continue;
5490 if (TREE_OVERFLOW_P (cst))
5491 cst = drop_tree_overflow (cst);
5492 register_new_assert_for (name2, name2, comp_code, cst,
5493 NULL, e, bsi);
5497 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
5498 && TREE_CODE (val) == INTEGER_CST)
5500 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5501 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
5502 tree val2 = NULL_TREE;
5503 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
5504 wide_int mask = wi::zero (prec);
5505 unsigned int nprec = prec;
5506 enum tree_code rhs_code = ERROR_MARK;
5508 if (is_gimple_assign (def_stmt))
5509 rhs_code = gimple_assign_rhs_code (def_stmt);
5511 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
5512 assert that A != CST1 -+ CST2. */
5513 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
5514 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
5516 tree op0 = gimple_assign_rhs1 (def_stmt);
5517 tree op1 = gimple_assign_rhs2 (def_stmt);
5518 if (TREE_CODE (op0) == SSA_NAME
5519 && TREE_CODE (op1) == INTEGER_CST
5520 && live_on_edge (e, op0))
5522 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
5523 ? MINUS_EXPR : PLUS_EXPR);
5524 op1 = int_const_binop (reverse_op, val, op1);
5525 if (TREE_OVERFLOW (op1))
5526 op1 = drop_tree_overflow (op1);
5527 register_new_assert_for (op0, op0, comp_code, op1, NULL, e, bsi);
5531 /* Add asserts for NAME cmp CST and NAME being defined
5532 as NAME = (int) NAME2. */
5533 if (!TYPE_UNSIGNED (TREE_TYPE (val))
5534 && (comp_code == LE_EXPR || comp_code == LT_EXPR
5535 || comp_code == GT_EXPR || comp_code == GE_EXPR)
5536 && gimple_assign_cast_p (def_stmt))
5538 name2 = gimple_assign_rhs1 (def_stmt);
5539 if (CONVERT_EXPR_CODE_P (rhs_code)
5540 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5541 && TYPE_UNSIGNED (TREE_TYPE (name2))
5542 && prec == TYPE_PRECISION (TREE_TYPE (name2))
5543 && (comp_code == LE_EXPR || comp_code == GT_EXPR
5544 || !tree_int_cst_equal (val,
5545 TYPE_MIN_VALUE (TREE_TYPE (val))))
5546 && live_on_edge (e, name2))
5548 tree tmp, cst;
5549 enum tree_code new_comp_code = comp_code;
5551 cst = fold_convert (TREE_TYPE (name2),
5552 TYPE_MIN_VALUE (TREE_TYPE (val)));
5553 /* Build an expression for the range test. */
5554 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
5555 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
5556 fold_convert (TREE_TYPE (name2), val));
5557 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5559 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
5560 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
5561 build_int_cst (TREE_TYPE (name2), 1));
5564 if (dump_file)
5566 fprintf (dump_file, "Adding assert for ");
5567 print_generic_expr (dump_file, name2, 0);
5568 fprintf (dump_file, " from ");
5569 print_generic_expr (dump_file, tmp, 0);
5570 fprintf (dump_file, "\n");
5573 register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
5574 e, bsi);
5578 /* Add asserts for NAME cmp CST and NAME being defined as
5579 NAME = NAME2 >> CST2.
5581 Extract CST2 from the right shift. */
5582 if (rhs_code == RSHIFT_EXPR)
5584 name2 = gimple_assign_rhs1 (def_stmt);
5585 cst2 = gimple_assign_rhs2 (def_stmt);
5586 if (TREE_CODE (name2) == SSA_NAME
5587 && tree_fits_uhwi_p (cst2)
5588 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5589 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
5590 && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
5591 && live_on_edge (e, name2))
5593 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
5594 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
5597 if (val2 != NULL_TREE
5598 && TREE_CODE (val2) == INTEGER_CST
5599 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
5600 TREE_TYPE (val),
5601 val2, cst2), val))
5603 enum tree_code new_comp_code = comp_code;
5604 tree tmp, new_val;
5606 tmp = name2;
5607 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
5609 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5611 tree type = build_nonstandard_integer_type (prec, 1);
5612 tmp = build1 (NOP_EXPR, type, name2);
5613 val2 = fold_convert (type, val2);
5615 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
5616 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
5617 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
5619 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5621 wide_int minval
5622 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5623 new_val = val2;
5624 if (minval == new_val)
5625 new_val = NULL_TREE;
5627 else
5629 wide_int maxval
5630 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5631 mask |= val2;
5632 if (mask == maxval)
5633 new_val = NULL_TREE;
5634 else
5635 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
5638 if (new_val)
5640 if (dump_file)
5642 fprintf (dump_file, "Adding assert for ");
5643 print_generic_expr (dump_file, name2, 0);
5644 fprintf (dump_file, " from ");
5645 print_generic_expr (dump_file, tmp, 0);
5646 fprintf (dump_file, "\n");
5649 register_new_assert_for (name2, tmp, new_comp_code, new_val,
5650 NULL, e, bsi);
5654 /* Add asserts for NAME cmp CST and NAME being defined as
5655 NAME = NAME2 & CST2.
5657 Extract CST2 from the and.
5659 Also handle
5660 NAME = (unsigned) NAME2;
5661 casts where NAME's type is unsigned and has smaller precision
5662 than NAME2's type as if it was NAME = NAME2 & MASK. */
5663 names[0] = NULL_TREE;
5664 names[1] = NULL_TREE;
5665 cst2 = NULL_TREE;
5666 if (rhs_code == BIT_AND_EXPR
5667 || (CONVERT_EXPR_CODE_P (rhs_code)
5668 && INTEGRAL_TYPE_P (TREE_TYPE (val))
5669 && TYPE_UNSIGNED (TREE_TYPE (val))
5670 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5671 > prec))
5673 name2 = gimple_assign_rhs1 (def_stmt);
5674 if (rhs_code == BIT_AND_EXPR)
5675 cst2 = gimple_assign_rhs2 (def_stmt);
5676 else
5678 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
5679 nprec = TYPE_PRECISION (TREE_TYPE (name2));
5681 if (TREE_CODE (name2) == SSA_NAME
5682 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5683 && TREE_CODE (cst2) == INTEGER_CST
5684 && !integer_zerop (cst2)
5685 && (nprec > 1
5686 || TYPE_UNSIGNED (TREE_TYPE (val))))
5688 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
5689 if (gimple_assign_cast_p (def_stmt2))
5691 names[1] = gimple_assign_rhs1 (def_stmt2);
5692 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
5693 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
5694 || (TYPE_PRECISION (TREE_TYPE (name2))
5695 != TYPE_PRECISION (TREE_TYPE (names[1])))
5696 || !live_on_edge (e, names[1]))
5697 names[1] = NULL_TREE;
5699 if (live_on_edge (e, name2))
5700 names[0] = name2;
5703 if (names[0] || names[1])
5705 wide_int minv, maxv, valv, cst2v;
5706 wide_int tem, sgnbit;
5707 bool valid_p = false, valn, cst2n;
5708 enum tree_code ccode = comp_code;
5710 valv = wide_int::from (val, nprec, UNSIGNED);
5711 cst2v = wide_int::from (cst2, nprec, UNSIGNED);
5712 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
5713 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
5714 /* If CST2 doesn't have most significant bit set,
5715 but VAL is negative, we have comparison like
5716 if ((x & 0x123) > -4) (always true). Just give up. */
5717 if (!cst2n && valn)
5718 ccode = ERROR_MARK;
5719 if (cst2n)
5720 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5721 else
5722 sgnbit = wi::zero (nprec);
5723 minv = valv & cst2v;
5724 switch (ccode)
5726 case EQ_EXPR:
5727 /* Minimum unsigned value for equality is VAL & CST2
5728 (should be equal to VAL, otherwise we probably should
5729 have folded the comparison into false) and
5730 maximum unsigned value is VAL | ~CST2. */
5731 maxv = valv | ~cst2v;
5732 valid_p = true;
5733 break;
5735 case NE_EXPR:
5736 tem = valv | ~cst2v;
5737 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
5738 if (valv == 0)
5740 cst2n = false;
5741 sgnbit = wi::zero (nprec);
5742 goto gt_expr;
5744 /* If (VAL | ~CST2) is all ones, handle it as
5745 (X & CST2) < VAL. */
5746 if (tem == -1)
5748 cst2n = false;
5749 valn = false;
5750 sgnbit = wi::zero (nprec);
5751 goto lt_expr;
5753 if (!cst2n && wi::neg_p (cst2v))
5754 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5755 if (sgnbit != 0)
5757 if (valv == sgnbit)
5759 cst2n = true;
5760 valn = true;
5761 goto gt_expr;
5763 if (tem == wi::mask (nprec - 1, false, nprec))
5765 cst2n = true;
5766 goto lt_expr;
5768 if (!cst2n)
5769 sgnbit = wi::zero (nprec);
5771 break;
5773 case GE_EXPR:
5774 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5775 is VAL and maximum unsigned value is ~0. For signed
5776 comparison, if CST2 doesn't have most significant bit
5777 set, handle it similarly. If CST2 has MSB set,
5778 the minimum is the same, and maximum is ~0U/2. */
5779 if (minv != valv)
5781 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5782 VAL. */
5783 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5784 if (minv == valv)
5785 break;
5787 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5788 valid_p = true;
5789 break;
5791 case GT_EXPR:
5792 gt_expr:
5793 /* Find out smallest MINV where MINV > VAL
5794 && (MINV & CST2) == MINV, if any. If VAL is signed and
5795 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
5796 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5797 if (minv == valv)
5798 break;
5799 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5800 valid_p = true;
5801 break;
5803 case LE_EXPR:
5804 /* Minimum unsigned value for <= is 0 and maximum
5805 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5806 Otherwise, find smallest VAL2 where VAL2 > VAL
5807 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5808 as maximum.
5809 For signed comparison, if CST2 doesn't have most
5810 significant bit set, handle it similarly. If CST2 has
5811 MSB set, the maximum is the same and minimum is INT_MIN. */
5812 if (minv == valv)
5813 maxv = valv;
5814 else
5816 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5817 if (maxv == valv)
5818 break;
5819 maxv -= 1;
5821 maxv |= ~cst2v;
5822 minv = sgnbit;
5823 valid_p = true;
5824 break;
5826 case LT_EXPR:
5827 lt_expr:
5828 /* Minimum unsigned value for < is 0 and maximum
5829 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5830 Otherwise, find smallest VAL2 where VAL2 > VAL
5831 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5832 as maximum.
5833 For signed comparison, if CST2 doesn't have most
5834 significant bit set, handle it similarly. If CST2 has
5835 MSB set, the maximum is the same and minimum is INT_MIN. */
5836 if (minv == valv)
5838 if (valv == sgnbit)
5839 break;
5840 maxv = valv;
5842 else
5844 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5845 if (maxv == valv)
5846 break;
5848 maxv -= 1;
5849 maxv |= ~cst2v;
5850 minv = sgnbit;
5851 valid_p = true;
5852 break;
5854 default:
5855 break;
5857 if (valid_p
5858 && (maxv - minv) != -1)
5860 tree tmp, new_val, type;
5861 int i;
5863 for (i = 0; i < 2; i++)
5864 if (names[i])
5866 wide_int maxv2 = maxv;
5867 tmp = names[i];
5868 type = TREE_TYPE (names[i]);
5869 if (!TYPE_UNSIGNED (type))
5871 type = build_nonstandard_integer_type (nprec, 1);
5872 tmp = build1 (NOP_EXPR, type, names[i]);
5874 if (minv != 0)
5876 tmp = build2 (PLUS_EXPR, type, tmp,
5877 wide_int_to_tree (type, -minv));
5878 maxv2 = maxv - minv;
5880 new_val = wide_int_to_tree (type, maxv2);
5882 if (dump_file)
5884 fprintf (dump_file, "Adding assert for ");
5885 print_generic_expr (dump_file, names[i], 0);
5886 fprintf (dump_file, " from ");
5887 print_generic_expr (dump_file, tmp, 0);
5888 fprintf (dump_file, "\n");
5891 register_new_assert_for (names[i], tmp, LE_EXPR,
5892 new_val, NULL, e, bsi);
5899 /* OP is an operand of a truth value expression which is known to have
5900 a particular value. Register any asserts for OP and for any
5901 operands in OP's defining statement.
5903 If CODE is EQ_EXPR, then we want to register OP is zero (false),
5904 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
5906 static void
5907 register_edge_assert_for_1 (tree op, enum tree_code code,
5908 edge e, gimple_stmt_iterator bsi)
5910 gimple *op_def;
5911 tree val;
5912 enum tree_code rhs_code;
5914 /* We only care about SSA_NAMEs. */
5915 if (TREE_CODE (op) != SSA_NAME)
5916 return;
5918 /* We know that OP will have a zero or nonzero value. If OP is used
5919 more than once go ahead and register an assert for OP. */
5920 if (live_on_edge (e, op))
5922 val = build_int_cst (TREE_TYPE (op), 0);
5923 register_new_assert_for (op, op, code, val, NULL, e, bsi);
5926 /* Now look at how OP is set. If it's set from a comparison,
5927 a truth operation or some bit operations, then we may be able
5928 to register information about the operands of that assignment. */
5929 op_def = SSA_NAME_DEF_STMT (op);
5930 if (gimple_code (op_def) != GIMPLE_ASSIGN)
5931 return;
5933 rhs_code = gimple_assign_rhs_code (op_def);
5935 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5937 bool invert = (code == EQ_EXPR ? true : false);
5938 tree op0 = gimple_assign_rhs1 (op_def);
5939 tree op1 = gimple_assign_rhs2 (op_def);
5941 if (TREE_CODE (op0) == SSA_NAME)
5942 register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, invert);
5943 if (TREE_CODE (op1) == SSA_NAME)
5944 register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, invert);
5946 else if ((code == NE_EXPR
5947 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5948 || (code == EQ_EXPR
5949 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5951 /* Recurse on each operand. */
5952 tree op0 = gimple_assign_rhs1 (op_def);
5953 tree op1 = gimple_assign_rhs2 (op_def);
5954 if (TREE_CODE (op0) == SSA_NAME
5955 && has_single_use (op0))
5956 register_edge_assert_for_1 (op0, code, e, bsi);
5957 if (TREE_CODE (op1) == SSA_NAME
5958 && has_single_use (op1))
5959 register_edge_assert_for_1 (op1, code, e, bsi);
5961 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5962 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5964 /* Recurse, flipping CODE. */
5965 code = invert_tree_comparison (code, false);
5966 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
5968 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5970 /* Recurse through the copy. */
5971 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
5973 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5975 /* Recurse through the type conversion, unless it is a narrowing
5976 conversion or conversion from non-integral type. */
5977 tree rhs = gimple_assign_rhs1 (op_def);
5978 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
5979 && (TYPE_PRECISION (TREE_TYPE (rhs))
5980 <= TYPE_PRECISION (TREE_TYPE (op))))
5981 register_edge_assert_for_1 (rhs, code, e, bsi);
5985 /* Try to register an edge assertion for SSA name NAME on edge E for
5986 the condition COND contributing to the conditional jump pointed to by
5987 SI. */
5989 static void
5990 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
5991 enum tree_code cond_code, tree cond_op0,
5992 tree cond_op1)
5994 tree val;
5995 enum tree_code comp_code;
5996 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
5998 /* Do not attempt to infer anything in names that flow through
5999 abnormal edges. */
6000 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
6001 return;
6003 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
6004 cond_op0, cond_op1,
6005 is_else_edge,
6006 &comp_code, &val))
6007 return;
6009 /* Register ASSERT_EXPRs for name. */
6010 register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
6011 cond_op1, is_else_edge);
6014 /* If COND is effectively an equality test of an SSA_NAME against
6015 the value zero or one, then we may be able to assert values
6016 for SSA_NAMEs which flow into COND. */
6018 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
6019 statement of NAME we can assert both operands of the BIT_AND_EXPR
6020 have nonzero value. */
6021 if (((comp_code == EQ_EXPR && integer_onep (val))
6022 || (comp_code == NE_EXPR && integer_zerop (val))))
6024 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
6026 if (is_gimple_assign (def_stmt)
6027 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
6029 tree op0 = gimple_assign_rhs1 (def_stmt);
6030 tree op1 = gimple_assign_rhs2 (def_stmt);
6031 register_edge_assert_for_1 (op0, NE_EXPR, e, si);
6032 register_edge_assert_for_1 (op1, NE_EXPR, e, si);
6036 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
6037 statement of NAME we can assert both operands of the BIT_IOR_EXPR
6038 have zero value. */
6039 if (((comp_code == EQ_EXPR && integer_zerop (val))
6040 || (comp_code == NE_EXPR && integer_onep (val))))
6042 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
6044 /* For BIT_IOR_EXPR only if NAME == 0 both operands have
6045 necessarily zero value, or if type-precision is one. */
6046 if (is_gimple_assign (def_stmt)
6047 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
6048 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
6049 || comp_code == EQ_EXPR)))
6051 tree op0 = gimple_assign_rhs1 (def_stmt);
6052 tree op1 = gimple_assign_rhs2 (def_stmt);
6053 register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
6054 register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
6060 /* Determine whether the outgoing edges of BB should receive an
6061 ASSERT_EXPR for each of the operands of BB's LAST statement.
6062 The last statement of BB must be a COND_EXPR.
6064 If any of the sub-graphs rooted at BB have an interesting use of
6065 the predicate operands, an assert location node is added to the
6066 list of assertions for the corresponding operands. */
6068 static void
6069 find_conditional_asserts (basic_block bb, gcond *last)
6071 gimple_stmt_iterator bsi;
6072 tree op;
6073 edge_iterator ei;
6074 edge e;
6075 ssa_op_iter iter;
6077 bsi = gsi_for_stmt (last);
6079 /* Look for uses of the operands in each of the sub-graphs
6080 rooted at BB. We need to check each of the outgoing edges
6081 separately, so that we know what kind of ASSERT_EXPR to
6082 insert. */
6083 FOR_EACH_EDGE (e, ei, bb->succs)
6085 if (e->dest == bb)
6086 continue;
6088 /* Register the necessary assertions for each operand in the
6089 conditional predicate. */
6090 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
6091 register_edge_assert_for (op, e, bsi,
6092 gimple_cond_code (last),
6093 gimple_cond_lhs (last),
6094 gimple_cond_rhs (last));
6098 struct case_info
6100 tree expr;
6101 basic_block bb;
6104 /* Compare two case labels sorting first by the destination bb index
6105 and then by the case value. */
6107 static int
6108 compare_case_labels (const void *p1, const void *p2)
6110 const struct case_info *ci1 = (const struct case_info *) p1;
6111 const struct case_info *ci2 = (const struct case_info *) p2;
6112 int idx1 = ci1->bb->index;
6113 int idx2 = ci2->bb->index;
6115 if (idx1 < idx2)
6116 return -1;
6117 else if (idx1 == idx2)
6119 /* Make sure the default label is first in a group. */
6120 if (!CASE_LOW (ci1->expr))
6121 return -1;
6122 else if (!CASE_LOW (ci2->expr))
6123 return 1;
6124 else
6125 return tree_int_cst_compare (CASE_LOW (ci1->expr),
6126 CASE_LOW (ci2->expr));
6128 else
6129 return 1;
6132 /* Determine whether the outgoing edges of BB should receive an
6133 ASSERT_EXPR for each of the operands of BB's LAST statement.
6134 The last statement of BB must be a SWITCH_EXPR.
6136 If any of the sub-graphs rooted at BB have an interesting use of
6137 the predicate operands, an assert location node is added to the
6138 list of assertions for the corresponding operands. */
6140 static void
6141 find_switch_asserts (basic_block bb, gswitch *last)
6143 gimple_stmt_iterator bsi;
6144 tree op;
6145 edge e;
6146 struct case_info *ci;
6147 size_t n = gimple_switch_num_labels (last);
6148 #if GCC_VERSION >= 4000
6149 unsigned int idx;
6150 #else
6151 /* Work around GCC 3.4 bug (PR 37086). */
6152 volatile unsigned int idx;
6153 #endif
6155 bsi = gsi_for_stmt (last);
6156 op = gimple_switch_index (last);
6157 if (TREE_CODE (op) != SSA_NAME)
6158 return;
6160 /* Build a vector of case labels sorted by destination label. */
6161 ci = XNEWVEC (struct case_info, n);
6162 for (idx = 0; idx < n; ++idx)
6164 ci[idx].expr = gimple_switch_label (last, idx);
6165 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
6167 edge default_edge = find_edge (bb, ci[0].bb);
6168 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
6170 for (idx = 0; idx < n; ++idx)
6172 tree min, max;
6173 tree cl = ci[idx].expr;
6174 basic_block cbb = ci[idx].bb;
6176 min = CASE_LOW (cl);
6177 max = CASE_HIGH (cl);
6179 /* If there are multiple case labels with the same destination
6180 we need to combine them to a single value range for the edge. */
6181 if (idx + 1 < n && cbb == ci[idx + 1].bb)
6183 /* Skip labels until the last of the group. */
6184 do {
6185 ++idx;
6186 } while (idx < n && cbb == ci[idx].bb);
6187 --idx;
6189 /* Pick up the maximum of the case label range. */
6190 if (CASE_HIGH (ci[idx].expr))
6191 max = CASE_HIGH (ci[idx].expr);
6192 else
6193 max = CASE_LOW (ci[idx].expr);
6196 /* Can't extract a useful assertion out of a range that includes the
6197 default label. */
6198 if (min == NULL_TREE)
6199 continue;
6201 /* Find the edge to register the assert expr on. */
6202 e = find_edge (bb, cbb);
6204 /* Register the necessary assertions for the operand in the
6205 SWITCH_EXPR. */
6206 register_edge_assert_for (op, e, bsi,
6207 max ? GE_EXPR : EQ_EXPR,
6208 op, fold_convert (TREE_TYPE (op), min));
6209 if (max)
6210 register_edge_assert_for (op, e, bsi, LE_EXPR, op,
6211 fold_convert (TREE_TYPE (op), max));
6214 XDELETEVEC (ci);
6216 if (!live_on_edge (default_edge, op))
6217 return;
6219 /* Now register along the default label assertions that correspond to the
6220 anti-range of each label. */
6221 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
6222 if (insertion_limit == 0)
6223 return;
6225 /* We can't do this if the default case shares a label with another case. */
6226 tree default_cl = gimple_switch_default_label (last);
6227 for (idx = 1; idx < n; idx++)
6229 tree min, max;
6230 tree cl = gimple_switch_label (last, idx);
6231 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
6232 continue;
6234 min = CASE_LOW (cl);
6235 max = CASE_HIGH (cl);
6237 /* Combine contiguous case ranges to reduce the number of assertions
6238 to insert. */
6239 for (idx = idx + 1; idx < n; idx++)
6241 tree next_min, next_max;
6242 tree next_cl = gimple_switch_label (last, idx);
6243 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
6244 break;
6246 next_min = CASE_LOW (next_cl);
6247 next_max = CASE_HIGH (next_cl);
6249 wide_int difference = wi::sub (next_min, max ? max : min);
6250 if (wi::eq_p (difference, 1))
6251 max = next_max ? next_max : next_min;
6252 else
6253 break;
6255 idx--;
6257 if (max == NULL_TREE)
6259 /* Register the assertion OP != MIN. */
6260 min = fold_convert (TREE_TYPE (op), min);
6261 register_edge_assert_for (op, default_edge, bsi, NE_EXPR, op, min);
6263 else
6265 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
6266 which will give OP the anti-range ~[MIN,MAX]. */
6267 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
6268 min = fold_convert (TREE_TYPE (uop), min);
6269 max = fold_convert (TREE_TYPE (uop), max);
6271 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
6272 tree rhs = int_const_binop (MINUS_EXPR, max, min);
6273 register_new_assert_for (op, lhs, GT_EXPR, rhs,
6274 NULL, default_edge, bsi);
6277 if (--insertion_limit == 0)
6278 break;
6283 /* Traverse all the statements in block BB looking for statements that
6284 may generate useful assertions for the SSA names in their operand.
6285 If a statement produces a useful assertion A for name N_i, then the
6286 list of assertions already generated for N_i is scanned to
6287 determine if A is actually needed.
6289 If N_i already had the assertion A at a location dominating the
6290 current location, then nothing needs to be done. Otherwise, the
6291 new location for A is recorded instead.
6293 1- For every statement S in BB, all the variables used by S are
6294 added to bitmap FOUND_IN_SUBGRAPH.
6296 2- If statement S uses an operand N in a way that exposes a known
6297 value range for N, then if N was not already generated by an
6298 ASSERT_EXPR, create a new assert location for N. For instance,
6299 if N is a pointer and the statement dereferences it, we can
6300 assume that N is not NULL.
6302 3- COND_EXPRs are a special case of #2. We can derive range
6303 information from the predicate but need to insert different
6304 ASSERT_EXPRs for each of the sub-graphs rooted at the
6305 conditional block. If the last statement of BB is a conditional
6306 expression of the form 'X op Y', then
6308 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
6310 b) If the conditional is the only entry point to the sub-graph
6311 corresponding to the THEN_CLAUSE, recurse into it. On
6312 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
6313 an ASSERT_EXPR is added for the corresponding variable.
6315 c) Repeat step (b) on the ELSE_CLAUSE.
6317 d) Mark X and Y in FOUND_IN_SUBGRAPH.
6319 For instance,
6321 if (a == 9)
6322 b = a;
6323 else
6324 b = c + 1;
6326 In this case, an assertion on the THEN clause is useful to
6327 determine that 'a' is always 9 on that edge. However, an assertion
6328 on the ELSE clause would be unnecessary.
6330 4- If BB does not end in a conditional expression, then we recurse
6331 into BB's dominator children.
6333 At the end of the recursive traversal, every SSA name will have a
6334 list of locations where ASSERT_EXPRs should be added. When a new
6335 location for name N is found, it is registered by calling
6336 register_new_assert_for. That function keeps track of all the
6337 registered assertions to prevent adding unnecessary assertions.
6338 For instance, if a pointer P_4 is dereferenced more than once in a
6339 dominator tree, only the location dominating all the dereference of
6340 P_4 will receive an ASSERT_EXPR. */
6342 static void
6343 find_assert_locations_1 (basic_block bb, sbitmap live)
6345 gimple *last;
6347 last = last_stmt (bb);
6349 /* If BB's last statement is a conditional statement involving integer
6350 operands, determine if we need to add ASSERT_EXPRs. */
6351 if (last
6352 && gimple_code (last) == GIMPLE_COND
6353 && !fp_predicate (last)
6354 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
6355 find_conditional_asserts (bb, as_a <gcond *> (last));
6357 /* If BB's last statement is a switch statement involving integer
6358 operands, determine if we need to add ASSERT_EXPRs. */
6359 if (last
6360 && gimple_code (last) == GIMPLE_SWITCH
6361 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
6362 find_switch_asserts (bb, as_a <gswitch *> (last));
6364 /* Traverse all the statements in BB marking used names and looking
6365 for statements that may infer assertions for their used operands. */
6366 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
6367 gsi_prev (&si))
6369 gimple *stmt;
6370 tree op;
6371 ssa_op_iter i;
6373 stmt = gsi_stmt (si);
6375 if (is_gimple_debug (stmt))
6376 continue;
6378 /* See if we can derive an assertion for any of STMT's operands. */
6379 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6381 tree value;
6382 enum tree_code comp_code;
6384 /* If op is not live beyond this stmt, do not bother to insert
6385 asserts for it. */
6386 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
6387 continue;
6389 /* If OP is used in such a way that we can infer a value
6390 range for it, and we don't find a previous assertion for
6391 it, create a new assertion location node for OP. */
6392 if (infer_value_range (stmt, op, &comp_code, &value))
6394 /* If we are able to infer a nonzero value range for OP,
6395 then walk backwards through the use-def chain to see if OP
6396 was set via a typecast.
6398 If so, then we can also infer a nonzero value range
6399 for the operand of the NOP_EXPR. */
6400 if (comp_code == NE_EXPR && integer_zerop (value))
6402 tree t = op;
6403 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
6405 while (is_gimple_assign (def_stmt)
6406 && CONVERT_EXPR_CODE_P
6407 (gimple_assign_rhs_code (def_stmt))
6408 && TREE_CODE
6409 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
6410 && POINTER_TYPE_P
6411 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
6413 t = gimple_assign_rhs1 (def_stmt);
6414 def_stmt = SSA_NAME_DEF_STMT (t);
6416 /* Note we want to register the assert for the
6417 operand of the NOP_EXPR after SI, not after the
6418 conversion. */
6419 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
6420 register_new_assert_for (t, t, comp_code, value,
6421 bb, NULL, si);
6425 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
6429 /* Update live. */
6430 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6431 bitmap_set_bit (live, SSA_NAME_VERSION (op));
6432 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
6433 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
6436 /* Traverse all PHI nodes in BB, updating live. */
6437 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
6438 gsi_next (&si))
6440 use_operand_p arg_p;
6441 ssa_op_iter i;
6442 gphi *phi = si.phi ();
6443 tree res = gimple_phi_result (phi);
6445 if (virtual_operand_p (res))
6446 continue;
6448 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
6450 tree arg = USE_FROM_PTR (arg_p);
6451 if (TREE_CODE (arg) == SSA_NAME)
6452 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
6455 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
6459 /* Do an RPO walk over the function computing SSA name liveness
6460 on-the-fly and deciding on assert expressions to insert. */
6462 static void
6463 find_assert_locations (void)
6465 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6466 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6467 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
6468 int rpo_cnt, i;
6470 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
6471 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
6472 for (i = 0; i < rpo_cnt; ++i)
6473 bb_rpo[rpo[i]] = i;
6475 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
6476 the order we compute liveness and insert asserts we otherwise
6477 fail to insert asserts into the loop latch. */
6478 loop_p loop;
6479 FOR_EACH_LOOP (loop, 0)
6481 i = loop->latch->index;
6482 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
6483 for (gphi_iterator gsi = gsi_start_phis (loop->header);
6484 !gsi_end_p (gsi); gsi_next (&gsi))
6486 gphi *phi = gsi.phi ();
6487 if (virtual_operand_p (gimple_phi_result (phi)))
6488 continue;
6489 tree arg = gimple_phi_arg_def (phi, j);
6490 if (TREE_CODE (arg) == SSA_NAME)
6492 if (live[i] == NULL)
6494 live[i] = sbitmap_alloc (num_ssa_names);
6495 bitmap_clear (live[i]);
6497 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
6502 for (i = rpo_cnt - 1; i >= 0; --i)
6504 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
6505 edge e;
6506 edge_iterator ei;
6508 if (!live[rpo[i]])
6510 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
6511 bitmap_clear (live[rpo[i]]);
6514 /* Process BB and update the live information with uses in
6515 this block. */
6516 find_assert_locations_1 (bb, live[rpo[i]]);
6518 /* Merge liveness into the predecessor blocks and free it. */
6519 if (!bitmap_empty_p (live[rpo[i]]))
6521 int pred_rpo = i;
6522 FOR_EACH_EDGE (e, ei, bb->preds)
6524 int pred = e->src->index;
6525 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
6526 continue;
6528 if (!live[pred])
6530 live[pred] = sbitmap_alloc (num_ssa_names);
6531 bitmap_clear (live[pred]);
6533 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
6535 if (bb_rpo[pred] < pred_rpo)
6536 pred_rpo = bb_rpo[pred];
6539 /* Record the RPO number of the last visited block that needs
6540 live information from this block. */
6541 last_rpo[rpo[i]] = pred_rpo;
6543 else
6545 sbitmap_free (live[rpo[i]]);
6546 live[rpo[i]] = NULL;
6549 /* We can free all successors live bitmaps if all their
6550 predecessors have been visited already. */
6551 FOR_EACH_EDGE (e, ei, bb->succs)
6552 if (last_rpo[e->dest->index] == i
6553 && live[e->dest->index])
6555 sbitmap_free (live[e->dest->index]);
6556 live[e->dest->index] = NULL;
6560 XDELETEVEC (rpo);
6561 XDELETEVEC (bb_rpo);
6562 XDELETEVEC (last_rpo);
6563 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
6564 if (live[i])
6565 sbitmap_free (live[i]);
6566 XDELETEVEC (live);
6569 /* Create an ASSERT_EXPR for NAME and insert it in the location
6570 indicated by LOC. Return true if we made any edge insertions. */
6572 static bool
6573 process_assert_insertions_for (tree name, assert_locus *loc)
6575 /* Build the comparison expression NAME_i COMP_CODE VAL. */
6576 gimple *stmt;
6577 tree cond;
6578 gimple *assert_stmt;
6579 edge_iterator ei;
6580 edge e;
6582 /* If we have X <=> X do not insert an assert expr for that. */
6583 if (loc->expr == loc->val)
6584 return false;
6586 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
6587 assert_stmt = build_assert_expr_for (cond, name);
6588 if (loc->e)
6590 /* We have been asked to insert the assertion on an edge. This
6591 is used only by COND_EXPR and SWITCH_EXPR assertions. */
6592 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
6593 || (gimple_code (gsi_stmt (loc->si))
6594 == GIMPLE_SWITCH));
6596 gsi_insert_on_edge (loc->e, assert_stmt);
6597 return true;
6600 /* If the stmt iterator points at the end then this is an insertion
6601 at the beginning of a block. */
6602 if (gsi_end_p (loc->si))
6604 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
6605 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
6606 return false;
6609 /* Otherwise, we can insert right after LOC->SI iff the
6610 statement must not be the last statement in the block. */
6611 stmt = gsi_stmt (loc->si);
6612 if (!stmt_ends_bb_p (stmt))
6614 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
6615 return false;
6618 /* If STMT must be the last statement in BB, we can only insert new
6619 assertions on the non-abnormal edge out of BB. Note that since
6620 STMT is not control flow, there may only be one non-abnormal/eh edge
6621 out of BB. */
6622 FOR_EACH_EDGE (e, ei, loc->bb->succs)
6623 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
6625 gsi_insert_on_edge (e, assert_stmt);
6626 return true;
6629 gcc_unreachable ();
6632 /* Qsort helper for sorting assert locations. */
6634 static int
6635 compare_assert_loc (const void *pa, const void *pb)
6637 assert_locus * const a = *(assert_locus * const *)pa;
6638 assert_locus * const b = *(assert_locus * const *)pb;
6639 if (! a->e && b->e)
6640 return 1;
6641 else if (a->e && ! b->e)
6642 return -1;
6644 /* Sort after destination index. */
6645 if (! a->e && ! b->e)
6647 else if (a->e->dest->index > b->e->dest->index)
6648 return 1;
6649 else if (a->e->dest->index < b->e->dest->index)
6650 return -1;
6652 /* Sort after comp_code. */
6653 if (a->comp_code > b->comp_code)
6654 return 1;
6655 else if (a->comp_code < b->comp_code)
6656 return -1;
6658 /* Break the tie using hashing and source/bb index. */
6659 hashval_t ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
6660 hashval_t hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
6661 if (ha == hb)
6662 return (a->e && b->e
6663 ? a->e->src->index - b->e->src->index
6664 : a->bb->index - b->bb->index);
6665 return ha - hb;
6668 /* Process all the insertions registered for every name N_i registered
6669 in NEED_ASSERT_FOR. The list of assertions to be inserted are
6670 found in ASSERTS_FOR[i]. */
6672 static void
6673 process_assert_insertions (void)
6675 unsigned i;
6676 bitmap_iterator bi;
6677 bool update_edges_p = false;
6678 int num_asserts = 0;
6680 if (dump_file && (dump_flags & TDF_DETAILS))
6681 dump_all_asserts (dump_file);
6683 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
6685 assert_locus *loc = asserts_for[i];
6686 gcc_assert (loc);
6688 auto_vec<assert_locus *, 16> asserts;
6689 for (; loc; loc = loc->next)
6690 asserts.safe_push (loc);
6691 asserts.qsort (compare_assert_loc);
6693 /* Push down common asserts to successors and remove redundant ones. */
6694 unsigned ecnt = 0;
6695 assert_locus *common = NULL;
6696 unsigned commonj = 0;
6697 for (unsigned j = 0; j < asserts.length (); ++j)
6699 loc = asserts[j];
6700 if (! loc->e)
6701 common = NULL;
6702 else if (! common
6703 || loc->e->dest != common->e->dest
6704 || loc->comp_code != common->comp_code
6705 || ! operand_equal_p (loc->val, common->val, 0)
6706 || ! operand_equal_p (loc->expr, common->expr, 0))
6708 commonj = j;
6709 common = loc;
6710 ecnt = 1;
6712 else if (loc->e == asserts[j-1]->e)
6714 /* Remove duplicate asserts. */
6715 if (commonj == j - 1)
6717 commonj = j;
6718 common = loc;
6720 free (asserts[j-1]);
6721 asserts[j-1] = NULL;
6723 else
6725 ecnt++;
6726 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
6728 /* We have the same assertion on all incoming edges of a BB.
6729 Insert it at the beginning of that block. */
6730 loc->bb = loc->e->dest;
6731 loc->e = NULL;
6732 loc->si = gsi_none ();
6733 common = NULL;
6734 /* Clear asserts commoned. */
6735 for (; commonj != j; ++commonj)
6736 if (asserts[commonj])
6738 free (asserts[commonj]);
6739 asserts[commonj] = NULL;
6745 for (unsigned j = 0; j < asserts.length (); ++j)
6747 loc = asserts[j];
6748 if (! loc)
6749 continue;
6750 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
6751 num_asserts++;
6752 free (loc);
6756 if (update_edges_p)
6757 gsi_commit_edge_inserts ();
6759 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
6760 num_asserts);
6764 /* Traverse the flowgraph looking for conditional jumps to insert range
6765 expressions. These range expressions are meant to provide information
6766 to optimizations that need to reason in terms of value ranges. They
6767 will not be expanded into RTL. For instance, given:
6769 x = ...
6770 y = ...
6771 if (x < y)
6772 y = x - 2;
6773 else
6774 x = y + 3;
6776 this pass will transform the code into:
6778 x = ...
6779 y = ...
6780 if (x < y)
6782 x = ASSERT_EXPR <x, x < y>
6783 y = x - 2
6785 else
6787 y = ASSERT_EXPR <y, x >= y>
6788 x = y + 3
6791 The idea is that once copy and constant propagation have run, other
6792 optimizations will be able to determine what ranges of values can 'x'
6793 take in different paths of the code, simply by checking the reaching
6794 definition of 'x'. */
6796 static void
6797 insert_range_assertions (void)
6799 need_assert_for = BITMAP_ALLOC (NULL);
6800 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
6802 calculate_dominance_info (CDI_DOMINATORS);
6804 find_assert_locations ();
6805 if (!bitmap_empty_p (need_assert_for))
6807 process_assert_insertions ();
6808 update_ssa (TODO_update_ssa_no_phi);
6811 if (dump_file && (dump_flags & TDF_DETAILS))
6813 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
6814 dump_function_to_file (current_function_decl, dump_file, dump_flags);
6817 free (asserts_for);
6818 BITMAP_FREE (need_assert_for);
6821 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
6822 and "struct" hacks. If VRP can determine that the
6823 array subscript is a constant, check if it is outside valid
6824 range. If the array subscript is a RANGE, warn if it is
6825 non-overlapping with valid range.
6826 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
6828 static void
6829 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
6831 value_range *vr = NULL;
6832 tree low_sub, up_sub;
6833 tree low_bound, up_bound, up_bound_p1;
6835 if (TREE_NO_WARNING (ref))
6836 return;
6838 low_sub = up_sub = TREE_OPERAND (ref, 1);
6839 up_bound = array_ref_up_bound (ref);
6841 /* Can not check flexible arrays. */
6842 if (!up_bound
6843 || TREE_CODE (up_bound) != INTEGER_CST)
6844 return;
6846 /* Accesses to trailing arrays via pointers may access storage
6847 beyond the types array bounds. */
6848 if (warn_array_bounds < 2
6849 && array_at_struct_end_p (ref))
6850 return;
6852 low_bound = array_ref_low_bound (ref);
6853 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
6854 build_int_cst (TREE_TYPE (up_bound), 1));
6856 /* Empty array. */
6857 if (tree_int_cst_equal (low_bound, up_bound_p1))
6859 warning_at (location, OPT_Warray_bounds,
6860 "array subscript is above array bounds");
6861 TREE_NO_WARNING (ref) = 1;
6864 if (TREE_CODE (low_sub) == SSA_NAME)
6866 vr = get_value_range (low_sub);
6867 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
6869 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
6870 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
6874 if (vr && vr->type == VR_ANTI_RANGE)
6876 if (TREE_CODE (up_sub) == INTEGER_CST
6877 && (ignore_off_by_one
6878 ? tree_int_cst_lt (up_bound, up_sub)
6879 : tree_int_cst_le (up_bound, up_sub))
6880 && TREE_CODE (low_sub) == INTEGER_CST
6881 && tree_int_cst_le (low_sub, low_bound))
6883 warning_at (location, OPT_Warray_bounds,
6884 "array subscript is outside array bounds");
6885 TREE_NO_WARNING (ref) = 1;
6888 else if (TREE_CODE (up_sub) == INTEGER_CST
6889 && (ignore_off_by_one
6890 ? !tree_int_cst_le (up_sub, up_bound_p1)
6891 : !tree_int_cst_le (up_sub, up_bound)))
6893 if (dump_file && (dump_flags & TDF_DETAILS))
6895 fprintf (dump_file, "Array bound warning for ");
6896 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6897 fprintf (dump_file, "\n");
6899 warning_at (location, OPT_Warray_bounds,
6900 "array subscript is above array bounds");
6901 TREE_NO_WARNING (ref) = 1;
6903 else if (TREE_CODE (low_sub) == INTEGER_CST
6904 && tree_int_cst_lt (low_sub, low_bound))
6906 if (dump_file && (dump_flags & TDF_DETAILS))
6908 fprintf (dump_file, "Array bound warning for ");
6909 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6910 fprintf (dump_file, "\n");
6912 warning_at (location, OPT_Warray_bounds,
6913 "array subscript is below array bounds");
6914 TREE_NO_WARNING (ref) = 1;
6918 /* Searches if the expr T, located at LOCATION computes
6919 address of an ARRAY_REF, and call check_array_ref on it. */
6921 static void
6922 search_for_addr_array (tree t, location_t location)
6924 /* Check each ARRAY_REFs in the reference chain. */
6927 if (TREE_CODE (t) == ARRAY_REF)
6928 check_array_ref (location, t, true /*ignore_off_by_one*/);
6930 t = TREE_OPERAND (t, 0);
6932 while (handled_component_p (t));
6934 if (TREE_CODE (t) == MEM_REF
6935 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
6936 && !TREE_NO_WARNING (t))
6938 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
6939 tree low_bound, up_bound, el_sz;
6940 offset_int idx;
6941 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
6942 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
6943 || !TYPE_DOMAIN (TREE_TYPE (tem)))
6944 return;
6946 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6947 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6948 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
6949 if (!low_bound
6950 || TREE_CODE (low_bound) != INTEGER_CST
6951 || !up_bound
6952 || TREE_CODE (up_bound) != INTEGER_CST
6953 || !el_sz
6954 || TREE_CODE (el_sz) != INTEGER_CST)
6955 return;
6957 idx = mem_ref_offset (t);
6958 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
6959 if (idx < 0)
6961 if (dump_file && (dump_flags & TDF_DETAILS))
6963 fprintf (dump_file, "Array bound warning for ");
6964 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6965 fprintf (dump_file, "\n");
6967 warning_at (location, OPT_Warray_bounds,
6968 "array subscript is below array bounds");
6969 TREE_NO_WARNING (t) = 1;
6971 else if (idx > (wi::to_offset (up_bound)
6972 - wi::to_offset (low_bound) + 1))
6974 if (dump_file && (dump_flags & TDF_DETAILS))
6976 fprintf (dump_file, "Array bound warning for ");
6977 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6978 fprintf (dump_file, "\n");
6980 warning_at (location, OPT_Warray_bounds,
6981 "array subscript is above array bounds");
6982 TREE_NO_WARNING (t) = 1;
6987 /* walk_tree() callback that checks if *TP is
6988 an ARRAY_REF inside an ADDR_EXPR (in which an array
6989 subscript one outside the valid range is allowed). Call
6990 check_array_ref for each ARRAY_REF found. The location is
6991 passed in DATA. */
6993 static tree
6994 check_array_bounds (tree *tp, int *walk_subtree, void *data)
6996 tree t = *tp;
6997 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6998 location_t location;
7000 if (EXPR_HAS_LOCATION (t))
7001 location = EXPR_LOCATION (t);
7002 else
7004 location_t *locp = (location_t *) wi->info;
7005 location = *locp;
7008 *walk_subtree = TRUE;
7010 if (TREE_CODE (t) == ARRAY_REF)
7011 check_array_ref (location, t, false /*ignore_off_by_one*/);
7013 else if (TREE_CODE (t) == ADDR_EXPR)
7015 search_for_addr_array (t, location);
7016 *walk_subtree = FALSE;
7019 return NULL_TREE;
7022 /* Walk over all statements of all reachable BBs and call check_array_bounds
7023 on them. */
7025 static void
7026 check_all_array_refs (void)
7028 basic_block bb;
7029 gimple_stmt_iterator si;
7031 FOR_EACH_BB_FN (bb, cfun)
7033 edge_iterator ei;
7034 edge e;
7035 bool executable = false;
7037 /* Skip blocks that were found to be unreachable. */
7038 FOR_EACH_EDGE (e, ei, bb->preds)
7039 executable |= !!(e->flags & EDGE_EXECUTABLE);
7040 if (!executable)
7041 continue;
7043 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
7045 gimple *stmt = gsi_stmt (si);
7046 struct walk_stmt_info wi;
7047 if (!gimple_has_location (stmt)
7048 || is_gimple_debug (stmt))
7049 continue;
7051 memset (&wi, 0, sizeof (wi));
7053 location_t loc = gimple_location (stmt);
7054 wi.info = &loc;
7056 walk_gimple_op (gsi_stmt (si),
7057 check_array_bounds,
7058 &wi);
7063 /* Return true if all imm uses of VAR are either in STMT, or
7064 feed (optionally through a chain of single imm uses) GIMPLE_COND
7065 in basic block COND_BB. */
7067 static bool
7068 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
7070 use_operand_p use_p, use2_p;
7071 imm_use_iterator iter;
7073 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
7074 if (USE_STMT (use_p) != stmt)
7076 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
7077 if (is_gimple_debug (use_stmt))
7078 continue;
7079 while (is_gimple_assign (use_stmt)
7080 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
7081 && single_imm_use (gimple_assign_lhs (use_stmt),
7082 &use2_p, &use_stmt2))
7083 use_stmt = use_stmt2;
7084 if (gimple_code (use_stmt) != GIMPLE_COND
7085 || gimple_bb (use_stmt) != cond_bb)
7086 return false;
7088 return true;
7091 /* Handle
7092 _4 = x_3 & 31;
7093 if (_4 != 0)
7094 goto <bb 6>;
7095 else
7096 goto <bb 7>;
7097 <bb 6>:
7098 __builtin_unreachable ();
7099 <bb 7>:
7100 x_5 = ASSERT_EXPR <x_3, ...>;
7101 If x_3 has no other immediate uses (checked by caller),
7102 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
7103 from the non-zero bitmask. */
7105 static void
7106 maybe_set_nonzero_bits (basic_block bb, tree var)
7108 edge e = single_pred_edge (bb);
7109 basic_block cond_bb = e->src;
7110 gimple *stmt = last_stmt (cond_bb);
7111 tree cst;
7113 if (stmt == NULL
7114 || gimple_code (stmt) != GIMPLE_COND
7115 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
7116 ? EQ_EXPR : NE_EXPR)
7117 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
7118 || !integer_zerop (gimple_cond_rhs (stmt)))
7119 return;
7121 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
7122 if (!is_gimple_assign (stmt)
7123 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
7124 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
7125 return;
7126 if (gimple_assign_rhs1 (stmt) != var)
7128 gimple *stmt2;
7130 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
7131 return;
7132 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
7133 if (!gimple_assign_cast_p (stmt2)
7134 || gimple_assign_rhs1 (stmt2) != var
7135 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
7136 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
7137 != TYPE_PRECISION (TREE_TYPE (var))))
7138 return;
7140 cst = gimple_assign_rhs2 (stmt);
7141 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
7144 /* Convert range assertion expressions into the implied copies and
7145 copy propagate away the copies. Doing the trivial copy propagation
7146 here avoids the need to run the full copy propagation pass after
7147 VRP.
7149 FIXME, this will eventually lead to copy propagation removing the
7150 names that had useful range information attached to them. For
7151 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
7152 then N_i will have the range [3, +INF].
7154 However, by converting the assertion into the implied copy
7155 operation N_i = N_j, we will then copy-propagate N_j into the uses
7156 of N_i and lose the range information. We may want to hold on to
7157 ASSERT_EXPRs a little while longer as the ranges could be used in
7158 things like jump threading.
7160 The problem with keeping ASSERT_EXPRs around is that passes after
7161 VRP need to handle them appropriately.
7163 Another approach would be to make the range information a first
7164 class property of the SSA_NAME so that it can be queried from
7165 any pass. This is made somewhat more complex by the need for
7166 multiple ranges to be associated with one SSA_NAME. */
7168 static void
7169 remove_range_assertions (void)
7171 basic_block bb;
7172 gimple_stmt_iterator si;
7173 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
7174 a basic block preceeded by GIMPLE_COND branching to it and
7175 __builtin_trap, -1 if not yet checked, 0 otherwise. */
7176 int is_unreachable;
7178 /* Note that the BSI iterator bump happens at the bottom of the
7179 loop and no bump is necessary if we're removing the statement
7180 referenced by the current BSI. */
7181 FOR_EACH_BB_FN (bb, cfun)
7182 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
7184 gimple *stmt = gsi_stmt (si);
7186 if (is_gimple_assign (stmt)
7187 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
7189 tree lhs = gimple_assign_lhs (stmt);
7190 tree rhs = gimple_assign_rhs1 (stmt);
7191 tree var;
7193 var = ASSERT_EXPR_VAR (rhs);
7195 if (TREE_CODE (var) == SSA_NAME
7196 && !POINTER_TYPE_P (TREE_TYPE (lhs))
7197 && SSA_NAME_RANGE_INFO (lhs))
7199 if (is_unreachable == -1)
7201 is_unreachable = 0;
7202 if (single_pred_p (bb)
7203 && assert_unreachable_fallthru_edge_p
7204 (single_pred_edge (bb)))
7205 is_unreachable = 1;
7207 /* Handle
7208 if (x_7 >= 10 && x_7 < 20)
7209 __builtin_unreachable ();
7210 x_8 = ASSERT_EXPR <x_7, ...>;
7211 if the only uses of x_7 are in the ASSERT_EXPR and
7212 in the condition. In that case, we can copy the
7213 range info from x_8 computed in this pass also
7214 for x_7. */
7215 if (is_unreachable
7216 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
7217 single_pred (bb)))
7219 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
7220 SSA_NAME_RANGE_INFO (lhs)->get_min (),
7221 SSA_NAME_RANGE_INFO (lhs)->get_max ());
7222 maybe_set_nonzero_bits (bb, var);
7226 /* Propagate the RHS into every use of the LHS. For SSA names
7227 also propagate abnormals as it merely restores the original
7228 IL in this case (an replace_uses_by would assert). */
7229 if (TREE_CODE (var) == SSA_NAME)
7231 imm_use_iterator iter;
7232 use_operand_p use_p;
7233 gimple *use_stmt;
7234 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
7235 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
7236 SET_USE (use_p, var);
7238 else
7239 replace_uses_by (lhs, var);
7241 /* And finally, remove the copy, it is not needed. */
7242 gsi_remove (&si, true);
7243 release_defs (stmt);
7245 else
7247 if (!is_gimple_debug (gsi_stmt (si)))
7248 is_unreachable = 0;
7249 gsi_next (&si);
7255 /* Return true if STMT is interesting for VRP. */
7257 static bool
7258 stmt_interesting_for_vrp (gimple *stmt)
7260 if (gimple_code (stmt) == GIMPLE_PHI)
7262 tree res = gimple_phi_result (stmt);
7263 return (!virtual_operand_p (res)
7264 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
7265 || POINTER_TYPE_P (TREE_TYPE (res))));
7267 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7269 tree lhs = gimple_get_lhs (stmt);
7271 /* In general, assignments with virtual operands are not useful
7272 for deriving ranges, with the obvious exception of calls to
7273 builtin functions. */
7274 if (lhs && TREE_CODE (lhs) == SSA_NAME
7275 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
7276 || POINTER_TYPE_P (TREE_TYPE (lhs)))
7277 && (is_gimple_call (stmt)
7278 || !gimple_vuse (stmt)))
7279 return true;
7280 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
7281 switch (gimple_call_internal_fn (stmt))
7283 case IFN_ADD_OVERFLOW:
7284 case IFN_SUB_OVERFLOW:
7285 case IFN_MUL_OVERFLOW:
7286 /* These internal calls return _Complex integer type,
7287 but are interesting to VRP nevertheless. */
7288 if (lhs && TREE_CODE (lhs) == SSA_NAME)
7289 return true;
7290 break;
7291 default:
7292 break;
7295 else if (gimple_code (stmt) == GIMPLE_COND
7296 || gimple_code (stmt) == GIMPLE_SWITCH)
7297 return true;
7299 return false;
7302 /* Initialize VRP lattice. */
7304 static void
7305 vrp_initialize_lattice ()
7307 values_propagated = false;
7308 num_vr_values = num_ssa_names;
7309 vr_value = XCNEWVEC (value_range *, num_vr_values);
7310 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
7311 bitmap_obstack_initialize (&vrp_equiv_obstack);
7314 /* Initialization required by ssa_propagate engine. */
7316 static void
7317 vrp_initialize ()
7319 basic_block bb;
7321 FOR_EACH_BB_FN (bb, cfun)
7323 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
7324 gsi_next (&si))
7326 gphi *phi = si.phi ();
7327 if (!stmt_interesting_for_vrp (phi))
7329 tree lhs = PHI_RESULT (phi);
7330 set_value_range_to_varying (get_value_range (lhs));
7331 prop_set_simulate_again (phi, false);
7333 else
7334 prop_set_simulate_again (phi, true);
7337 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
7338 gsi_next (&si))
7340 gimple *stmt = gsi_stmt (si);
7342 /* If the statement is a control insn, then we do not
7343 want to avoid simulating the statement once. Failure
7344 to do so means that those edges will never get added. */
7345 if (stmt_ends_bb_p (stmt))
7346 prop_set_simulate_again (stmt, true);
7347 else if (!stmt_interesting_for_vrp (stmt))
7349 set_defs_to_varying (stmt);
7350 prop_set_simulate_again (stmt, false);
7352 else
7353 prop_set_simulate_again (stmt, true);
7358 /* Return the singleton value-range for NAME or NAME. */
7360 static inline tree
7361 vrp_valueize (tree name)
7363 if (TREE_CODE (name) == SSA_NAME)
7365 value_range *vr = get_value_range (name);
7366 if (vr->type == VR_RANGE
7367 && (TREE_CODE (vr->min) == SSA_NAME
7368 || is_gimple_min_invariant (vr->min))
7369 && vrp_operand_equal_p (vr->min, vr->max))
7370 return vr->min;
7372 return name;
7375 /* Return the singleton value-range for NAME if that is a constant
7376 but signal to not follow SSA edges. */
7378 static inline tree
7379 vrp_valueize_1 (tree name)
7381 if (TREE_CODE (name) == SSA_NAME)
7383 /* If the definition may be simulated again we cannot follow
7384 this SSA edge as the SSA propagator does not necessarily
7385 re-visit the use. */
7386 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
7387 if (!gimple_nop_p (def_stmt)
7388 && prop_simulate_again_p (def_stmt))
7389 return NULL_TREE;
7390 value_range *vr = get_value_range (name);
7391 if (range_int_cst_singleton_p (vr))
7392 return vr->min;
7394 return name;
7397 /* Visit assignment STMT. If it produces an interesting range, record
7398 the range in VR and set LHS to OUTPUT_P. */
7400 static void
7401 vrp_visit_assignment_or_call (gimple *stmt, tree *output_p, value_range *vr)
7403 tree lhs;
7404 enum gimple_code code = gimple_code (stmt);
7405 lhs = gimple_get_lhs (stmt);
7406 *output_p = NULL_TREE;
7408 /* We only keep track of ranges in integral and pointer types. */
7409 if (TREE_CODE (lhs) == SSA_NAME
7410 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
7411 /* It is valid to have NULL MIN/MAX values on a type. See
7412 build_range_type. */
7413 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
7414 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
7415 || POINTER_TYPE_P (TREE_TYPE (lhs))))
7417 *output_p = lhs;
7419 /* Try folding the statement to a constant first. */
7420 tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
7421 vrp_valueize_1);
7422 if (tem)
7424 if (TREE_CODE (tem) == SSA_NAME
7425 && (SSA_NAME_IS_DEFAULT_DEF (tem)
7426 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (tem))))
7428 extract_range_from_ssa_name (vr, tem);
7429 return;
7431 else if (is_gimple_min_invariant (tem))
7433 set_value_range_to_value (vr, tem, NULL);
7434 return;
7437 /* Then dispatch to value-range extracting functions. */
7438 if (code == GIMPLE_CALL)
7439 extract_range_basic (vr, stmt);
7440 else
7441 extract_range_from_assignment (vr, as_a <gassign *> (stmt));
7445 /* Helper that gets the value range of the SSA_NAME with version I
7446 or a symbolic range containing the SSA_NAME only if the value range
7447 is varying or undefined. */
7449 static inline value_range
7450 get_vr_for_comparison (int i)
7452 value_range vr = *get_value_range (ssa_name (i));
7454 /* If name N_i does not have a valid range, use N_i as its own
7455 range. This allows us to compare against names that may
7456 have N_i in their ranges. */
7457 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
7459 vr.type = VR_RANGE;
7460 vr.min = ssa_name (i);
7461 vr.max = ssa_name (i);
7464 return vr;
7467 /* Compare all the value ranges for names equivalent to VAR with VAL
7468 using comparison code COMP. Return the same value returned by
7469 compare_range_with_value, including the setting of
7470 *STRICT_OVERFLOW_P. */
7472 static tree
7473 compare_name_with_value (enum tree_code comp, tree var, tree val,
7474 bool *strict_overflow_p, bool use_equiv_p)
7476 bitmap_iterator bi;
7477 unsigned i;
7478 bitmap e;
7479 tree retval, t;
7480 int used_strict_overflow;
7481 bool sop;
7482 value_range equiv_vr;
7484 /* Get the set of equivalences for VAR. */
7485 e = get_value_range (var)->equiv;
7487 /* Start at -1. Set it to 0 if we do a comparison without relying
7488 on overflow, or 1 if all comparisons rely on overflow. */
7489 used_strict_overflow = -1;
7491 /* Compare vars' value range with val. */
7492 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
7493 sop = false;
7494 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
7495 if (retval)
7496 used_strict_overflow = sop ? 1 : 0;
7498 /* If the equiv set is empty we have done all work we need to do. */
7499 if (e == NULL)
7501 if (retval
7502 && used_strict_overflow > 0)
7503 *strict_overflow_p = true;
7504 return retval;
7507 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
7509 tree name = ssa_name (i);
7510 if (! name)
7511 continue;
7513 if (! use_equiv_p
7514 && ! SSA_NAME_IS_DEFAULT_DEF (name)
7515 && prop_simulate_again_p (SSA_NAME_DEF_STMT (name)))
7516 continue;
7518 equiv_vr = get_vr_for_comparison (i);
7519 sop = false;
7520 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
7521 if (t)
7523 /* If we get different answers from different members
7524 of the equivalence set this check must be in a dead
7525 code region. Folding it to a trap representation
7526 would be correct here. For now just return don't-know. */
7527 if (retval != NULL
7528 && t != retval)
7530 retval = NULL_TREE;
7531 break;
7533 retval = t;
7535 if (!sop)
7536 used_strict_overflow = 0;
7537 else if (used_strict_overflow < 0)
7538 used_strict_overflow = 1;
7542 if (retval
7543 && used_strict_overflow > 0)
7544 *strict_overflow_p = true;
7546 return retval;
7550 /* Given a comparison code COMP and names N1 and N2, compare all the
7551 ranges equivalent to N1 against all the ranges equivalent to N2
7552 to determine the value of N1 COMP N2. Return the same value
7553 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
7554 whether we relied on an overflow infinity in the comparison. */
7557 static tree
7558 compare_names (enum tree_code comp, tree n1, tree n2,
7559 bool *strict_overflow_p)
7561 tree t, retval;
7562 bitmap e1, e2;
7563 bitmap_iterator bi1, bi2;
7564 unsigned i1, i2;
7565 int used_strict_overflow;
7566 static bitmap_obstack *s_obstack = NULL;
7567 static bitmap s_e1 = NULL, s_e2 = NULL;
7569 /* Compare the ranges of every name equivalent to N1 against the
7570 ranges of every name equivalent to N2. */
7571 e1 = get_value_range (n1)->equiv;
7572 e2 = get_value_range (n2)->equiv;
7574 /* Use the fake bitmaps if e1 or e2 are not available. */
7575 if (s_obstack == NULL)
7577 s_obstack = XNEW (bitmap_obstack);
7578 bitmap_obstack_initialize (s_obstack);
7579 s_e1 = BITMAP_ALLOC (s_obstack);
7580 s_e2 = BITMAP_ALLOC (s_obstack);
7582 if (e1 == NULL)
7583 e1 = s_e1;
7584 if (e2 == NULL)
7585 e2 = s_e2;
7587 /* Add N1 and N2 to their own set of equivalences to avoid
7588 duplicating the body of the loop just to check N1 and N2
7589 ranges. */
7590 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
7591 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
7593 /* If the equivalence sets have a common intersection, then the two
7594 names can be compared without checking their ranges. */
7595 if (bitmap_intersect_p (e1, e2))
7597 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7598 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7600 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
7601 ? boolean_true_node
7602 : boolean_false_node;
7605 /* Start at -1. Set it to 0 if we do a comparison without relying
7606 on overflow, or 1 if all comparisons rely on overflow. */
7607 used_strict_overflow = -1;
7609 /* Otherwise, compare all the equivalent ranges. First, add N1 and
7610 N2 to their own set of equivalences to avoid duplicating the body
7611 of the loop just to check N1 and N2 ranges. */
7612 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
7614 if (! ssa_name (i1))
7615 continue;
7617 value_range vr1 = get_vr_for_comparison (i1);
7619 t = retval = NULL_TREE;
7620 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
7622 if (! ssa_name (i2))
7623 continue;
7625 bool sop = false;
7627 value_range vr2 = get_vr_for_comparison (i2);
7629 t = compare_ranges (comp, &vr1, &vr2, &sop);
7630 if (t)
7632 /* If we get different answers from different members
7633 of the equivalence set this check must be in a dead
7634 code region. Folding it to a trap representation
7635 would be correct here. For now just return don't-know. */
7636 if (retval != NULL
7637 && t != retval)
7639 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7640 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7641 return NULL_TREE;
7643 retval = t;
7645 if (!sop)
7646 used_strict_overflow = 0;
7647 else if (used_strict_overflow < 0)
7648 used_strict_overflow = 1;
7652 if (retval)
7654 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7655 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7656 if (used_strict_overflow > 0)
7657 *strict_overflow_p = true;
7658 return retval;
7662 /* None of the equivalent ranges are useful in computing this
7663 comparison. */
7664 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7665 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7666 return NULL_TREE;
7669 /* Helper function for vrp_evaluate_conditional_warnv & other
7670 optimizers. */
7672 static tree
7673 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
7674 tree op0, tree op1,
7675 bool * strict_overflow_p)
7677 value_range *vr0, *vr1;
7679 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
7680 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
7682 tree res = NULL_TREE;
7683 if (vr0 && vr1)
7684 res = compare_ranges (code, vr0, vr1, strict_overflow_p);
7685 if (!res && vr0)
7686 res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
7687 if (!res && vr1)
7688 res = (compare_range_with_value
7689 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
7690 return res;
7693 /* Helper function for vrp_evaluate_conditional_warnv. */
7695 static tree
7696 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
7697 tree op1, bool use_equiv_p,
7698 bool *strict_overflow_p, bool *only_ranges)
7700 tree ret;
7701 if (only_ranges)
7702 *only_ranges = true;
7704 /* We only deal with integral and pointer types. */
7705 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
7706 && !POINTER_TYPE_P (TREE_TYPE (op0)))
7707 return NULL_TREE;
7709 /* If OP0 CODE OP1 is an overflow comparison, if it can be expressed
7710 as a simple equality test, then prefer that over its current form
7711 for evaluation.
7713 An overflow test which collapses to an equality test can always be
7714 expressed as a comparison of one argument against zero. Overflow
7715 occurs when the chosen argument is zero and does not occur if the
7716 chosen argument is not zero. */
7717 tree x;
7718 if (overflow_comparison_p (code, op0, op1, use_equiv_p, &x))
7720 wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED);
7721 /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0)
7722 B = A - 1; if (A > B) -> B = A - 1; if (A != 0)
7723 B = A + 1; if (B < A) -> B = A + 1; if (B == 0)
7724 B = A + 1; if (B > A) -> B = A + 1; if (B != 0) */
7725 if (integer_zerop (x))
7727 op1 = x;
7728 code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR;
7730 /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0)
7731 B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
7732 B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
7733 B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */
7734 else if (wi::eq_p (x, max - 1))
7736 op0 = op1;
7737 op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
7738 code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR;
7742 if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
7743 (code, op0, op1, strict_overflow_p)))
7744 return ret;
7745 if (only_ranges)
7746 *only_ranges = false;
7747 /* Do not use compare_names during propagation, it's quadratic. */
7748 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME
7749 && use_equiv_p)
7750 return compare_names (code, op0, op1, strict_overflow_p);
7751 else if (TREE_CODE (op0) == SSA_NAME)
7752 return compare_name_with_value (code, op0, op1,
7753 strict_overflow_p, use_equiv_p);
7754 else if (TREE_CODE (op1) == SSA_NAME)
7755 return compare_name_with_value (swap_tree_comparison (code), op1, op0,
7756 strict_overflow_p, use_equiv_p);
7757 return NULL_TREE;
7760 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
7761 information. Return NULL if the conditional can not be evaluated.
7762 The ranges of all the names equivalent with the operands in COND
7763 will be used when trying to compute the value. If the result is
7764 based on undefined signed overflow, issue a warning if
7765 appropriate. */
7767 static tree
7768 vrp_evaluate_conditional (tree_code code, tree op0, tree op1, gimple *stmt)
7770 bool sop;
7771 tree ret;
7772 bool only_ranges;
7774 /* Some passes and foldings leak constants with overflow flag set
7775 into the IL. Avoid doing wrong things with these and bail out. */
7776 if ((TREE_CODE (op0) == INTEGER_CST
7777 && TREE_OVERFLOW (op0))
7778 || (TREE_CODE (op1) == INTEGER_CST
7779 && TREE_OVERFLOW (op1)))
7780 return NULL_TREE;
7782 sop = false;
7783 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
7784 &only_ranges);
7786 if (ret && sop)
7788 enum warn_strict_overflow_code wc;
7789 const char* warnmsg;
7791 if (is_gimple_min_invariant (ret))
7793 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
7794 warnmsg = G_("assuming signed overflow does not occur when "
7795 "simplifying conditional to constant");
7797 else
7799 wc = WARN_STRICT_OVERFLOW_COMPARISON;
7800 warnmsg = G_("assuming signed overflow does not occur when "
7801 "simplifying conditional");
7804 if (issue_strict_overflow_warning (wc))
7806 location_t location;
7808 if (!gimple_has_location (stmt))
7809 location = input_location;
7810 else
7811 location = gimple_location (stmt);
7812 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
7816 if (warn_type_limits
7817 && ret && only_ranges
7818 && TREE_CODE_CLASS (code) == tcc_comparison
7819 && TREE_CODE (op0) == SSA_NAME)
7821 /* If the comparison is being folded and the operand on the LHS
7822 is being compared against a constant value that is outside of
7823 the natural range of OP0's type, then the predicate will
7824 always fold regardless of the value of OP0. If -Wtype-limits
7825 was specified, emit a warning. */
7826 tree type = TREE_TYPE (op0);
7827 value_range *vr0 = get_value_range (op0);
7829 if (vr0->type == VR_RANGE
7830 && INTEGRAL_TYPE_P (type)
7831 && vrp_val_is_min (vr0->min)
7832 && vrp_val_is_max (vr0->max)
7833 && is_gimple_min_invariant (op1))
7835 location_t location;
7837 if (!gimple_has_location (stmt))
7838 location = input_location;
7839 else
7840 location = gimple_location (stmt);
7842 warning_at (location, OPT_Wtype_limits,
7843 integer_zerop (ret)
7844 ? G_("comparison always false "
7845 "due to limited range of data type")
7846 : G_("comparison always true "
7847 "due to limited range of data type"));
7851 return ret;
7855 /* Visit conditional statement STMT. If we can determine which edge
7856 will be taken out of STMT's basic block, record it in
7857 *TAKEN_EDGE_P. Otherwise, set *TAKEN_EDGE_P to NULL. */
7859 static void
7860 vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
7862 tree val;
7863 bool sop;
7865 *taken_edge_p = NULL;
7867 if (dump_file && (dump_flags & TDF_DETAILS))
7869 tree use;
7870 ssa_op_iter i;
7872 fprintf (dump_file, "\nVisiting conditional with predicate: ");
7873 print_gimple_stmt (dump_file, stmt, 0, 0);
7874 fprintf (dump_file, "\nWith known ranges\n");
7876 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
7878 fprintf (dump_file, "\t");
7879 print_generic_expr (dump_file, use, 0);
7880 fprintf (dump_file, ": ");
7881 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
7884 fprintf (dump_file, "\n");
7887 /* Compute the value of the predicate COND by checking the known
7888 ranges of each of its operands.
7890 Note that we cannot evaluate all the equivalent ranges here
7891 because those ranges may not yet be final and with the current
7892 propagation strategy, we cannot determine when the value ranges
7893 of the names in the equivalence set have changed.
7895 For instance, given the following code fragment
7897 i_5 = PHI <8, i_13>
7899 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
7900 if (i_14 == 1)
7903 Assume that on the first visit to i_14, i_5 has the temporary
7904 range [8, 8] because the second argument to the PHI function is
7905 not yet executable. We derive the range ~[0, 0] for i_14 and the
7906 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
7907 the first time, since i_14 is equivalent to the range [8, 8], we
7908 determine that the predicate is always false.
7910 On the next round of propagation, i_13 is determined to be
7911 VARYING, which causes i_5 to drop down to VARYING. So, another
7912 visit to i_14 is scheduled. In this second visit, we compute the
7913 exact same range and equivalence set for i_14, namely ~[0, 0] and
7914 { i_5 }. But we did not have the previous range for i_5
7915 registered, so vrp_visit_assignment thinks that the range for
7916 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
7917 is not visited again, which stops propagation from visiting
7918 statements in the THEN clause of that if().
7920 To properly fix this we would need to keep the previous range
7921 value for the names in the equivalence set. This way we would've
7922 discovered that from one visit to the other i_5 changed from
7923 range [8, 8] to VR_VARYING.
7925 However, fixing this apparent limitation may not be worth the
7926 additional checking. Testing on several code bases (GCC, DLV,
7927 MICO, TRAMP3D and SPEC2000) showed that doing this results in
7928 4 more predicates folded in SPEC. */
7929 sop = false;
7931 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
7932 gimple_cond_lhs (stmt),
7933 gimple_cond_rhs (stmt),
7934 false, &sop, NULL);
7935 if (val)
7937 if (!sop)
7938 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
7939 else
7941 if (dump_file && (dump_flags & TDF_DETAILS))
7942 fprintf (dump_file,
7943 "\nIgnoring predicate evaluation because "
7944 "it assumes that signed overflow is undefined");
7945 val = NULL_TREE;
7949 if (dump_file && (dump_flags & TDF_DETAILS))
7951 fprintf (dump_file, "\nPredicate evaluates to: ");
7952 if (val == NULL_TREE)
7953 fprintf (dump_file, "DON'T KNOW\n");
7954 else
7955 print_generic_stmt (dump_file, val, 0);
7959 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
7960 that includes the value VAL. The search is restricted to the range
7961 [START_IDX, n - 1] where n is the size of VEC.
7963 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
7964 returned.
7966 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
7967 it is placed in IDX and false is returned.
7969 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
7970 returned. */
7972 static bool
7973 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
7975 size_t n = gimple_switch_num_labels (stmt);
7976 size_t low, high;
7978 /* Find case label for minimum of the value range or the next one.
7979 At each iteration we are searching in [low, high - 1]. */
7981 for (low = start_idx, high = n; high != low; )
7983 tree t;
7984 int cmp;
7985 /* Note that i != high, so we never ask for n. */
7986 size_t i = (high + low) / 2;
7987 t = gimple_switch_label (stmt, i);
7989 /* Cache the result of comparing CASE_LOW and val. */
7990 cmp = tree_int_cst_compare (CASE_LOW (t), val);
7992 if (cmp == 0)
7994 /* Ranges cannot be empty. */
7995 *idx = i;
7996 return true;
7998 else if (cmp > 0)
7999 high = i;
8000 else
8002 low = i + 1;
8003 if (CASE_HIGH (t) != NULL
8004 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
8006 *idx = i;
8007 return true;
8012 *idx = high;
8013 return false;
8016 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
8017 for values between MIN and MAX. The first index is placed in MIN_IDX. The
8018 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
8019 then MAX_IDX < MIN_IDX.
8020 Returns true if the default label is not needed. */
8022 static bool
8023 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
8024 size_t *max_idx)
8026 size_t i, j;
8027 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
8028 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
8030 if (i == j
8031 && min_take_default
8032 && max_take_default)
8034 /* Only the default case label reached.
8035 Return an empty range. */
8036 *min_idx = 1;
8037 *max_idx = 0;
8038 return false;
8040 else
8042 bool take_default = min_take_default || max_take_default;
8043 tree low, high;
8044 size_t k;
8046 if (max_take_default)
8047 j--;
8049 /* If the case label range is continuous, we do not need
8050 the default case label. Verify that. */
8051 high = CASE_LOW (gimple_switch_label (stmt, i));
8052 if (CASE_HIGH (gimple_switch_label (stmt, i)))
8053 high = CASE_HIGH (gimple_switch_label (stmt, i));
8054 for (k = i + 1; k <= j; ++k)
8056 low = CASE_LOW (gimple_switch_label (stmt, k));
8057 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
8059 take_default = true;
8060 break;
8062 high = low;
8063 if (CASE_HIGH (gimple_switch_label (stmt, k)))
8064 high = CASE_HIGH (gimple_switch_label (stmt, k));
8067 *min_idx = i;
8068 *max_idx = j;
8069 return !take_default;
8073 /* Searches the case label vector VEC for the ranges of CASE_LABELs that are
8074 used in range VR. The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
8075 MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
8076 Returns true if the default label is not needed. */
8078 static bool
8079 find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
8080 size_t *max_idx1, size_t *min_idx2,
8081 size_t *max_idx2)
8083 size_t i, j, k, l;
8084 unsigned int n = gimple_switch_num_labels (stmt);
8085 bool take_default;
8086 tree case_low, case_high;
8087 tree min = vr->min, max = vr->max;
8089 gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
8091 take_default = !find_case_label_range (stmt, min, max, &i, &j);
8093 /* Set second range to emtpy. */
8094 *min_idx2 = 1;
8095 *max_idx2 = 0;
8097 if (vr->type == VR_RANGE)
8099 *min_idx1 = i;
8100 *max_idx1 = j;
8101 return !take_default;
8104 /* Set first range to all case labels. */
8105 *min_idx1 = 1;
8106 *max_idx1 = n - 1;
8108 if (i > j)
8109 return false;
8111 /* Make sure all the values of case labels [i , j] are contained in
8112 range [MIN, MAX]. */
8113 case_low = CASE_LOW (gimple_switch_label (stmt, i));
8114 case_high = CASE_HIGH (gimple_switch_label (stmt, j));
8115 if (tree_int_cst_compare (case_low, min) < 0)
8116 i += 1;
8117 if (case_high != NULL_TREE
8118 && tree_int_cst_compare (max, case_high) < 0)
8119 j -= 1;
8121 if (i > j)
8122 return false;
8124 /* If the range spans case labels [i, j], the corresponding anti-range spans
8125 the labels [1, i - 1] and [j + 1, n - 1]. */
8126 k = j + 1;
8127 l = n - 1;
8128 if (k > l)
8130 k = 1;
8131 l = 0;
8134 j = i - 1;
8135 i = 1;
8136 if (i > j)
8138 i = k;
8139 j = l;
8140 k = 1;
8141 l = 0;
8144 *min_idx1 = i;
8145 *max_idx1 = j;
8146 *min_idx2 = k;
8147 *max_idx2 = l;
8148 return false;
8151 /* Visit switch statement STMT. If we can determine which edge
8152 will be taken out of STMT's basic block, record it in
8153 *TAKEN_EDGE_P. Otherwise, *TAKEN_EDGE_P set to NULL. */
8155 static void
8156 vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
8158 tree op, val;
8159 value_range *vr;
8160 size_t i = 0, j = 0, k, l;
8161 bool take_default;
8163 *taken_edge_p = NULL;
8164 op = gimple_switch_index (stmt);
8165 if (TREE_CODE (op) != SSA_NAME)
8166 return;
8168 vr = get_value_range (op);
8169 if (dump_file && (dump_flags & TDF_DETAILS))
8171 fprintf (dump_file, "\nVisiting switch expression with operand ");
8172 print_generic_expr (dump_file, op, 0);
8173 fprintf (dump_file, " with known range ");
8174 dump_value_range (dump_file, vr);
8175 fprintf (dump_file, "\n");
8178 if ((vr->type != VR_RANGE
8179 && vr->type != VR_ANTI_RANGE)
8180 || symbolic_range_p (vr))
8181 return;
8183 /* Find the single edge that is taken from the switch expression. */
8184 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
8186 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
8187 label */
8188 if (j < i)
8190 gcc_assert (take_default);
8191 val = gimple_switch_default_label (stmt);
8193 else
8195 /* Check if labels with index i to j and maybe the default label
8196 are all reaching the same label. */
8198 val = gimple_switch_label (stmt, i);
8199 if (take_default
8200 && CASE_LABEL (gimple_switch_default_label (stmt))
8201 != CASE_LABEL (val))
8203 if (dump_file && (dump_flags & TDF_DETAILS))
8204 fprintf (dump_file, " not a single destination for this "
8205 "range\n");
8206 return;
8208 for (++i; i <= j; ++i)
8210 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
8212 if (dump_file && (dump_flags & TDF_DETAILS))
8213 fprintf (dump_file, " not a single destination for this "
8214 "range\n");
8215 return;
8218 for (; k <= l; ++k)
8220 if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
8222 if (dump_file && (dump_flags & TDF_DETAILS))
8223 fprintf (dump_file, " not a single destination for this "
8224 "range\n");
8225 return;
8230 *taken_edge_p = find_edge (gimple_bb (stmt),
8231 label_to_block (CASE_LABEL (val)));
8233 if (dump_file && (dump_flags & TDF_DETAILS))
8235 fprintf (dump_file, " will take edge to ");
8236 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
8241 /* Evaluate statement STMT. If the statement produces a useful range,
8242 set VR and corepsponding OUTPUT_P.
8244 If STMT is a conditional branch and we can determine its truth
8245 value, the taken edge is recorded in *TAKEN_EDGE_P. */
8247 static void
8248 extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
8249 tree *output_p, value_range *vr)
8252 if (dump_file && (dump_flags & TDF_DETAILS))
8254 fprintf (dump_file, "\nVisiting statement:\n");
8255 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
8258 if (!stmt_interesting_for_vrp (stmt))
8259 gcc_assert (stmt_ends_bb_p (stmt));
8260 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
8261 vrp_visit_assignment_or_call (stmt, output_p, vr);
8262 else if (gimple_code (stmt) == GIMPLE_COND)
8263 vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
8264 else if (gimple_code (stmt) == GIMPLE_SWITCH)
8265 vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
8268 /* Evaluate statement STMT. If the statement produces a useful range,
8269 return SSA_PROP_INTERESTING and record the SSA name with the
8270 interesting range into *OUTPUT_P.
8272 If STMT is a conditional branch and we can determine its truth
8273 value, the taken edge is recorded in *TAKEN_EDGE_P.
8275 If STMT produces a varying value, return SSA_PROP_VARYING. */
8277 static enum ssa_prop_result
8278 vrp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
8280 value_range vr = VR_INITIALIZER;
8281 tree lhs = gimple_get_lhs (stmt);
8282 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
8284 if (*output_p)
8286 if (update_value_range (*output_p, &vr))
8288 if (dump_file && (dump_flags & TDF_DETAILS))
8290 fprintf (dump_file, "Found new range for ");
8291 print_generic_expr (dump_file, *output_p, 0);
8292 fprintf (dump_file, ": ");
8293 dump_value_range (dump_file, &vr);
8294 fprintf (dump_file, "\n");
8297 if (vr.type == VR_VARYING)
8298 return SSA_PROP_VARYING;
8300 return SSA_PROP_INTERESTING;
8302 return SSA_PROP_NOT_INTERESTING;
8305 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
8306 switch (gimple_call_internal_fn (stmt))
8308 case IFN_ADD_OVERFLOW:
8309 case IFN_SUB_OVERFLOW:
8310 case IFN_MUL_OVERFLOW:
8311 /* These internal calls return _Complex integer type,
8312 which VRP does not track, but the immediate uses
8313 thereof might be interesting. */
8314 if (lhs && TREE_CODE (lhs) == SSA_NAME)
8316 imm_use_iterator iter;
8317 use_operand_p use_p;
8318 enum ssa_prop_result res = SSA_PROP_VARYING;
8320 set_value_range_to_varying (get_value_range (lhs));
8322 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
8324 gimple *use_stmt = USE_STMT (use_p);
8325 if (!is_gimple_assign (use_stmt))
8326 continue;
8327 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
8328 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
8329 continue;
8330 tree rhs1 = gimple_assign_rhs1 (use_stmt);
8331 tree use_lhs = gimple_assign_lhs (use_stmt);
8332 if (TREE_CODE (rhs1) != rhs_code
8333 || TREE_OPERAND (rhs1, 0) != lhs
8334 || TREE_CODE (use_lhs) != SSA_NAME
8335 || !stmt_interesting_for_vrp (use_stmt)
8336 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
8337 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
8338 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
8339 continue;
8341 /* If there is a change in the value range for any of the
8342 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
8343 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
8344 or IMAGPART_EXPR immediate uses, but none of them have
8345 a change in their value ranges, return
8346 SSA_PROP_NOT_INTERESTING. If there are no
8347 {REAL,IMAG}PART_EXPR uses at all,
8348 return SSA_PROP_VARYING. */
8349 value_range new_vr = VR_INITIALIZER;
8350 extract_range_basic (&new_vr, use_stmt);
8351 value_range *old_vr = get_value_range (use_lhs);
8352 if (old_vr->type != new_vr.type
8353 || !vrp_operand_equal_p (old_vr->min, new_vr.min)
8354 || !vrp_operand_equal_p (old_vr->max, new_vr.max)
8355 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
8356 res = SSA_PROP_INTERESTING;
8357 else
8358 res = SSA_PROP_NOT_INTERESTING;
8359 BITMAP_FREE (new_vr.equiv);
8360 if (res == SSA_PROP_INTERESTING)
8362 *output_p = lhs;
8363 return res;
8367 return res;
8369 break;
8370 default:
8371 break;
8374 /* All other statements produce nothing of interest for VRP, so mark
8375 their outputs varying and prevent further simulation. */
8376 set_defs_to_varying (stmt);
8378 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
8381 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
8382 { VR1TYPE, VR0MIN, VR0MAX } and store the result
8383 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
8384 possible such range. The resulting range is not canonicalized. */
8386 static void
8387 union_ranges (enum value_range_type *vr0type,
8388 tree *vr0min, tree *vr0max,
8389 enum value_range_type vr1type,
8390 tree vr1min, tree vr1max)
8392 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
8393 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
8395 /* [] is vr0, () is vr1 in the following classification comments. */
8396 if (mineq && maxeq)
8398 /* [( )] */
8399 if (*vr0type == vr1type)
8400 /* Nothing to do for equal ranges. */
8402 else if ((*vr0type == VR_RANGE
8403 && vr1type == VR_ANTI_RANGE)
8404 || (*vr0type == VR_ANTI_RANGE
8405 && vr1type == VR_RANGE))
8407 /* For anti-range with range union the result is varying. */
8408 goto give_up;
8410 else
8411 gcc_unreachable ();
8413 else if (operand_less_p (*vr0max, vr1min) == 1
8414 || operand_less_p (vr1max, *vr0min) == 1)
8416 /* [ ] ( ) or ( ) [ ]
8417 If the ranges have an empty intersection, result of the union
8418 operation is the anti-range or if both are anti-ranges
8419 it covers all. */
8420 if (*vr0type == VR_ANTI_RANGE
8421 && vr1type == VR_ANTI_RANGE)
8422 goto give_up;
8423 else if (*vr0type == VR_ANTI_RANGE
8424 && vr1type == VR_RANGE)
8426 else if (*vr0type == VR_RANGE
8427 && vr1type == VR_ANTI_RANGE)
8429 *vr0type = vr1type;
8430 *vr0min = vr1min;
8431 *vr0max = vr1max;
8433 else if (*vr0type == VR_RANGE
8434 && vr1type == VR_RANGE)
8436 /* The result is the convex hull of both ranges. */
8437 if (operand_less_p (*vr0max, vr1min) == 1)
8439 /* If the result can be an anti-range, create one. */
8440 if (TREE_CODE (*vr0max) == INTEGER_CST
8441 && TREE_CODE (vr1min) == INTEGER_CST
8442 && vrp_val_is_min (*vr0min)
8443 && vrp_val_is_max (vr1max))
8445 tree min = int_const_binop (PLUS_EXPR,
8446 *vr0max,
8447 build_int_cst (TREE_TYPE (*vr0max), 1));
8448 tree max = int_const_binop (MINUS_EXPR,
8449 vr1min,
8450 build_int_cst (TREE_TYPE (vr1min), 1));
8451 if (!operand_less_p (max, min))
8453 *vr0type = VR_ANTI_RANGE;
8454 *vr0min = min;
8455 *vr0max = max;
8457 else
8458 *vr0max = vr1max;
8460 else
8461 *vr0max = vr1max;
8463 else
8465 /* If the result can be an anti-range, create one. */
8466 if (TREE_CODE (vr1max) == INTEGER_CST
8467 && TREE_CODE (*vr0min) == INTEGER_CST
8468 && vrp_val_is_min (vr1min)
8469 && vrp_val_is_max (*vr0max))
8471 tree min = int_const_binop (PLUS_EXPR,
8472 vr1max,
8473 build_int_cst (TREE_TYPE (vr1max), 1));
8474 tree max = int_const_binop (MINUS_EXPR,
8475 *vr0min,
8476 build_int_cst (TREE_TYPE (*vr0min), 1));
8477 if (!operand_less_p (max, min))
8479 *vr0type = VR_ANTI_RANGE;
8480 *vr0min = min;
8481 *vr0max = max;
8483 else
8484 *vr0min = vr1min;
8486 else
8487 *vr0min = vr1min;
8490 else
8491 gcc_unreachable ();
8493 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
8494 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
8496 /* [ ( ) ] or [( ) ] or [ ( )] */
8497 if (*vr0type == VR_RANGE
8498 && vr1type == VR_RANGE)
8500 else if (*vr0type == VR_ANTI_RANGE
8501 && vr1type == VR_ANTI_RANGE)
8503 *vr0type = vr1type;
8504 *vr0min = vr1min;
8505 *vr0max = vr1max;
8507 else if (*vr0type == VR_ANTI_RANGE
8508 && vr1type == VR_RANGE)
8510 /* Arbitrarily choose the right or left gap. */
8511 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
8512 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8513 build_int_cst (TREE_TYPE (vr1min), 1));
8514 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
8515 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8516 build_int_cst (TREE_TYPE (vr1max), 1));
8517 else
8518 goto give_up;
8520 else if (*vr0type == VR_RANGE
8521 && vr1type == VR_ANTI_RANGE)
8522 /* The result covers everything. */
8523 goto give_up;
8524 else
8525 gcc_unreachable ();
8527 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
8528 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
8530 /* ( [ ] ) or ([ ] ) or ( [ ]) */
8531 if (*vr0type == VR_RANGE
8532 && vr1type == VR_RANGE)
8534 *vr0type = vr1type;
8535 *vr0min = vr1min;
8536 *vr0max = vr1max;
8538 else if (*vr0type == VR_ANTI_RANGE
8539 && vr1type == VR_ANTI_RANGE)
8541 else if (*vr0type == VR_RANGE
8542 && vr1type == VR_ANTI_RANGE)
8544 *vr0type = VR_ANTI_RANGE;
8545 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
8547 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8548 build_int_cst (TREE_TYPE (*vr0min), 1));
8549 *vr0min = vr1min;
8551 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
8553 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8554 build_int_cst (TREE_TYPE (*vr0max), 1));
8555 *vr0max = vr1max;
8557 else
8558 goto give_up;
8560 else if (*vr0type == VR_ANTI_RANGE
8561 && vr1type == VR_RANGE)
8562 /* The result covers everything. */
8563 goto give_up;
8564 else
8565 gcc_unreachable ();
8567 else if ((operand_less_p (vr1min, *vr0max) == 1
8568 || operand_equal_p (vr1min, *vr0max, 0))
8569 && operand_less_p (*vr0min, vr1min) == 1
8570 && operand_less_p (*vr0max, vr1max) == 1)
8572 /* [ ( ] ) or [ ]( ) */
8573 if (*vr0type == VR_RANGE
8574 && vr1type == VR_RANGE)
8575 *vr0max = vr1max;
8576 else if (*vr0type == VR_ANTI_RANGE
8577 && vr1type == VR_ANTI_RANGE)
8578 *vr0min = vr1min;
8579 else if (*vr0type == VR_ANTI_RANGE
8580 && vr1type == VR_RANGE)
8582 if (TREE_CODE (vr1min) == INTEGER_CST)
8583 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8584 build_int_cst (TREE_TYPE (vr1min), 1));
8585 else
8586 goto give_up;
8588 else if (*vr0type == VR_RANGE
8589 && vr1type == VR_ANTI_RANGE)
8591 if (TREE_CODE (*vr0max) == INTEGER_CST)
8593 *vr0type = vr1type;
8594 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8595 build_int_cst (TREE_TYPE (*vr0max), 1));
8596 *vr0max = vr1max;
8598 else
8599 goto give_up;
8601 else
8602 gcc_unreachable ();
8604 else if ((operand_less_p (*vr0min, vr1max) == 1
8605 || operand_equal_p (*vr0min, vr1max, 0))
8606 && operand_less_p (vr1min, *vr0min) == 1
8607 && operand_less_p (vr1max, *vr0max) == 1)
8609 /* ( [ ) ] or ( )[ ] */
8610 if (*vr0type == VR_RANGE
8611 && vr1type == VR_RANGE)
8612 *vr0min = vr1min;
8613 else if (*vr0type == VR_ANTI_RANGE
8614 && vr1type == VR_ANTI_RANGE)
8615 *vr0max = vr1max;
8616 else if (*vr0type == VR_ANTI_RANGE
8617 && vr1type == VR_RANGE)
8619 if (TREE_CODE (vr1max) == INTEGER_CST)
8620 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8621 build_int_cst (TREE_TYPE (vr1max), 1));
8622 else
8623 goto give_up;
8625 else if (*vr0type == VR_RANGE
8626 && vr1type == VR_ANTI_RANGE)
8628 if (TREE_CODE (*vr0min) == INTEGER_CST)
8630 *vr0type = vr1type;
8631 *vr0min = vr1min;
8632 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8633 build_int_cst (TREE_TYPE (*vr0min), 1));
8635 else
8636 goto give_up;
8638 else
8639 gcc_unreachable ();
8641 else
8642 goto give_up;
8644 return;
8646 give_up:
8647 *vr0type = VR_VARYING;
8648 *vr0min = NULL_TREE;
8649 *vr0max = NULL_TREE;
8652 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
8653 { VR1TYPE, VR0MIN, VR0MAX } and store the result
8654 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
8655 possible such range. The resulting range is not canonicalized. */
8657 static void
8658 intersect_ranges (enum value_range_type *vr0type,
8659 tree *vr0min, tree *vr0max,
8660 enum value_range_type vr1type,
8661 tree vr1min, tree vr1max)
8663 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
8664 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
8666 /* [] is vr0, () is vr1 in the following classification comments. */
8667 if (mineq && maxeq)
8669 /* [( )] */
8670 if (*vr0type == vr1type)
8671 /* Nothing to do for equal ranges. */
8673 else if ((*vr0type == VR_RANGE
8674 && vr1type == VR_ANTI_RANGE)
8675 || (*vr0type == VR_ANTI_RANGE
8676 && vr1type == VR_RANGE))
8678 /* For anti-range with range intersection the result is empty. */
8679 *vr0type = VR_UNDEFINED;
8680 *vr0min = NULL_TREE;
8681 *vr0max = NULL_TREE;
8683 else
8684 gcc_unreachable ();
8686 else if (operand_less_p (*vr0max, vr1min) == 1
8687 || operand_less_p (vr1max, *vr0min) == 1)
8689 /* [ ] ( ) or ( ) [ ]
8690 If the ranges have an empty intersection, the result of the
8691 intersect operation is the range for intersecting an
8692 anti-range with a range or empty when intersecting two ranges. */
8693 if (*vr0type == VR_RANGE
8694 && vr1type == VR_ANTI_RANGE)
8696 else if (*vr0type == VR_ANTI_RANGE
8697 && vr1type == VR_RANGE)
8699 *vr0type = vr1type;
8700 *vr0min = vr1min;
8701 *vr0max = vr1max;
8703 else if (*vr0type == VR_RANGE
8704 && vr1type == VR_RANGE)
8706 *vr0type = VR_UNDEFINED;
8707 *vr0min = NULL_TREE;
8708 *vr0max = NULL_TREE;
8710 else if (*vr0type == VR_ANTI_RANGE
8711 && vr1type == VR_ANTI_RANGE)
8713 /* If the anti-ranges are adjacent to each other merge them. */
8714 if (TREE_CODE (*vr0max) == INTEGER_CST
8715 && TREE_CODE (vr1min) == INTEGER_CST
8716 && operand_less_p (*vr0max, vr1min) == 1
8717 && integer_onep (int_const_binop (MINUS_EXPR,
8718 vr1min, *vr0max)))
8719 *vr0max = vr1max;
8720 else if (TREE_CODE (vr1max) == INTEGER_CST
8721 && TREE_CODE (*vr0min) == INTEGER_CST
8722 && operand_less_p (vr1max, *vr0min) == 1
8723 && integer_onep (int_const_binop (MINUS_EXPR,
8724 *vr0min, vr1max)))
8725 *vr0min = vr1min;
8726 /* Else arbitrarily take VR0. */
8729 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
8730 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
8732 /* [ ( ) ] or [( ) ] or [ ( )] */
8733 if (*vr0type == VR_RANGE
8734 && vr1type == VR_RANGE)
8736 /* If both are ranges the result is the inner one. */
8737 *vr0type = vr1type;
8738 *vr0min = vr1min;
8739 *vr0max = vr1max;
8741 else if (*vr0type == VR_RANGE
8742 && vr1type == VR_ANTI_RANGE)
8744 /* Choose the right gap if the left one is empty. */
8745 if (mineq)
8747 if (TREE_CODE (vr1max) == INTEGER_CST)
8748 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8749 build_int_cst (TREE_TYPE (vr1max), 1));
8750 else
8751 *vr0min = vr1max;
8753 /* Choose the left gap if the right one is empty. */
8754 else if (maxeq)
8756 if (TREE_CODE (vr1min) == INTEGER_CST)
8757 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8758 build_int_cst (TREE_TYPE (vr1min), 1));
8759 else
8760 *vr0max = vr1min;
8762 /* Choose the anti-range if the range is effectively varying. */
8763 else if (vrp_val_is_min (*vr0min)
8764 && vrp_val_is_max (*vr0max))
8766 *vr0type = vr1type;
8767 *vr0min = vr1min;
8768 *vr0max = vr1max;
8770 /* Else choose the range. */
8772 else if (*vr0type == VR_ANTI_RANGE
8773 && vr1type == VR_ANTI_RANGE)
8774 /* If both are anti-ranges the result is the outer one. */
8776 else if (*vr0type == VR_ANTI_RANGE
8777 && vr1type == VR_RANGE)
8779 /* The intersection is empty. */
8780 *vr0type = VR_UNDEFINED;
8781 *vr0min = NULL_TREE;
8782 *vr0max = NULL_TREE;
8784 else
8785 gcc_unreachable ();
8787 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
8788 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
8790 /* ( [ ] ) or ([ ] ) or ( [ ]) */
8791 if (*vr0type == VR_RANGE
8792 && vr1type == VR_RANGE)
8793 /* Choose the inner range. */
8795 else if (*vr0type == VR_ANTI_RANGE
8796 && vr1type == VR_RANGE)
8798 /* Choose the right gap if the left is empty. */
8799 if (mineq)
8801 *vr0type = VR_RANGE;
8802 if (TREE_CODE (*vr0max) == INTEGER_CST)
8803 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8804 build_int_cst (TREE_TYPE (*vr0max), 1));
8805 else
8806 *vr0min = *vr0max;
8807 *vr0max = vr1max;
8809 /* Choose the left gap if the right is empty. */
8810 else if (maxeq)
8812 *vr0type = VR_RANGE;
8813 if (TREE_CODE (*vr0min) == INTEGER_CST)
8814 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8815 build_int_cst (TREE_TYPE (*vr0min), 1));
8816 else
8817 *vr0max = *vr0min;
8818 *vr0min = vr1min;
8820 /* Choose the anti-range if the range is effectively varying. */
8821 else if (vrp_val_is_min (vr1min)
8822 && vrp_val_is_max (vr1max))
8824 /* Choose the anti-range if it is ~[0,0], that range is special
8825 enough to special case when vr1's range is relatively wide. */
8826 else if (*vr0min == *vr0max
8827 && integer_zerop (*vr0min)
8828 && (TYPE_PRECISION (TREE_TYPE (*vr0min))
8829 == TYPE_PRECISION (ptr_type_node))
8830 && TREE_CODE (vr1max) == INTEGER_CST
8831 && TREE_CODE (vr1min) == INTEGER_CST
8832 && (wi::clz (wi::sub (vr1max, vr1min))
8833 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
8835 /* Else choose the range. */
8836 else
8838 *vr0type = vr1type;
8839 *vr0min = vr1min;
8840 *vr0max = vr1max;
8843 else if (*vr0type == VR_ANTI_RANGE
8844 && vr1type == VR_ANTI_RANGE)
8846 /* If both are anti-ranges the result is the outer one. */
8847 *vr0type = vr1type;
8848 *vr0min = vr1min;
8849 *vr0max = vr1max;
8851 else if (vr1type == VR_ANTI_RANGE
8852 && *vr0type == VR_RANGE)
8854 /* The intersection is empty. */
8855 *vr0type = VR_UNDEFINED;
8856 *vr0min = NULL_TREE;
8857 *vr0max = NULL_TREE;
8859 else
8860 gcc_unreachable ();
8862 else if ((operand_less_p (vr1min, *vr0max) == 1
8863 || operand_equal_p (vr1min, *vr0max, 0))
8864 && operand_less_p (*vr0min, vr1min) == 1)
8866 /* [ ( ] ) or [ ]( ) */
8867 if (*vr0type == VR_ANTI_RANGE
8868 && vr1type == VR_ANTI_RANGE)
8869 *vr0max = vr1max;
8870 else if (*vr0type == VR_RANGE
8871 && vr1type == VR_RANGE)
8872 *vr0min = vr1min;
8873 else if (*vr0type == VR_RANGE
8874 && vr1type == VR_ANTI_RANGE)
8876 if (TREE_CODE (vr1min) == INTEGER_CST)
8877 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8878 build_int_cst (TREE_TYPE (vr1min), 1));
8879 else
8880 *vr0max = vr1min;
8882 else if (*vr0type == VR_ANTI_RANGE
8883 && vr1type == VR_RANGE)
8885 *vr0type = VR_RANGE;
8886 if (TREE_CODE (*vr0max) == INTEGER_CST)
8887 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8888 build_int_cst (TREE_TYPE (*vr0max), 1));
8889 else
8890 *vr0min = *vr0max;
8891 *vr0max = vr1max;
8893 else
8894 gcc_unreachable ();
8896 else if ((operand_less_p (*vr0min, vr1max) == 1
8897 || operand_equal_p (*vr0min, vr1max, 0))
8898 && operand_less_p (vr1min, *vr0min) == 1)
8900 /* ( [ ) ] or ( )[ ] */
8901 if (*vr0type == VR_ANTI_RANGE
8902 && vr1type == VR_ANTI_RANGE)
8903 *vr0min = vr1min;
8904 else if (*vr0type == VR_RANGE
8905 && vr1type == VR_RANGE)
8906 *vr0max = vr1max;
8907 else if (*vr0type == VR_RANGE
8908 && vr1type == VR_ANTI_RANGE)
8910 if (TREE_CODE (vr1max) == INTEGER_CST)
8911 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8912 build_int_cst (TREE_TYPE (vr1max), 1));
8913 else
8914 *vr0min = vr1max;
8916 else if (*vr0type == VR_ANTI_RANGE
8917 && vr1type == VR_RANGE)
8919 *vr0type = VR_RANGE;
8920 if (TREE_CODE (*vr0min) == INTEGER_CST)
8921 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8922 build_int_cst (TREE_TYPE (*vr0min), 1));
8923 else
8924 *vr0max = *vr0min;
8925 *vr0min = vr1min;
8927 else
8928 gcc_unreachable ();
8931 /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as
8932 result for the intersection. That's always a conservative
8933 correct estimate unless VR1 is a constant singleton range
8934 in which case we choose that. */
8935 if (vr1type == VR_RANGE
8936 && is_gimple_min_invariant (vr1min)
8937 && vrp_operand_equal_p (vr1min, vr1max))
8939 *vr0type = vr1type;
8940 *vr0min = vr1min;
8941 *vr0max = vr1max;
8944 return;
8948 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
8949 in *VR0. This may not be the smallest possible such range. */
8951 static void
8952 vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
8954 value_range saved;
8956 /* If either range is VR_VARYING the other one wins. */
8957 if (vr1->type == VR_VARYING)
8958 return;
8959 if (vr0->type == VR_VARYING)
8961 copy_value_range (vr0, vr1);
8962 return;
8965 /* When either range is VR_UNDEFINED the resulting range is
8966 VR_UNDEFINED, too. */
8967 if (vr0->type == VR_UNDEFINED)
8968 return;
8969 if (vr1->type == VR_UNDEFINED)
8971 set_value_range_to_undefined (vr0);
8972 return;
8975 /* Save the original vr0 so we can return it as conservative intersection
8976 result when our worker turns things to varying. */
8977 saved = *vr0;
8978 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
8979 vr1->type, vr1->min, vr1->max);
8980 /* Make sure to canonicalize the result though as the inversion of a
8981 VR_RANGE can still be a VR_RANGE. */
8982 set_and_canonicalize_value_range (vr0, vr0->type,
8983 vr0->min, vr0->max, vr0->equiv);
8984 /* If that failed, use the saved original VR0. */
8985 if (vr0->type == VR_VARYING)
8987 *vr0 = saved;
8988 return;
8990 /* If the result is VR_UNDEFINED there is no need to mess with
8991 the equivalencies. */
8992 if (vr0->type == VR_UNDEFINED)
8993 return;
8995 /* The resulting set of equivalences for range intersection is the union of
8996 the two sets. */
8997 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8998 bitmap_ior_into (vr0->equiv, vr1->equiv);
8999 else if (vr1->equiv && !vr0->equiv)
9001 vr0->equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
9002 bitmap_copy (vr0->equiv, vr1->equiv);
9006 void
9007 vrp_intersect_ranges (value_range *vr0, value_range *vr1)
9009 if (dump_file && (dump_flags & TDF_DETAILS))
9011 fprintf (dump_file, "Intersecting\n ");
9012 dump_value_range (dump_file, vr0);
9013 fprintf (dump_file, "\nand\n ");
9014 dump_value_range (dump_file, vr1);
9015 fprintf (dump_file, "\n");
9017 vrp_intersect_ranges_1 (vr0, vr1);
9018 if (dump_file && (dump_flags & TDF_DETAILS))
9020 fprintf (dump_file, "to\n ");
9021 dump_value_range (dump_file, vr0);
9022 fprintf (dump_file, "\n");
9026 /* Meet operation for value ranges. Given two value ranges VR0 and
9027 VR1, store in VR0 a range that contains both VR0 and VR1. This
9028 may not be the smallest possible such range. */
9030 static void
9031 vrp_meet_1 (value_range *vr0, const value_range *vr1)
9033 value_range saved;
9035 if (vr0->type == VR_UNDEFINED)
9037 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
9038 return;
9041 if (vr1->type == VR_UNDEFINED)
9043 /* VR0 already has the resulting range. */
9044 return;
9047 if (vr0->type == VR_VARYING)
9049 /* Nothing to do. VR0 already has the resulting range. */
9050 return;
9053 if (vr1->type == VR_VARYING)
9055 set_value_range_to_varying (vr0);
9056 return;
9059 saved = *vr0;
9060 union_ranges (&vr0->type, &vr0->min, &vr0->max,
9061 vr1->type, vr1->min, vr1->max);
9062 if (vr0->type == VR_VARYING)
9064 /* Failed to find an efficient meet. Before giving up and setting
9065 the result to VARYING, see if we can at least derive a useful
9066 anti-range. FIXME, all this nonsense about distinguishing
9067 anti-ranges from ranges is necessary because of the odd
9068 semantics of range_includes_zero_p and friends. */
9069 if (((saved.type == VR_RANGE
9070 && range_includes_zero_p (saved.min, saved.max) == 0)
9071 || (saved.type == VR_ANTI_RANGE
9072 && range_includes_zero_p (saved.min, saved.max) == 1))
9073 && ((vr1->type == VR_RANGE
9074 && range_includes_zero_p (vr1->min, vr1->max) == 0)
9075 || (vr1->type == VR_ANTI_RANGE
9076 && range_includes_zero_p (vr1->min, vr1->max) == 1)))
9078 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
9080 /* Since this meet operation did not result from the meeting of
9081 two equivalent names, VR0 cannot have any equivalences. */
9082 if (vr0->equiv)
9083 bitmap_clear (vr0->equiv);
9084 return;
9087 set_value_range_to_varying (vr0);
9088 return;
9090 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
9091 vr0->equiv);
9092 if (vr0->type == VR_VARYING)
9093 return;
9095 /* The resulting set of equivalences is always the intersection of
9096 the two sets. */
9097 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
9098 bitmap_and_into (vr0->equiv, vr1->equiv);
9099 else if (vr0->equiv && !vr1->equiv)
9100 bitmap_clear (vr0->equiv);
9103 void
9104 vrp_meet (value_range *vr0, const value_range *vr1)
9106 if (dump_file && (dump_flags & TDF_DETAILS))
9108 fprintf (dump_file, "Meeting\n ");
9109 dump_value_range (dump_file, vr0);
9110 fprintf (dump_file, "\nand\n ");
9111 dump_value_range (dump_file, vr1);
9112 fprintf (dump_file, "\n");
9114 vrp_meet_1 (vr0, vr1);
9115 if (dump_file && (dump_flags & TDF_DETAILS))
9117 fprintf (dump_file, "to\n ");
9118 dump_value_range (dump_file, vr0);
9119 fprintf (dump_file, "\n");
9124 /* Visit all arguments for PHI node PHI that flow through executable
9125 edges. If a valid value range can be derived from all the incoming
9126 value ranges, set a new range in VR_RESULT. */
9128 static void
9129 extract_range_from_phi_node (gphi *phi, value_range *vr_result)
9131 size_t i;
9132 tree lhs = PHI_RESULT (phi);
9133 value_range *lhs_vr = get_value_range (lhs);
9134 bool first = true;
9135 int edges, old_edges;
9136 struct loop *l;
9138 if (dump_file && (dump_flags & TDF_DETAILS))
9140 fprintf (dump_file, "\nVisiting PHI node: ");
9141 print_gimple_stmt (dump_file, phi, 0, dump_flags);
9144 bool may_simulate_backedge_again = false;
9145 edges = 0;
9146 for (i = 0; i < gimple_phi_num_args (phi); i++)
9148 edge e = gimple_phi_arg_edge (phi, i);
9150 if (dump_file && (dump_flags & TDF_DETAILS))
9152 fprintf (dump_file,
9153 " Argument #%d (%d -> %d %sexecutable)\n",
9154 (int) i, e->src->index, e->dest->index,
9155 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
9158 if (e->flags & EDGE_EXECUTABLE)
9160 tree arg = PHI_ARG_DEF (phi, i);
9161 value_range vr_arg;
9163 ++edges;
9165 if (TREE_CODE (arg) == SSA_NAME)
9167 /* See if we are eventually going to change one of the args. */
9168 gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
9169 if (! gimple_nop_p (def_stmt)
9170 && prop_simulate_again_p (def_stmt)
9171 && e->flags & EDGE_DFS_BACK)
9172 may_simulate_backedge_again = true;
9174 vr_arg = *(get_value_range (arg));
9175 /* Do not allow equivalences or symbolic ranges to leak in from
9176 backedges. That creates invalid equivalencies.
9177 See PR53465 and PR54767. */
9178 if (e->flags & EDGE_DFS_BACK)
9180 if (vr_arg.type == VR_RANGE
9181 || vr_arg.type == VR_ANTI_RANGE)
9183 vr_arg.equiv = NULL;
9184 if (symbolic_range_p (&vr_arg))
9186 vr_arg.type = VR_VARYING;
9187 vr_arg.min = NULL_TREE;
9188 vr_arg.max = NULL_TREE;
9192 else
9194 /* If the non-backedge arguments range is VR_VARYING then
9195 we can still try recording a simple equivalence. */
9196 if (vr_arg.type == VR_VARYING)
9198 vr_arg.type = VR_RANGE;
9199 vr_arg.min = arg;
9200 vr_arg.max = arg;
9201 vr_arg.equiv = NULL;
9205 else
9207 if (TREE_OVERFLOW_P (arg))
9208 arg = drop_tree_overflow (arg);
9210 vr_arg.type = VR_RANGE;
9211 vr_arg.min = arg;
9212 vr_arg.max = arg;
9213 vr_arg.equiv = NULL;
9216 if (dump_file && (dump_flags & TDF_DETAILS))
9218 fprintf (dump_file, "\t");
9219 print_generic_expr (dump_file, arg, dump_flags);
9220 fprintf (dump_file, ": ");
9221 dump_value_range (dump_file, &vr_arg);
9222 fprintf (dump_file, "\n");
9225 if (first)
9226 copy_value_range (vr_result, &vr_arg);
9227 else
9228 vrp_meet (vr_result, &vr_arg);
9229 first = false;
9231 if (vr_result->type == VR_VARYING)
9232 break;
9236 if (vr_result->type == VR_VARYING)
9237 goto varying;
9238 else if (vr_result->type == VR_UNDEFINED)
9239 goto update_range;
9241 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
9242 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
9244 /* To prevent infinite iterations in the algorithm, derive ranges
9245 when the new value is slightly bigger or smaller than the
9246 previous one. We don't do this if we have seen a new executable
9247 edge; this helps us avoid an overflow infinity for conditionals
9248 which are not in a loop. If the old value-range was VR_UNDEFINED
9249 use the updated range and iterate one more time. If we will not
9250 simulate this PHI again via the backedge allow us to iterate. */
9251 if (edges > 0
9252 && gimple_phi_num_args (phi) > 1
9253 && edges == old_edges
9254 && lhs_vr->type != VR_UNDEFINED
9255 && may_simulate_backedge_again)
9257 /* Compare old and new ranges, fall back to varying if the
9258 values are not comparable. */
9259 int cmp_min = compare_values (lhs_vr->min, vr_result->min);
9260 if (cmp_min == -2)
9261 goto varying;
9262 int cmp_max = compare_values (lhs_vr->max, vr_result->max);
9263 if (cmp_max == -2)
9264 goto varying;
9266 /* For non VR_RANGE or for pointers fall back to varying if
9267 the range changed. */
9268 if ((lhs_vr->type != VR_RANGE || vr_result->type != VR_RANGE
9269 || POINTER_TYPE_P (TREE_TYPE (lhs)))
9270 && (cmp_min != 0 || cmp_max != 0))
9271 goto varying;
9273 /* If the new minimum is larger than the previous one
9274 retain the old value. If the new minimum value is smaller
9275 than the previous one and not -INF go all the way to -INF + 1.
9276 In the first case, to avoid infinite bouncing between different
9277 minimums, and in the other case to avoid iterating millions of
9278 times to reach -INF. Going to -INF + 1 also lets the following
9279 iteration compute whether there will be any overflow, at the
9280 expense of one additional iteration. */
9281 if (cmp_min < 0)
9282 vr_result->min = lhs_vr->min;
9283 else if (cmp_min > 0
9284 && !vrp_val_is_min (vr_result->min))
9285 vr_result->min
9286 = int_const_binop (PLUS_EXPR,
9287 vrp_val_min (TREE_TYPE (vr_result->min)),
9288 build_int_cst (TREE_TYPE (vr_result->min), 1));
9290 /* Similarly for the maximum value. */
9291 if (cmp_max > 0)
9292 vr_result->max = lhs_vr->max;
9293 else if (cmp_max < 0
9294 && !vrp_val_is_max (vr_result->max))
9295 vr_result->max
9296 = int_const_binop (MINUS_EXPR,
9297 vrp_val_max (TREE_TYPE (vr_result->min)),
9298 build_int_cst (TREE_TYPE (vr_result->min), 1));
9300 /* If we dropped either bound to +-INF then if this is a loop
9301 PHI node SCEV may known more about its value-range. */
9302 if (cmp_min > 0 || cmp_min < 0
9303 || cmp_max < 0 || cmp_max > 0)
9304 goto scev_check;
9306 goto infinite_check;
9309 goto update_range;
9311 varying:
9312 set_value_range_to_varying (vr_result);
9314 scev_check:
9315 /* If this is a loop PHI node SCEV may known more about its value-range.
9316 scev_check can be reached from two paths, one is a fall through from above
9317 "varying" label, the other is direct goto from code block which tries to
9318 avoid infinite simulation. */
9319 if ((l = loop_containing_stmt (phi))
9320 && l->header == gimple_bb (phi))
9321 adjust_range_with_scev (vr_result, l, phi, lhs);
9323 infinite_check:
9324 /* If we will end up with a (-INF, +INF) range, set it to
9325 VARYING. Same if the previous max value was invalid for
9326 the type and we end up with vr_result.min > vr_result.max. */
9327 if ((vr_result->type == VR_RANGE || vr_result->type == VR_ANTI_RANGE)
9328 && !((vrp_val_is_max (vr_result->max) && vrp_val_is_min (vr_result->min))
9329 || compare_values (vr_result->min, vr_result->max) > 0))
9331 else
9332 set_value_range_to_varying (vr_result);
9334 /* If the new range is different than the previous value, keep
9335 iterating. */
9336 update_range:
9337 return;
9340 /* Visit all arguments for PHI node PHI that flow through executable
9341 edges. If a valid value range can be derived from all the incoming
9342 value ranges, set a new range for the LHS of PHI. */
9344 static enum ssa_prop_result
9345 vrp_visit_phi_node (gphi *phi)
9347 tree lhs = PHI_RESULT (phi);
9348 value_range vr_result = VR_INITIALIZER;
9349 extract_range_from_phi_node (phi, &vr_result);
9350 if (update_value_range (lhs, &vr_result))
9352 if (dump_file && (dump_flags & TDF_DETAILS))
9354 fprintf (dump_file, "Found new range for ");
9355 print_generic_expr (dump_file, lhs, 0);
9356 fprintf (dump_file, ": ");
9357 dump_value_range (dump_file, &vr_result);
9358 fprintf (dump_file, "\n");
9361 if (vr_result.type == VR_VARYING)
9362 return SSA_PROP_VARYING;
9364 return SSA_PROP_INTERESTING;
9367 /* Nothing changed, don't add outgoing edges. */
9368 return SSA_PROP_NOT_INTERESTING;
9371 /* Simplify boolean operations if the source is known
9372 to be already a boolean. */
9373 static bool
9374 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9376 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9377 tree lhs, op0, op1;
9378 bool need_conversion;
9380 /* We handle only !=/== case here. */
9381 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
9383 op0 = gimple_assign_rhs1 (stmt);
9384 if (!op_with_boolean_value_range_p (op0))
9385 return false;
9387 op1 = gimple_assign_rhs2 (stmt);
9388 if (!op_with_boolean_value_range_p (op1))
9389 return false;
9391 /* Reduce number of cases to handle to NE_EXPR. As there is no
9392 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
9393 if (rhs_code == EQ_EXPR)
9395 if (TREE_CODE (op1) == INTEGER_CST)
9396 op1 = int_const_binop (BIT_XOR_EXPR, op1,
9397 build_int_cst (TREE_TYPE (op1), 1));
9398 else
9399 return false;
9402 lhs = gimple_assign_lhs (stmt);
9403 need_conversion
9404 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
9406 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
9407 if (need_conversion
9408 && !TYPE_UNSIGNED (TREE_TYPE (op0))
9409 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
9410 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
9411 return false;
9413 /* For A != 0 we can substitute A itself. */
9414 if (integer_zerop (op1))
9415 gimple_assign_set_rhs_with_ops (gsi,
9416 need_conversion
9417 ? NOP_EXPR : TREE_CODE (op0), op0);
9418 /* For A != B we substitute A ^ B. Either with conversion. */
9419 else if (need_conversion)
9421 tree tem = make_ssa_name (TREE_TYPE (op0));
9422 gassign *newop
9423 = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
9424 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
9425 if (INTEGRAL_TYPE_P (TREE_TYPE (tem))
9426 && TYPE_PRECISION (TREE_TYPE (tem)) > 1)
9427 set_range_info (tem, VR_RANGE,
9428 wi::zero (TYPE_PRECISION (TREE_TYPE (tem))),
9429 wi::one (TYPE_PRECISION (TREE_TYPE (tem))));
9430 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
9432 /* Or without. */
9433 else
9434 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
9435 update_stmt (gsi_stmt (*gsi));
9436 fold_stmt (gsi, follow_single_use_edges);
9438 return true;
9441 /* Simplify a division or modulo operator to a right shift or bitwise and
9442 if the first operand is unsigned or is greater than zero and the second
9443 operand is an exact power of two. For TRUNC_MOD_EXPR op0 % op1 with
9444 constant op1 (op1min = op1) or with op1 in [op1min, op1max] range,
9445 optimize it into just op0 if op0's range is known to be a subset of
9446 [-op1min + 1, op1min - 1] for signed and [0, op1min - 1] for unsigned
9447 modulo. */
9449 static bool
9450 simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9452 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9453 tree val = NULL;
9454 tree op0 = gimple_assign_rhs1 (stmt);
9455 tree op1 = gimple_assign_rhs2 (stmt);
9456 tree op0min = NULL_TREE, op0max = NULL_TREE;
9457 tree op1min = op1;
9458 value_range *vr = NULL;
9460 if (TREE_CODE (op0) == INTEGER_CST)
9462 op0min = op0;
9463 op0max = op0;
9465 else
9467 vr = get_value_range (op0);
9468 if (range_int_cst_p (vr))
9470 op0min = vr->min;
9471 op0max = vr->max;
9475 if (rhs_code == TRUNC_MOD_EXPR
9476 && TREE_CODE (op1) == SSA_NAME)
9478 value_range *vr1 = get_value_range (op1);
9479 if (range_int_cst_p (vr1))
9480 op1min = vr1->min;
9482 if (rhs_code == TRUNC_MOD_EXPR
9483 && TREE_CODE (op1min) == INTEGER_CST
9484 && tree_int_cst_sgn (op1min) == 1
9485 && op0max
9486 && tree_int_cst_lt (op0max, op1min))
9488 if (TYPE_UNSIGNED (TREE_TYPE (op0))
9489 || tree_int_cst_sgn (op0min) >= 0
9490 || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1min), op1min),
9491 op0min))
9493 /* If op0 already has the range op0 % op1 has,
9494 then TRUNC_MOD_EXPR won't change anything. */
9495 gimple_assign_set_rhs_from_tree (gsi, op0);
9496 return true;
9500 if (TREE_CODE (op0) != SSA_NAME)
9501 return false;
9503 if (!integer_pow2p (op1))
9505 /* X % -Y can be only optimized into X % Y either if
9506 X is not INT_MIN, or Y is not -1. Fold it now, as after
9507 remove_range_assertions the range info might be not available
9508 anymore. */
9509 if (rhs_code == TRUNC_MOD_EXPR
9510 && fold_stmt (gsi, follow_single_use_edges))
9511 return true;
9512 return false;
9515 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
9516 val = integer_one_node;
9517 else
9519 bool sop = false;
9521 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
9523 if (val
9524 && sop
9525 && integer_onep (val)
9526 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9528 location_t location;
9530 if (!gimple_has_location (stmt))
9531 location = input_location;
9532 else
9533 location = gimple_location (stmt);
9534 warning_at (location, OPT_Wstrict_overflow,
9535 "assuming signed overflow does not occur when "
9536 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
9540 if (val && integer_onep (val))
9542 tree t;
9544 if (rhs_code == TRUNC_DIV_EXPR)
9546 t = build_int_cst (integer_type_node, tree_log2 (op1));
9547 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
9548 gimple_assign_set_rhs1 (stmt, op0);
9549 gimple_assign_set_rhs2 (stmt, t);
9551 else
9553 t = build_int_cst (TREE_TYPE (op1), 1);
9554 t = int_const_binop (MINUS_EXPR, op1, t);
9555 t = fold_convert (TREE_TYPE (op0), t);
9557 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
9558 gimple_assign_set_rhs1 (stmt, op0);
9559 gimple_assign_set_rhs2 (stmt, t);
9562 update_stmt (stmt);
9563 fold_stmt (gsi, follow_single_use_edges);
9564 return true;
9567 return false;
9570 /* Simplify a min or max if the ranges of the two operands are
9571 disjoint. Return true if we do simplify. */
9573 static bool
9574 simplify_min_or_max_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9576 tree op0 = gimple_assign_rhs1 (stmt);
9577 tree op1 = gimple_assign_rhs2 (stmt);
9578 bool sop = false;
9579 tree val;
9581 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9582 (LE_EXPR, op0, op1, &sop));
9583 if (!val)
9585 sop = false;
9586 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9587 (LT_EXPR, op0, op1, &sop));
9590 if (val)
9592 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9594 location_t location;
9596 if (!gimple_has_location (stmt))
9597 location = input_location;
9598 else
9599 location = gimple_location (stmt);
9600 warning_at (location, OPT_Wstrict_overflow,
9601 "assuming signed overflow does not occur when "
9602 "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
9605 /* VAL == TRUE -> OP0 < or <= op1
9606 VAL == FALSE -> OP0 > or >= op1. */
9607 tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
9608 == integer_zerop (val)) ? op0 : op1;
9609 gimple_assign_set_rhs_from_tree (gsi, res);
9610 return true;
9613 return false;
9616 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
9617 ABS_EXPR. If the operand is <= 0, then simplify the
9618 ABS_EXPR into a NEGATE_EXPR. */
9620 static bool
9621 simplify_abs_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9623 tree op = gimple_assign_rhs1 (stmt);
9624 value_range *vr = get_value_range (op);
9626 if (vr)
9628 tree val = NULL;
9629 bool sop = false;
9631 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
9632 if (!val)
9634 /* The range is neither <= 0 nor > 0. Now see if it is
9635 either < 0 or >= 0. */
9636 sop = false;
9637 val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
9638 &sop);
9641 if (val)
9643 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9645 location_t location;
9647 if (!gimple_has_location (stmt))
9648 location = input_location;
9649 else
9650 location = gimple_location (stmt);
9651 warning_at (location, OPT_Wstrict_overflow,
9652 "assuming signed overflow does not occur when "
9653 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
9656 gimple_assign_set_rhs1 (stmt, op);
9657 if (integer_zerop (val))
9658 gimple_assign_set_rhs_code (stmt, SSA_NAME);
9659 else
9660 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
9661 update_stmt (stmt);
9662 fold_stmt (gsi, follow_single_use_edges);
9663 return true;
9667 return false;
9670 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
9671 If all the bits that are being cleared by & are already
9672 known to be zero from VR, or all the bits that are being
9673 set by | are already known to be one from VR, the bit
9674 operation is redundant. */
9676 static bool
9677 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9679 tree op0 = gimple_assign_rhs1 (stmt);
9680 tree op1 = gimple_assign_rhs2 (stmt);
9681 tree op = NULL_TREE;
9682 value_range vr0 = VR_INITIALIZER;
9683 value_range vr1 = VR_INITIALIZER;
9684 wide_int may_be_nonzero0, may_be_nonzero1;
9685 wide_int must_be_nonzero0, must_be_nonzero1;
9686 wide_int mask;
9688 if (TREE_CODE (op0) == SSA_NAME)
9689 vr0 = *(get_value_range (op0));
9690 else if (is_gimple_min_invariant (op0))
9691 set_value_range_to_value (&vr0, op0, NULL);
9692 else
9693 return false;
9695 if (TREE_CODE (op1) == SSA_NAME)
9696 vr1 = *(get_value_range (op1));
9697 else if (is_gimple_min_invariant (op1))
9698 set_value_range_to_value (&vr1, op1, NULL);
9699 else
9700 return false;
9702 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
9703 &must_be_nonzero0))
9704 return false;
9705 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
9706 &must_be_nonzero1))
9707 return false;
9709 switch (gimple_assign_rhs_code (stmt))
9711 case BIT_AND_EXPR:
9712 mask = may_be_nonzero0.and_not (must_be_nonzero1);
9713 if (mask == 0)
9715 op = op0;
9716 break;
9718 mask = may_be_nonzero1.and_not (must_be_nonzero0);
9719 if (mask == 0)
9721 op = op1;
9722 break;
9724 break;
9725 case BIT_IOR_EXPR:
9726 mask = may_be_nonzero0.and_not (must_be_nonzero1);
9727 if (mask == 0)
9729 op = op1;
9730 break;
9732 mask = may_be_nonzero1.and_not (must_be_nonzero0);
9733 if (mask == 0)
9735 op = op0;
9736 break;
9738 break;
9739 default:
9740 gcc_unreachable ();
9743 if (op == NULL_TREE)
9744 return false;
9746 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
9747 update_stmt (gsi_stmt (*gsi));
9748 return true;
9751 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
9752 a known value range VR.
9754 If there is one and only one value which will satisfy the
9755 conditional, then return that value. Else return NULL.
9757 If signed overflow must be undefined for the value to satisfy
9758 the conditional, then set *STRICT_OVERFLOW_P to true. */
9760 static tree
9761 test_for_singularity (enum tree_code cond_code, tree op0,
9762 tree op1, value_range *vr,
9763 bool *strict_overflow_p)
9765 tree min = NULL;
9766 tree max = NULL;
9768 /* Extract minimum/maximum values which satisfy the conditional as it was
9769 written. */
9770 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
9772 /* This should not be negative infinity; there is no overflow
9773 here. */
9774 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
9776 max = op1;
9777 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
9779 tree one = build_int_cst (TREE_TYPE (op0), 1);
9780 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
9781 if (EXPR_P (max))
9782 TREE_NO_WARNING (max) = 1;
9785 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
9787 /* This should not be positive infinity; there is no overflow
9788 here. */
9789 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
9791 min = op1;
9792 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
9794 tree one = build_int_cst (TREE_TYPE (op0), 1);
9795 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
9796 if (EXPR_P (min))
9797 TREE_NO_WARNING (min) = 1;
9801 /* Now refine the minimum and maximum values using any
9802 value range information we have for op0. */
9803 if (min && max)
9805 if (compare_values (vr->min, min) == 1)
9806 min = vr->min;
9807 if (compare_values (vr->max, max) == -1)
9808 max = vr->max;
9810 /* If the new min/max values have converged to a single value,
9811 then there is only one value which can satisfy the condition,
9812 return that value. */
9813 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
9815 if ((cond_code == LE_EXPR || cond_code == LT_EXPR)
9816 && is_overflow_infinity (vr->max))
9817 *strict_overflow_p = true;
9818 if ((cond_code == GE_EXPR || cond_code == GT_EXPR)
9819 && is_overflow_infinity (vr->min))
9820 *strict_overflow_p = true;
9822 return min;
9825 return NULL;
9828 /* Return whether the value range *VR fits in an integer type specified
9829 by PRECISION and UNSIGNED_P. */
9831 static bool
9832 range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
9834 tree src_type;
9835 unsigned src_precision;
9836 widest_int tem;
9837 signop src_sgn;
9839 /* We can only handle integral and pointer types. */
9840 src_type = TREE_TYPE (vr->min);
9841 if (!INTEGRAL_TYPE_P (src_type)
9842 && !POINTER_TYPE_P (src_type))
9843 return false;
9845 /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
9846 and so is an identity transform. */
9847 src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
9848 src_sgn = TYPE_SIGN (src_type);
9849 if ((src_precision < dest_precision
9850 && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
9851 || (src_precision == dest_precision && src_sgn == dest_sgn))
9852 return true;
9854 /* Now we can only handle ranges with constant bounds. */
9855 if (vr->type != VR_RANGE
9856 || TREE_CODE (vr->min) != INTEGER_CST
9857 || TREE_CODE (vr->max) != INTEGER_CST)
9858 return false;
9860 /* For sign changes, the MSB of the wide_int has to be clear.
9861 An unsigned value with its MSB set cannot be represented by
9862 a signed wide_int, while a negative value cannot be represented
9863 by an unsigned wide_int. */
9864 if (src_sgn != dest_sgn
9865 && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
9866 return false;
9868 /* Then we can perform the conversion on both ends and compare
9869 the result for equality. */
9870 tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
9871 if (tem != wi::to_widest (vr->min))
9872 return false;
9873 tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
9874 if (tem != wi::to_widest (vr->max))
9875 return false;
9877 return true;
9880 /* Simplify a conditional using a relational operator to an equality
9881 test if the range information indicates only one value can satisfy
9882 the original conditional. */
9884 static bool
9885 simplify_cond_using_ranges (gcond *stmt)
9887 tree op0 = gimple_cond_lhs (stmt);
9888 tree op1 = gimple_cond_rhs (stmt);
9889 enum tree_code cond_code = gimple_cond_code (stmt);
9891 if (cond_code != NE_EXPR
9892 && cond_code != EQ_EXPR
9893 && TREE_CODE (op0) == SSA_NAME
9894 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
9895 && is_gimple_min_invariant (op1))
9897 value_range *vr = get_value_range (op0);
9899 /* If we have range information for OP0, then we might be
9900 able to simplify this conditional. */
9901 if (vr->type == VR_RANGE)
9903 enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
9904 bool sop = false;
9905 tree new_tree = test_for_singularity (cond_code, op0, op1, vr, &sop);
9907 if (new_tree
9908 && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
9910 if (dump_file)
9912 fprintf (dump_file, "Simplified relational ");
9913 print_gimple_stmt (dump_file, stmt, 0, 0);
9914 fprintf (dump_file, " into ");
9917 gimple_cond_set_code (stmt, EQ_EXPR);
9918 gimple_cond_set_lhs (stmt, op0);
9919 gimple_cond_set_rhs (stmt, new_tree);
9921 update_stmt (stmt);
9923 if (dump_file)
9925 print_gimple_stmt (dump_file, stmt, 0, 0);
9926 fprintf (dump_file, "\n");
9929 if (sop && issue_strict_overflow_warning (wc))
9931 location_t location = input_location;
9932 if (gimple_has_location (stmt))
9933 location = gimple_location (stmt);
9935 warning_at (location, OPT_Wstrict_overflow,
9936 "assuming signed overflow does not occur when "
9937 "simplifying conditional");
9940 return true;
9943 /* Try again after inverting the condition. We only deal
9944 with integral types here, so no need to worry about
9945 issues with inverting FP comparisons. */
9946 sop = false;
9947 new_tree = test_for_singularity
9948 (invert_tree_comparison (cond_code, false),
9949 op0, op1, vr, &sop);
9951 if (new_tree
9952 && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
9954 if (dump_file)
9956 fprintf (dump_file, "Simplified relational ");
9957 print_gimple_stmt (dump_file, stmt, 0, 0);
9958 fprintf (dump_file, " into ");
9961 gimple_cond_set_code (stmt, NE_EXPR);
9962 gimple_cond_set_lhs (stmt, op0);
9963 gimple_cond_set_rhs (stmt, new_tree);
9965 update_stmt (stmt);
9967 if (dump_file)
9969 print_gimple_stmt (dump_file, stmt, 0, 0);
9970 fprintf (dump_file, "\n");
9973 if (sop && issue_strict_overflow_warning (wc))
9975 location_t location = input_location;
9976 if (gimple_has_location (stmt))
9977 location = gimple_location (stmt);
9979 warning_at (location, OPT_Wstrict_overflow,
9980 "assuming signed overflow does not occur when "
9981 "simplifying conditional");
9984 return true;
9989 /* If we have a comparison of an SSA_NAME (OP0) against a constant,
9990 see if OP0 was set by a type conversion where the source of
9991 the conversion is another SSA_NAME with a range that fits
9992 into the range of OP0's type.
9994 If so, the conversion is redundant as the earlier SSA_NAME can be
9995 used for the comparison directly if we just massage the constant in the
9996 comparison. */
9997 if (TREE_CODE (op0) == SSA_NAME
9998 && TREE_CODE (op1) == INTEGER_CST)
10000 gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
10001 tree innerop;
10003 if (!is_gimple_assign (def_stmt)
10004 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
10005 return false;
10007 innerop = gimple_assign_rhs1 (def_stmt);
10009 if (TREE_CODE (innerop) == SSA_NAME
10010 && !POINTER_TYPE_P (TREE_TYPE (innerop))
10011 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
10012 && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
10014 value_range *vr = get_value_range (innerop);
10016 if (range_int_cst_p (vr)
10017 && range_fits_type_p (vr,
10018 TYPE_PRECISION (TREE_TYPE (op0)),
10019 TYPE_SIGN (TREE_TYPE (op0)))
10020 && int_fits_type_p (op1, TREE_TYPE (innerop))
10021 /* The range must not have overflowed, or if it did overflow
10022 we must not be wrapping/trapping overflow and optimizing
10023 with strict overflow semantics. */
10024 && ((!is_negative_overflow_infinity (vr->min)
10025 && !is_positive_overflow_infinity (vr->max))
10026 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
10028 /* If the range overflowed and the user has asked for warnings
10029 when strict overflow semantics were used to optimize code,
10030 issue an appropriate warning. */
10031 if (cond_code != EQ_EXPR && cond_code != NE_EXPR
10032 && (is_negative_overflow_infinity (vr->min)
10033 || is_positive_overflow_infinity (vr->max))
10034 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
10036 location_t location;
10038 if (!gimple_has_location (stmt))
10039 location = input_location;
10040 else
10041 location = gimple_location (stmt);
10042 warning_at (location, OPT_Wstrict_overflow,
10043 "assuming signed overflow does not occur when "
10044 "simplifying conditional");
10047 tree newconst = fold_convert (TREE_TYPE (innerop), op1);
10048 gimple_cond_set_lhs (stmt, innerop);
10049 gimple_cond_set_rhs (stmt, newconst);
10050 return true;
10055 return false;
10058 /* Simplify a switch statement using the value range of the switch
10059 argument. */
10061 static bool
10062 simplify_switch_using_ranges (gswitch *stmt)
10064 tree op = gimple_switch_index (stmt);
10065 value_range *vr = NULL;
10066 bool take_default;
10067 edge e;
10068 edge_iterator ei;
10069 size_t i = 0, j = 0, n, n2;
10070 tree vec2;
10071 switch_update su;
10072 size_t k = 1, l = 0;
10074 if (TREE_CODE (op) == SSA_NAME)
10076 vr = get_value_range (op);
10078 /* We can only handle integer ranges. */
10079 if ((vr->type != VR_RANGE
10080 && vr->type != VR_ANTI_RANGE)
10081 || symbolic_range_p (vr))
10082 return false;
10084 /* Find case label for min/max of the value range. */
10085 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
10087 else if (TREE_CODE (op) == INTEGER_CST)
10089 take_default = !find_case_label_index (stmt, 1, op, &i);
10090 if (take_default)
10092 i = 1;
10093 j = 0;
10095 else
10097 j = i;
10100 else
10101 return false;
10103 n = gimple_switch_num_labels (stmt);
10105 /* We can truncate the case label ranges that partially overlap with OP's
10106 value range. */
10107 size_t min_idx = 1, max_idx = 0;
10108 if (vr != NULL)
10109 find_case_label_range (stmt, vr->min, vr->max, &min_idx, &max_idx);
10110 if (min_idx <= max_idx)
10112 tree min_label = gimple_switch_label (stmt, min_idx);
10113 tree max_label = gimple_switch_label (stmt, max_idx);
10115 /* Avoid changing the type of the case labels when truncating. */
10116 tree case_label_type = TREE_TYPE (CASE_LOW (min_label));
10117 tree vr_min = fold_convert (case_label_type, vr->min);
10118 tree vr_max = fold_convert (case_label_type, vr->max);
10120 if (vr->type == VR_RANGE)
10122 /* If OP's value range is [2,8] and the low label range is
10123 0 ... 3, truncate the label's range to 2 .. 3. */
10124 if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
10125 && CASE_HIGH (min_label) != NULL_TREE
10126 && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
10127 CASE_LOW (min_label) = vr_min;
10129 /* If OP's value range is [2,8] and the high label range is
10130 7 ... 10, truncate the label's range to 7 .. 8. */
10131 if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
10132 && CASE_HIGH (max_label) != NULL_TREE
10133 && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
10134 CASE_HIGH (max_label) = vr_max;
10136 else if (vr->type == VR_ANTI_RANGE)
10138 tree one_cst = build_one_cst (case_label_type);
10140 if (min_label == max_label)
10142 /* If OP's value range is ~[7,8] and the label's range is
10143 7 ... 10, truncate the label's range to 9 ... 10. */
10144 if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) == 0
10145 && CASE_HIGH (min_label) != NULL_TREE
10146 && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) > 0)
10147 CASE_LOW (min_label)
10148 = int_const_binop (PLUS_EXPR, vr_max, one_cst);
10150 /* If OP's value range is ~[7,8] and the label's range is
10151 5 ... 8, truncate the label's range to 5 ... 6. */
10152 if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
10153 && CASE_HIGH (min_label) != NULL_TREE
10154 && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) == 0)
10155 CASE_HIGH (min_label)
10156 = int_const_binop (MINUS_EXPR, vr_min, one_cst);
10158 else
10160 /* If OP's value range is ~[2,8] and the low label range is
10161 0 ... 3, truncate the label's range to 0 ... 1. */
10162 if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
10163 && CASE_HIGH (min_label) != NULL_TREE
10164 && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
10165 CASE_HIGH (min_label)
10166 = int_const_binop (MINUS_EXPR, vr_min, one_cst);
10168 /* If OP's value range is ~[2,8] and the high label range is
10169 7 ... 10, truncate the label's range to 9 ... 10. */
10170 if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
10171 && CASE_HIGH (max_label) != NULL_TREE
10172 && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
10173 CASE_LOW (max_label)
10174 = int_const_binop (PLUS_EXPR, vr_max, one_cst);
10178 /* Canonicalize singleton case ranges. */
10179 if (tree_int_cst_equal (CASE_LOW (min_label), CASE_HIGH (min_label)))
10180 CASE_HIGH (min_label) = NULL_TREE;
10181 if (tree_int_cst_equal (CASE_LOW (max_label), CASE_HIGH (max_label)))
10182 CASE_HIGH (max_label) = NULL_TREE;
10185 /* We can also eliminate case labels that lie completely outside OP's value
10186 range. */
10188 /* Bail out if this is just all edges taken. */
10189 if (i == 1
10190 && j == n - 1
10191 && take_default)
10192 return false;
10194 /* Build a new vector of taken case labels. */
10195 vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
10196 n2 = 0;
10198 /* Add the default edge, if necessary. */
10199 if (take_default)
10200 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
10202 for (; i <= j; ++i, ++n2)
10203 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
10205 for (; k <= l; ++k, ++n2)
10206 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
10208 /* Mark needed edges. */
10209 for (i = 0; i < n2; ++i)
10211 e = find_edge (gimple_bb (stmt),
10212 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
10213 e->aux = (void *)-1;
10216 /* Queue not needed edges for later removal. */
10217 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
10219 if (e->aux == (void *)-1)
10221 e->aux = NULL;
10222 continue;
10225 if (dump_file && (dump_flags & TDF_DETAILS))
10227 fprintf (dump_file, "removing unreachable case label\n");
10229 to_remove_edges.safe_push (e);
10230 e->flags &= ~EDGE_EXECUTABLE;
10233 /* And queue an update for the stmt. */
10234 su.stmt = stmt;
10235 su.vec = vec2;
10236 to_update_switch_stmts.safe_push (su);
10237 return false;
10240 /* Simplify an integral conversion from an SSA name in STMT. */
10242 static bool
10243 simplify_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
10245 tree innerop, middleop, finaltype;
10246 gimple *def_stmt;
10247 signop inner_sgn, middle_sgn, final_sgn;
10248 unsigned inner_prec, middle_prec, final_prec;
10249 widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
10251 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
10252 if (!INTEGRAL_TYPE_P (finaltype))
10253 return false;
10254 middleop = gimple_assign_rhs1 (stmt);
10255 def_stmt = SSA_NAME_DEF_STMT (middleop);
10256 if (!is_gimple_assign (def_stmt)
10257 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
10258 return false;
10259 innerop = gimple_assign_rhs1 (def_stmt);
10260 if (TREE_CODE (innerop) != SSA_NAME
10261 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
10262 return false;
10264 /* Get the value-range of the inner operand. Use get_range_info in
10265 case innerop was created during substitute-and-fold. */
10266 wide_int imin, imax;
10267 if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))
10268 || get_range_info (innerop, &imin, &imax) != VR_RANGE)
10269 return false;
10270 innermin = widest_int::from (imin, TYPE_SIGN (TREE_TYPE (innerop)));
10271 innermax = widest_int::from (imax, TYPE_SIGN (TREE_TYPE (innerop)));
10273 /* Simulate the conversion chain to check if the result is equal if
10274 the middle conversion is removed. */
10275 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
10276 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
10277 final_prec = TYPE_PRECISION (finaltype);
10279 /* If the first conversion is not injective, the second must not
10280 be widening. */
10281 if (wi::gtu_p (innermax - innermin,
10282 wi::mask <widest_int> (middle_prec, false))
10283 && middle_prec < final_prec)
10284 return false;
10285 /* We also want a medium value so that we can track the effect that
10286 narrowing conversions with sign change have. */
10287 inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
10288 if (inner_sgn == UNSIGNED)
10289 innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
10290 else
10291 innermed = 0;
10292 if (wi::cmp (innermin, innermed, inner_sgn) >= 0
10293 || wi::cmp (innermed, innermax, inner_sgn) >= 0)
10294 innermed = innermin;
10296 middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
10297 middlemin = wi::ext (innermin, middle_prec, middle_sgn);
10298 middlemed = wi::ext (innermed, middle_prec, middle_sgn);
10299 middlemax = wi::ext (innermax, middle_prec, middle_sgn);
10301 /* Require that the final conversion applied to both the original
10302 and the intermediate range produces the same result. */
10303 final_sgn = TYPE_SIGN (finaltype);
10304 if (wi::ext (middlemin, final_prec, final_sgn)
10305 != wi::ext (innermin, final_prec, final_sgn)
10306 || wi::ext (middlemed, final_prec, final_sgn)
10307 != wi::ext (innermed, final_prec, final_sgn)
10308 || wi::ext (middlemax, final_prec, final_sgn)
10309 != wi::ext (innermax, final_prec, final_sgn))
10310 return false;
10312 gimple_assign_set_rhs1 (stmt, innerop);
10313 fold_stmt (gsi, follow_single_use_edges);
10314 return true;
10317 /* Simplify a conversion from integral SSA name to float in STMT. */
10319 static bool
10320 simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
10321 gimple *stmt)
10323 tree rhs1 = gimple_assign_rhs1 (stmt);
10324 value_range *vr = get_value_range (rhs1);
10325 machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
10326 machine_mode mode;
10327 tree tem;
10328 gassign *conv;
10330 /* We can only handle constant ranges. */
10331 if (vr->type != VR_RANGE
10332 || TREE_CODE (vr->min) != INTEGER_CST
10333 || TREE_CODE (vr->max) != INTEGER_CST)
10334 return false;
10336 /* First check if we can use a signed type in place of an unsigned. */
10337 if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
10338 && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
10339 != CODE_FOR_nothing)
10340 && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
10341 mode = TYPE_MODE (TREE_TYPE (rhs1));
10342 /* If we can do the conversion in the current input mode do nothing. */
10343 else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
10344 TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
10345 return false;
10346 /* Otherwise search for a mode we can use, starting from the narrowest
10347 integer mode available. */
10348 else
10350 mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
10353 /* If we cannot do a signed conversion to float from mode
10354 or if the value-range does not fit in the signed type
10355 try with a wider mode. */
10356 if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
10357 && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
10358 break;
10360 mode = GET_MODE_WIDER_MODE (mode);
10361 /* But do not widen the input. Instead leave that to the
10362 optabs expansion code. */
10363 if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
10364 return false;
10366 while (mode != VOIDmode);
10367 if (mode == VOIDmode)
10368 return false;
10371 /* It works, insert a truncation or sign-change before the
10372 float conversion. */
10373 tem = make_ssa_name (build_nonstandard_integer_type
10374 (GET_MODE_PRECISION (mode), 0));
10375 conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
10376 gsi_insert_before (gsi, conv, GSI_SAME_STMT);
10377 gimple_assign_set_rhs1 (stmt, tem);
10378 fold_stmt (gsi, follow_single_use_edges);
10380 return true;
10383 /* Simplify an internal fn call using ranges if possible. */
10385 static bool
10386 simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
10388 enum tree_code subcode;
10389 bool is_ubsan = false;
10390 bool ovf = false;
10391 switch (gimple_call_internal_fn (stmt))
10393 case IFN_UBSAN_CHECK_ADD:
10394 subcode = PLUS_EXPR;
10395 is_ubsan = true;
10396 break;
10397 case IFN_UBSAN_CHECK_SUB:
10398 subcode = MINUS_EXPR;
10399 is_ubsan = true;
10400 break;
10401 case IFN_UBSAN_CHECK_MUL:
10402 subcode = MULT_EXPR;
10403 is_ubsan = true;
10404 break;
10405 case IFN_ADD_OVERFLOW:
10406 subcode = PLUS_EXPR;
10407 break;
10408 case IFN_SUB_OVERFLOW:
10409 subcode = MINUS_EXPR;
10410 break;
10411 case IFN_MUL_OVERFLOW:
10412 subcode = MULT_EXPR;
10413 break;
10414 default:
10415 return false;
10418 tree op0 = gimple_call_arg (stmt, 0);
10419 tree op1 = gimple_call_arg (stmt, 1);
10420 tree type;
10421 if (is_ubsan)
10423 type = TREE_TYPE (op0);
10424 if (VECTOR_TYPE_P (type))
10425 return false;
10427 else if (gimple_call_lhs (stmt) == NULL_TREE)
10428 return false;
10429 else
10430 type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
10431 if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
10432 || (is_ubsan && ovf))
10433 return false;
10435 gimple *g;
10436 location_t loc = gimple_location (stmt);
10437 if (is_ubsan)
10438 g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
10439 else
10441 int prec = TYPE_PRECISION (type);
10442 tree utype = type;
10443 if (ovf
10444 || !useless_type_conversion_p (type, TREE_TYPE (op0))
10445 || !useless_type_conversion_p (type, TREE_TYPE (op1)))
10446 utype = build_nonstandard_integer_type (prec, 1);
10447 if (TREE_CODE (op0) == INTEGER_CST)
10448 op0 = fold_convert (utype, op0);
10449 else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
10451 g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
10452 gimple_set_location (g, loc);
10453 gsi_insert_before (gsi, g, GSI_SAME_STMT);
10454 op0 = gimple_assign_lhs (g);
10456 if (TREE_CODE (op1) == INTEGER_CST)
10457 op1 = fold_convert (utype, op1);
10458 else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
10460 g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
10461 gimple_set_location (g, loc);
10462 gsi_insert_before (gsi, g, GSI_SAME_STMT);
10463 op1 = gimple_assign_lhs (g);
10465 g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
10466 gimple_set_location (g, loc);
10467 gsi_insert_before (gsi, g, GSI_SAME_STMT);
10468 if (utype != type)
10470 g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
10471 gimple_assign_lhs (g));
10472 gimple_set_location (g, loc);
10473 gsi_insert_before (gsi, g, GSI_SAME_STMT);
10475 g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
10476 gimple_assign_lhs (g),
10477 build_int_cst (type, ovf));
10479 gimple_set_location (g, loc);
10480 gsi_replace (gsi, g, false);
10481 return true;
10484 /* Return true if VAR is a two-valued variable. Set a and b with the
10485 two-values when it is true. Return false otherwise. */
10487 static bool
10488 two_valued_val_range_p (tree var, tree *a, tree *b)
10490 value_range *vr = get_value_range (var);
10491 if ((vr->type != VR_RANGE
10492 && vr->type != VR_ANTI_RANGE)
10493 || TREE_CODE (vr->min) != INTEGER_CST
10494 || TREE_CODE (vr->max) != INTEGER_CST)
10495 return false;
10497 if (vr->type == VR_RANGE
10498 && wi::sub (vr->max, vr->min) == 1)
10500 *a = vr->min;
10501 *b = vr->max;
10502 return true;
10505 /* ~[TYPE_MIN + 1, TYPE_MAX - 1] */
10506 if (vr->type == VR_ANTI_RANGE
10507 && wi::sub (vr->min, vrp_val_min (TREE_TYPE (var))) == 1
10508 && wi::sub (vrp_val_max (TREE_TYPE (var)), vr->max) == 1)
10510 *a = vrp_val_min (TREE_TYPE (var));
10511 *b = vrp_val_max (TREE_TYPE (var));
10512 return true;
10515 return false;
10518 /* Simplify STMT using ranges if possible. */
10520 static bool
10521 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
10523 gimple *stmt = gsi_stmt (*gsi);
10524 if (is_gimple_assign (stmt))
10526 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
10527 tree rhs1 = gimple_assign_rhs1 (stmt);
10528 tree rhs2 = gimple_assign_rhs2 (stmt);
10529 tree lhs = gimple_assign_lhs (stmt);
10530 tree val1 = NULL_TREE, val2 = NULL_TREE;
10531 use_operand_p use_p;
10532 gimple *use_stmt;
10534 /* Convert:
10535 LHS = CST BINOP VAR
10536 Where VAR is two-valued and LHS is used in GIMPLE_COND only
10538 LHS = VAR == VAL1 ? (CST BINOP VAL1) : (CST BINOP VAL2)
10540 Also handles:
10541 LHS = VAR BINOP CST
10542 Where VAR is two-valued and LHS is used in GIMPLE_COND only
10544 LHS = VAR == VAL1 ? (VAL1 BINOP CST) : (VAL2 BINOP CST) */
10546 if (TREE_CODE_CLASS (rhs_code) == tcc_binary
10547 && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
10548 && ((TREE_CODE (rhs1) == INTEGER_CST
10549 && TREE_CODE (rhs2) == SSA_NAME)
10550 || (TREE_CODE (rhs2) == INTEGER_CST
10551 && TREE_CODE (rhs1) == SSA_NAME))
10552 && single_imm_use (lhs, &use_p, &use_stmt)
10553 && gimple_code (use_stmt) == GIMPLE_COND)
10556 tree new_rhs1 = NULL_TREE;
10557 tree new_rhs2 = NULL_TREE;
10558 tree cmp_var = NULL_TREE;
10560 if (TREE_CODE (rhs2) == SSA_NAME
10561 && two_valued_val_range_p (rhs2, &val1, &val2))
10563 /* Optimize RHS1 OP [VAL1, VAL2]. */
10564 new_rhs1 = int_const_binop (rhs_code, rhs1, val1);
10565 new_rhs2 = int_const_binop (rhs_code, rhs1, val2);
10566 cmp_var = rhs2;
10568 else if (TREE_CODE (rhs1) == SSA_NAME
10569 && two_valued_val_range_p (rhs1, &val1, &val2))
10571 /* Optimize [VAL1, VAL2] OP RHS2. */
10572 new_rhs1 = int_const_binop (rhs_code, val1, rhs2);
10573 new_rhs2 = int_const_binop (rhs_code, val2, rhs2);
10574 cmp_var = rhs1;
10577 /* If we could not find two-vals or the optimzation is invalid as
10578 in divide by zero, new_rhs1 / new_rhs will be NULL_TREE. */
10579 if (new_rhs1 && new_rhs2)
10581 tree cond = build2 (EQ_EXPR, boolean_type_node, cmp_var, val1);
10582 gimple_assign_set_rhs_with_ops (gsi,
10583 COND_EXPR, cond,
10584 new_rhs1,
10585 new_rhs2);
10586 update_stmt (gsi_stmt (*gsi));
10587 fold_stmt (gsi, follow_single_use_edges);
10588 return true;
10592 switch (rhs_code)
10594 case EQ_EXPR:
10595 case NE_EXPR:
10596 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
10597 if the RHS is zero or one, and the LHS are known to be boolean
10598 values. */
10599 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10600 return simplify_truth_ops_using_ranges (gsi, stmt);
10601 break;
10603 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
10604 and BIT_AND_EXPR respectively if the first operand is greater
10605 than zero and the second operand is an exact power of two.
10606 Also optimize TRUNC_MOD_EXPR away if the second operand is
10607 constant and the first operand already has the right value
10608 range. */
10609 case TRUNC_DIV_EXPR:
10610 case TRUNC_MOD_EXPR:
10611 if ((TREE_CODE (rhs1) == SSA_NAME
10612 || TREE_CODE (rhs1) == INTEGER_CST)
10613 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10614 return simplify_div_or_mod_using_ranges (gsi, stmt);
10615 break;
10617 /* Transform ABS (X) into X or -X as appropriate. */
10618 case ABS_EXPR:
10619 if (TREE_CODE (rhs1) == SSA_NAME
10620 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10621 return simplify_abs_using_ranges (gsi, stmt);
10622 break;
10624 case BIT_AND_EXPR:
10625 case BIT_IOR_EXPR:
10626 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
10627 if all the bits being cleared are already cleared or
10628 all the bits being set are already set. */
10629 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10630 return simplify_bit_ops_using_ranges (gsi, stmt);
10631 break;
10633 CASE_CONVERT:
10634 if (TREE_CODE (rhs1) == SSA_NAME
10635 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10636 return simplify_conversion_using_ranges (gsi, stmt);
10637 break;
10639 case FLOAT_EXPR:
10640 if (TREE_CODE (rhs1) == SSA_NAME
10641 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10642 return simplify_float_conversion_using_ranges (gsi, stmt);
10643 break;
10645 case MIN_EXPR:
10646 case MAX_EXPR:
10647 return simplify_min_or_max_using_ranges (gsi, stmt);
10649 default:
10650 break;
10653 else if (gimple_code (stmt) == GIMPLE_COND)
10654 return simplify_cond_using_ranges (as_a <gcond *> (stmt));
10655 else if (gimple_code (stmt) == GIMPLE_SWITCH)
10656 return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
10657 else if (is_gimple_call (stmt)
10658 && gimple_call_internal_p (stmt))
10659 return simplify_internal_call_using_ranges (gsi, stmt);
10661 return false;
10664 /* If the statement pointed by SI has a predicate whose value can be
10665 computed using the value range information computed by VRP, compute
10666 its value and return true. Otherwise, return false. */
10668 static bool
10669 fold_predicate_in (gimple_stmt_iterator *si)
10671 bool assignment_p = false;
10672 tree val;
10673 gimple *stmt = gsi_stmt (*si);
10675 if (is_gimple_assign (stmt)
10676 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
10678 assignment_p = true;
10679 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
10680 gimple_assign_rhs1 (stmt),
10681 gimple_assign_rhs2 (stmt),
10682 stmt);
10684 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
10685 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
10686 gimple_cond_lhs (cond_stmt),
10687 gimple_cond_rhs (cond_stmt),
10688 stmt);
10689 else
10690 return false;
10692 if (val)
10694 if (assignment_p)
10695 val = fold_convert (gimple_expr_type (stmt), val);
10697 if (dump_file)
10699 fprintf (dump_file, "Folding predicate ");
10700 print_gimple_expr (dump_file, stmt, 0, 0);
10701 fprintf (dump_file, " to ");
10702 print_generic_expr (dump_file, val, 0);
10703 fprintf (dump_file, "\n");
10706 if (is_gimple_assign (stmt))
10707 gimple_assign_set_rhs_from_tree (si, val);
10708 else
10710 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
10711 gcond *cond_stmt = as_a <gcond *> (stmt);
10712 if (integer_zerop (val))
10713 gimple_cond_make_false (cond_stmt);
10714 else if (integer_onep (val))
10715 gimple_cond_make_true (cond_stmt);
10716 else
10717 gcc_unreachable ();
10720 return true;
10723 return false;
10726 /* Callback for substitute_and_fold folding the stmt at *SI. */
10728 static bool
10729 vrp_fold_stmt (gimple_stmt_iterator *si)
10731 if (fold_predicate_in (si))
10732 return true;
10734 return simplify_stmt_using_ranges (si);
10737 /* Unwindable const/copy equivalences. */
10738 const_and_copies *equiv_stack;
10740 /* A trivial wrapper so that we can present the generic jump threading
10741 code with a simple API for simplifying statements. STMT is the
10742 statement we want to simplify, WITHIN_STMT provides the location
10743 for any overflow warnings. */
10745 static tree
10746 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
10747 class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED)
10749 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
10750 return vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
10751 gimple_cond_lhs (cond_stmt),
10752 gimple_cond_rhs (cond_stmt),
10753 within_stmt);
10755 /* We simplify a switch statement by trying to determine which case label
10756 will be taken. If we are successful then we return the corresponding
10757 CASE_LABEL_EXPR. */
10758 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
10760 tree op = gimple_switch_index (switch_stmt);
10761 if (TREE_CODE (op) != SSA_NAME)
10762 return NULL_TREE;
10764 value_range *vr = get_value_range (op);
10765 if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
10766 || symbolic_range_p (vr))
10767 return NULL_TREE;
10769 if (vr->type == VR_RANGE)
10771 size_t i, j;
10772 /* Get the range of labels that contain a part of the operand's
10773 value range. */
10774 find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);
10776 /* Is there only one such label? */
10777 if (i == j)
10779 tree label = gimple_switch_label (switch_stmt, i);
10781 /* The i'th label will be taken only if the value range of the
10782 operand is entirely within the bounds of this label. */
10783 if (CASE_HIGH (label) != NULL_TREE
10784 ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
10785 && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
10786 : (tree_int_cst_equal (CASE_LOW (label), vr->min)
10787 && tree_int_cst_equal (vr->min, vr->max)))
10788 return label;
10791 /* If there are no such labels then the default label will be
10792 taken. */
10793 if (i > j)
10794 return gimple_switch_label (switch_stmt, 0);
10797 if (vr->type == VR_ANTI_RANGE)
10799 unsigned n = gimple_switch_num_labels (switch_stmt);
10800 tree min_label = gimple_switch_label (switch_stmt, 1);
10801 tree max_label = gimple_switch_label (switch_stmt, n - 1);
10803 /* The default label will be taken only if the anti-range of the
10804 operand is entirely outside the bounds of all the (non-default)
10805 case labels. */
10806 if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
10807 && (CASE_HIGH (max_label) != NULL_TREE
10808 ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
10809 : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
10810 return gimple_switch_label (switch_stmt, 0);
10813 return NULL_TREE;
10816 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
10818 value_range new_vr = VR_INITIALIZER;
10819 tree lhs = gimple_assign_lhs (assign_stmt);
10821 if (TREE_CODE (lhs) == SSA_NAME
10822 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
10823 || POINTER_TYPE_P (TREE_TYPE (lhs))))
10825 extract_range_from_assignment (&new_vr, assign_stmt);
10826 if (range_int_cst_singleton_p (&new_vr))
10827 return new_vr.min;
10831 return NULL_TREE;
10834 /* Blocks which have more than one predecessor and more than
10835 one successor present jump threading opportunities, i.e.,
10836 when the block is reached from a specific predecessor, we
10837 may be able to determine which of the outgoing edges will
10838 be traversed. When this optimization applies, we are able
10839 to avoid conditionals at runtime and we may expose secondary
10840 optimization opportunities.
10842 This routine is effectively a driver for the generic jump
10843 threading code. It basically just presents the generic code
10844 with edges that may be suitable for jump threading.
10846 Unlike DOM, we do not iterate VRP if jump threading was successful.
10847 While iterating may expose new opportunities for VRP, it is expected
10848 those opportunities would be very limited and the compile time cost
10849 to expose those opportunities would be significant.
10851 As jump threading opportunities are discovered, they are registered
10852 for later realization. */
10854 static void
10855 identify_jump_threads (void)
10857 basic_block bb;
10858 gcond *dummy;
10859 int i;
10860 edge e;
10862 /* Ugh. When substituting values earlier in this pass we can
10863 wipe the dominance information. So rebuild the dominator
10864 information as we need it within the jump threading code. */
10865 calculate_dominance_info (CDI_DOMINATORS);
10867 /* We do not allow VRP information to be used for jump threading
10868 across a back edge in the CFG. Otherwise it becomes too
10869 difficult to avoid eliminating loop exit tests. Of course
10870 EDGE_DFS_BACK is not accurate at this time so we have to
10871 recompute it. */
10872 mark_dfs_back_edges ();
10874 /* Do not thread across edges we are about to remove. Just marking
10875 them as EDGE_IGNORE will do. */
10876 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10877 e->flags |= EDGE_IGNORE;
10879 /* Allocate our unwinder stack to unwind any temporary equivalences
10880 that might be recorded. */
10881 equiv_stack = new const_and_copies ();
10883 /* To avoid lots of silly node creation, we create a single
10884 conditional and just modify it in-place when attempting to
10885 thread jumps. */
10886 dummy = gimple_build_cond (EQ_EXPR,
10887 integer_zero_node, integer_zero_node,
10888 NULL, NULL);
10890 /* Walk through all the blocks finding those which present a
10891 potential jump threading opportunity. We could set this up
10892 as a dominator walker and record data during the walk, but
10893 I doubt it's worth the effort for the classes of jump
10894 threading opportunities we are trying to identify at this
10895 point in compilation. */
10896 FOR_EACH_BB_FN (bb, cfun)
10898 gimple *last;
10900 /* If the generic jump threading code does not find this block
10901 interesting, then there is nothing to do. */
10902 if (! potentially_threadable_block (bb))
10903 continue;
10905 last = last_stmt (bb);
10907 /* We're basically looking for a switch or any kind of conditional with
10908 integral or pointer type arguments. Note the type of the second
10909 argument will be the same as the first argument, so no need to
10910 check it explicitly.
10912 We also handle the case where there are no statements in the
10913 block. This come up with forwarder blocks that are not
10914 optimized away because they lead to a loop header. But we do
10915 want to thread through them as we can sometimes thread to the
10916 loop exit which is obviously profitable. */
10917 if (!last
10918 || gimple_code (last) == GIMPLE_SWITCH
10919 || (gimple_code (last) == GIMPLE_COND
10920 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
10921 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
10922 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
10923 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
10924 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
10926 edge_iterator ei;
10928 /* We've got a block with multiple predecessors and multiple
10929 successors which also ends in a suitable conditional or
10930 switch statement. For each predecessor, see if we can thread
10931 it to a specific successor. */
10932 FOR_EACH_EDGE (e, ei, bb->preds)
10934 /* Do not thread across edges marked to ignoreor abnormal
10935 edges in the CFG. */
10936 if (e->flags & (EDGE_IGNORE | EDGE_COMPLEX))
10937 continue;
10939 thread_across_edge (dummy, e, true, equiv_stack, NULL,
10940 simplify_stmt_for_jump_threading);
10945 /* Clear EDGE_IGNORE. */
10946 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10947 e->flags &= ~EDGE_IGNORE;
10949 /* We do not actually update the CFG or SSA graphs at this point as
10950 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
10951 handle ASSERT_EXPRs gracefully. */
10954 /* We identified all the jump threading opportunities earlier, but could
10955 not transform the CFG at that time. This routine transforms the
10956 CFG and arranges for the dominator tree to be rebuilt if necessary.
10958 Note the SSA graph update will occur during the normal TODO
10959 processing by the pass manager. */
10960 static void
10961 finalize_jump_threads (void)
10963 thread_through_all_blocks (false);
10964 delete equiv_stack;
10967 /* Free VRP lattice. */
10969 static void
10970 vrp_free_lattice ()
10972 /* Free allocated memory. */
10973 free (vr_value);
10974 free (vr_phi_edge_counts);
10975 bitmap_obstack_release (&vrp_equiv_obstack);
10976 vrp_value_range_pool.release ();
10978 /* So that we can distinguish between VRP data being available
10979 and not available. */
10980 vr_value = NULL;
10981 vr_phi_edge_counts = NULL;
10984 /* Traverse all the blocks folding conditionals with known ranges. */
10986 static void
10987 vrp_finalize (bool warn_array_bounds_p)
10989 size_t i;
10991 values_propagated = true;
10993 if (dump_file)
10995 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
10996 dump_all_value_ranges (dump_file);
10997 fprintf (dump_file, "\n");
11000 /* Set value range to non pointer SSA_NAMEs. */
11001 for (i = 0; i < num_vr_values; i++)
11002 if (vr_value[i])
11004 tree name = ssa_name (i);
11006 if (!name
11007 || (vr_value[i]->type == VR_VARYING)
11008 || (vr_value[i]->type == VR_UNDEFINED)
11009 || (TREE_CODE (vr_value[i]->min) != INTEGER_CST)
11010 || (TREE_CODE (vr_value[i]->max) != INTEGER_CST))
11011 continue;
11013 if (POINTER_TYPE_P (TREE_TYPE (name))
11014 && ((vr_value[i]->type == VR_RANGE
11015 && range_includes_zero_p (vr_value[i]->min,
11016 vr_value[i]->max) == 0)
11017 || (vr_value[i]->type == VR_ANTI_RANGE
11018 && range_includes_zero_p (vr_value[i]->min,
11019 vr_value[i]->max) == 1)))
11020 set_ptr_nonnull (name);
11021 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
11022 set_range_info (name, vr_value[i]->type, vr_value[i]->min,
11023 vr_value[i]->max);
11026 substitute_and_fold (op_with_constant_singleton_value_range, vrp_fold_stmt);
11028 if (warn_array_bounds && warn_array_bounds_p)
11029 check_all_array_refs ();
11031 /* We must identify jump threading opportunities before we release
11032 the datastructures built by VRP. */
11033 identify_jump_threads ();
11036 /* evrp_dom_walker visits the basic blocks in the dominance order and set
11037 the Value Ranges (VR) for SSA_NAMEs in the scope. Use this VR to
11038 discover more VRs. */
11040 class evrp_dom_walker : public dom_walker
11042 public:
11043 evrp_dom_walker ()
11044 : dom_walker (CDI_DOMINATORS), stack (10)
11046 need_eh_cleanup = BITMAP_ALLOC (NULL);
11048 ~evrp_dom_walker ()
11050 BITMAP_FREE (need_eh_cleanup);
11052 virtual edge before_dom_children (basic_block);
11053 virtual void after_dom_children (basic_block);
11054 void push_value_range (tree var, value_range *vr);
11055 value_range *pop_value_range (tree var);
11056 value_range *try_find_new_range (tree op, tree_code code, tree limit);
11058 /* Cond_stack holds the old VR. */
11059 auto_vec<std::pair <tree, value_range*> > stack;
11060 bitmap need_eh_cleanup;
11061 auto_vec<gimple *> stmts_to_fixup;
11062 auto_vec<gimple *> stmts_to_remove;
11065 /* Find new range for OP such that (OP CODE LIMIT) is true. */
11067 value_range *
11068 evrp_dom_walker::try_find_new_range (tree op, tree_code code, tree limit)
11070 value_range vr = VR_INITIALIZER;
11071 value_range *old_vr = get_value_range (op);
11073 /* Discover VR when condition is true. */
11074 extract_range_for_var_from_comparison_expr (op, code, op,
11075 limit, &vr);
11076 if (old_vr->type == VR_RANGE || old_vr->type == VR_ANTI_RANGE)
11077 vrp_intersect_ranges (&vr, old_vr);
11078 /* If we found any usable VR, set the VR to ssa_name and create a
11079 PUSH old value in the stack with the old VR. */
11080 if (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE)
11082 if (old_vr->type == vr.type
11083 && vrp_operand_equal_p (old_vr->min, vr.min)
11084 && vrp_operand_equal_p (old_vr->max, vr.max))
11085 return NULL;
11086 value_range *new_vr = vrp_value_range_pool.allocate ();
11087 *new_vr = vr;
11088 return new_vr;
11090 return NULL;
11093 /* See if there is any new scope is entered with new VR and set that VR to
11094 ssa_name before visiting the statements in the scope. */
11096 edge
11097 evrp_dom_walker::before_dom_children (basic_block bb)
11099 tree op0 = NULL_TREE;
11100 edge_iterator ei;
11101 edge e;
11103 if (dump_file && (dump_flags & TDF_DETAILS))
11104 fprintf (dump_file, "Visiting BB%d\n", bb->index);
11106 stack.safe_push (std::make_pair (NULL_TREE, (value_range *)NULL));
11108 edge pred_e = NULL;
11109 FOR_EACH_EDGE (e, ei, bb->preds)
11111 /* Ignore simple backedges from this to allow recording conditions
11112 in loop headers. */
11113 if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
11114 continue;
11115 if (! pred_e)
11116 pred_e = e;
11117 else
11119 pred_e = NULL;
11120 break;
11123 if (pred_e)
11125 gimple *stmt = last_stmt (pred_e->src);
11126 if (stmt
11127 && gimple_code (stmt) == GIMPLE_COND
11128 && (op0 = gimple_cond_lhs (stmt))
11129 && TREE_CODE (op0) == SSA_NAME
11130 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))
11131 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))))
11133 if (dump_file && (dump_flags & TDF_DETAILS))
11135 fprintf (dump_file, "Visiting controlling predicate ");
11136 print_gimple_stmt (dump_file, stmt, 0, 0);
11138 /* Entering a new scope. Try to see if we can find a VR
11139 here. */
11140 tree op1 = gimple_cond_rhs (stmt);
11141 tree_code code = gimple_cond_code (stmt);
11143 if (TREE_OVERFLOW_P (op1))
11144 op1 = drop_tree_overflow (op1);
11146 /* If condition is false, invert the cond. */
11147 if (pred_e->flags & EDGE_FALSE_VALUE)
11148 code = invert_tree_comparison (gimple_cond_code (stmt),
11149 HONOR_NANS (op0));
11150 /* Add VR when (OP0 CODE OP1) condition is true. */
11151 value_range *op0_range = try_find_new_range (op0, code, op1);
11153 /* Register ranges for y in x < y where
11154 y might have ranges that are useful. */
11155 tree limit;
11156 tree_code new_code;
11157 if (TREE_CODE (op1) == SSA_NAME
11158 && extract_code_and_val_from_cond_with_ops (op1, code,
11159 op0, op1,
11160 false,
11161 &new_code, &limit))
11163 /* Add VR when (OP1 NEW_CODE LIMIT) condition is true. */
11164 value_range *op1_range = try_find_new_range (op1, new_code, limit);
11165 if (op1_range)
11166 push_value_range (op1, op1_range);
11169 if (op0_range)
11170 push_value_range (op0, op0_range);
11174 /* Visit PHI stmts and discover any new VRs possible. */
11175 bool has_unvisited_preds = false;
11176 FOR_EACH_EDGE (e, ei, bb->preds)
11177 if (e->flags & EDGE_EXECUTABLE
11178 && !(e->src->flags & BB_VISITED))
11180 has_unvisited_preds = true;
11181 break;
11184 for (gphi_iterator gpi = gsi_start_phis (bb);
11185 !gsi_end_p (gpi); gsi_next (&gpi))
11187 gphi *phi = gpi.phi ();
11188 tree lhs = PHI_RESULT (phi);
11189 if (virtual_operand_p (lhs))
11190 continue;
11191 value_range vr_result = VR_INITIALIZER;
11192 bool interesting = stmt_interesting_for_vrp (phi);
11193 if (interesting && dump_file && (dump_flags & TDF_DETAILS))
11195 fprintf (dump_file, "Visiting PHI node ");
11196 print_gimple_stmt (dump_file, phi, 0, 0);
11198 if (!has_unvisited_preds
11199 && interesting)
11200 extract_range_from_phi_node (phi, &vr_result);
11201 else
11203 set_value_range_to_varying (&vr_result);
11204 /* When we have an unvisited executable predecessor we can't
11205 use PHI arg ranges which may be still UNDEFINED but have
11206 to use VARYING for them. But we can still resort to
11207 SCEV for loop header PHIs. */
11208 struct loop *l;
11209 if (interesting
11210 && (l = loop_containing_stmt (phi))
11211 && l->header == gimple_bb (phi))
11212 adjust_range_with_scev (&vr_result, l, phi, lhs);
11214 update_value_range (lhs, &vr_result);
11216 /* Mark PHIs whose lhs we fully propagate for removal. */
11217 tree val = op_with_constant_singleton_value_range (lhs);
11218 if (val && may_propagate_copy (lhs, val))
11220 stmts_to_remove.safe_push (phi);
11221 continue;
11224 /* Set the SSA with the value range. */
11225 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
11227 if ((vr_result.type == VR_RANGE
11228 || vr_result.type == VR_ANTI_RANGE)
11229 && (TREE_CODE (vr_result.min) == INTEGER_CST)
11230 && (TREE_CODE (vr_result.max) == INTEGER_CST))
11231 set_range_info (lhs,
11232 vr_result.type, vr_result.min, vr_result.max);
11234 else if (POINTER_TYPE_P (TREE_TYPE (lhs))
11235 && ((vr_result.type == VR_RANGE
11236 && range_includes_zero_p (vr_result.min,
11237 vr_result.max) == 0)
11238 || (vr_result.type == VR_ANTI_RANGE
11239 && range_includes_zero_p (vr_result.min,
11240 vr_result.max) == 1)))
11241 set_ptr_nonnull (lhs);
11244 edge taken_edge = NULL;
11246 /* Visit all other stmts and discover any new VRs possible. */
11247 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
11248 !gsi_end_p (gsi); gsi_next (&gsi))
11250 gimple *stmt = gsi_stmt (gsi);
11251 tree output = NULL_TREE;
11252 gimple *old_stmt = stmt;
11253 bool was_noreturn = (is_gimple_call (stmt)
11254 && gimple_call_noreturn_p (stmt));
11256 if (dump_file && (dump_flags & TDF_DETAILS))
11258 fprintf (dump_file, "Visiting stmt ");
11259 print_gimple_stmt (dump_file, stmt, 0, 0);
11262 if (gcond *cond = dyn_cast <gcond *> (stmt))
11264 vrp_visit_cond_stmt (cond, &taken_edge);
11265 if (taken_edge)
11267 if (taken_edge->flags & EDGE_TRUE_VALUE)
11268 gimple_cond_make_true (cond);
11269 else if (taken_edge->flags & EDGE_FALSE_VALUE)
11270 gimple_cond_make_false (cond);
11271 else
11272 gcc_unreachable ();
11273 update_stmt (stmt);
11276 else if (stmt_interesting_for_vrp (stmt))
11278 edge taken_edge;
11279 value_range vr = VR_INITIALIZER;
11280 extract_range_from_stmt (stmt, &taken_edge, &output, &vr);
11281 if (output
11282 && (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE))
11284 update_value_range (output, &vr);
11285 vr = *get_value_range (output);
11287 /* Mark stmts whose output we fully propagate for removal. */
11288 tree val;
11289 if ((val = op_with_constant_singleton_value_range (output))
11290 && may_propagate_copy (output, val)
11291 && !stmt_could_throw_p (stmt)
11292 && !gimple_has_side_effects (stmt))
11294 stmts_to_remove.safe_push (stmt);
11295 continue;
11298 /* Set the SSA with the value range. */
11299 if (INTEGRAL_TYPE_P (TREE_TYPE (output)))
11301 if ((vr.type == VR_RANGE
11302 || vr.type == VR_ANTI_RANGE)
11303 && (TREE_CODE (vr.min) == INTEGER_CST)
11304 && (TREE_CODE (vr.max) == INTEGER_CST))
11305 set_range_info (output, vr.type, vr.min, vr.max);
11307 else if (POINTER_TYPE_P (TREE_TYPE (output))
11308 && ((vr.type == VR_RANGE
11309 && range_includes_zero_p (vr.min,
11310 vr.max) == 0)
11311 || (vr.type == VR_ANTI_RANGE
11312 && range_includes_zero_p (vr.min,
11313 vr.max) == 1)))
11314 set_ptr_nonnull (output);
11316 else
11317 set_defs_to_varying (stmt);
11319 else
11320 set_defs_to_varying (stmt);
11322 /* See if we can derive a range for any of STMT's operands. */
11323 tree op;
11324 ssa_op_iter i;
11325 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
11327 tree value;
11328 enum tree_code comp_code;
11330 /* If OP is used in such a way that we can infer a value
11331 range for it, and we don't find a previous assertion for
11332 it, create a new assertion location node for OP. */
11333 if (infer_value_range (stmt, op, &comp_code, &value))
11335 /* If we are able to infer a nonzero value range for OP,
11336 then walk backwards through the use-def chain to see if OP
11337 was set via a typecast.
11338 If so, then we can also infer a nonzero value range
11339 for the operand of the NOP_EXPR. */
11340 if (comp_code == NE_EXPR && integer_zerop (value))
11342 tree t = op;
11343 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
11344 while (is_gimple_assign (def_stmt)
11345 && CONVERT_EXPR_CODE_P
11346 (gimple_assign_rhs_code (def_stmt))
11347 && TREE_CODE
11348 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
11349 && POINTER_TYPE_P
11350 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
11352 t = gimple_assign_rhs1 (def_stmt);
11353 def_stmt = SSA_NAME_DEF_STMT (t);
11355 /* Add VR when (T COMP_CODE value) condition is
11356 true. */
11357 value_range *op_range
11358 = try_find_new_range (t, comp_code, value);
11359 if (op_range)
11360 push_value_range (t, op_range);
11363 /* Add VR when (OP COMP_CODE value) condition is true. */
11364 value_range *op_range = try_find_new_range (op,
11365 comp_code, value);
11366 if (op_range)
11367 push_value_range (op, op_range);
11371 /* Try folding stmts with the VR discovered. */
11372 bool did_replace
11373 = replace_uses_in (stmt, op_with_constant_singleton_value_range);
11374 if (fold_stmt (&gsi, follow_single_use_edges)
11375 || did_replace)
11377 stmt = gsi_stmt (gsi);
11378 update_stmt (stmt);
11379 did_replace = true;
11382 if (did_replace)
11384 /* If we cleaned up EH information from the statement,
11385 remove EH edges. */
11386 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
11387 bitmap_set_bit (need_eh_cleanup, bb->index);
11389 /* If we turned a not noreturn call into a noreturn one
11390 schedule it for fixup. */
11391 if (!was_noreturn
11392 && is_gimple_call (stmt)
11393 && gimple_call_noreturn_p (stmt))
11394 stmts_to_fixup.safe_push (stmt);
11396 if (gimple_assign_single_p (stmt))
11398 tree rhs = gimple_assign_rhs1 (stmt);
11399 if (TREE_CODE (rhs) == ADDR_EXPR)
11400 recompute_tree_invariant_for_addr_expr (rhs);
11405 /* Visit BB successor PHI nodes and replace PHI args. */
11406 FOR_EACH_EDGE (e, ei, bb->succs)
11408 for (gphi_iterator gpi = gsi_start_phis (e->dest);
11409 !gsi_end_p (gpi); gsi_next (&gpi))
11411 gphi *phi = gpi.phi ();
11412 use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
11413 tree arg = USE_FROM_PTR (use_p);
11414 if (TREE_CODE (arg) != SSA_NAME
11415 || virtual_operand_p (arg))
11416 continue;
11417 tree val = op_with_constant_singleton_value_range (arg);
11418 if (val && may_propagate_copy (arg, val))
11419 propagate_value (use_p, val);
11423 bb->flags |= BB_VISITED;
11425 return taken_edge;
11428 /* Restore/pop VRs valid only for BB when we leave BB. */
11430 void
11431 evrp_dom_walker::after_dom_children (basic_block bb ATTRIBUTE_UNUSED)
11433 gcc_checking_assert (!stack.is_empty ());
11434 while (stack.last ().first != NULL_TREE)
11435 pop_value_range (stack.last ().first);
11436 stack.pop ();
11439 /* Push the Value Range of VAR to the stack and update it with new VR. */
11441 void
11442 evrp_dom_walker::push_value_range (tree var, value_range *vr)
11444 if (SSA_NAME_VERSION (var) >= num_vr_values)
11445 return;
11446 if (dump_file && (dump_flags & TDF_DETAILS))
11448 fprintf (dump_file, "pushing new range for ");
11449 print_generic_expr (dump_file, var, 0);
11450 fprintf (dump_file, ": ");
11451 dump_value_range (dump_file, vr);
11452 fprintf (dump_file, "\n");
11454 stack.safe_push (std::make_pair (var, get_value_range (var)));
11455 vr_value[SSA_NAME_VERSION (var)] = vr;
11458 /* Pop the Value Range from the vrp_stack and update VAR with it. */
11460 value_range *
11461 evrp_dom_walker::pop_value_range (tree var)
11463 value_range *vr = stack.last ().second;
11464 gcc_checking_assert (var == stack.last ().first);
11465 if (dump_file && (dump_flags & TDF_DETAILS))
11467 fprintf (dump_file, "popping range for ");
11468 print_generic_expr (dump_file, var, 0);
11469 fprintf (dump_file, ", restoring ");
11470 dump_value_range (dump_file, vr);
11471 fprintf (dump_file, "\n");
11473 vr_value[SSA_NAME_VERSION (var)] = vr;
11474 stack.pop ();
11475 return vr;
11479 /* Main entry point for the early vrp pass which is a simplified non-iterative
11480 version of vrp where basic blocks are visited in dominance order. Value
11481 ranges discovered in early vrp will also be used by ipa-vrp. */
11483 static unsigned int
11484 execute_early_vrp ()
11486 edge e;
11487 edge_iterator ei;
11488 basic_block bb;
11490 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
11491 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
11492 scev_initialize ();
11493 calculate_dominance_info (CDI_DOMINATORS);
11494 FOR_EACH_BB_FN (bb, cfun)
11496 bb->flags &= ~BB_VISITED;
11497 FOR_EACH_EDGE (e, ei, bb->preds)
11498 e->flags |= EDGE_EXECUTABLE;
11500 vrp_initialize_lattice ();
11502 /* Walk stmts in dominance order and propagate VRP. */
11503 evrp_dom_walker walker;
11504 walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
11506 if (dump_file)
11508 fprintf (dump_file, "\nValue ranges after Early VRP:\n\n");
11509 dump_all_value_ranges (dump_file);
11510 fprintf (dump_file, "\n");
11513 /* Remove stmts in reverse order to make debug stmt creation possible. */
11514 while (! walker.stmts_to_remove.is_empty ())
11516 gimple *stmt = walker.stmts_to_remove.pop ();
11517 if (dump_file && dump_flags & TDF_DETAILS)
11519 fprintf (dump_file, "Removing dead stmt ");
11520 print_gimple_stmt (dump_file, stmt, 0, 0);
11521 fprintf (dump_file, "\n");
11523 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
11524 if (gimple_code (stmt) == GIMPLE_PHI)
11525 remove_phi_node (&gsi, true);
11526 else
11528 unlink_stmt_vdef (stmt);
11529 gsi_remove (&gsi, true);
11530 release_defs (stmt);
11534 if (!bitmap_empty_p (walker.need_eh_cleanup))
11535 gimple_purge_all_dead_eh_edges (walker.need_eh_cleanup);
11537 /* Fixup stmts that became noreturn calls. This may require splitting
11538 blocks and thus isn't possible during the dominator walk. Do this
11539 in reverse order so we don't inadvertedly remove a stmt we want to
11540 fixup by visiting a dominating now noreturn call first. */
11541 while (!walker.stmts_to_fixup.is_empty ())
11543 gimple *stmt = walker.stmts_to_fixup.pop ();
11544 fixup_noreturn_call (stmt);
11547 vrp_free_lattice ();
11548 scev_finalize ();
11549 loop_optimizer_finalize ();
11550 return 0;
11554 /* Main entry point to VRP (Value Range Propagation). This pass is
11555 loosely based on J. R. C. Patterson, ``Accurate Static Branch
11556 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
11557 Programming Language Design and Implementation, pp. 67-78, 1995.
11558 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
11560 This is essentially an SSA-CCP pass modified to deal with ranges
11561 instead of constants.
11563 While propagating ranges, we may find that two or more SSA name
11564 have equivalent, though distinct ranges. For instance,
11566 1 x_9 = p_3->a;
11567 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
11568 3 if (p_4 == q_2)
11569 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
11570 5 endif
11571 6 if (q_2)
11573 In the code above, pointer p_5 has range [q_2, q_2], but from the
11574 code we can also determine that p_5 cannot be NULL and, if q_2 had
11575 a non-varying range, p_5's range should also be compatible with it.
11577 These equivalences are created by two expressions: ASSERT_EXPR and
11578 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
11579 result of another assertion, then we can use the fact that p_5 and
11580 p_4 are equivalent when evaluating p_5's range.
11582 Together with value ranges, we also propagate these equivalences
11583 between names so that we can take advantage of information from
11584 multiple ranges when doing final replacement. Note that this
11585 equivalency relation is transitive but not symmetric.
11587 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
11588 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
11589 in contexts where that assertion does not hold (e.g., in line 6).
11591 TODO, the main difference between this pass and Patterson's is that
11592 we do not propagate edge probabilities. We only compute whether
11593 edges can be taken or not. That is, instead of having a spectrum
11594 of jump probabilities between 0 and 1, we only deal with 0, 1 and
11595 DON'T KNOW. In the future, it may be worthwhile to propagate
11596 probabilities to aid branch prediction. */
11598 static unsigned int
11599 execute_vrp (bool warn_array_bounds_p)
11601 int i;
11602 edge e;
11603 switch_update *su;
11605 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
11606 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
11607 scev_initialize ();
11609 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
11610 Inserting assertions may split edges which will invalidate
11611 EDGE_DFS_BACK. */
11612 insert_range_assertions ();
11614 to_remove_edges.create (10);
11615 to_update_switch_stmts.create (5);
11616 threadedge_initialize_values ();
11618 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
11619 mark_dfs_back_edges ();
11621 vrp_initialize_lattice ();
11622 vrp_initialize ();
11623 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
11624 vrp_finalize (warn_array_bounds_p);
11625 vrp_free_lattice ();
11627 free_numbers_of_iterations_estimates (cfun);
11629 /* ASSERT_EXPRs must be removed before finalizing jump threads
11630 as finalizing jump threads calls the CFG cleanup code which
11631 does not properly handle ASSERT_EXPRs. */
11632 remove_range_assertions ();
11634 /* If we exposed any new variables, go ahead and put them into
11635 SSA form now, before we handle jump threading. This simplifies
11636 interactions between rewriting of _DECL nodes into SSA form
11637 and rewriting SSA_NAME nodes into SSA form after block
11638 duplication and CFG manipulation. */
11639 update_ssa (TODO_update_ssa);
11641 finalize_jump_threads ();
11643 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
11644 CFG in a broken state and requires a cfg_cleanup run. */
11645 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
11646 remove_edge (e);
11647 /* Update SWITCH_EXPR case label vector. */
11648 FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
11650 size_t j;
11651 size_t n = TREE_VEC_LENGTH (su->vec);
11652 tree label;
11653 gimple_switch_set_num_labels (su->stmt, n);
11654 for (j = 0; j < n; j++)
11655 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
11656 /* As we may have replaced the default label with a regular one
11657 make sure to make it a real default label again. This ensures
11658 optimal expansion. */
11659 label = gimple_switch_label (su->stmt, 0);
11660 CASE_LOW (label) = NULL_TREE;
11661 CASE_HIGH (label) = NULL_TREE;
11664 if (to_remove_edges.length () > 0)
11666 free_dominance_info (CDI_DOMINATORS);
11667 loops_state_set (LOOPS_NEED_FIXUP);
11670 to_remove_edges.release ();
11671 to_update_switch_stmts.release ();
11672 threadedge_finalize_values ();
11674 scev_finalize ();
11675 loop_optimizer_finalize ();
11676 return 0;
11679 namespace {
11681 const pass_data pass_data_vrp =
11683 GIMPLE_PASS, /* type */
11684 "vrp", /* name */
11685 OPTGROUP_NONE, /* optinfo_flags */
11686 TV_TREE_VRP, /* tv_id */
11687 PROP_ssa, /* properties_required */
11688 0, /* properties_provided */
11689 0, /* properties_destroyed */
11690 0, /* todo_flags_start */
11691 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
11694 class pass_vrp : public gimple_opt_pass
11696 public:
11697 pass_vrp (gcc::context *ctxt)
11698 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
11701 /* opt_pass methods: */
11702 opt_pass * clone () { return new pass_vrp (m_ctxt); }
11703 void set_pass_param (unsigned int n, bool param)
11705 gcc_assert (n == 0);
11706 warn_array_bounds_p = param;
11708 virtual bool gate (function *) { return flag_tree_vrp != 0; }
11709 virtual unsigned int execute (function *)
11710 { return execute_vrp (warn_array_bounds_p); }
11712 private:
11713 bool warn_array_bounds_p;
11714 }; // class pass_vrp
11716 } // anon namespace
11718 gimple_opt_pass *
11719 make_pass_vrp (gcc::context *ctxt)
11721 return new pass_vrp (ctxt);
11724 namespace {
11726 const pass_data pass_data_early_vrp =
11728 GIMPLE_PASS, /* type */
11729 "evrp", /* name */
11730 OPTGROUP_NONE, /* optinfo_flags */
11731 TV_TREE_EARLY_VRP, /* tv_id */
11732 PROP_ssa, /* properties_required */
11733 0, /* properties_provided */
11734 0, /* properties_destroyed */
11735 0, /* todo_flags_start */
11736 ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
11739 class pass_early_vrp : public gimple_opt_pass
11741 public:
11742 pass_early_vrp (gcc::context *ctxt)
11743 : gimple_opt_pass (pass_data_early_vrp, ctxt)
11746 /* opt_pass methods: */
11747 opt_pass * clone () { return new pass_early_vrp (m_ctxt); }
11748 virtual bool gate (function *)
11750 return flag_tree_vrp != 0;
11752 virtual unsigned int execute (function *)
11753 { return execute_early_vrp (); }
11755 }; // class pass_vrp
11756 } // anon namespace
11758 gimple_opt_pass *
11759 make_pass_early_vrp (gcc::context *ctxt)
11761 return new pass_early_vrp (ctxt);