gcc/tree-vrp.c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2015 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "cfghooks.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "rtl.h"
29 #include "ssa.h"
30 #include "flags.h"
31 #include "alias.h"
32 #include "fold-const.h"
33 #include "stor-layout.h"
34 #include "calls.h"
35 #include "cfganal.h"
36 #include "internal-fn.h"
37 #include "gimple-fold.h"
38 #include "tree-eh.h"
39 #include "gimple-iterator.h"
40 #include "gimple-walk.h"
41 #include "tree-cfg.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "tree-ssa-loop-niter.h"
44 #include "tree-ssa-loop.h"
45 #include "tree-into-ssa.h"
46 #include "tree-ssa.h"
47 #include "tree-pass.h"
48 #include "tree-dump.h"
49 #include "gimple-pretty-print.h"
50 #include "diagnostic-core.h"
51 #include "intl.h"
52 #include "cfgloop.h"
53 #include "tree-scalar-evolution.h"
54 #include "tree-ssa-propagate.h"
55 #include "tree-chrec.h"
56 #include "tree-ssa-threadupdate.h"
57 #include "insn-codes.h"
58 #include "optabs-tree.h"
59 #include "tree-ssa-scopedtables.h"
60 #include "tree-ssa-threadedge.h"
64 /* Range of values that can be associated with an SSA_NAME after VRP
65 has executed. */
66 struct value_range_d
68 /* Lattice value represented by this range. */
69 enum value_range_type type;
71 /* Minimum and maximum values represented by this range. These
72 values should be interpreted as follows:
74 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
75 be NULL.
77 - If TYPE == VR_RANGE then MIN holds the minimum value and
78 MAX holds the maximum value of the range [MIN, MAX].
80 - If TYPE == ANTI_RANGE the variable is known to NOT
81 take any values in the range [MIN, MAX]. */
82 tree min;
83 tree max;
85 /* Set of SSA names whose value ranges are equivalent to this one.
86 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
87 bitmap equiv;
90 typedef struct value_range_d value_range_t;
92 #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
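/* Illustrative sketch (not part of the original source): how the lattice
   values described above are encoded in a value_range_t.  The function
   name below is made up for the example.  */
static void
vrp_example_encode_ranges (void)
{
  value_range_t vr = VR_INITIALIZER;   /* VR_UNDEFINED, MIN/MAX are NULL.  */

  /* [1, 10]: TYPE is VR_RANGE, MIN and MAX hold the bounds.  */
  vr.type = VR_RANGE;
  vr.min = build_int_cst (integer_type_node, 1);
  vr.max = build_int_cst (integer_type_node, 10);

  /* ~[0, 0] ("anything but zero"): TYPE is VR_ANTI_RANGE with both
     bounds set to zero.  */
  vr.type = VR_ANTI_RANGE;
  vr.min = vr.max = build_int_cst (integer_type_node, 0);
}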
94 /* Set of SSA names found live during the RPO traversal of the function
95 for still active basic-blocks. */
96 static sbitmap *live;
98 /* Return true if the SSA name NAME is live on the edge E. */
100 static bool
101 live_on_edge (edge e, tree name)
103 return (live[e->dest->index]
104 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
107 /* Local functions. */
108 static int compare_values (tree val1, tree val2);
109 static int compare_values_warnv (tree val1, tree val2, bool *);
110 static void vrp_meet (value_range_t *, value_range_t *);
111 static void vrp_intersect_ranges (value_range_t *, value_range_t *);
112 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
113 tree, tree, bool, bool *,
114 bool *);
116 /* Location information for ASSERT_EXPRs. Each instance of this
117 structure describes an ASSERT_EXPR for an SSA name. Since a single
118 SSA name may have more than one assertion associated with it, these
119 locations are kept in a linked list attached to the corresponding
120 SSA name. */
121 struct assert_locus
123 /* Basic block where the assertion would be inserted. */
124 basic_block bb;
126 /* Some assertions need to be inserted on an edge (e.g., assertions
127 generated by COND_EXPRs). In those cases, BB will be NULL. */
128 edge e;
130 /* Pointer to the statement that generated this assertion. */
131 gimple_stmt_iterator si;
133 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
134 enum tree_code comp_code;
136 /* Value being compared against. */
137 tree val;
139 /* Expression to compare. */
140 tree expr;
142 /* Next node in the linked list. */
143 assert_locus *next;
146 /* If bit I is present, it means that SSA name N_i has a list of
147 assertions that should be inserted in the IL. */
148 static bitmap need_assert_for;
150 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
151 holds a list of ASSERT_LOCUS_T nodes that describe where
152 ASSERT_EXPRs for SSA name N_I should be inserted. */
153 static assert_locus **asserts_for;
155 /* Value range array. After propagation, VR_VALUE[I] holds the range
156 of values that SSA name N_I may take. */
157 static unsigned num_vr_values;
158 static value_range_t **vr_value;
159 static bool values_propagated;
161 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
162 number of executable edges we saw the last time we visited the
163 node. */
164 static int *vr_phi_edge_counts;
166 struct switch_update {
167 gswitch *stmt;
168 tree vec;
171 static vec<edge> to_remove_edges;
172 static vec<switch_update> to_update_switch_stmts;
175 /* Return the maximum value for TYPE. */
177 static inline tree
178 vrp_val_max (const_tree type)
180 if (!INTEGRAL_TYPE_P (type))
181 return NULL_TREE;
183 return TYPE_MAX_VALUE (type);
186 /* Return the minimum value for TYPE. */
188 static inline tree
189 vrp_val_min (const_tree type)
191 if (!INTEGRAL_TYPE_P (type))
192 return NULL_TREE;
194 return TYPE_MIN_VALUE (type);
197 /* Return whether VAL is equal to the maximum value of its type. This
198 will be true for a positive overflow infinity. We can't do a
199 simple equality comparison with TYPE_MAX_VALUE because C typedefs
200 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
201 to the integer constant with the same value in the type. */
203 static inline bool
204 vrp_val_is_max (const_tree val)
206 tree type_max = vrp_val_max (TREE_TYPE (val));
207 return (val == type_max
208 || (type_max != NULL_TREE
209 && operand_equal_p (val, type_max, 0)));
212 /* Return whether VAL is equal to the minimum value of its type. This
213 will be true for a negative overflow infinity. */
215 static inline bool
216 vrp_val_is_min (const_tree val)
218 tree type_min = vrp_val_min (TREE_TYPE (val));
219 return (val == type_min
220 || (type_min != NULL_TREE
221 && operand_equal_p (val, type_min, 0)));
225 /* Return whether TYPE should use an overflow infinity distinct from
226 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
227 represent a signed overflow during VRP computations. An infinity
228 is distinct from a half-range, which will go from some number to
229 TYPE_{MIN,MAX}_VALUE. */
231 static inline bool
232 needs_overflow_infinity (const_tree type)
234 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
237 /* Return whether TYPE can support our overflow infinity
238 representation: we use the TREE_OVERFLOW flag, which only exists
239 for constants. If TYPE doesn't support this, we don't optimize
240 cases which would require signed overflow--we drop them to
241 VARYING. */
243 static inline bool
244 supports_overflow_infinity (const_tree type)
246 tree min = vrp_val_min (type), max = vrp_val_max (type);
247 #ifdef ENABLE_CHECKING
248 gcc_assert (needs_overflow_infinity (type));
249 #endif
250 return (min != NULL_TREE
251 && CONSTANT_CLASS_P (min)
252 && max != NULL_TREE
253 && CONSTANT_CLASS_P (max));
256 /* VAL is the maximum or minimum value of a type. Return a
257 corresponding overflow infinity. */
259 static inline tree
260 make_overflow_infinity (tree val)
262 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
263 val = copy_node (val);
264 TREE_OVERFLOW (val) = 1;
265 return val;
268 /* Return a negative overflow infinity for TYPE. */
270 static inline tree
271 negative_overflow_infinity (tree type)
273 gcc_checking_assert (supports_overflow_infinity (type));
274 return make_overflow_infinity (vrp_val_min (type));
277 /* Return a positive overflow infinity for TYPE. */
279 static inline tree
280 positive_overflow_infinity (tree type)
282 gcc_checking_assert (supports_overflow_infinity (type));
283 return make_overflow_infinity (vrp_val_max (type));
286 /* Return whether VAL is a negative overflow infinity. */
288 static inline bool
289 is_negative_overflow_infinity (const_tree val)
291 return (TREE_OVERFLOW_P (val)
292 && needs_overflow_infinity (TREE_TYPE (val))
293 && vrp_val_is_min (val));
296 /* Return whether VAL is a positive overflow infinity. */
298 static inline bool
299 is_positive_overflow_infinity (const_tree val)
301 return (TREE_OVERFLOW_P (val)
302 && needs_overflow_infinity (TREE_TYPE (val))
303 && vrp_val_is_max (val));
306 /* Return whether VAL is a positive or negative overflow infinity. */
308 static inline bool
309 is_overflow_infinity (const_tree val)
311 return (TREE_OVERFLOW_P (val)
312 && needs_overflow_infinity (TREE_TYPE (val))
313 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
316 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
318 static inline bool
319 stmt_overflow_infinity (gimple *stmt)
321 if (is_gimple_assign (stmt)
322 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
323 GIMPLE_SINGLE_RHS)
324 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
325 return false;
328 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
329 the same value with TREE_OVERFLOW clear. This can be used to avoid
330 confusing a regular value with an overflow value. */
332 static inline tree
333 avoid_overflow_infinity (tree val)
335 if (!is_overflow_infinity (val))
336 return val;
338 if (vrp_val_is_max (val))
339 return vrp_val_max (TREE_TYPE (val));
340 else
342 gcc_checking_assert (vrp_val_is_min (val));
343 return vrp_val_min (TREE_TYPE (val));
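/* Illustrative sketch (not part of the original source): how the overflow
   infinity helpers above fit together for a signed type.  The function
   name is made up for the example.  */
static void
vrp_example_overflow_infinity (void)
{
  tree type = integer_type_node;

  if (needs_overflow_infinity (type) && supports_overflow_infinity (type))
    {
      /* A positive overflow infinity compares equal to TYPE_MAX_VALUE
         but carries TREE_OVERFLOW as a marker.  */
      tree pinf = positive_overflow_infinity (type);
      gcc_assert (is_positive_overflow_infinity (pinf));
      gcc_assert (vrp_val_is_max (pinf));

      /* avoid_overflow_infinity strips the marker again.  */
      gcc_assert (!TREE_OVERFLOW (avoid_overflow_infinity (pinf)));
    }
}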
348 /* Set value range VR to VR_UNDEFINED. */
350 static inline void
351 set_value_range_to_undefined (value_range_t *vr)
353 vr->type = VR_UNDEFINED;
354 vr->min = vr->max = NULL_TREE;
355 if (vr->equiv)
356 bitmap_clear (vr->equiv);
360 /* Set value range VR to VR_VARYING. */
362 static inline void
363 set_value_range_to_varying (value_range_t *vr)
365 vr->type = VR_VARYING;
366 vr->min = vr->max = NULL_TREE;
367 if (vr->equiv)
368 bitmap_clear (vr->equiv);
372 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
374 static void
375 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
376 tree max, bitmap equiv)
378 #if defined ENABLE_CHECKING
379 /* Check the validity of the range. */
380 if (t == VR_RANGE || t == VR_ANTI_RANGE)
382 int cmp;
384 gcc_assert (min && max);
386 gcc_assert ((!TREE_OVERFLOW_P (min) || is_overflow_infinity (min))
387 && (!TREE_OVERFLOW_P (max) || is_overflow_infinity (max)));
389 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
390 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
392 cmp = compare_values (min, max);
393 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
395 if (needs_overflow_infinity (TREE_TYPE (min)))
396 gcc_assert (!is_overflow_infinity (min)
397 || !is_overflow_infinity (max));
400 if (t == VR_UNDEFINED || t == VR_VARYING)
401 gcc_assert (min == NULL_TREE && max == NULL_TREE);
403 if (t == VR_UNDEFINED || t == VR_VARYING)
404 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
405 #endif
407 vr->type = t;
408 vr->min = min;
409 vr->max = max;
411 /* Since updating the equivalence set involves deep copying the
412 bitmaps, only do it if absolutely necessary. */
413 if (vr->equiv == NULL
414 && equiv != NULL)
415 vr->equiv = BITMAP_ALLOC (NULL);
417 if (equiv != vr->equiv)
419 if (equiv && !bitmap_empty_p (equiv))
420 bitmap_copy (vr->equiv, equiv);
421 else
422 bitmap_clear (vr->equiv);
427 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
428 This means adjusting T, MIN and MAX representing the case of a
429 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
 430    as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
431 In corner cases where MAX+1 or MIN-1 wraps this will fall back
432 to varying.
433 This routine exists to ease canonicalization in the case where we
434 extract ranges from var + CST op limit. */
436 static void
437 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
438 tree min, tree max, bitmap equiv)
440 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
441 if (t == VR_UNDEFINED)
443 set_value_range_to_undefined (vr);
444 return;
446 else if (t == VR_VARYING)
448 set_value_range_to_varying (vr);
449 return;
452 /* Nothing to canonicalize for symbolic ranges. */
453 if (TREE_CODE (min) != INTEGER_CST
454 || TREE_CODE (max) != INTEGER_CST)
456 set_value_range (vr, t, min, max, equiv);
457 return;
 460   /* If min and max are in the wrong order, swap them; to compensate we
 461      also need to invert the VR type.  */
462 if (tree_int_cst_lt (max, min))
464 tree one, tmp;
466 /* For one bit precision if max < min, then the swapped
467 range covers all values, so for VR_RANGE it is varying and
468 for VR_ANTI_RANGE empty range, so drop to varying as well. */
469 if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
471 set_value_range_to_varying (vr);
472 return;
475 one = build_int_cst (TREE_TYPE (min), 1);
476 tmp = int_const_binop (PLUS_EXPR, max, one);
477 max = int_const_binop (MINUS_EXPR, min, one);
478 min = tmp;
 480       /* There's one corner case: if we had [C+1, C] before, we now have
 481          that again.  But this represents an empty value range, so drop
 482          to varying in this case.  */
483 if (tree_int_cst_lt (max, min))
485 set_value_range_to_varying (vr);
486 return;
489 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
492 /* Anti-ranges that can be represented as ranges should be so. */
493 if (t == VR_ANTI_RANGE)
495 bool is_min = vrp_val_is_min (min);
496 bool is_max = vrp_val_is_max (max);
498 if (is_min && is_max)
500 /* We cannot deal with empty ranges, drop to varying.
501 ??? This could be VR_UNDEFINED instead. */
502 set_value_range_to_varying (vr);
503 return;
505 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
506 && (is_min || is_max))
508 /* Non-empty boolean ranges can always be represented
509 as a singleton range. */
510 if (is_min)
511 min = max = vrp_val_max (TREE_TYPE (min));
512 else
513 min = max = vrp_val_min (TREE_TYPE (min));
514 t = VR_RANGE;
516 else if (is_min
517 /* As a special exception preserve non-null ranges. */
518 && !(TYPE_UNSIGNED (TREE_TYPE (min))
519 && integer_zerop (max)))
521 tree one = build_int_cst (TREE_TYPE (max), 1);
522 min = int_const_binop (PLUS_EXPR, max, one);
523 max = vrp_val_max (TREE_TYPE (max));
524 t = VR_RANGE;
526 else if (is_max)
528 tree one = build_int_cst (TREE_TYPE (min), 1);
529 max = int_const_binop (MINUS_EXPR, min, one);
530 min = vrp_val_min (TREE_TYPE (min));
531 t = VR_RANGE;
535 /* Drop [-INF(OVF), +INF(OVF)] to varying. */
536 if (needs_overflow_infinity (TREE_TYPE (min))
537 && is_overflow_infinity (min)
538 && is_overflow_infinity (max))
540 set_value_range_to_varying (vr);
541 return;
544 set_value_range (vr, t, min, max, equiv);
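/* Illustrative sketch (not part of the original source): canonicalizing a
   wrapping range.  For unsigned char, the request [10, 3] means
   [10, 255] U [0, 3], which the routine above rewrites as ~[4, 9].  The
   function name is made up for the example.  */
static void
vrp_example_canonicalize_wrapping_range (void)
{
  value_range_t vr = VR_INITIALIZER;
  tree type = unsigned_char_type_node;

  set_and_canonicalize_value_range (&vr, VR_RANGE,
				    build_int_cst (type, 10),
				    build_int_cst (type, 3), NULL);

  /* The result is the anti-range ~[4, 9].  */
  gcc_assert (vr.type == VR_ANTI_RANGE);
}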
547 /* Copy value range FROM into value range TO. */
549 static inline void
550 copy_value_range (value_range_t *to, value_range_t *from)
552 set_value_range (to, from->type, from->min, from->max, from->equiv);
555 /* Set value range VR to a single value. This function is only called
556 with values we get from statements, and exists to clear the
557 TREE_OVERFLOW flag so that we don't think we have an overflow
558 infinity when we shouldn't. */
560 static inline void
561 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
563 gcc_assert (is_gimple_min_invariant (val));
564 if (TREE_OVERFLOW_P (val))
565 val = drop_tree_overflow (val);
566 set_value_range (vr, VR_RANGE, val, val, equiv);
569 /* Set value range VR to a non-negative range of type TYPE.
570 OVERFLOW_INFINITY indicates whether to use an overflow infinity
571 rather than TYPE_MAX_VALUE; this should be true if we determine
572 that the range is nonnegative based on the assumption that signed
573 overflow does not occur. */
575 static inline void
576 set_value_range_to_nonnegative (value_range_t *vr, tree type,
577 bool overflow_infinity)
579 tree zero;
581 if (overflow_infinity && !supports_overflow_infinity (type))
583 set_value_range_to_varying (vr);
584 return;
587 zero = build_int_cst (type, 0);
588 set_value_range (vr, VR_RANGE, zero,
589 (overflow_infinity
590 ? positive_overflow_infinity (type)
591 : TYPE_MAX_VALUE (type)),
592 vr->equiv);
595 /* Set value range VR to a non-NULL range of type TYPE. */
597 static inline void
598 set_value_range_to_nonnull (value_range_t *vr, tree type)
600 tree zero = build_int_cst (type, 0);
601 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
605 /* Set value range VR to a NULL range of type TYPE. */
607 static inline void
608 set_value_range_to_null (value_range_t *vr, tree type)
610 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
614 /* Set value range VR to a range of a truthvalue of type TYPE. */
616 static inline void
617 set_value_range_to_truthvalue (value_range_t *vr, tree type)
619 if (TYPE_PRECISION (type) == 1)
620 set_value_range_to_varying (vr);
621 else
622 set_value_range (vr, VR_RANGE,
623 build_int_cst (type, 0), build_int_cst (type, 1),
624 vr->equiv);
628 /* If abs (min) < abs (max), set VR to [-max, max], if
629 abs (min) >= abs (max), set VR to [-min, min]. */
631 static void
632 abs_extent_range (value_range_t *vr, tree min, tree max)
634 int cmp;
636 gcc_assert (TREE_CODE (min) == INTEGER_CST);
637 gcc_assert (TREE_CODE (max) == INTEGER_CST);
638 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
639 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
640 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
641 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
642 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
644 set_value_range_to_varying (vr);
645 return;
647 cmp = compare_values (min, max);
648 if (cmp == -1)
649 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
650 else if (cmp == 0 || cmp == 1)
652 max = min;
653 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
655 else
657 set_value_range_to_varying (vr);
658 return;
660 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
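/* Illustrative sketch (not part of the original source): the two cases
   handled by abs_extent_range.  The function name is made up for the
   example.  */
static void
vrp_example_abs_extent_range (void)
{
  value_range_t vr = VR_INITIALIZER;
  tree type = integer_type_node;

  /* abs (-3) < abs (5), so the extent is [-5, 5].  */
  abs_extent_range (&vr, build_int_cst (type, -3), build_int_cst (type, 5));
  gcc_assert (vr.type == VR_RANGE);

  /* abs (-7) >= abs (2), so the extent is [-7, 7].  */
  abs_extent_range (&vr, build_int_cst (type, -7), build_int_cst (type, 2));
  gcc_assert (vr.type == VR_RANGE);
}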
664 /* Return value range information for VAR.
 666    If we have no value ranges recorded (i.e., VRP is not running), then
667 return NULL. Otherwise create an empty range if none existed for VAR. */
669 static value_range_t *
670 get_value_range (const_tree var)
672 static const struct value_range_d vr_const_varying
673 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
674 value_range_t *vr;
675 tree sym;
676 unsigned ver = SSA_NAME_VERSION (var);
678 /* If we have no recorded ranges, then return NULL. */
679 if (! vr_value)
680 return NULL;
682 /* If we query the range for a new SSA name return an unmodifiable VARYING.
683 We should get here at most from the substitute-and-fold stage which
684 will never try to change values. */
685 if (ver >= num_vr_values)
686 return CONST_CAST (value_range_t *, &vr_const_varying);
688 vr = vr_value[ver];
689 if (vr)
690 return vr;
692 /* After propagation finished do not allocate new value-ranges. */
693 if (values_propagated)
694 return CONST_CAST (value_range_t *, &vr_const_varying);
696 /* Create a default value range. */
697 vr_value[ver] = vr = XCNEW (value_range_t);
699 /* Defer allocating the equivalence set. */
700 vr->equiv = NULL;
702 /* If VAR is a default definition of a parameter, the variable can
703 take any value in VAR's type. */
704 if (SSA_NAME_IS_DEFAULT_DEF (var))
706 sym = SSA_NAME_VAR (var);
707 if (TREE_CODE (sym) == PARM_DECL)
709 /* Try to use the "nonnull" attribute to create ~[0, 0]
710 anti-ranges for pointers. Note that this is only valid with
711 default definitions of PARM_DECLs. */
712 if (POINTER_TYPE_P (TREE_TYPE (sym))
713 && nonnull_arg_p (sym))
714 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
715 else
716 set_value_range_to_varying (vr);
718 else if (TREE_CODE (sym) == RESULT_DECL
719 && DECL_BY_REFERENCE (sym))
720 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
723 return vr;
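/* Illustrative sketch (not part of the original source): a typical query
   against the range table.  NAME is assumed to be an SSA_NAME; outside of
   a VRP run vr_value is NULL and get_value_range returns NULL.  The
   function name is made up for the example.  */
static bool
vrp_example_name_is_positive_p (tree name)
{
  value_range_t *vr = get_value_range (name);

  if (!vr || vr->type != VR_RANGE)
    return false;

  /* A range such as [1, +INF] has a minimum greater than zero.  */
  return compare_values (vr->min, build_int_cst (TREE_TYPE (name), 0)) == 1;
}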
726 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
728 static inline bool
729 vrp_operand_equal_p (const_tree val1, const_tree val2)
731 if (val1 == val2)
732 return true;
733 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
734 return false;
735 return is_overflow_infinity (val1) == is_overflow_infinity (val2);
738 /* Return true, if the bitmaps B1 and B2 are equal. */
740 static inline bool
741 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
743 return (b1 == b2
744 || ((!b1 || bitmap_empty_p (b1))
745 && (!b2 || bitmap_empty_p (b2)))
746 || (b1 && b2
747 && bitmap_equal_p (b1, b2)));
750 /* Update the value range and equivalence set for variable VAR to
751 NEW_VR. Return true if NEW_VR is different from VAR's previous
752 value.
754 NOTE: This function assumes that NEW_VR is a temporary value range
755 object created for the sole purpose of updating VAR's range. The
756 storage used by the equivalence set from NEW_VR will be freed by
757 this function. Do not call update_value_range when NEW_VR
758 is the range object associated with another SSA name. */
760 static inline bool
761 update_value_range (const_tree var, value_range_t *new_vr)
763 value_range_t *old_vr;
764 bool is_new;
766 /* If there is a value-range on the SSA name from earlier analysis
767 factor that in. */
768 if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
770 wide_int min, max;
771 value_range_type rtype = get_range_info (var, &min, &max);
772 if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
774 value_range_d nr;
775 nr.type = rtype;
776 nr.min = wide_int_to_tree (TREE_TYPE (var), min);
777 nr.max = wide_int_to_tree (TREE_TYPE (var), max);
778 nr.equiv = NULL;
779 vrp_intersect_ranges (new_vr, &nr);
783 /* Update the value range, if necessary. */
784 old_vr = get_value_range (var);
785 is_new = old_vr->type != new_vr->type
786 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
787 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
788 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
790 if (is_new)
792 /* Do not allow transitions up the lattice. The following
793 is slightly more awkward than just new_vr->type < old_vr->type
794 because VR_RANGE and VR_ANTI_RANGE need to be considered
795 the same. We may not have is_new when transitioning to
796 UNDEFINED. If old_vr->type is VARYING, we shouldn't be
797 called. */
798 if (new_vr->type == VR_UNDEFINED)
800 BITMAP_FREE (new_vr->equiv);
801 set_value_range_to_varying (old_vr);
802 set_value_range_to_varying (new_vr);
803 return true;
805 else
806 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
807 new_vr->equiv);
810 BITMAP_FREE (new_vr->equiv);
812 return is_new;
816 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
817 point where equivalence processing can be turned on/off. */
819 static void
820 add_equivalence (bitmap *equiv, const_tree var)
822 unsigned ver = SSA_NAME_VERSION (var);
823 value_range_t *vr = vr_value[ver];
825 if (*equiv == NULL)
826 *equiv = BITMAP_ALLOC (NULL);
827 bitmap_set_bit (*equiv, ver);
828 if (vr && vr->equiv)
829 bitmap_ior_into (*equiv, vr->equiv);
833 /* Return true if VR is ~[0, 0]. */
835 static inline bool
836 range_is_nonnull (value_range_t *vr)
838 return vr->type == VR_ANTI_RANGE
839 && integer_zerop (vr->min)
840 && integer_zerop (vr->max);
844 /* Return true if VR is [0, 0]. */
846 static inline bool
847 range_is_null (value_range_t *vr)
849 return vr->type == VR_RANGE
850 && integer_zerop (vr->min)
851 && integer_zerop (vr->max);
 854 /* Return true if the min and max of VR are INTEGER_CST.  It is not
 855    necessarily a singleton.  */
857 static inline bool
858 range_int_cst_p (value_range_t *vr)
860 return (vr->type == VR_RANGE
861 && TREE_CODE (vr->max) == INTEGER_CST
862 && TREE_CODE (vr->min) == INTEGER_CST);
 865 /* Return true if VR is an INTEGER_CST singleton.  */
867 static inline bool
868 range_int_cst_singleton_p (value_range_t *vr)
870 return (range_int_cst_p (vr)
871 && !is_overflow_infinity (vr->min)
872 && !is_overflow_infinity (vr->max)
873 && tree_int_cst_equal (vr->min, vr->max));
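/* Illustrative sketch (not part of the original source): how the
   predicates above classify a few concrete ranges.  The function name is
   made up for the example.  */
static void
vrp_example_range_predicates (void)
{
  value_range_t vr = VR_INITIALIZER;
  tree zero = build_int_cst (integer_type_node, 0);
  tree ten = build_int_cst (integer_type_node, 10);

  set_value_range (&vr, VR_RANGE, zero, zero, NULL);
  gcc_assert (range_is_null (&vr) && range_int_cst_singleton_p (&vr));

  set_value_range (&vr, VR_ANTI_RANGE, zero, zero, NULL);
  gcc_assert (range_is_nonnull (&vr));

  set_value_range (&vr, VR_RANGE, zero, ten, NULL);
  gcc_assert (range_int_cst_p (&vr) && !range_int_cst_singleton_p (&vr));
}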
876 /* Return true if value range VR involves at least one symbol. */
878 static inline bool
879 symbolic_range_p (value_range_t *vr)
881 return (!is_gimple_min_invariant (vr->min)
882 || !is_gimple_min_invariant (vr->max));
885 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
886 otherwise. We only handle additive operations and set NEG to true if the
887 symbol is negated and INV to the invariant part, if any. */
889 static tree
890 get_single_symbol (tree t, bool *neg, tree *inv)
892 bool neg_;
893 tree inv_;
895 if (TREE_CODE (t) == PLUS_EXPR
896 || TREE_CODE (t) == POINTER_PLUS_EXPR
897 || TREE_CODE (t) == MINUS_EXPR)
899 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
901 neg_ = (TREE_CODE (t) == MINUS_EXPR);
902 inv_ = TREE_OPERAND (t, 0);
903 t = TREE_OPERAND (t, 1);
905 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
907 neg_ = false;
908 inv_ = TREE_OPERAND (t, 1);
909 t = TREE_OPERAND (t, 0);
911 else
912 return NULL_TREE;
914 else
916 neg_ = false;
917 inv_ = NULL_TREE;
920 if (TREE_CODE (t) == NEGATE_EXPR)
922 t = TREE_OPERAND (t, 0);
923 neg_ = !neg_;
926 if (TREE_CODE (t) != SSA_NAME)
927 return NULL_TREE;
929 *neg = neg_;
930 *inv = inv_;
931 return t;
934 /* The reverse operation: build a symbolic expression with TYPE
935 from symbol SYM, negated according to NEG, and invariant INV. */
937 static tree
938 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
940 const bool pointer_p = POINTER_TYPE_P (type);
941 tree t = sym;
943 if (neg)
944 t = build1 (NEGATE_EXPR, type, t);
946 if (integer_zerop (inv))
947 return t;
949 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
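/* Illustrative sketch (not part of the original source): decomposing
   "n_1 + 16" into its symbol and invariant part and rebuilding it.  N_1
   stands for an arbitrary SSA_NAME of integral type; the function name is
   made up for the example.  */
static tree
vrp_example_symbol_round_trip (tree n_1)
{
  tree cst = build_int_cst (TREE_TYPE (n_1), 16);
  tree expr = build2 (PLUS_EXPR, TREE_TYPE (n_1), n_1, cst);
  bool neg;
  tree inv;

  /* SYM is N_1, NEG is false and INV is 16.  */
  tree sym = get_single_symbol (expr, &neg, &inv);
  gcc_assert (sym == n_1 && !neg && inv == cst);

  /* The reverse operation yields an equivalent N_1 + 16.  */
  return build_symbolic_expr (TREE_TYPE (n_1), sym, neg, inv);
}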
952 /* Return true if value range VR involves exactly one symbol SYM. */
954 static bool
955 symbolic_range_based_on_p (value_range_t *vr, const_tree sym)
957 bool neg, min_has_symbol, max_has_symbol;
958 tree inv;
960 if (is_gimple_min_invariant (vr->min))
961 min_has_symbol = false;
962 else if (get_single_symbol (vr->min, &neg, &inv) == sym)
963 min_has_symbol = true;
964 else
965 return false;
967 if (is_gimple_min_invariant (vr->max))
968 max_has_symbol = false;
969 else if (get_single_symbol (vr->max, &neg, &inv) == sym)
970 max_has_symbol = true;
971 else
972 return false;
974 return (min_has_symbol || max_has_symbol);
977 /* Return true if value range VR uses an overflow infinity. */
979 static inline bool
980 overflow_infinity_range_p (value_range_t *vr)
982 return (vr->type == VR_RANGE
983 && (is_overflow_infinity (vr->min)
984 || is_overflow_infinity (vr->max)));
987 /* Return false if we can not make a valid comparison based on VR;
988 this will be the case if it uses an overflow infinity and overflow
989 is not undefined (i.e., -fno-strict-overflow is in effect).
990 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
991 uses an overflow infinity. */
993 static bool
994 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
996 gcc_assert (vr->type == VR_RANGE);
997 if (is_overflow_infinity (vr->min))
999 *strict_overflow_p = true;
1000 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
1001 return false;
1003 if (is_overflow_infinity (vr->max))
1005 *strict_overflow_p = true;
1006 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
1007 return false;
1009 return true;
 1013 /* Return true if the result of assignment STMT is known to be non-negative.
1014 If the return value is based on the assumption that signed overflow is
1015 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1016 *STRICT_OVERFLOW_P.*/
1018 static bool
1019 gimple_assign_nonnegative_warnv_p (gimple *stmt, bool *strict_overflow_p)
1021 enum tree_code code = gimple_assign_rhs_code (stmt);
1022 switch (get_gimple_rhs_class (code))
1024 case GIMPLE_UNARY_RHS:
1025 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
1026 gimple_expr_type (stmt),
1027 gimple_assign_rhs1 (stmt),
1028 strict_overflow_p);
1029 case GIMPLE_BINARY_RHS:
1030 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
1031 gimple_expr_type (stmt),
1032 gimple_assign_rhs1 (stmt),
1033 gimple_assign_rhs2 (stmt),
1034 strict_overflow_p);
1035 case GIMPLE_TERNARY_RHS:
1036 return false;
1037 case GIMPLE_SINGLE_RHS:
1038 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
1039 strict_overflow_p);
1040 case GIMPLE_INVALID_RHS:
1041 gcc_unreachable ();
1042 default:
1043 gcc_unreachable ();
 1047 /* Return true if the return value of call STMT is known to be non-negative.
1048 If the return value is based on the assumption that signed overflow is
1049 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1050 *STRICT_OVERFLOW_P.*/
1052 static bool
1053 gimple_call_nonnegative_warnv_p (gimple *stmt, bool *strict_overflow_p)
1055 tree arg0 = gimple_call_num_args (stmt) > 0 ?
1056 gimple_call_arg (stmt, 0) : NULL_TREE;
1057 tree arg1 = gimple_call_num_args (stmt) > 1 ?
1058 gimple_call_arg (stmt, 1) : NULL_TREE;
1060 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
1061 gimple_call_fndecl (stmt),
1062 arg0,
1063 arg1,
1064 strict_overflow_p);
 1067 /* Return true if STMT is known to compute a non-negative value.
1068 If the return value is based on the assumption that signed overflow is
1069 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1070 *STRICT_OVERFLOW_P.*/
1072 static bool
1073 gimple_stmt_nonnegative_warnv_p (gimple *stmt, bool *strict_overflow_p)
1075 switch (gimple_code (stmt))
1077 case GIMPLE_ASSIGN:
1078 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
1079 case GIMPLE_CALL:
1080 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
1081 default:
1082 gcc_unreachable ();
 1086 /* Return true if the result of assignment STMT is known to be non-zero.
1087 If the return value is based on the assumption that signed overflow is
1088 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1089 *STRICT_OVERFLOW_P.*/
1091 static bool
1092 gimple_assign_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
1094 enum tree_code code = gimple_assign_rhs_code (stmt);
1095 switch (get_gimple_rhs_class (code))
1097 case GIMPLE_UNARY_RHS:
1098 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1099 gimple_expr_type (stmt),
1100 gimple_assign_rhs1 (stmt),
1101 strict_overflow_p);
1102 case GIMPLE_BINARY_RHS:
1103 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1104 gimple_expr_type (stmt),
1105 gimple_assign_rhs1 (stmt),
1106 gimple_assign_rhs2 (stmt),
1107 strict_overflow_p);
1108 case GIMPLE_TERNARY_RHS:
1109 return false;
1110 case GIMPLE_SINGLE_RHS:
1111 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
1112 strict_overflow_p);
1113 case GIMPLE_INVALID_RHS:
1114 gcc_unreachable ();
1115 default:
1116 gcc_unreachable ();
1120 /* Return true if STMT is known to compute a non-zero value.
1121 If the return value is based on the assumption that signed overflow is
1122 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1123 *STRICT_OVERFLOW_P.*/
1125 static bool
1126 gimple_stmt_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
1128 switch (gimple_code (stmt))
1130 case GIMPLE_ASSIGN:
1131 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
1132 case GIMPLE_CALL:
1134 tree fndecl = gimple_call_fndecl (stmt);
1135 if (!fndecl) return false;
1136 if (flag_delete_null_pointer_checks && !flag_check_new
1137 && DECL_IS_OPERATOR_NEW (fndecl)
1138 && !TREE_NOTHROW (fndecl))
1139 return true;
1140 /* References are always non-NULL. */
1141 if (flag_delete_null_pointer_checks
1142 && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE)
1143 return true;
1144 if (flag_delete_null_pointer_checks &&
1145 lookup_attribute ("returns_nonnull",
1146 TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
1147 return true;
1148 return gimple_alloca_call_p (stmt);
1150 default:
1151 gcc_unreachable ();
1155 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1156 obtained so far. */
1158 static bool
1159 vrp_stmt_computes_nonzero (gimple *stmt, bool *strict_overflow_p)
1161 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1162 return true;
1164 /* If we have an expression of the form &X->a, then the expression
1165 is nonnull if X is nonnull. */
1166 if (is_gimple_assign (stmt)
1167 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1169 tree expr = gimple_assign_rhs1 (stmt);
1170 tree base = get_base_address (TREE_OPERAND (expr, 0));
1172 if (base != NULL_TREE
1173 && TREE_CODE (base) == MEM_REF
1174 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1176 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
1177 if (range_is_nonnull (vr))
1178 return true;
1182 return false;
1185 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1186 a gimple invariant, or SSA_NAME +- CST. */
1188 static bool
1189 valid_value_p (tree expr)
1191 if (TREE_CODE (expr) == SSA_NAME)
1192 return true;
1194 if (TREE_CODE (expr) == PLUS_EXPR
1195 || TREE_CODE (expr) == MINUS_EXPR)
1196 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1197 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1199 return is_gimple_min_invariant (expr);
1202 /* Return
1203 1 if VAL < VAL2
1204 0 if !(VAL < VAL2)
1205 -2 if those are incomparable. */
1206 static inline int
1207 operand_less_p (tree val, tree val2)
1209 /* LT is folded faster than GE and others. Inline the common case. */
1210 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1211 return tree_int_cst_lt (val, val2);
1212 else
1214 tree tcmp;
1216 fold_defer_overflow_warnings ();
1218 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1220 fold_undefer_and_ignore_overflow_warnings ();
1222 if (!tcmp
1223 || TREE_CODE (tcmp) != INTEGER_CST)
1224 return -2;
1226 if (!integer_zerop (tcmp))
1227 return 1;
1230 /* val >= val2, not considering overflow infinity. */
1231 if (is_negative_overflow_infinity (val))
1232 return is_negative_overflow_infinity (val2) ? 0 : 1;
1233 else if (is_positive_overflow_infinity (val2))
1234 return is_positive_overflow_infinity (val) ? 0 : 1;
1236 return 0;
1239 /* Compare two values VAL1 and VAL2. Return
1241 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1242 -1 if VAL1 < VAL2,
1243 0 if VAL1 == VAL2,
1244 +1 if VAL1 > VAL2, and
1245 +2 if VAL1 != VAL2
1247 This is similar to tree_int_cst_compare but supports pointer values
1248 and values that cannot be compared at compile time.
1250 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1251 true if the return value is only valid if we assume that signed
1252 overflow is undefined. */
1254 static int
1255 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1257 if (val1 == val2)
1258 return 0;
1260 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1261 both integers. */
1262 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1263 == POINTER_TYPE_P (TREE_TYPE (val2)));
1265 /* Convert the two values into the same type. This is needed because
1266 sizetype causes sign extension even for unsigned types. */
1267 val2 = fold_convert (TREE_TYPE (val1), val2);
1268 STRIP_USELESS_TYPE_CONVERSION (val2);
1270 if ((TREE_CODE (val1) == SSA_NAME
1271 || (TREE_CODE (val1) == NEGATE_EXPR
1272 && TREE_CODE (TREE_OPERAND (val1, 0)) == SSA_NAME)
1273 || TREE_CODE (val1) == PLUS_EXPR
1274 || TREE_CODE (val1) == MINUS_EXPR)
1275 && (TREE_CODE (val2) == SSA_NAME
1276 || (TREE_CODE (val2) == NEGATE_EXPR
1277 && TREE_CODE (TREE_OPERAND (val2, 0)) == SSA_NAME)
1278 || TREE_CODE (val2) == PLUS_EXPR
1279 || TREE_CODE (val2) == MINUS_EXPR))
1281 tree n1, c1, n2, c2;
1282 enum tree_code code1, code2;
1284 /* If VAL1 and VAL2 are of the form '[-]NAME [+-] CST' or 'NAME',
1285 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1286 same name, return -2. */
1287 if (TREE_CODE (val1) == SSA_NAME || TREE_CODE (val1) == NEGATE_EXPR)
1289 code1 = SSA_NAME;
1290 n1 = val1;
1291 c1 = NULL_TREE;
1293 else
1295 code1 = TREE_CODE (val1);
1296 n1 = TREE_OPERAND (val1, 0);
1297 c1 = TREE_OPERAND (val1, 1);
1298 if (tree_int_cst_sgn (c1) == -1)
1300 if (is_negative_overflow_infinity (c1))
1301 return -2;
1302 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1303 if (!c1)
1304 return -2;
1305 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1309 if (TREE_CODE (val2) == SSA_NAME || TREE_CODE (val2) == NEGATE_EXPR)
1311 code2 = SSA_NAME;
1312 n2 = val2;
1313 c2 = NULL_TREE;
1315 else
1317 code2 = TREE_CODE (val2);
1318 n2 = TREE_OPERAND (val2, 0);
1319 c2 = TREE_OPERAND (val2, 1);
1320 if (tree_int_cst_sgn (c2) == -1)
1322 if (is_negative_overflow_infinity (c2))
1323 return -2;
1324 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1325 if (!c2)
1326 return -2;
1327 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1331 /* Both values must use the same name. */
1332 if (TREE_CODE (n1) == NEGATE_EXPR && TREE_CODE (n2) == NEGATE_EXPR)
1334 n1 = TREE_OPERAND (n1, 0);
1335 n2 = TREE_OPERAND (n2, 0);
1337 if (n1 != n2)
1338 return -2;
1340 if (code1 == SSA_NAME && code2 == SSA_NAME)
1341 /* NAME == NAME */
1342 return 0;
1344 /* If overflow is defined we cannot simplify more. */
1345 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1346 return -2;
1348 if (strict_overflow_p != NULL
1349 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1350 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1351 *strict_overflow_p = true;
1353 if (code1 == SSA_NAME)
1355 if (code2 == PLUS_EXPR)
1356 /* NAME < NAME + CST */
1357 return -1;
1358 else if (code2 == MINUS_EXPR)
1359 /* NAME > NAME - CST */
1360 return 1;
1362 else if (code1 == PLUS_EXPR)
1364 if (code2 == SSA_NAME)
1365 /* NAME + CST > NAME */
1366 return 1;
1367 else if (code2 == PLUS_EXPR)
1368 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1369 return compare_values_warnv (c1, c2, strict_overflow_p);
1370 else if (code2 == MINUS_EXPR)
1371 /* NAME + CST1 > NAME - CST2 */
1372 return 1;
1374 else if (code1 == MINUS_EXPR)
1376 if (code2 == SSA_NAME)
1377 /* NAME - CST < NAME */
1378 return -1;
1379 else if (code2 == PLUS_EXPR)
1380 /* NAME - CST1 < NAME + CST2 */
1381 return -1;
1382 else if (code2 == MINUS_EXPR)
1383 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1384 C1 and C2 are swapped in the call to compare_values. */
1385 return compare_values_warnv (c2, c1, strict_overflow_p);
1388 gcc_unreachable ();
1391 /* We cannot compare non-constants. */
1392 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1393 return -2;
1395 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1397 /* We cannot compare overflowed values, except for overflow
1398 infinities. */
1399 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1401 if (strict_overflow_p != NULL)
1402 *strict_overflow_p = true;
1403 if (is_negative_overflow_infinity (val1))
1404 return is_negative_overflow_infinity (val2) ? 0 : -1;
1405 else if (is_negative_overflow_infinity (val2))
1406 return 1;
1407 else if (is_positive_overflow_infinity (val1))
1408 return is_positive_overflow_infinity (val2) ? 0 : 1;
1409 else if (is_positive_overflow_infinity (val2))
1410 return -1;
1411 return -2;
1414 return tree_int_cst_compare (val1, val2);
1416 else
1418 tree t;
1420 /* First see if VAL1 and VAL2 are not the same. */
1421 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1422 return 0;
1424 /* If VAL1 is a lower address than VAL2, return -1. */
1425 if (operand_less_p (val1, val2) == 1)
1426 return -1;
1428 /* If VAL1 is a higher address than VAL2, return +1. */
1429 if (operand_less_p (val2, val1) == 1)
1430 return 1;
1432 /* If VAL1 is different than VAL2, return +2.
1433 For integer constants we either have already returned -1 or 1
1434 or they are equivalent. We still might succeed in proving
1435 something about non-trivial operands. */
1436 if (TREE_CODE (val1) != INTEGER_CST
1437 || TREE_CODE (val2) != INTEGER_CST)
1439 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1440 if (t && integer_onep (t))
1441 return 2;
1444 return -2;
1448 /* Compare values like compare_values_warnv, but treat comparisons of
1449 nonconstants which rely on undefined overflow as incomparable. */
1451 static int
1452 compare_values (tree val1, tree val2)
1454 bool sop;
1455 int ret;
1457 sop = false;
1458 ret = compare_values_warnv (val1, val2, &sop);
1459 if (sop
1460 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1461 ret = -2;
1462 return ret;
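/* Illustrative sketch (not part of the original source): the return value
   conventions of compare_values on constants and on an unrelated
   SSA_NAME.  N_1 stands for an arbitrary SSA_NAME of integer type; the
   function name is made up for the example.  */
static void
vrp_example_compare_values (tree n_1)
{
  tree two = build_int_cst (integer_type_node, 2);
  tree three = build_int_cst (integer_type_node, 3);

  gcc_assert (compare_values (two, three) == -1);   /* 2 < 3.  */
  gcc_assert (compare_values (three, three) == 0);  /* 3 == 3.  */
  gcc_assert (compare_values (three, two) == 1);    /* 3 > 2.  */

  /* An SSA_NAME and an unrelated constant cannot be ordered at
     compile time.  */
  gcc_assert (compare_values (n_1, two) == -2);
}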
1466 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1467 0 if VAL is not inside [MIN, MAX],
1468 -2 if we cannot tell either way.
1470 Benchmark compile/20001226-1.c compilation time after changing this
1471 function. */
1473 static inline int
1474 value_inside_range (tree val, tree min, tree max)
1476 int cmp1, cmp2;
1478 cmp1 = operand_less_p (val, min);
1479 if (cmp1 == -2)
1480 return -2;
1481 if (cmp1 == 1)
1482 return 0;
1484 cmp2 = operand_less_p (max, val);
1485 if (cmp2 == -2)
1486 return -2;
1488 return !cmp2;
1492 /* Return true if value ranges VR0 and VR1 have a non-empty
1493 intersection.
1495 Benchmark compile/20001226-1.c compilation time after changing this
 1496    function.  */
1499 static inline bool
1500 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1502 /* The value ranges do not intersect if the maximum of the first range is
1503 less than the minimum of the second range or vice versa.
1504 When those relations are unknown, we can't do any better. */
1505 if (operand_less_p (vr0->max, vr1->min) != 0)
1506 return false;
1507 if (operand_less_p (vr1->max, vr0->min) != 0)
1508 return false;
1509 return true;
1513 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1514 include the value zero, -2 if we cannot tell. */
1516 static inline int
1517 range_includes_zero_p (tree min, tree max)
1519 tree zero = build_int_cst (TREE_TYPE (min), 0);
1520 return value_inside_range (zero, min, max);
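/* Illustrative sketch (not part of the original source): membership tests
   against the constant range [-5, 5].  The function name is made up for
   the example.  */
static void
vrp_example_range_membership (void)
{
  tree min = build_int_cst (integer_type_node, -5);
  tree max = build_int_cst (integer_type_node, 5);

  /* 5 lies inside [-5, 5], 7 does not, and the range includes zero.  */
  gcc_assert (value_inside_range (build_int_cst (integer_type_node, 5),
				  min, max) == 1);
  gcc_assert (value_inside_range (build_int_cst (integer_type_node, 7),
				  min, max) == 0);
  gcc_assert (range_includes_zero_p (min, max) == 1);
}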
 1523 /* Return true if *VR is known to contain only nonnegative values.  */
1525 static inline bool
1526 value_range_nonnegative_p (value_range_t *vr)
1528 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1529 which would return a useful value should be encoded as a
1530 VR_RANGE. */
1531 if (vr->type == VR_RANGE)
1533 int result = compare_values (vr->min, integer_zero_node);
1534 return (result == 0 || result == 1);
1537 return false;
 1540 /* If *VR has a value range that is a single constant value return that,
1541 otherwise return NULL_TREE. */
1543 static tree
1544 value_range_constant_singleton (value_range_t *vr)
1546 if (vr->type == VR_RANGE
1547 && operand_equal_p (vr->min, vr->max, 0)
1548 && is_gimple_min_invariant (vr->min))
1549 return vr->min;
1551 return NULL_TREE;
1554 /* If OP has a value range with a single constant value return that,
1555 otherwise return NULL_TREE. This returns OP itself if OP is a
1556 constant. */
1558 static tree
1559 op_with_constant_singleton_value_range (tree op)
1561 if (is_gimple_min_invariant (op))
1562 return op;
1564 if (TREE_CODE (op) != SSA_NAME)
1565 return NULL_TREE;
1567 return value_range_constant_singleton (get_value_range (op));
1570 /* Return true if op is in a boolean [0, 1] value-range. */
1572 static bool
1573 op_with_boolean_value_range_p (tree op)
1575 value_range_t *vr;
1577 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1578 return true;
1580 if (integer_zerop (op)
1581 || integer_onep (op))
1582 return true;
1584 if (TREE_CODE (op) != SSA_NAME)
1585 return false;
1587 vr = get_value_range (op);
1588 return (vr->type == VR_RANGE
1589 && integer_zerop (vr->min)
1590 && integer_onep (vr->max));
1593 /* Extract value range information from an ASSERT_EXPR EXPR and store
1594 it in *VR_P. */
1596 static void
1597 extract_range_from_assert (value_range_t *vr_p, tree expr)
1599 tree var, cond, limit, min, max, type;
1600 value_range_t *limit_vr;
1601 enum tree_code cond_code;
1603 var = ASSERT_EXPR_VAR (expr);
1604 cond = ASSERT_EXPR_COND (expr);
1606 gcc_assert (COMPARISON_CLASS_P (cond));
1608 /* Find VAR in the ASSERT_EXPR conditional. */
1609 if (var == TREE_OPERAND (cond, 0)
1610 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1611 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1613 /* If the predicate is of the form VAR COMP LIMIT, then we just
1614 take LIMIT from the RHS and use the same comparison code. */
1615 cond_code = TREE_CODE (cond);
1616 limit = TREE_OPERAND (cond, 1);
1617 cond = TREE_OPERAND (cond, 0);
1619 else
1621 /* If the predicate is of the form LIMIT COMP VAR, then we need
1622 to flip around the comparison code to create the proper range
1623 for VAR. */
1624 cond_code = swap_tree_comparison (TREE_CODE (cond));
1625 limit = TREE_OPERAND (cond, 0);
1626 cond = TREE_OPERAND (cond, 1);
1629 limit = avoid_overflow_infinity (limit);
1631 type = TREE_TYPE (var);
1632 gcc_assert (limit != var);
1634 /* For pointer arithmetic, we only keep track of pointer equality
1635 and inequality. */
1636 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1638 set_value_range_to_varying (vr_p);
1639 return;
1642 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1643 try to use LIMIT's range to avoid creating symbolic ranges
1644 unnecessarily. */
1645 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1647 /* LIMIT's range is only interesting if it has any useful information. */
1648 if (limit_vr
1649 && (limit_vr->type == VR_UNDEFINED
1650 || limit_vr->type == VR_VARYING
1651 || symbolic_range_p (limit_vr)))
1652 limit_vr = NULL;
1654 /* Initially, the new range has the same set of equivalences of
1655 VAR's range. This will be revised before returning the final
1656 value. Since assertions may be chained via mutually exclusive
1657 predicates, we will need to trim the set of equivalences before
1658 we are done. */
1659 gcc_assert (vr_p->equiv == NULL);
1660 add_equivalence (&vr_p->equiv, var);
1662 /* Extract a new range based on the asserted comparison for VAR and
1663 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1664 will only use it for equality comparisons (EQ_EXPR). For any
1665 other kind of assertion, we cannot derive a range from LIMIT's
1666 anti-range that can be used to describe the new range. For
1667 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1668 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1669 no single range for x_2 that could describe LE_EXPR, so we might
1670 as well build the range [b_4, +INF] for it.
1671 One special case we handle is extracting a range from a
1672 range test encoded as (unsigned)var + CST <= limit. */
1673 if (TREE_CODE (cond) == NOP_EXPR
1674 || TREE_CODE (cond) == PLUS_EXPR)
1676 if (TREE_CODE (cond) == PLUS_EXPR)
1678 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1679 TREE_OPERAND (cond, 1));
1680 max = int_const_binop (PLUS_EXPR, limit, min);
1681 cond = TREE_OPERAND (cond, 0);
1683 else
1685 min = build_int_cst (TREE_TYPE (var), 0);
1686 max = limit;
1689 /* Make sure to not set TREE_OVERFLOW on the final type
1690 conversion. We are willingly interpreting large positive
1691 unsigned values as negative signed values here. */
1692 min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
1693 max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
1695 /* We can transform a max, min range to an anti-range or
1696 vice-versa. Use set_and_canonicalize_value_range which does
1697 this for us. */
1698 if (cond_code == LE_EXPR)
1699 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1700 min, max, vr_p->equiv);
1701 else if (cond_code == GT_EXPR)
1702 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1703 min, max, vr_p->equiv);
1704 else
1705 gcc_unreachable ();
1707 else if (cond_code == EQ_EXPR)
1709 enum value_range_type range_type;
1711 if (limit_vr)
1713 range_type = limit_vr->type;
1714 min = limit_vr->min;
1715 max = limit_vr->max;
1717 else
1719 range_type = VR_RANGE;
1720 min = limit;
1721 max = limit;
1724 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1726 /* When asserting the equality VAR == LIMIT and LIMIT is another
1727 SSA name, the new range will also inherit the equivalence set
1728 from LIMIT. */
1729 if (TREE_CODE (limit) == SSA_NAME)
1730 add_equivalence (&vr_p->equiv, limit);
1732 else if (cond_code == NE_EXPR)
1734 /* As described above, when LIMIT's range is an anti-range and
1735 this assertion is an inequality (NE_EXPR), then we cannot
1736 derive anything from the anti-range. For instance, if
1737 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1738 not imply that VAR's range is [0, 0]. So, in the case of
1739 anti-ranges, we just assert the inequality using LIMIT and
1740 not its anti-range.
1742 If LIMIT_VR is a range, we can only use it to build a new
1743 anti-range if LIMIT_VR is a single-valued range. For
1744 instance, if LIMIT_VR is [0, 1], the predicate
1745 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1746 Rather, it means that for value 0 VAR should be ~[0, 0]
1747 and for value 1, VAR should be ~[1, 1]. We cannot
1748 represent these ranges.
1750 The only situation in which we can build a valid
1751 anti-range is when LIMIT_VR is a single-valued range
1752 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1753 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1754 if (limit_vr
1755 && limit_vr->type == VR_RANGE
1756 && compare_values (limit_vr->min, limit_vr->max) == 0)
1758 min = limit_vr->min;
1759 max = limit_vr->max;
1761 else
1763 /* In any other case, we cannot use LIMIT's range to build a
1764 valid anti-range. */
1765 min = max = limit;
1768 /* If MIN and MAX cover the whole range for their type, then
1769 just use the original LIMIT. */
1770 if (INTEGRAL_TYPE_P (type)
1771 && vrp_val_is_min (min)
1772 && vrp_val_is_max (max))
1773 min = max = limit;
1775 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1776 min, max, vr_p->equiv);
1778 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1780 min = TYPE_MIN_VALUE (type);
1782 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1783 max = limit;
1784 else
1786 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1787 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1788 LT_EXPR. */
1789 max = limit_vr->max;
1792 /* If the maximum value forces us to be out of bounds, simply punt.
1793 It would be pointless to try and do anything more since this
1794 all should be optimized away above us. */
1795 if ((cond_code == LT_EXPR
1796 && compare_values (max, min) == 0)
1797 || is_overflow_infinity (max))
1798 set_value_range_to_varying (vr_p);
1799 else
1801 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1802 if (cond_code == LT_EXPR)
1804 if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1805 && !TYPE_UNSIGNED (TREE_TYPE (max)))
1806 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1807 build_int_cst (TREE_TYPE (max), -1));
1808 else
1809 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1810 build_int_cst (TREE_TYPE (max), 1));
1811 if (EXPR_P (max))
1812 TREE_NO_WARNING (max) = 1;
1815 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1818 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1820 max = TYPE_MAX_VALUE (type);
1822 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1823 min = limit;
1824 else
1826 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1827 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1828 GT_EXPR. */
1829 min = limit_vr->min;
1832 /* If the minimum value forces us to be out of bounds, simply punt.
1833 It would be pointless to try and do anything more since this
1834 all should be optimized away above us. */
1835 if ((cond_code == GT_EXPR
1836 && compare_values (min, max) == 0)
1837 || is_overflow_infinity (min))
1838 set_value_range_to_varying (vr_p);
1839 else
1841 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1842 if (cond_code == GT_EXPR)
1844 if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1845 && !TYPE_UNSIGNED (TREE_TYPE (min)))
1846 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1847 build_int_cst (TREE_TYPE (min), -1));
1848 else
1849 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1850 build_int_cst (TREE_TYPE (min), 1));
1851 if (EXPR_P (min))
1852 TREE_NO_WARNING (min) = 1;
1855 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1858 else
1859 gcc_unreachable ();
1861 /* Finally intersect the new range with what we already know about var. */
1862 vrp_intersect_ranges (vr_p, get_value_range (var));
1866 /* Extract range information from SSA name VAR and store it in VR. If
1867 VAR has an interesting range, use it. Otherwise, create the
1868 range [VAR, VAR] and return it. This is useful in situations where
1869 we may have conditionals testing values of VARYING names. For
1870 instance,
1872 x_3 = y_5;
1873 if (x_3 > y_5)
1876 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1877 always false. */
1879 static void
1880 extract_range_from_ssa_name (value_range_t *vr, tree var)
1882 value_range_t *var_vr = get_value_range (var);
1884 if (var_vr->type != VR_VARYING)
1885 copy_value_range (vr, var_vr);
1886 else
1887 set_value_range (vr, VR_RANGE, var, var, NULL);
1889 add_equivalence (&vr->equiv, var);
1893 /* Wrapper around int_const_binop. If the operation overflows and we
1894 are not using wrapping arithmetic, then adjust the result to be
1895 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1896 NULL_TREE if we need to use an overflow infinity representation but
1897 the type does not support it. */
1899 static tree
1900 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1902 tree res;
1904 res = int_const_binop (code, val1, val2);
1906 /* If we are using unsigned arithmetic, operate symbolically
1907 on -INF and +INF as int_const_binop only handles signed overflow. */
1908 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1910 int checkz = compare_values (res, val1);
1911 bool overflow = false;
1913 /* Ensure that res = val1 [+*] val2 >= val1
1914 or that res = val1 - val2 <= val1. */
1915 if ((code == PLUS_EXPR
1916 && !(checkz == 1 || checkz == 0))
1917 || (code == MINUS_EXPR
1918 && !(checkz == 0 || checkz == -1)))
1920 overflow = true;
1922 /* Checking for multiplication overflow is done by dividing the
1923 output of the multiplication by the first input of the
1924 multiplication. If the result of that division operation is
1925 not equal to the second input of the multiplication, then the
1926 multiplication overflowed. */
1927 else if (code == MULT_EXPR && !integer_zerop (val1))
1929 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1930 res,
1931 val1);
1932 int check = compare_values (tmp, val2);
1934 if (check != 0)
1935 overflow = true;
1938 if (overflow)
1940 res = copy_node (res);
1941 TREE_OVERFLOW (res) = 1;
1945 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
 1946     /* If the signed operation wraps then int_const_binop has done
1947 everything we want. */
1949 /* Signed division of -1/0 overflows and by the time it gets here
1950 returns NULL_TREE. */
1951 else if (!res)
1952 return NULL_TREE;
1953 else if ((TREE_OVERFLOW (res)
1954 && !TREE_OVERFLOW (val1)
1955 && !TREE_OVERFLOW (val2))
1956 || is_overflow_infinity (val1)
1957 || is_overflow_infinity (val2))
1959 /* If the operation overflowed but neither VAL1 nor VAL2 are
1960 overflown, return -INF or +INF depending on the operation
1961 and the combination of signs of the operands. */
1962 int sgn1 = tree_int_cst_sgn (val1);
1963 int sgn2 = tree_int_cst_sgn (val2);
1965 if (needs_overflow_infinity (TREE_TYPE (res))
1966 && !supports_overflow_infinity (TREE_TYPE (res)))
1967 return NULL_TREE;
1969 /* We have to punt on adding infinities of different signs,
1970 since we can't tell what the sign of the result should be.
1971 Likewise for subtracting infinities of the same sign. */
1972 if (((code == PLUS_EXPR && sgn1 != sgn2)
1973 || (code == MINUS_EXPR && sgn1 == sgn2))
1974 && is_overflow_infinity (val1)
1975 && is_overflow_infinity (val2))
1976 return NULL_TREE;
1978 /* Don't try to handle division or shifting of infinities. */
1979 if ((code == TRUNC_DIV_EXPR
1980 || code == FLOOR_DIV_EXPR
1981 || code == CEIL_DIV_EXPR
1982 || code == EXACT_DIV_EXPR
1983 || code == ROUND_DIV_EXPR
1984 || code == RSHIFT_EXPR)
1985 && (is_overflow_infinity (val1)
1986 || is_overflow_infinity (val2)))
1987 return NULL_TREE;
1989 /* Notice that we only need to handle the restricted set of
1990 operations handled by extract_range_from_binary_expr.
1991 Among them, only multiplication, addition and subtraction
1992 can yield overflow without overflown operands because we
1993 are working with integral types only... except in the
1994 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1995 for division too. */
1997 /* For multiplication, the sign of the overflow is given
1998 by the comparison of the signs of the operands. */
1999 if ((code == MULT_EXPR && sgn1 == sgn2)
2000 /* For addition, the operands must be of the same sign
2001 to yield an overflow. Its sign is therefore that
2002 of one of the operands, for example the first. For
2003 infinite operands X + -INF is negative, not positive. */
2004 || (code == PLUS_EXPR
2005 && (sgn1 >= 0
2006 ? !is_negative_overflow_infinity (val2)
2007 : is_positive_overflow_infinity (val2)))
2008 /* For subtraction, non-infinite operands must be of
2009 different signs to yield an overflow. Its sign is
2010 therefore that of the first operand or the opposite of
2011 that of the second operand. A first operand of 0 counts
2012 as positive here, for the corner case 0 - (-INF), which
2013 overflows, but must yield +INF. For infinite operands 0
2014 - INF is negative, not positive. */
2015 || (code == MINUS_EXPR
2016 && (sgn1 >= 0
2017 ? !is_positive_overflow_infinity (val2)
2018 : is_negative_overflow_infinity (val2)))
2019 /* We only get in here with positive shift count, so the
2020 overflow direction is the same as the sign of val1.
2021 Actually rshift does not overflow at all, but we only
2022 handle the case of shifting overflowed -INF and +INF. */
2023 || (code == RSHIFT_EXPR
2024 && sgn1 >= 0)
2025 /* For division, the only case is -INF / -1 = +INF. */
2026 || code == TRUNC_DIV_EXPR
2027 || code == FLOOR_DIV_EXPR
2028 || code == CEIL_DIV_EXPR
2029 || code == EXACT_DIV_EXPR
2030 || code == ROUND_DIV_EXPR)
2031 return (needs_overflow_infinity (TREE_TYPE (res))
2032 ? positive_overflow_infinity (TREE_TYPE (res))
2033 : TYPE_MAX_VALUE (TREE_TYPE (res)));
2034 else
2035 return (needs_overflow_infinity (TREE_TYPE (res))
2036 ? negative_overflow_infinity (TREE_TYPE (res))
2037 : TYPE_MIN_VALUE (TREE_TYPE (res)));
2040 return res;
2044 /* For range VR compute two wide_int bitmasks. If some bit is unset
2045 in the *MAY_BE_NONZERO bitmask, it means that for all numbers in the
2046 range the bit is 0; otherwise it might be 0 or 1. If some bit is set
2047 in the *MUST_BE_NONZERO bitmask, it means that for all numbers in the
2048 range the bit is 1; otherwise it might be 0 or 1. */
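/* For instance, for the constant range [4, 7] this computes
*MAY_BE_NONZERO == 0b111 and *MUST_BE_NONZERO == 0b100: bit 2 is set
in every value of the range, while bits 0 and 1 may be either. */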
2050 static bool
2051 zero_nonzero_bits_from_vr (const tree expr_type,
2052 value_range_t *vr,
2053 wide_int *may_be_nonzero,
2054 wide_int *must_be_nonzero)
2056 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
2057 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
2058 if (!range_int_cst_p (vr)
2059 || is_overflow_infinity (vr->min)
2060 || is_overflow_infinity (vr->max))
2061 return false;
2063 if (range_int_cst_singleton_p (vr))
2065 *may_be_nonzero = vr->min;
2066 *must_be_nonzero = *may_be_nonzero;
2068 else if (tree_int_cst_sgn (vr->min) >= 0
2069 || tree_int_cst_sgn (vr->max) < 0)
2071 wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
2072 *may_be_nonzero = wi::bit_or (vr->min, vr->max);
2073 *must_be_nonzero = wi::bit_and (vr->min, vr->max);
2074 if (xor_mask != 0)
2076 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
2077 may_be_nonzero->get_precision ());
2078 *may_be_nonzero = *may_be_nonzero | mask;
2079 *must_be_nonzero = must_be_nonzero->and_not (mask);
2083 return true;
2086 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
2087 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
2088 false otherwise. If *AR can be represented with a single range
2089 *VR1 will be VR_UNDEFINED. */
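/* For instance, ~[3, 5] of an 8-bit unsigned type yields
*VR0 = [0, 2] and *VR1 = [6, 255], while ~[0, 5] yields just
*VR0 = [6, 255] with *VR1 left VR_UNDEFINED. */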
2091 static bool
2092 ranges_from_anti_range (value_range_t *ar,
2093 value_range_t *vr0, value_range_t *vr1)
2095 tree type = TREE_TYPE (ar->min);
2097 vr0->type = VR_UNDEFINED;
2098 vr1->type = VR_UNDEFINED;
2100 if (ar->type != VR_ANTI_RANGE
2101 || TREE_CODE (ar->min) != INTEGER_CST
2102 || TREE_CODE (ar->max) != INTEGER_CST
2103 || !vrp_val_min (type)
2104 || !vrp_val_max (type))
2105 return false;
2107 if (!vrp_val_is_min (ar->min))
2109 vr0->type = VR_RANGE;
2110 vr0->min = vrp_val_min (type);
2111 vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1));
2113 if (!vrp_val_is_max (ar->max))
2115 vr1->type = VR_RANGE;
2116 vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1));
2117 vr1->max = vrp_val_max (type);
2119 if (vr0->type == VR_UNDEFINED)
2121 *vr0 = *vr1;
2122 vr1->type = VR_UNDEFINED;
2125 return vr0->type != VR_UNDEFINED;
2128 /* Helper to extract a value-range *VR for a multiplicative operation
2129 *VR0 CODE *VR1. */
2131 static void
2132 extract_range_from_multiplicative_op_1 (value_range_t *vr,
2133 enum tree_code code,
2134 value_range_t *vr0, value_range_t *vr1)
2136 enum value_range_type type;
2137 tree val[4];
2138 size_t i;
2139 tree min, max;
2140 bool sop;
2141 int cmp;
2143 /* Multiplications, divisions and shifts are a bit tricky to handle,
2144 depending on the mix of signs we have in the two ranges, we
2145 need to operate on different values to get the minimum and
2146 maximum values for the new range. One approach is to figure
2147 out all the variations of range combinations and do the
2148 operations.
2150 However, this involves several calls to compare_values and it
2151 is pretty convoluted. It's simpler to do the 4 operations
2152 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
2153 and then figure the smallest and largest values to form
2154 the new range. */
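/* E.g. for [2, 3] * [4, 5] the four cross products are 8, 10, 12
and 15, giving the new range [8, 15]. */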
2155 gcc_assert (code == MULT_EXPR
2156 || code == TRUNC_DIV_EXPR
2157 || code == FLOOR_DIV_EXPR
2158 || code == CEIL_DIV_EXPR
2159 || code == EXACT_DIV_EXPR
2160 || code == ROUND_DIV_EXPR
2161 || code == RSHIFT_EXPR
2162 || code == LSHIFT_EXPR);
2163 gcc_assert ((vr0->type == VR_RANGE
2164 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2165 && vr0->type == vr1->type);
2167 type = vr0->type;
2169 /* Compute the 4 cross operations. */
2170 sop = false;
2171 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2172 if (val[0] == NULL_TREE)
2173 sop = true;
2175 if (vr1->max == vr1->min)
2176 val[1] = NULL_TREE;
2177 else
2179 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2180 if (val[1] == NULL_TREE)
2181 sop = true;
2184 if (vr0->max == vr0->min)
2185 val[2] = NULL_TREE;
2186 else
2188 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2189 if (val[2] == NULL_TREE)
2190 sop = true;
2193 if (vr0->min == vr0->max || vr1->min == vr1->max)
2194 val[3] = NULL_TREE;
2195 else
2197 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2198 if (val[3] == NULL_TREE)
2199 sop = true;
2202 if (sop)
2204 set_value_range_to_varying (vr);
2205 return;
2208 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2209 of VAL[i]. */
2210 min = val[0];
2211 max = val[0];
2212 for (i = 1; i < 4; i++)
2214 if (!is_gimple_min_invariant (min)
2215 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2216 || !is_gimple_min_invariant (max)
2217 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2218 break;
2220 if (val[i])
2222 if (!is_gimple_min_invariant (val[i])
2223 || (TREE_OVERFLOW (val[i])
2224 && !is_overflow_infinity (val[i])))
2226 /* If we found an overflowed value, set MIN and MAX
2227 to it so that we set the resulting range to
2228 VARYING. */
2229 min = max = val[i];
2230 break;
2233 if (compare_values (val[i], min) == -1)
2234 min = val[i];
2236 if (compare_values (val[i], max) == 1)
2237 max = val[i];
2241 /* If either MIN or MAX overflowed, then set the resulting range to
2242 VARYING. But we do accept an overflow infinity
2243 representation. */
2244 if (min == NULL_TREE
2245 || !is_gimple_min_invariant (min)
2246 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2247 || max == NULL_TREE
2248 || !is_gimple_min_invariant (max)
2249 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2251 set_value_range_to_varying (vr);
2252 return;
2255 /* We punt if:
2256 1) [-INF, +INF]
2257 2) [-INF, +-INF(OVF)]
2258 3) [+-INF(OVF), +INF]
2259 4) [+-INF(OVF), +-INF(OVF)]
2260 We learn nothing when we have INF and INF(OVF) on both sides.
2261 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2262 overflow. */
2263 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2264 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2266 set_value_range_to_varying (vr);
2267 return;
2270 cmp = compare_values (min, max);
2271 if (cmp == -2 || cmp == 1)
2273 /* If the new range has its limits swapped around (MIN > MAX),
2274 then the operation caused one of them to wrap around, mark
2275 the new range VARYING. */
2276 set_value_range_to_varying (vr);
2278 else
2279 set_value_range (vr, type, min, max, NULL);
2282 /* Extract range information from a binary operation CODE based on
2283 the ranges of each of its operands *VR0 and *VR1 with resulting
2284 type EXPR_TYPE. The resulting range is stored in *VR. */
2286 static void
2287 extract_range_from_binary_expr_1 (value_range_t *vr,
2288 enum tree_code code, tree expr_type,
2289 value_range_t *vr0_, value_range_t *vr1_)
2291 value_range_t vr0 = *vr0_, vr1 = *vr1_;
2292 value_range_t vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
2293 enum value_range_type type;
2294 tree min = NULL_TREE, max = NULL_TREE;
2295 int cmp;
2297 if (!INTEGRAL_TYPE_P (expr_type)
2298 && !POINTER_TYPE_P (expr_type))
2300 set_value_range_to_varying (vr);
2301 return;
2304 /* Not all binary expressions can be applied to ranges in a
2305 meaningful way. Handle only arithmetic operations. */
2306 if (code != PLUS_EXPR
2307 && code != MINUS_EXPR
2308 && code != POINTER_PLUS_EXPR
2309 && code != MULT_EXPR
2310 && code != TRUNC_DIV_EXPR
2311 && code != FLOOR_DIV_EXPR
2312 && code != CEIL_DIV_EXPR
2313 && code != EXACT_DIV_EXPR
2314 && code != ROUND_DIV_EXPR
2315 && code != TRUNC_MOD_EXPR
2316 && code != RSHIFT_EXPR
2317 && code != LSHIFT_EXPR
2318 && code != MIN_EXPR
2319 && code != MAX_EXPR
2320 && code != BIT_AND_EXPR
2321 && code != BIT_IOR_EXPR
2322 && code != BIT_XOR_EXPR)
2324 set_value_range_to_varying (vr);
2325 return;
2328 /* If both ranges are UNDEFINED, so is the result. */
2329 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
2331 set_value_range_to_undefined (vr);
2332 return;
2334 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2335 code. At some point we may want to special-case operations that
2336 have UNDEFINED result for all or some value-ranges of the non-UNDEFINED
2337 operand. */
2338 else if (vr0.type == VR_UNDEFINED)
2339 set_value_range_to_varying (&vr0);
2340 else if (vr1.type == VR_UNDEFINED)
2341 set_value_range_to_varying (&vr1);
2343 /* Now canonicalize anti-ranges to ranges when they are not symbolic
2344 and express ~[] op X as ([]' op X) U ([]'' op X). */
2345 if (vr0.type == VR_ANTI_RANGE
2346 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2348 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
2349 if (vrtem1.type != VR_UNDEFINED)
2351 value_range_t vrres = VR_INITIALIZER;
2352 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2353 &vrtem1, vr1_);
2354 vrp_meet (vr, &vrres);
2356 return;
2358 /* Likewise for X op ~[]. */
2359 if (vr1.type == VR_ANTI_RANGE
2360 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
2362 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
2363 if (vrtem1.type != VR_UNDEFINED)
2365 value_range_t vrres = VR_INITIALIZER;
2366 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2367 vr0_, &vrtem1);
2368 vrp_meet (vr, &vrres);
2370 return;
2373 /* The type of the resulting value range defaults to VR0.TYPE. */
2374 type = vr0.type;
2376 /* Refuse to operate on VARYING ranges, ranges of different kinds
2377 and symbolic ranges. As an exception, we allow BIT_{AND,IOR}
2378 because we may be able to derive a useful range even if one of
2379 the operands is VR_VARYING or symbolic range. Similarly for
2380 divisions, MIN/MAX and PLUS/MINUS.
2382 TODO, we may be able to derive anti-ranges in some cases. */
2383 if (code != BIT_AND_EXPR
2384 && code != BIT_IOR_EXPR
2385 && code != TRUNC_DIV_EXPR
2386 && code != FLOOR_DIV_EXPR
2387 && code != CEIL_DIV_EXPR
2388 && code != EXACT_DIV_EXPR
2389 && code != ROUND_DIV_EXPR
2390 && code != TRUNC_MOD_EXPR
2391 && code != MIN_EXPR
2392 && code != MAX_EXPR
2393 && code != PLUS_EXPR
2394 && code != MINUS_EXPR
2395 && code != RSHIFT_EXPR
2396 && (vr0.type == VR_VARYING
2397 || vr1.type == VR_VARYING
2398 || vr0.type != vr1.type
2399 || symbolic_range_p (&vr0)
2400 || symbolic_range_p (&vr1)))
2402 set_value_range_to_varying (vr);
2403 return;
2406 /* Now evaluate the expression to determine the new range. */
2407 if (POINTER_TYPE_P (expr_type))
2409 if (code == MIN_EXPR || code == MAX_EXPR)
2411 /* For MIN/MAX expressions with pointers, we only care about
2412 nullness: if both are non-null, then the result is non-null.
2413 If both are null, then the result is null. Otherwise the
2414 result is varying. */
2415 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2416 set_value_range_to_nonnull (vr, expr_type);
2417 else if (range_is_null (&vr0) && range_is_null (&vr1))
2418 set_value_range_to_null (vr, expr_type);
2419 else
2420 set_value_range_to_varying (vr);
2422 else if (code == POINTER_PLUS_EXPR)
2424 /* For pointer types, we are really only interested in asserting
2425 whether the expression evaluates to non-NULL. */
2426 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2427 set_value_range_to_nonnull (vr, expr_type);
2428 else if (range_is_null (&vr0) && range_is_null (&vr1))
2429 set_value_range_to_null (vr, expr_type);
2430 else
2431 set_value_range_to_varying (vr);
2433 else if (code == BIT_AND_EXPR)
2435 /* For pointer types, we are really only interested in asserting
2436 whether the expression evaluates to non-NULL. */
2437 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2438 set_value_range_to_nonnull (vr, expr_type);
2439 else if (range_is_null (&vr0) || range_is_null (&vr1))
2440 set_value_range_to_null (vr, expr_type);
2441 else
2442 set_value_range_to_varying (vr);
2444 else
2445 set_value_range_to_varying (vr);
2447 return;
2450 /* For integer ranges, apply the operation to each end of the
2451 range and see what we end up with. */
2452 if (code == PLUS_EXPR || code == MINUS_EXPR)
2454 const bool minus_p = (code == MINUS_EXPR);
2455 tree min_op0 = vr0.min;
2456 tree min_op1 = minus_p ? vr1.max : vr1.min;
2457 tree max_op0 = vr0.max;
2458 tree max_op1 = minus_p ? vr1.min : vr1.max;
2459 tree sym_min_op0 = NULL_TREE;
2460 tree sym_min_op1 = NULL_TREE;
2461 tree sym_max_op0 = NULL_TREE;
2462 tree sym_max_op1 = NULL_TREE;
2463 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
2465 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
2466 single-symbolic ranges, try to compute the precise resulting range,
2467 but only if we know that this resulting range will also be constant
2468 or single-symbolic. */
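/* E.g. [1, 5] + [10, 20] gives [11, 25], and [1, 5] - [10, 20]
gives [1 - 20, 5 - 10] = [-19, -5]. */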
2469 if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
2470 && (TREE_CODE (min_op0) == INTEGER_CST
2471 || (sym_min_op0
2472 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
2473 && (TREE_CODE (min_op1) == INTEGER_CST
2474 || (sym_min_op1
2475 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
2476 && (!(sym_min_op0 && sym_min_op1)
2477 || (sym_min_op0 == sym_min_op1
2478 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
2479 && (TREE_CODE (max_op0) == INTEGER_CST
2480 || (sym_max_op0
2481 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
2482 && (TREE_CODE (max_op1) == INTEGER_CST
2483 || (sym_max_op1
2484 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
2485 && (!(sym_max_op0 && sym_max_op1)
2486 || (sym_max_op0 == sym_max_op1
2487 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
2489 const signop sgn = TYPE_SIGN (expr_type);
2490 const unsigned int prec = TYPE_PRECISION (expr_type);
2491 wide_int type_min, type_max, wmin, wmax;
2492 int min_ovf = 0;
2493 int max_ovf = 0;
2495 /* Get the lower and upper bounds of the type. */
2496 if (TYPE_OVERFLOW_WRAPS (expr_type))
2498 type_min = wi::min_value (prec, sgn);
2499 type_max = wi::max_value (prec, sgn);
2501 else
2503 type_min = vrp_val_min (expr_type);
2504 type_max = vrp_val_max (expr_type);
2507 /* Combine the lower bounds, if any. */
2508 if (min_op0 && min_op1)
2510 if (minus_p)
2512 wmin = wi::sub (min_op0, min_op1);
2514 /* Check for overflow. */
2515 if (wi::cmp (0, min_op1, sgn)
2516 != wi::cmp (wmin, min_op0, sgn))
2517 min_ovf = wi::cmp (min_op0, min_op1, sgn);
2519 else
2521 wmin = wi::add (min_op0, min_op1);
2523 /* Check for overflow. */
2524 if (wi::cmp (min_op1, 0, sgn)
2525 != wi::cmp (wmin, min_op0, sgn))
2526 min_ovf = wi::cmp (min_op0, wmin, sgn);
2529 else if (min_op0)
2530 wmin = min_op0;
2531 else if (min_op1)
2532 wmin = minus_p ? wi::neg (min_op1) : min_op1;
2533 else
2534 wmin = wi::shwi (0, prec);
2536 /* Combine the upper bounds, if any. */
2537 if (max_op0 && max_op1)
2539 if (minus_p)
2541 wmax = wi::sub (max_op0, max_op1);
2543 /* Check for overflow. */
2544 if (wi::cmp (0, max_op1, sgn)
2545 != wi::cmp (wmax, max_op0, sgn))
2546 max_ovf = wi::cmp (max_op0, max_op1, sgn);
2548 else
2550 wmax = wi::add (max_op0, max_op1);
2552 if (wi::cmp (max_op1, 0, sgn)
2553 != wi::cmp (wmax, max_op0, sgn))
2554 max_ovf = wi::cmp (max_op0, wmax, sgn);
2557 else if (max_op0)
2558 wmax = max_op0;
2559 else if (max_op1)
2560 wmax = minus_p ? wi::neg (max_op1) : max_op1;
2561 else
2562 wmax = wi::shwi (0, prec);
2564 /* Check for type overflow. */
2565 if (min_ovf == 0)
2567 if (wi::cmp (wmin, type_min, sgn) == -1)
2568 min_ovf = -1;
2569 else if (wi::cmp (wmin, type_max, sgn) == 1)
2570 min_ovf = 1;
2572 if (max_ovf == 0)
2574 if (wi::cmp (wmax, type_min, sgn) == -1)
2575 max_ovf = -1;
2576 else if (wi::cmp (wmax, type_max, sgn) == 1)
2577 max_ovf = 1;
2580 /* If we have overflow for the constant part and the resulting
2581 range will be symbolic, drop to VR_VARYING. */
2582 if ((min_ovf && sym_min_op0 != sym_min_op1)
2583 || (max_ovf && sym_max_op0 != sym_max_op1))
2585 set_value_range_to_varying (vr);
2586 return;
2589 if (TYPE_OVERFLOW_WRAPS (expr_type))
2591 /* If overflow wraps, truncate the values and adjust the
2592 range kind and bounds appropriately. */
2593 wide_int tmin = wide_int::from (wmin, prec, sgn);
2594 wide_int tmax = wide_int::from (wmax, prec, sgn);
2595 if (min_ovf == max_ovf)
2597 /* No overflow or both overflow or underflow. The
2598 range kind stays VR_RANGE. */
2599 min = wide_int_to_tree (expr_type, tmin);
2600 max = wide_int_to_tree (expr_type, tmax);
2602 else if (min_ovf == -1 && max_ovf == 1)
2604 /* Underflow and overflow, drop to VR_VARYING. */
2605 set_value_range_to_varying (vr);
2606 return;
2608 else
2610 /* Min underflow or max overflow. The range kind
2611 changes to VR_ANTI_RANGE. */
2612 bool covers = false;
2613 wide_int tem = tmin;
2614 gcc_assert ((min_ovf == -1 && max_ovf == 0)
2615 || (max_ovf == 1 && min_ovf == 0));
2616 type = VR_ANTI_RANGE;
2617 tmin = tmax + 1;
2618 if (wi::cmp (tmin, tmax, sgn) < 0)
2619 covers = true;
2620 tmax = tem - 1;
2621 if (wi::cmp (tmax, tem, sgn) > 0)
2622 covers = true;
2623 /* If the anti-range would cover nothing, drop to varying.
2624 Likewise if the anti-range bounds are outside of the
2625 type's values. */
2626 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
2628 set_value_range_to_varying (vr);
2629 return;
2631 min = wide_int_to_tree (expr_type, tmin);
2632 max = wide_int_to_tree (expr_type, tmax);
2635 else
2637 /* If overflow does not wrap, saturate to the type's min/max
2638 value. */
2639 if (min_ovf == -1)
2641 if (needs_overflow_infinity (expr_type)
2642 && supports_overflow_infinity (expr_type))
2643 min = negative_overflow_infinity (expr_type);
2644 else
2645 min = wide_int_to_tree (expr_type, type_min);
2647 else if (min_ovf == 1)
2649 if (needs_overflow_infinity (expr_type)
2650 && supports_overflow_infinity (expr_type))
2651 min = positive_overflow_infinity (expr_type);
2652 else
2653 min = wide_int_to_tree (expr_type, type_max);
2655 else
2656 min = wide_int_to_tree (expr_type, wmin);
2658 if (max_ovf == -1)
2660 if (needs_overflow_infinity (expr_type)
2661 && supports_overflow_infinity (expr_type))
2662 max = negative_overflow_infinity (expr_type);
2663 else
2664 max = wide_int_to_tree (expr_type, type_min);
2666 else if (max_ovf == 1)
2668 if (needs_overflow_infinity (expr_type)
2669 && supports_overflow_infinity (expr_type))
2670 max = positive_overflow_infinity (expr_type);
2671 else
2672 max = wide_int_to_tree (expr_type, type_max);
2674 else
2675 max = wide_int_to_tree (expr_type, wmax);
2678 if (needs_overflow_infinity (expr_type)
2679 && supports_overflow_infinity (expr_type))
2681 if ((min_op0 && is_negative_overflow_infinity (min_op0))
2682 || (min_op1
2683 && (minus_p
2684 ? is_positive_overflow_infinity (min_op1)
2685 : is_negative_overflow_infinity (min_op1))))
2686 min = negative_overflow_infinity (expr_type);
2687 if ((max_op0 && is_positive_overflow_infinity (max_op0))
2688 || (max_op1
2689 && (minus_p
2690 ? is_negative_overflow_infinity (max_op1)
2691 : is_positive_overflow_infinity (max_op1))))
2692 max = positive_overflow_infinity (expr_type);
2695 /* If the result lower bound is constant, we're done;
2696 otherwise, build the symbolic lower bound. */
2697 if (sym_min_op0 == sym_min_op1)
2699 else if (sym_min_op0)
2700 min = build_symbolic_expr (expr_type, sym_min_op0,
2701 neg_min_op0, min);
2702 else if (sym_min_op1)
2703 min = build_symbolic_expr (expr_type, sym_min_op1,
2704 neg_min_op1 ^ minus_p, min);
2706 /* Likewise for the upper bound. */
2707 if (sym_max_op0 == sym_max_op1)
2709 else if (sym_max_op0)
2710 max = build_symbolic_expr (expr_type, sym_max_op0,
2711 neg_max_op0, max);
2712 else if (sym_max_op1)
2713 max = build_symbolic_expr (expr_type, sym_max_op1,
2714 neg_max_op1 ^ minus_p, max);
2716 else
2718 /* For other cases, for example if we have a PLUS_EXPR with two
2719 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
2720 to compute a precise range for such a case.
2721 ??? General even mixed range kind operations can be expressed
2722 by for example transforming ~[3, 5] + [1, 2] to range-only
2723 operations and a union primitive:
2724 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
2725 [-INF+1, 4] U [6, +INF(OVF)]
2726 though usually the union is not exactly representable with
2727 a single range or anti-range as the above is
2728 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
2729 but one could use a scheme similar to equivalences for this. */
2730 set_value_range_to_varying (vr);
2731 return;
2734 else if (code == MIN_EXPR
2735 || code == MAX_EXPR)
2737 if (vr0.type == VR_RANGE
2738 && !symbolic_range_p (&vr0))
2740 type = VR_RANGE;
2741 if (vr1.type == VR_RANGE
2742 && !symbolic_range_p (&vr1))
2744 /* For operations that make the resulting range directly
2745 proportional to the original ranges, apply the operation to
2746 the same end of each range. */
2747 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2748 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2750 else if (code == MIN_EXPR)
2752 min = vrp_val_min (expr_type);
2753 max = vr0.max;
2755 else if (code == MAX_EXPR)
2757 min = vr0.min;
2758 max = vrp_val_max (expr_type);
2761 else if (vr1.type == VR_RANGE
2762 && !symbolic_range_p (&vr1))
2764 type = VR_RANGE;
2765 if (code == MIN_EXPR)
2767 min = vrp_val_min (expr_type);
2768 max = vr1.max;
2770 else if (code == MAX_EXPR)
2772 min = vr1.min;
2773 max = vrp_val_max (expr_type);
2776 else
2778 set_value_range_to_varying (vr);
2779 return;
2782 else if (code == MULT_EXPR)
2784 /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
2785 drop to varying. This test requires 2*prec bits if both
2786 operands are signed and 2*prec + 2 bits if either is not. */
2788 signop sign = TYPE_SIGN (expr_type);
2789 unsigned int prec = TYPE_PRECISION (expr_type);
2791 if (range_int_cst_p (&vr0)
2792 && range_int_cst_p (&vr1)
2793 && TYPE_OVERFLOW_WRAPS (expr_type))
2795 typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
2796 typedef generic_wide_int
2797 <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
2798 vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
2799 vrp_int size = sizem1 + 1;
2801 /* Extend the values using the sign of the result to PREC2.
2802 From here on out, everything is just signed math no matter
2803 what the input types were. */
2804 vrp_int min0 = vrp_int_cst (vr0.min);
2805 vrp_int max0 = vrp_int_cst (vr0.max);
2806 vrp_int min1 = vrp_int_cst (vr1.min);
2807 vrp_int max1 = vrp_int_cst (vr1.max);
2808 /* Canonicalize the intervals. */
2809 if (sign == UNSIGNED)
2811 if (wi::ltu_p (size, min0 + max0))
2813 min0 -= size;
2814 max0 -= size;
2817 if (wi::ltu_p (size, min1 + max1))
2819 min1 -= size;
2820 max1 -= size;
2824 vrp_int prod0 = min0 * min1;
2825 vrp_int prod1 = min0 * max1;
2826 vrp_int prod2 = max0 * min1;
2827 vrp_int prod3 = max0 * max1;
2829 /* Sort the 4 products so that min is in prod0 and max is in
2830 prod3. */
2831 /* min0min1 > max0max1 */
2832 if (wi::gts_p (prod0, prod3))
2833 std::swap (prod0, prod3);
2835 /* min0max1 > max0min1 */
2836 if (wi::gts_p (prod1, prod2))
2837 std::swap (prod1, prod2);
2839 if (wi::gts_p (prod0, prod1))
2840 std::swap (prod0, prod1);
2842 if (wi::gts_p (prod2, prod3))
2843 std::swap (prod2, prod3);
2845 /* diff = max - min. */
2846 prod2 = prod3 - prod0;
2847 if (wi::geu_p (prod2, sizem1))
2849 /* the range covers all values. */
2850 set_value_range_to_varying (vr);
2851 return;
2854 /* The following should handle the wrapping and selecting
2855 VR_ANTI_RANGE for us. */
2856 min = wide_int_to_tree (expr_type, prod0);
2857 max = wide_int_to_tree (expr_type, prod3);
2858 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
2859 return;
2862 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2863 drop to VR_VARYING. It would take more effort to compute a
2864 precise range for such a case. For example, if we have
2865 op0 == 65536 and op1 == 65536 with their ranges both being
2866 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2867 we cannot claim that the product is in ~[0,0]. Note that we
2868 are guaranteed to have vr0.type == vr1.type at this
2869 point. */
2870 if (vr0.type == VR_ANTI_RANGE
2871 && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2873 set_value_range_to_varying (vr);
2874 return;
2877 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2878 return;
2880 else if (code == RSHIFT_EXPR
2881 || code == LSHIFT_EXPR)
2883 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2884 then drop to VR_VARYING. Outside of this range we get undefined
2885 behavior from the shift operation. We cannot even trust
2886 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2887 shifts, and the operation at the tree level may be widened. */
2888 if (range_int_cst_p (&vr1)
2889 && compare_tree_int (vr1.min, 0) >= 0
2890 && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
2892 if (code == RSHIFT_EXPR)
2894 /* Even if vr0 is VARYING or otherwise not usable, we can derive
2895 useful ranges just from the shift count. E.g.
2896 x >> 63 for signed 64-bit x is always [-1, 0]. */
2897 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2899 vr0.type = type = VR_RANGE;
2900 vr0.min = vrp_val_min (expr_type);
2901 vr0.max = vrp_val_max (expr_type);
2903 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2904 return;
2906 /* We can map lshifts by constants to MULT_EXPR handling. */
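/* E.g. [1, 4] << 3 is evaluated as [1, 4] * [8, 8] = [8, 32]. */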
2907 else if (code == LSHIFT_EXPR
2908 && range_int_cst_singleton_p (&vr1))
2910 bool saved_flag_wrapv;
2911 value_range_t vr1p = VR_INITIALIZER;
2912 vr1p.type = VR_RANGE;
2913 vr1p.min = (wide_int_to_tree
2914 (expr_type,
2915 wi::set_bit_in_zero (tree_to_shwi (vr1.min),
2916 TYPE_PRECISION (expr_type))));
2917 vr1p.max = vr1p.min;
2918 /* We have to use a wrapping multiply though as signed overflow
2919 on lshifts is implementation defined in C89. */
2920 saved_flag_wrapv = flag_wrapv;
2921 flag_wrapv = 1;
2922 extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
2923 &vr0, &vr1p);
2924 flag_wrapv = saved_flag_wrapv;
2925 return;
2927 else if (code == LSHIFT_EXPR
2928 && range_int_cst_p (&vr0))
2930 int prec = TYPE_PRECISION (expr_type);
2931 int overflow_pos = prec;
2932 int bound_shift;
2933 wide_int low_bound, high_bound;
2934 bool uns = TYPE_UNSIGNED (expr_type);
2935 bool in_bounds = false;
2937 if (!uns)
2938 overflow_pos -= 1;
2940 bound_shift = overflow_pos - tree_to_shwi (vr1.max);
2941 /* If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
2942 overflow. However, for that to happen, vr1.max needs to be
2943 zero, which means vr1 is a singleton range of zero, which
2944 means it should be handled by the previous LSHIFT_EXPR
2945 if-clause. */
2946 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2947 wide_int complement = ~(bound - 1);
2949 if (uns)
2951 low_bound = bound;
2952 high_bound = complement;
2953 if (wi::ltu_p (vr0.max, low_bound))
2955 /* [5, 6] << [1, 2] == [10, 24]. */
2956 /* We're shifting out only zeroes, the value increases
2957 monotonically. */
2958 in_bounds = true;
2960 else if (wi::ltu_p (high_bound, vr0.min))
2962 /* [0xffffff00, 0xffffffff] << [1, 2]
2963 == [0xfffffc00, 0xfffffffe]. */
2964 /* We're shifting out only ones, the value decreases
2965 monotonically. */
2966 in_bounds = true;
2969 else
2971 /* [-1, 1] << [1, 2] == [-4, 4]. */
2972 low_bound = complement;
2973 high_bound = bound;
2974 if (wi::lts_p (vr0.max, high_bound)
2975 && wi::lts_p (low_bound, vr0.min))
2977 /* For non-negative numbers, we're shifting out only
2978 zeroes, the value increases monotonically.
2979 For negative numbers, we're shifting out only ones, the
2980 value decreases monotonically. */
2981 in_bounds = true;
2985 if (in_bounds)
2987 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2988 return;
2992 set_value_range_to_varying (vr);
2993 return;
2995 else if (code == TRUNC_DIV_EXPR
2996 || code == FLOOR_DIV_EXPR
2997 || code == CEIL_DIV_EXPR
2998 || code == EXACT_DIV_EXPR
2999 || code == ROUND_DIV_EXPR)
3001 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
3003 /* For division, if op1 has VR_RANGE but op0 does not, something
3004 can be deduced just from that range. Say [min, max] / [4, max]
3005 gives [min / 4, max / 4] range. */
3006 if (vr1.type == VR_RANGE
3007 && !symbolic_range_p (&vr1)
3008 && range_includes_zero_p (vr1.min, vr1.max) == 0)
3010 vr0.type = type = VR_RANGE;
3011 vr0.min = vrp_val_min (expr_type);
3012 vr0.max = vrp_val_max (expr_type);
3014 else
3016 set_value_range_to_varying (vr);
3017 return;
3021 /* For divisions, if flag_non_call_exceptions is true, we must
3022 not eliminate a division by zero. */
3023 if (cfun->can_throw_non_call_exceptions
3024 && (vr1.type != VR_RANGE
3025 || range_includes_zero_p (vr1.min, vr1.max) != 0))
3027 set_value_range_to_varying (vr);
3028 return;
3031 /* For divisions, if op0 is VR_RANGE, we can deduce a range
3032 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
3033 include 0. */
3034 if (vr0.type == VR_RANGE
3035 && (vr1.type != VR_RANGE
3036 || range_includes_zero_p (vr1.min, vr1.max) != 0))
3038 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
3039 int cmp;
3041 min = NULL_TREE;
3042 max = NULL_TREE;
3043 if (TYPE_UNSIGNED (expr_type)
3044 || value_range_nonnegative_p (&vr1))
3046 /* For unsigned division or when divisor is known
3047 to be non-negative, the range has to cover
3048 all numbers from 0 to max for positive max
3049 and all numbers from min to 0 for negative min. */
3050 cmp = compare_values (vr0.max, zero);
3051 if (cmp == -1)
3053 /* When vr0.max < 0, vr1.min != 0 and value
3054 ranges for dividend and divisor are available. */
3055 if (vr1.type == VR_RANGE
3056 && !symbolic_range_p (&vr0)
3057 && !symbolic_range_p (&vr1)
3058 && !compare_values (vr1.min, zero))
3059 max = int_const_binop (code, vr0.max, vr1.min);
3060 else
3061 max = zero;
3063 else if (cmp == 0 || cmp == 1)
3064 max = vr0.max;
3065 else
3066 type = VR_VARYING;
3067 cmp = compare_values (vr0.min, zero);
3068 if (cmp == 1)
3070 /* For unsigned division when value ranges for dividend
3071 and divisor are available. */
3072 if (vr1.type == VR_RANGE
3073 && !symbolic_range_p (&vr0)
3074 && !symbolic_range_p (&vr1))
3075 min = int_const_binop (code, vr0.min, vr1.max);
3076 else
3077 min = zero;
3079 else if (cmp == 0 || cmp == -1)
3080 min = vr0.min;
3081 else
3082 type = VR_VARYING;
3084 else
3086 /* Otherwise the range is -max .. max or min .. -min
3087 depending on which bound is bigger in absolute value,
3088 as the division can change the sign. */
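/* E.g. a dividend range of [-4, 10] results in [-10, 10] here,
since dividing by -1 can negate either bound. */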
3089 abs_extent_range (vr, vr0.min, vr0.max);
3090 return;
3092 if (type == VR_VARYING)
3094 set_value_range_to_varying (vr);
3095 return;
3098 else
3100 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
3101 return;
3104 else if (code == TRUNC_MOD_EXPR)
3106 if (range_is_null (&vr1))
3108 set_value_range_to_undefined (vr);
3109 return;
3111 /* ABS (A % B) < ABS (B) and either
3112 0 <= A % B <= A or A <= A % B <= 0. */
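/* E.g. for A in [-5, 3] and B in [2, 4] this computes
A % B in [-3, 3]. */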
3113 type = VR_RANGE;
3114 signop sgn = TYPE_SIGN (expr_type);
3115 unsigned int prec = TYPE_PRECISION (expr_type);
3116 wide_int wmin, wmax, tmp;
3117 wide_int zero = wi::zero (prec);
3118 wide_int one = wi::one (prec);
3119 if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
3121 wmax = wi::sub (vr1.max, one);
3122 if (sgn == SIGNED)
3124 tmp = wi::sub (wi::minus_one (prec), vr1.min);
3125 wmax = wi::smax (wmax, tmp);
3128 else
3130 wmax = wi::max_value (prec, sgn);
3131 /* X % INT_MIN may be INT_MAX. */
3132 if (sgn == UNSIGNED)
3133 wmax = wmax - one;
3136 if (sgn == UNSIGNED)
3137 wmin = zero;
3138 else
3140 wmin = -wmax;
3141 if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
3143 tmp = vr0.min;
3144 if (wi::gts_p (tmp, zero))
3145 tmp = zero;
3146 wmin = wi::smax (wmin, tmp);
3150 if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
3152 tmp = vr0.max;
3153 if (sgn == SIGNED && wi::neg_p (tmp))
3154 tmp = zero;
3155 wmax = wi::min (wmax, tmp, sgn);
3158 min = wide_int_to_tree (expr_type, wmin);
3159 max = wide_int_to_tree (expr_type, wmax);
3161 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
3163 bool int_cst_range0, int_cst_range1;
3164 wide_int may_be_nonzero0, may_be_nonzero1;
3165 wide_int must_be_nonzero0, must_be_nonzero1;
3167 int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
3168 &may_be_nonzero0,
3169 &must_be_nonzero0);
3170 int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
3171 &may_be_nonzero1,
3172 &must_be_nonzero1);
3174 type = VR_RANGE;
3175 if (code == BIT_AND_EXPR)
3177 min = wide_int_to_tree (expr_type,
3178 must_be_nonzero0 & must_be_nonzero1);
3179 wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
3180 /* If both input ranges contain only negative values we can
3181 truncate the result range maximum to the minimum of the
3182 input range maxima. */
3183 if (int_cst_range0 && int_cst_range1
3184 && tree_int_cst_sgn (vr0.max) < 0
3185 && tree_int_cst_sgn (vr1.max) < 0)
3187 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
3188 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
3190 /* If either input range contains only non-negative values
3191 we can truncate the result range maximum to the respective
3192 maximum of the input range. */
3193 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
3194 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
3195 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
3196 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
3197 max = wide_int_to_tree (expr_type, wmax);
3199 else if (code == BIT_IOR_EXPR)
3201 max = wide_int_to_tree (expr_type,
3202 may_be_nonzero0 | may_be_nonzero1);
3203 wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
3204 /* If the input ranges contain only positive values we can
3205 truncate the minimum of the result range to the maximum
3206 of the input range minima. */
3207 if (int_cst_range0 && int_cst_range1
3208 && tree_int_cst_sgn (vr0.min) >= 0
3209 && tree_int_cst_sgn (vr1.min) >= 0)
3211 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
3212 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
3214 /* If either input range contains only negative values
3215 we can truncate the minimum of the result range to the
3216 respective minimum of the input range. */
3217 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
3218 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
3219 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
3220 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
3221 min = wide_int_to_tree (expr_type, wmin);
3223 else if (code == BIT_XOR_EXPR)
3225 wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
3226 | ~(may_be_nonzero0 | may_be_nonzero1));
3227 wide_int result_one_bits
3228 = (must_be_nonzero0.and_not (may_be_nonzero1)
3229 | must_be_nonzero1.and_not (may_be_nonzero0));
3230 max = wide_int_to_tree (expr_type, ~result_zero_bits);
3231 min = wide_int_to_tree (expr_type, result_one_bits);
3232 /* If the range has all positive or all negative values the
3233 result is better than VARYING. */
3234 if (tree_int_cst_sgn (min) < 0
3235 || tree_int_cst_sgn (max) >= 0)
3237 else
3238 max = min = NULL_TREE;
3241 else
3242 gcc_unreachable ();
3244 /* If either MIN or MAX overflowed, then set the resulting range to
3245 VARYING. But we do accept an overflow infinity representation. */
3246 if (min == NULL_TREE
3247 || (TREE_OVERFLOW_P (min) && !is_overflow_infinity (min))
3248 || max == NULL_TREE
3249 || (TREE_OVERFLOW_P (max) && !is_overflow_infinity (max)))
3251 set_value_range_to_varying (vr);
3252 return;
3255 /* We punt if:
3256 1) [-INF, +INF]
3257 2) [-INF, +-INF(OVF)]
3258 3) [+-INF(OVF), +INF]
3259 4) [+-INF(OVF), +-INF(OVF)]
3260 We learn nothing when we have INF and INF(OVF) on both sides.
3261 Note that we do accept [-INF, -INF] and [+INF, +INF] without
3262 overflow. */
3263 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
3264 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
3266 set_value_range_to_varying (vr);
3267 return;
3270 cmp = compare_values (min, max);
3271 if (cmp == -2 || cmp == 1)
3273 /* If the new range has its limits swapped around (MIN > MAX),
3274 then the operation caused one of them to wrap around, mark
3275 the new range VARYING. */
3276 set_value_range_to_varying (vr);
3278 else
3279 set_value_range (vr, type, min, max, NULL);
3282 /* Extract range information from a binary expression OP0 CODE OP1 based on
3283 the ranges of each of its operands with resulting type EXPR_TYPE.
3284 The resulting range is stored in *VR. */
3286 static void
3287 extract_range_from_binary_expr (value_range_t *vr,
3288 enum tree_code code,
3289 tree expr_type, tree op0, tree op1)
3291 value_range_t vr0 = VR_INITIALIZER;
3292 value_range_t vr1 = VR_INITIALIZER;
3294 /* Get value ranges for each operand. For constant operands, create
3295 a new value range with the operand to simplify processing. */
3296 if (TREE_CODE (op0) == SSA_NAME)
3297 vr0 = *(get_value_range (op0));
3298 else if (is_gimple_min_invariant (op0))
3299 set_value_range_to_value (&vr0, op0, NULL);
3300 else
3301 set_value_range_to_varying (&vr0);
3303 if (TREE_CODE (op1) == SSA_NAME)
3304 vr1 = *(get_value_range (op1));
3305 else if (is_gimple_min_invariant (op1))
3306 set_value_range_to_value (&vr1, op1, NULL);
3307 else
3308 set_value_range_to_varying (&vr1);
3310 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
3312 /* Try harder for PLUS and MINUS if the range of one operand is symbolic
3313 and based on the other operand, for example if it was deduced from a
3314 symbolic comparison. When a bound of the range of the first operand
3315 is invariant, we set the corresponding bound of the new range to INF
3316 in order to avoid recursing on the range of the second operand. */
3317 if (vr->type == VR_VARYING
3318 && (code == PLUS_EXPR || code == MINUS_EXPR)
3319 && TREE_CODE (op1) == SSA_NAME
3320 && vr0.type == VR_RANGE
3321 && symbolic_range_based_on_p (&vr0, op1))
3323 const bool minus_p = (code == MINUS_EXPR);
3324 value_range_t n_vr1 = VR_INITIALIZER;
3326 /* Try with VR0 and [-INF, OP1]. */
3327 if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
3328 set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);
3330 /* Try with VR0 and [OP1, +INF]. */
3331 else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
3332 set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);
3334 /* Try with VR0 and [OP1, OP1]. */
3335 else
3336 set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);
3338 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
3341 if (vr->type == VR_VARYING
3342 && (code == PLUS_EXPR || code == MINUS_EXPR)
3343 && TREE_CODE (op0) == SSA_NAME
3344 && vr1.type == VR_RANGE
3345 && symbolic_range_based_on_p (&vr1, op0))
3347 const bool minus_p = (code == MINUS_EXPR);
3348 value_range_t n_vr0 = VR_INITIALIZER;
3350 /* Try with [-INF, OP0] and VR1. */
3351 if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
3352 set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);
3354 /* Try with [OP0, +INF] and VR1. */
3355 else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
3356 set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);
3358 /* Try with [OP0, OP0] and VR1. */
3359 else
3360 set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);
3362 extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
3366 /* Extract range information from a unary operation CODE based on
3367 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
3368 The resulting range is stored in *VR. */
3370 static void
3371 extract_range_from_unary_expr_1 (value_range_t *vr,
3372 enum tree_code code, tree type,
3373 value_range_t *vr0_, tree op0_type)
3375 value_range_t vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
3377 /* VRP only operates on integral and pointer types. */
3378 if (!(INTEGRAL_TYPE_P (op0_type)
3379 || POINTER_TYPE_P (op0_type))
3380 || !(INTEGRAL_TYPE_P (type)
3381 || POINTER_TYPE_P (type)))
3383 set_value_range_to_varying (vr);
3384 return;
3387 /* If VR0 is UNDEFINED, so is the result. */
3388 if (vr0.type == VR_UNDEFINED)
3390 set_value_range_to_undefined (vr);
3391 return;
3394 /* Handle operations that we express in terms of others. */
3395 if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
3397 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */
3398 copy_value_range (vr, &vr0);
3399 return;
3401 else if (code == NEGATE_EXPR)
3403 /* -X is simply 0 - X, so re-use existing code that also handles
3404 anti-ranges fine. */
3405 value_range_t zero = VR_INITIALIZER;
3406 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3407 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3408 return;
3410 else if (code == BIT_NOT_EXPR)
3412 /* ~X is simply -1 - X, so re-use existing code that also handles
3413 anti-ranges fine. */
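/* E.g. for X in [3, 7], ~X = -1 - X is in [-1 - 7, -1 - 3]
= [-8, -4]. */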
3414 value_range_t minusone = VR_INITIALIZER;
3415 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3416 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3417 type, &minusone, &vr0);
3418 return;
3421 /* Now canonicalize anti-ranges to ranges when they are not symbolic
3422 and express op ~[] as (op []') U (op []''). */
3423 if (vr0.type == VR_ANTI_RANGE
3424 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
3426 extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
3427 if (vrtem1.type != VR_UNDEFINED)
3429 value_range_t vrres = VR_INITIALIZER;
3430 extract_range_from_unary_expr_1 (&vrres, code, type,
3431 &vrtem1, op0_type);
3432 vrp_meet (vr, &vrres);
3434 return;
3437 if (CONVERT_EXPR_CODE_P (code))
3439 tree inner_type = op0_type;
3440 tree outer_type = type;
3442 /* If the expression evaluates to a pointer, we are only interested in
3443 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
3444 if (POINTER_TYPE_P (type))
3446 if (range_is_nonnull (&vr0))
3447 set_value_range_to_nonnull (vr, type);
3448 else if (range_is_null (&vr0))
3449 set_value_range_to_null (vr, type);
3450 else
3451 set_value_range_to_varying (vr);
3452 return;
3455 /* If VR0 is varying and we increase the type precision, assume
3456 a full range for the following transformation. */
3457 if (vr0.type == VR_VARYING
3458 && INTEGRAL_TYPE_P (inner_type)
3459 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
3461 vr0.type = VR_RANGE;
3462 vr0.min = TYPE_MIN_VALUE (inner_type);
3463 vr0.max = TYPE_MAX_VALUE (inner_type);
3466 /* If VR0 is a constant range or anti-range and the conversion is
3467 not truncating we can convert the min and max values and
3468 canonicalize the resulting range. Otherwise we can do the
3469 conversion if the size of the range is less than what the
3470 precision of the target type can represent and the range is
3471 not an anti-range. */
3472 if ((vr0.type == VR_RANGE
3473 || vr0.type == VR_ANTI_RANGE)
3474 && TREE_CODE (vr0.min) == INTEGER_CST
3475 && TREE_CODE (vr0.max) == INTEGER_CST
3476 && (!is_overflow_infinity (vr0.min)
3477 || (vr0.type == VR_RANGE
3478 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3479 && needs_overflow_infinity (outer_type)
3480 && supports_overflow_infinity (outer_type)))
3481 && (!is_overflow_infinity (vr0.max)
3482 || (vr0.type == VR_RANGE
3483 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3484 && needs_overflow_infinity (outer_type)
3485 && supports_overflow_infinity (outer_type)))
3486 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
3487 || (vr0.type == VR_RANGE
3488 && integer_zerop (int_const_binop (RSHIFT_EXPR,
3489 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
3490 size_int (TYPE_PRECISION (outer_type)))))))
3492 tree new_min, new_max;
3493 if (is_overflow_infinity (vr0.min))
3494 new_min = negative_overflow_infinity (outer_type);
3495 else
3496 new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
3497 0, false);
3498 if (is_overflow_infinity (vr0.max))
3499 new_max = positive_overflow_infinity (outer_type);
3500 else
3501 new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
3502 0, false);
3503 set_and_canonicalize_value_range (vr, vr0.type,
3504 new_min, new_max, NULL);
3505 return;
3508 set_value_range_to_varying (vr);
3509 return;
3511 else if (code == ABS_EXPR)
3513 tree min, max;
3514 int cmp;
3516 /* Pass through vr0 in the easy cases. */
3517 if (TYPE_UNSIGNED (type)
3518 || value_range_nonnegative_p (&vr0))
3520 copy_value_range (vr, &vr0);
3521 return;
3524 /* For the remaining varying or symbolic ranges we can't do anything
3525 useful. */
3526 if (vr0.type == VR_VARYING
3527 || symbolic_range_p (&vr0))
3529 set_value_range_to_varying (vr);
3530 return;
3533 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3534 useful range. */
3535 if (!TYPE_OVERFLOW_UNDEFINED (type)
3536 && ((vr0.type == VR_RANGE
3537 && vrp_val_is_min (vr0.min))
3538 || (vr0.type == VR_ANTI_RANGE
3539 && !vrp_val_is_min (vr0.min))))
3541 set_value_range_to_varying (vr);
3542 return;
3545 /* ABS_EXPR may flip the range around, if the original range
3546 included negative values. */
3547 if (is_overflow_infinity (vr0.min))
3548 min = positive_overflow_infinity (type);
3549 else if (!vrp_val_is_min (vr0.min))
3550 min = fold_unary_to_constant (code, type, vr0.min);
3551 else if (!needs_overflow_infinity (type))
3552 min = TYPE_MAX_VALUE (type);
3553 else if (supports_overflow_infinity (type))
3554 min = positive_overflow_infinity (type);
3555 else
3557 set_value_range_to_varying (vr);
3558 return;
3561 if (is_overflow_infinity (vr0.max))
3562 max = positive_overflow_infinity (type);
3563 else if (!vrp_val_is_min (vr0.max))
3564 max = fold_unary_to_constant (code, type, vr0.max);
3565 else if (!needs_overflow_infinity (type))
3566 max = TYPE_MAX_VALUE (type);
3567 else if (supports_overflow_infinity (type)
3568 /* We shouldn't generate [+INF, +INF] as set_value_range
3569 doesn't like this and ICEs. */
3570 && !is_positive_overflow_infinity (min))
3571 max = positive_overflow_infinity (type);
3572 else
3574 set_value_range_to_varying (vr);
3575 return;
3578 cmp = compare_values (min, max);
3580 /* If a VR_ANTI_RANGE contains zero, then we have
3581 ~[-INF, min(MIN, MAX)]. */
3582 if (vr0.type == VR_ANTI_RANGE)
3584 if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3586 /* Take the lower of the two values. */
3587 if (cmp != 1)
3588 max = min;
3590 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3591 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3592 flag_wrapv is set and the original anti-range doesn't include
3593 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3594 if (TYPE_OVERFLOW_WRAPS (type))
3596 tree type_min_value = TYPE_MIN_VALUE (type);
3598 min = (vr0.min != type_min_value
3599 ? int_const_binop (PLUS_EXPR, type_min_value,
3600 build_int_cst (TREE_TYPE (type_min_value), 1))
3601 : type_min_value);
3603 else
3605 if (overflow_infinity_range_p (&vr0))
3606 min = negative_overflow_infinity (type);
3607 else
3608 min = TYPE_MIN_VALUE (type);
3611 else
3613 /* All else has failed, so create the range [0, INF], even for
3614 flag_wrapv since TYPE_MIN_VALUE is in the original
3615 anti-range. */
3616 vr0.type = VR_RANGE;
3617 min = build_int_cst (type, 0);
3618 if (needs_overflow_infinity (type))
3620 if (supports_overflow_infinity (type))
3621 max = positive_overflow_infinity (type);
3622 else
3624 set_value_range_to_varying (vr);
3625 return;
3628 else
3629 max = TYPE_MAX_VALUE (type);
3633 /* If the range contains zero then we know that the minimum value in the
3634 range will be zero. */
3635 else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3637 if (cmp == 1)
3638 max = min;
3639 min = build_int_cst (type, 0);
3641 else
3643 /* If the range was reversed, swap MIN and MAX. */
3644 if (cmp == 1)
3645 std::swap (min, max);
3648 cmp = compare_values (min, max);
3649 if (cmp == -2 || cmp == 1)
3651 /* If the new range has its limits swapped around (MIN > MAX),
3652 then the operation caused one of them to wrap around, mark
3653 the new range VARYING. */
3654 set_value_range_to_varying (vr);
3656 else
3657 set_value_range (vr, vr0.type, min, max, NULL);
3658 return;
3661 /* For unhandled operations fall back to varying. */
3662 set_value_range_to_varying (vr);
3663 return;
3667 /* Extract range information from a unary expression CODE OP0 based on
3668 the range of its operand with resulting type TYPE.
3669 The resulting range is stored in *VR. */
3671 static void
3672 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
3673 tree type, tree op0)
3675 value_range_t vr0 = VR_INITIALIZER;
3677 /* Get value ranges for the operand. For constant operands, create
3678 a new value range with the operand to simplify processing. */
3679 if (TREE_CODE (op0) == SSA_NAME)
3680 vr0 = *(get_value_range (op0));
3681 else if (is_gimple_min_invariant (op0))
3682 set_value_range_to_value (&vr0, op0, NULL);
3683 else
3684 set_value_range_to_varying (&vr0);
3686 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3690 /* Extract range information from a conditional expression STMT based on
3691 the ranges of each of its operands and the expression code. */
3693 static void
3694 extract_range_from_cond_expr (value_range_t *vr, gassign *stmt)
3696 tree op0, op1;
3697 value_range_t vr0 = VR_INITIALIZER;
3698 value_range_t vr1 = VR_INITIALIZER;
3700 /* Get value ranges for each operand. For constant operands, create
3701 a new value range with the operand to simplify processing. */
3702 op0 = gimple_assign_rhs2 (stmt);
3703 if (TREE_CODE (op0) == SSA_NAME)
3704 vr0 = *(get_value_range (op0));
3705 else if (is_gimple_min_invariant (op0))
3706 set_value_range_to_value (&vr0, op0, NULL);
3707 else
3708 set_value_range_to_varying (&vr0);
3710 op1 = gimple_assign_rhs3 (stmt);
3711 if (TREE_CODE (op1) == SSA_NAME)
3712 vr1 = *(get_value_range (op1));
3713 else if (is_gimple_min_invariant (op1))
3714 set_value_range_to_value (&vr1, op1, NULL);
3715 else
3716 set_value_range_to_varying (&vr1);
3718 /* The resulting value range is the union of the operand ranges. */
3719 copy_value_range (vr, &vr0);
3720 vrp_meet (vr, &vr1);
3724 /* Extract range information from a comparison expression EXPR based
3725 on the range of its operand and the expression code. */
3727 static void
3728 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3729 tree type, tree op0, tree op1)
3731 bool sop = false;
3732 tree val;
3734 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3735 NULL);
3737 /* A disadvantage of using a special infinity as an overflow
3738 representation is that we lose the ability to record overflow
3739 when we don't have an infinity. So we have to ignore a result
3740 which relies on overflow. */
3742 if (val && !is_overflow_infinity (val) && !sop)
3744 /* Since this expression was found on the RHS of an assignment,
3745 its type may be different from _Bool. Convert VAL to EXPR's
3746 type. */
3747 val = fold_convert (type, val);
3748 if (is_gimple_min_invariant (val))
3749 set_value_range_to_value (vr, val, vr->equiv);
3750 else
3751 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3753 else
3754 /* The result of a comparison is always true or false. */
3755 set_value_range_to_truthvalue (vr, type);
3758 /* Helper function for simplify_internal_call_using_ranges and
3759 extract_range_basic. Return true if OP0 SUBCODE OP1 for
3760 SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
3761 always overflow. Set *OVF to true if it is known to always
3762 overflow. */
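/* For instance, with an 8-bit unsigned TYPE, OP0 in [200, 250] and
OP1 in [100, 120], a PLUS_EXPR overflows for every pair of values,
so this returns true with *OVF set to true. */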
3764 static bool
3765 check_for_binary_op_overflow (enum tree_code subcode, tree type,
3766 tree op0, tree op1, bool *ovf)
3768 value_range_t vr0 = VR_INITIALIZER;
3769 value_range_t vr1 = VR_INITIALIZER;
3770 if (TREE_CODE (op0) == SSA_NAME)
3771 vr0 = *get_value_range (op0);
3772 else if (TREE_CODE (op0) == INTEGER_CST)
3773 set_value_range_to_value (&vr0, op0, NULL);
3774 else
3775 set_value_range_to_varying (&vr0);
3777 if (TREE_CODE (op1) == SSA_NAME)
3778 vr1 = *get_value_range (op1);
3779 else if (TREE_CODE (op1) == INTEGER_CST)
3780 set_value_range_to_value (&vr1, op1, NULL);
3781 else
3782 set_value_range_to_varying (&vr1);
3784 if (!range_int_cst_p (&vr0)
3785 || TREE_OVERFLOW (vr0.min)
3786 || TREE_OVERFLOW (vr0.max))
3788 vr0.min = vrp_val_min (TREE_TYPE (op0));
3789 vr0.max = vrp_val_max (TREE_TYPE (op0));
3791 if (!range_int_cst_p (&vr1)
3792 || TREE_OVERFLOW (vr1.min)
3793 || TREE_OVERFLOW (vr1.max))
3795 vr1.min = vrp_val_min (TREE_TYPE (op1));
3796 vr1.max = vrp_val_max (TREE_TYPE (op1));
3798 *ovf = arith_overflowed_p (subcode, type, vr0.min,
3799 subcode == MINUS_EXPR ? vr1.max : vr1.min);
3800 if (arith_overflowed_p (subcode, type, vr0.max,
3801 subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
3802 return false;
3803 if (subcode == MULT_EXPR)
3805 if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
3806 || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
3807 return false;
3809 if (*ovf)
3811 /* So far we found that there is an overflow on the boundaries.
3812 That doesn't prove that there is an overflow for all values
3813 in between the boundaries. For that compute widest_int range
3814 of the result and see if it doesn't overlap the range of
3815 type. */
3816 widest_int wmin, wmax;
3817 widest_int w[4];
3818 int i;
3819 w[0] = wi::to_widest (vr0.min);
3820 w[1] = wi::to_widest (vr0.max);
3821 w[2] = wi::to_widest (vr1.min);
3822 w[3] = wi::to_widest (vr1.max);
3823 for (i = 0; i < 4; i++)
3825 widest_int wt;
3826 switch (subcode)
3828 case PLUS_EXPR:
3829 wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
3830 break;
3831 case MINUS_EXPR:
3832 wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
3833 break;
3834 case MULT_EXPR:
3835 wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
3836 break;
3837 default:
3838 gcc_unreachable ();
3840 if (i == 0)
3842 wmin = wt;
3843 wmax = wt;
3845 else
3847 wmin = wi::smin (wmin, wt);
3848 wmax = wi::smax (wmax, wt);
3851 /* The result of op0 CODE op1 is known to be in range
3852 [wmin, wmax]. */
3853 widest_int wtmin = wi::to_widest (vrp_val_min (type));
3854 widest_int wtmax = wi::to_widest (vrp_val_max (type));
3855 /* If all values in [wmin, wmax] are smaller than
3856 [wtmin, wtmax] or all are larger than [wtmin, wtmax],
3857 the arithmetic operation will always overflow. */
3858 if (wi::lts_p (wmax, wtmin) || wi::gts_p (wmin, wtmax))
3859 return true;
3860 return false;
3862 return true;
3865 /* Try to derive a nonnegative or nonzero range out of STMT relying
3866 primarily on generic routines in fold in conjunction with range data.
3867 Store the result in *VR.  */
3869 static void
3870 extract_range_basic (value_range_t *vr, gimple *stmt)
3872 bool sop = false;
3873 tree type = gimple_expr_type (stmt);
3875 if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
3877 tree fndecl = gimple_call_fndecl (stmt), arg;
3878 int mini, maxi, zerov = 0, prec;
3880 switch (DECL_FUNCTION_CODE (fndecl))
3882 case BUILT_IN_CONSTANT_P:
3883 /* If the call is __builtin_constant_p and the argument is a
3884 function parameter resolve it to false. This avoids bogus
3885 array bound warnings.
3886 ??? We could do this as early as inlining is finished. */
3887 arg = gimple_call_arg (stmt, 0);
3888 if (TREE_CODE (arg) == SSA_NAME
3889 && SSA_NAME_IS_DEFAULT_DEF (arg)
3890 && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL)
3892 set_value_range_to_null (vr, type);
3893 return;
3895 break;
3896 /* Both __builtin_ffs* and __builtin_popcount return
3897 [0, prec]. */
3898 CASE_INT_FN (BUILT_IN_FFS):
3899 CASE_INT_FN (BUILT_IN_POPCOUNT):
3900 arg = gimple_call_arg (stmt, 0);
3901 prec = TYPE_PRECISION (TREE_TYPE (arg));
3902 mini = 0;
3903 maxi = prec;
3904 if (TREE_CODE (arg) == SSA_NAME)
3906 value_range_t *vr0 = get_value_range (arg);
3907 /* If arg is non-zero, then ffs or popcount
3908 are non-zero. */
3909 if (((vr0->type == VR_RANGE
3910 && range_includes_zero_p (vr0->min, vr0->max) == 0)
3911 || (vr0->type == VR_ANTI_RANGE
3912 && range_includes_zero_p (vr0->min, vr0->max) == 1))
3913 && !is_overflow_infinity (vr0->min)
3914 && !is_overflow_infinity (vr0->max))
3915 mini = 1;
3916 /* If some high bits are known to be zero,
3917 we can decrease the maximum. */
3918 if (vr0->type == VR_RANGE
3919 && TREE_CODE (vr0->max) == INTEGER_CST
3920 && !operand_less_p (vr0->min,
3921 build_zero_cst (TREE_TYPE (vr0->min)))
3922 && !is_overflow_infinity (vr0->max))
3923 maxi = tree_floor_log2 (vr0->max) + 1;
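/* For example, a 32-bit argument known to lie in [1, 7] yields
   mini = 1 and maxi = tree_floor_log2 (7) + 1 = 3.  */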
3925 goto bitop_builtin;
3926 /* __builtin_parity* returns [0, 1]. */
3927 CASE_INT_FN (BUILT_IN_PARITY):
3928 mini = 0;
3929 maxi = 1;
3930 goto bitop_builtin;
3931 /* __builtin_c[lt]z* return [0, prec-1], except when the argument
3932 is 0, in which case the behavior is undefined.
3933 On many targets where the CLZ RTL or optab value is defined
3934 for 0, the value is prec, so include that in the range
3935 by default. */
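/* For example, a 32-bit argument known to lie in [16, 255] gives
   clz in [31 - tree_floor_log2 (255), 31 - tree_floor_log2 (16)]
   = [24, 27].  */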
3936 CASE_INT_FN (BUILT_IN_CLZ):
3937 arg = gimple_call_arg (stmt, 0);
3938 prec = TYPE_PRECISION (TREE_TYPE (arg));
3939 mini = 0;
3940 maxi = prec;
3941 if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
3942 != CODE_FOR_nothing
3943 && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3944 zerov)
3945 /* Handle only the single common value. */
3946 && zerov != prec)
3947 /* Magic value to give up, unless vr0 proves
3948 arg is non-zero. */
3949 mini = -2;
3950 if (TREE_CODE (arg) == SSA_NAME)
3952 value_range_t *vr0 = get_value_range (arg);
3953 /* From clz of VR_RANGE minimum we can compute
3954 result maximum. */
3955 if (vr0->type == VR_RANGE
3956 && TREE_CODE (vr0->min) == INTEGER_CST
3957 && !is_overflow_infinity (vr0->min))
3959 maxi = prec - 1 - tree_floor_log2 (vr0->min);
3960 if (maxi != prec)
3961 mini = 0;
3963 else if (vr0->type == VR_ANTI_RANGE
3964 && integer_zerop (vr0->min)
3965 && !is_overflow_infinity (vr0->min))
3967 maxi = prec - 1;
3968 mini = 0;
3970 if (mini == -2)
3971 break;
3972 /* From clz of VR_RANGE maximum we can compute
3973 result minimum. */
3974 if (vr0->type == VR_RANGE
3975 && TREE_CODE (vr0->max) == INTEGER_CST
3976 && !is_overflow_infinity (vr0->max))
3978 mini = prec - 1 - tree_floor_log2 (vr0->max);
3979 if (mini == prec)
3980 break;
3983 if (mini == -2)
3984 break;
3985 goto bitop_builtin;
3986 /* __builtin_ctz* return [0, prec-1], except when the argument
3987 is 0, in which case the behavior is undefined.
3988 If there is a ctz optab for this mode and
3989 CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
3990 otherwise just assume 0 won't be seen. */
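/* For example, a 32-bit argument known to lie in [1, 12] gives
   ctz in [0, tree_floor_log2 (12)] = [0, 3].  */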
3991 CASE_INT_FN (BUILT_IN_CTZ):
3992 arg = gimple_call_arg (stmt, 0);
3993 prec = TYPE_PRECISION (TREE_TYPE (arg));
3994 mini = 0;
3995 maxi = prec - 1;
3996 if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
3997 != CODE_FOR_nothing
3998 && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3999 zerov))
4001 /* Handle only the two common values. */
4002 if (zerov == -1)
4003 mini = -1;
4004 else if (zerov == prec)
4005 maxi = prec;
4006 else
4007 /* Magic value to give up, unless vr0 proves
4008 arg is non-zero. */
4009 mini = -2;
4011 if (TREE_CODE (arg) == SSA_NAME)
4013 value_range_t *vr0 = get_value_range (arg);
4014 /* If arg is non-zero, then use [0, prec - 1]. */
4015 if (((vr0->type == VR_RANGE
4016 && integer_nonzerop (vr0->min))
4017 || (vr0->type == VR_ANTI_RANGE
4018 && integer_zerop (vr0->min)))
4019 && !is_overflow_infinity (vr0->min))
4021 mini = 0;
4022 maxi = prec - 1;
4024 /* If some high bits are known to be zero,
4025 we can decrease the result maximum. */
4026 if (vr0->type == VR_RANGE
4027 && TREE_CODE (vr0->max) == INTEGER_CST
4028 && !is_overflow_infinity (vr0->max))
4030 maxi = tree_floor_log2 (vr0->max);
4031 /* For vr0 [0, 0] give up. */
4032 if (maxi == -1)
4033 break;
4036 if (mini == -2)
4037 break;
4038 goto bitop_builtin;
4039 /* __builtin_clrsb* returns [0, prec-1]. */
4040 CASE_INT_FN (BUILT_IN_CLRSB):
4041 arg = gimple_call_arg (stmt, 0);
4042 prec = TYPE_PRECISION (TREE_TYPE (arg));
4043 mini = 0;
4044 maxi = prec - 1;
4045 goto bitop_builtin;
4046 bitop_builtin:
4047 set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
4048 build_int_cst (type, maxi), NULL);
4049 return;
4050 default:
4051 break;
4054 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
4056 enum tree_code subcode = ERROR_MARK;
4057 switch (gimple_call_internal_fn (stmt))
4059 case IFN_UBSAN_CHECK_ADD:
4060 subcode = PLUS_EXPR;
4061 break;
4062 case IFN_UBSAN_CHECK_SUB:
4063 subcode = MINUS_EXPR;
4064 break;
4065 case IFN_UBSAN_CHECK_MUL:
4066 subcode = MULT_EXPR;
4067 break;
4068 default:
4069 break;
4071 if (subcode != ERROR_MARK)
4073 bool saved_flag_wrapv = flag_wrapv;
4074 /* Pretend the arithmetic is wrapping.  If there is
4075 any overflow, we'll complain, but we will actually do a
4076 wrapping operation. */
4077 flag_wrapv = 1;
4078 extract_range_from_binary_expr (vr, subcode, type,
4079 gimple_call_arg (stmt, 0),
4080 gimple_call_arg (stmt, 1));
4081 flag_wrapv = saved_flag_wrapv;
4083 /* If vrp_valueize returned non-NULL for both arguments,
4084 this should already have been folded; if it was not, it
4085 was not folded because of overflow. Avoid removing the
4086 UBSAN_CHECK_* calls in that case. */
4087 if (vr->type == VR_RANGE
4088 && (vr->min == vr->max
4089 || operand_equal_p (vr->min, vr->max, 0)))
4090 set_value_range_to_varying (vr);
4091 return;
4094 /* Handle extraction of the two results (the result of the arithmetic and
4095 a flag indicating whether the arithmetic overflowed) from the
4096 {ADD,SUB,MUL}_OVERFLOW internal functions. */
4097 else if (is_gimple_assign (stmt)
4098 && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
4099 || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
4100 && INTEGRAL_TYPE_P (type))
4102 enum tree_code code = gimple_assign_rhs_code (stmt);
4103 tree op = gimple_assign_rhs1 (stmt);
4104 if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
4106 gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
4107 if (is_gimple_call (g) && gimple_call_internal_p (g))
4109 enum tree_code subcode = ERROR_MARK;
4110 switch (gimple_call_internal_fn (g))
4112 case IFN_ADD_OVERFLOW:
4113 subcode = PLUS_EXPR;
4114 break;
4115 case IFN_SUB_OVERFLOW:
4116 subcode = MINUS_EXPR;
4117 break;
4118 case IFN_MUL_OVERFLOW:
4119 subcode = MULT_EXPR;
4120 break;
4121 default:
4122 break;
4124 if (subcode != ERROR_MARK)
4126 tree op0 = gimple_call_arg (g, 0);
4127 tree op1 = gimple_call_arg (g, 1);
4128 if (code == IMAGPART_EXPR)
4130 bool ovf = false;
4131 if (check_for_binary_op_overflow (subcode, type,
4132 op0, op1, &ovf))
4133 set_value_range_to_value (vr,
4134 build_int_cst (type, ovf),
4135 NULL);
4136 else
4137 set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
4138 build_int_cst (type, 1), NULL);
4140 else if (types_compatible_p (type, TREE_TYPE (op0))
4141 && types_compatible_p (type, TREE_TYPE (op1)))
4143 bool saved_flag_wrapv = flag_wrapv;
4144 /* Pretend the arithmetic is wrapping.  If there is
4145 any overflow, IMAGPART_EXPR will be set. */
4146 flag_wrapv = 1;
4147 extract_range_from_binary_expr (vr, subcode, type,
4148 op0, op1);
4149 flag_wrapv = saved_flag_wrapv;
4151 else
4153 value_range_t vr0 = VR_INITIALIZER;
4154 value_range_t vr1 = VR_INITIALIZER;
4155 bool saved_flag_wrapv = flag_wrapv;
4156 /* Pretend the arithmetic is wrapping.  If there is
4157 any overflow, IMAGPART_EXPR will be set. */
4158 flag_wrapv = 1;
4159 extract_range_from_unary_expr (&vr0, NOP_EXPR,
4160 type, op0);
4161 extract_range_from_unary_expr (&vr1, NOP_EXPR,
4162 type, op1);
4163 extract_range_from_binary_expr_1 (vr, subcode, type,
4164 &vr0, &vr1);
4165 flag_wrapv = saved_flag_wrapv;
4167 return;
4172 if (INTEGRAL_TYPE_P (type)
4173 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
4174 set_value_range_to_nonnegative (vr, type,
4175 sop || stmt_overflow_infinity (stmt));
4176 else if (vrp_stmt_computes_nonzero (stmt, &sop)
4177 && !sop)
4178 set_value_range_to_nonnull (vr, type);
4179 else
4180 set_value_range_to_varying (vr);
4184 /* Try to compute a useful range out of assignment STMT and store it
4185 in *VR. */
4187 static void
4188 extract_range_from_assignment (value_range_t *vr, gassign *stmt)
4190 enum tree_code code = gimple_assign_rhs_code (stmt);
4192 if (code == ASSERT_EXPR)
4193 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
4194 else if (code == SSA_NAME)
4195 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
4196 else if (TREE_CODE_CLASS (code) == tcc_binary)
4197 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
4198 gimple_expr_type (stmt),
4199 gimple_assign_rhs1 (stmt),
4200 gimple_assign_rhs2 (stmt));
4201 else if (TREE_CODE_CLASS (code) == tcc_unary)
4202 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
4203 gimple_expr_type (stmt),
4204 gimple_assign_rhs1 (stmt));
4205 else if (code == COND_EXPR)
4206 extract_range_from_cond_expr (vr, stmt);
4207 else if (TREE_CODE_CLASS (code) == tcc_comparison)
4208 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
4209 gimple_expr_type (stmt),
4210 gimple_assign_rhs1 (stmt),
4211 gimple_assign_rhs2 (stmt));
4212 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
4213 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
4214 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
4215 else
4216 set_value_range_to_varying (vr);
4218 if (vr->type == VR_VARYING)
4219 extract_range_basic (vr, stmt);
4222 /* Given a range VR, a LOOP and a variable VAR, determine whether it
4223 would be profitable to adjust VR using scalar evolution information
4224 for VAR. If so, update VR with the new limits. */
4226 static void
4227 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
4228 gimple *stmt, tree var)
4230 tree init, step, chrec, tmin, tmax, min, max, type, tem;
4231 enum ev_direction dir;
4233 /* TODO. Don't adjust anti-ranges. An anti-range may provide
4234 better opportunities than a regular range, but I'm not sure. */
4235 if (vr->type == VR_ANTI_RANGE)
4236 return;
4238 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
4240 /* Like in PR19590, scev can return a constant function. */
4241 if (is_gimple_min_invariant (chrec))
4243 set_value_range_to_value (vr, chrec, vr->equiv);
4244 return;
4247 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
4248 return;
4250 init = initial_condition_in_loop_num (chrec, loop->num);
4251 tem = op_with_constant_singleton_value_range (init);
4252 if (tem)
4253 init = tem;
4254 step = evolution_part_in_loop_num (chrec, loop->num);
4255 tem = op_with_constant_singleton_value_range (step);
4256 if (tem)
4257 step = tem;
4259 /* If STEP is symbolic, we can't know whether INIT will be the
4260 minimum or maximum value in the range. Also, unless INIT is
4261 a simple expression, compare_values and possibly other functions
4262 in tree-vrp won't be able to handle it. */
4263 if (step == NULL_TREE
4264 || !is_gimple_min_invariant (step)
4265 || !valid_value_p (init))
4266 return;
4268 dir = scev_direction (chrec);
4269 if (/* Do not adjust ranges if we do not know whether the iv increases
4270 or decreases, ... */
4271 dir == EV_DIR_UNKNOWN
4272 /* ... or if it may wrap. */
4273 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
4274 true))
4275 return;
4277 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
4278 negative_overflow_infinity and positive_overflow_infinity,
4279 because we have concluded that the loop probably does not
4280 wrap. */
4282 type = TREE_TYPE (var);
4283 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
4284 tmin = lower_bound_in_type (type, type);
4285 else
4286 tmin = TYPE_MIN_VALUE (type);
4287 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
4288 tmax = upper_bound_in_type (type, type);
4289 else
4290 tmax = TYPE_MAX_VALUE (type);
4292 /* Try to use estimated number of iterations for the loop to constrain the
4293 final value in the evolution. */
4294 if (TREE_CODE (step) == INTEGER_CST
4295 && is_gimple_val (init)
4296 && (TREE_CODE (init) != SSA_NAME
4297 || get_value_range (init)->type == VR_RANGE))
4299 widest_int nit;
4301 /* We are only entering here for loop header PHI nodes, so using
4302 the number of latch executions is the correct thing to use. */
4303 if (max_loop_iterations (loop, &nit))
4305 value_range_t maxvr = VR_INITIALIZER;
4306 signop sgn = TYPE_SIGN (TREE_TYPE (step));
4307 bool overflow;
4309 widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
4310 &overflow);
4311 /* If the multiplication overflowed we can't do a meaningful
4312 adjustment. Likewise if the result doesn't fit in the type
4313 of the induction variable. For a signed type we have to
4314 check whether the result has the expected signedness, which
4315 is that of the step, as the number of iterations is unsigned. */
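/* For example, an induction variable with INIT = 0, STEP = 4 and at
   most 10 latch iterations gives wtmp = 40, so the evolution is
   bounded by INIT + 40 and, for an increasing variable with no prior
   range information, the range below becomes [0, 40].  */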
4316 if (!overflow
4317 && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
4318 && (sgn == UNSIGNED
4319 || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
4321 tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
4322 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
4323 TREE_TYPE (init), init, tem);
4324 /* Likewise if the addition did. */
4325 if (maxvr.type == VR_RANGE)
4327 tmin = maxvr.min;
4328 tmax = maxvr.max;
4334 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4336 min = tmin;
4337 max = tmax;
4339 /* For VARYING or UNDEFINED ranges, just about anything we get
4340 from scalar evolutions should be better. */
4342 if (dir == EV_DIR_DECREASES)
4343 max = init;
4344 else
4345 min = init;
4347 else if (vr->type == VR_RANGE)
4349 min = vr->min;
4350 max = vr->max;
4352 if (dir == EV_DIR_DECREASES)
4354 /* INIT is the maximum value. If INIT is lower than VR->MAX
4355 but no smaller than VR->MIN, set VR->MAX to INIT. */
4356 if (compare_values (init, max) == -1)
4357 max = init;
4359 /* According to the loop information, the variable does not
4360 overflow. If we think it does, probably because of an
4361 overflow due to arithmetic on a different INF value,
4362 reset now. */
4363 if (is_negative_overflow_infinity (min)
4364 || compare_values (min, tmin) == -1)
4365 min = tmin;
4368 else
4370 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
4371 if (compare_values (init, min) == 1)
4372 min = init;
4374 if (is_positive_overflow_infinity (max)
4375 || compare_values (tmax, max) == -1)
4376 max = tmax;
4379 else
4380 return;
4382 /* If we just created an invalid range with the minimum
4383 greater than the maximum, we fail conservatively.
4384 This should happen only in unreachable
4385 parts of code, or for invalid programs. */
4386 if (compare_values (min, max) == 1
4387 || (is_negative_overflow_infinity (min)
4388 && is_positive_overflow_infinity (max)))
4389 return;
4391 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
4395 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
4397 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
4398 all the values in the ranges.
4400 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
4402 - Return NULL_TREE if it is not always possible to determine the
4403 value of the comparison.
4405 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
4406 overflow infinity was used in the test. */
4409 static tree
4410 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
4411 bool *strict_overflow_p)
4413 /* VARYING or UNDEFINED ranges cannot be compared. */
4414 if (vr0->type == VR_VARYING
4415 || vr0->type == VR_UNDEFINED
4416 || vr1->type == VR_VARYING
4417 || vr1->type == VR_UNDEFINED)
4418 return NULL_TREE;
4420 /* Anti-ranges need to be handled separately. */
4421 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
4423 /* If both are anti-ranges, then we cannot compute any
4424 comparison. */
4425 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
4426 return NULL_TREE;
4428 /* These comparisons are never statically computable. */
4429 if (comp == GT_EXPR
4430 || comp == GE_EXPR
4431 || comp == LT_EXPR
4432 || comp == LE_EXPR)
4433 return NULL_TREE;
4435 /* Equality can be computed only between a range and an
4436 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
4437 if (vr0->type == VR_RANGE)
4439 /* To simplify processing, make VR0 the anti-range. */
4440 value_range_t *tmp = vr0;
4441 vr0 = vr1;
4442 vr1 = tmp;
4445 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
4447 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
4448 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
4449 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4451 return NULL_TREE;
4454 if (!usable_range_p (vr0, strict_overflow_p)
4455 || !usable_range_p (vr1, strict_overflow_p))
4456 return NULL_TREE;
4458 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
4459 operands around and change the comparison code. */
4460 if (comp == GT_EXPR || comp == GE_EXPR)
4462 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
4463 std::swap (vr0, vr1);
4466 if (comp == EQ_EXPR)
4468 /* Equality may only be computed if both ranges represent
4469 exactly one value. */
4470 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
4471 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
4473 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
4474 strict_overflow_p);
4475 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
4476 strict_overflow_p);
4477 if (cmp_min == 0 && cmp_max == 0)
4478 return boolean_true_node;
4479 else if (cmp_min != -2 && cmp_max != -2)
4480 return boolean_false_node;
4482 /* If [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] do not overlap, then V0 != V1. */
4483 else if (compare_values_warnv (vr0->min, vr1->max,
4484 strict_overflow_p) == 1
4485 || compare_values_warnv (vr1->min, vr0->max,
4486 strict_overflow_p) == 1)
4487 return boolean_false_node;
4489 return NULL_TREE;
4491 else if (comp == NE_EXPR)
4493 int cmp1, cmp2;
4495 /* If VR0 is completely to the left or completely to the right
4496 of VR1, they are always different. Notice that we need to
4497 make sure that both comparisons yield similar results to
4498 avoid comparing values that cannot be compared at
4499 compile-time. */
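/* For example, [1, 5] and [7, 9] are disjoint, so NE_EXPR is
   always true for them.  */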
4500 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4501 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4502 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
4503 return boolean_true_node;
4505 /* If VR0 and VR1 represent a single value and are identical,
4506 return false. */
4507 else if (compare_values_warnv (vr0->min, vr0->max,
4508 strict_overflow_p) == 0
4509 && compare_values_warnv (vr1->min, vr1->max,
4510 strict_overflow_p) == 0
4511 && compare_values_warnv (vr0->min, vr1->min,
4512 strict_overflow_p) == 0
4513 && compare_values_warnv (vr0->max, vr1->max,
4514 strict_overflow_p) == 0)
4515 return boolean_false_node;
4517 /* Otherwise, they may or may not be different. */
4518 else
4519 return NULL_TREE;
4521 else if (comp == LT_EXPR || comp == LE_EXPR)
4523 int tst;
4525 /* If VR0 is to the left of VR1, return true. */
4526 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4527 if ((comp == LT_EXPR && tst == -1)
4528 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4530 if (overflow_infinity_range_p (vr0)
4531 || overflow_infinity_range_p (vr1))
4532 *strict_overflow_p = true;
4533 return boolean_true_node;
4536 /* If VR0 is to the right of VR1, return false. */
4537 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4538 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4539 || (comp == LE_EXPR && tst == 1))
4541 if (overflow_infinity_range_p (vr0)
4542 || overflow_infinity_range_p (vr1))
4543 *strict_overflow_p = true;
4544 return boolean_false_node;
4547 /* Otherwise, we don't know. */
4548 return NULL_TREE;
4551 gcc_unreachable ();
4555 /* Given a value range VR, a value VAL and a comparison code COMP, return
4556 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
4557 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
4558 always returns false. Return NULL_TREE if it is not always
4559 possible to determine the value of the comparison. Also set
4560 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
4561 infinity was used in the test. */
4563 static tree
4564 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
4565 bool *strict_overflow_p)
4567 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4568 return NULL_TREE;
4570 /* Anti-ranges need to be handled separately. */
4571 if (vr->type == VR_ANTI_RANGE)
4573 /* For anti-ranges, the only predicates that we can compute at
4574 compile time are equality and inequality. */
4575 if (comp == GT_EXPR
4576 || comp == GE_EXPR
4577 || comp == LT_EXPR
4578 || comp == LE_EXPR)
4579 return NULL_TREE;
4581 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
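/* For example, ~[3, 7] != 5 is always true and ~[3, 7] == 5 is
   always false, because 5 is excluded from the anti-range.  */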
4582 if (value_inside_range (val, vr->min, vr->max) == 1)
4583 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4585 return NULL_TREE;
4588 if (!usable_range_p (vr, strict_overflow_p))
4589 return NULL_TREE;
4591 if (comp == EQ_EXPR)
4593 /* EQ_EXPR may only be computed if VR represents exactly
4594 one value. */
4595 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
4597 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
4598 if (cmp == 0)
4599 return boolean_true_node;
4600 else if (cmp == -1 || cmp == 1 || cmp == 2)
4601 return boolean_false_node;
4603 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
4604 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
4605 return boolean_false_node;
4607 return NULL_TREE;
4609 else if (comp == NE_EXPR)
4611 /* If VAL is not inside VR, then they are always different. */
4612 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
4613 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
4614 return boolean_true_node;
4616 /* If VR represents exactly one value equal to VAL, then return
4617 false. */
4618 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
4619 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
4620 return boolean_false_node;
4622 /* Otherwise, they may or may not be different. */
4623 return NULL_TREE;
4625 else if (comp == LT_EXPR || comp == LE_EXPR)
4627 int tst;
4629 /* If VR is to the left of VAL, return true. */
4630 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4631 if ((comp == LT_EXPR && tst == -1)
4632 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4634 if (overflow_infinity_range_p (vr))
4635 *strict_overflow_p = true;
4636 return boolean_true_node;
4639 /* If VR is to the right of VAL, return false. */
4640 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4641 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4642 || (comp == LE_EXPR && tst == 1))
4644 if (overflow_infinity_range_p (vr))
4645 *strict_overflow_p = true;
4646 return boolean_false_node;
4649 /* Otherwise, we don't know. */
4650 return NULL_TREE;
4652 else if (comp == GT_EXPR || comp == GE_EXPR)
4654 int tst;
4656 /* If VR is to the right of VAL, return true. */
4657 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4658 if ((comp == GT_EXPR && tst == 1)
4659 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
4661 if (overflow_infinity_range_p (vr))
4662 *strict_overflow_p = true;
4663 return boolean_true_node;
4666 /* If VR is to the left of VAL, return false. */
4667 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4668 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
4669 || (comp == GE_EXPR && tst == -1))
4671 if (overflow_infinity_range_p (vr))
4672 *strict_overflow_p = true;
4673 return boolean_false_node;
4676 /* Otherwise, we don't know. */
4677 return NULL_TREE;
4680 gcc_unreachable ();
4684 /* Debugging dumps. */
4686 void dump_value_range (FILE *, value_range_t *);
4687 void debug_value_range (value_range_t *);
4688 void dump_all_value_ranges (FILE *);
4689 void debug_all_value_ranges (void);
4690 void dump_vr_equiv (FILE *, bitmap);
4691 void debug_vr_equiv (bitmap);
4694 /* Dump value range VR to FILE. */
4696 void
4697 dump_value_range (FILE *file, value_range_t *vr)
4699 if (vr == NULL)
4700 fprintf (file, "[]");
4701 else if (vr->type == VR_UNDEFINED)
4702 fprintf (file, "UNDEFINED");
4703 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4705 tree type = TREE_TYPE (vr->min);
4707 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
4709 if (is_negative_overflow_infinity (vr->min))
4710 fprintf (file, "-INF(OVF)");
4711 else if (INTEGRAL_TYPE_P (type)
4712 && !TYPE_UNSIGNED (type)
4713 && vrp_val_is_min (vr->min))
4714 fprintf (file, "-INF");
4715 else
4716 print_generic_expr (file, vr->min, 0);
4718 fprintf (file, ", ");
4720 if (is_positive_overflow_infinity (vr->max))
4721 fprintf (file, "+INF(OVF)");
4722 else if (INTEGRAL_TYPE_P (type)
4723 && vrp_val_is_max (vr->max))
4724 fprintf (file, "+INF");
4725 else
4726 print_generic_expr (file, vr->max, 0);
4728 fprintf (file, "]");
4730 if (vr->equiv)
4732 bitmap_iterator bi;
4733 unsigned i, c = 0;
4735 fprintf (file, " EQUIVALENCES: { ");
4737 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4739 print_generic_expr (file, ssa_name (i), 0);
4740 fprintf (file, " ");
4741 c++;
4744 fprintf (file, "} (%u elements)", c);
4747 else if (vr->type == VR_VARYING)
4748 fprintf (file, "VARYING");
4749 else
4750 fprintf (file, "INVALID RANGE");
4754 /* Dump value range VR to stderr. */
4756 DEBUG_FUNCTION void
4757 debug_value_range (value_range_t *vr)
4759 dump_value_range (stderr, vr);
4760 fprintf (stderr, "\n");
4764 /* Dump value ranges of all SSA_NAMEs to FILE. */
4766 void
4767 dump_all_value_ranges (FILE *file)
4769 size_t i;
4771 for (i = 0; i < num_vr_values; i++)
4773 if (vr_value[i])
4775 print_generic_expr (file, ssa_name (i), 0);
4776 fprintf (file, ": ");
4777 dump_value_range (file, vr_value[i]);
4778 fprintf (file, "\n");
4782 fprintf (file, "\n");
4786 /* Dump all value ranges to stderr. */
4788 DEBUG_FUNCTION void
4789 debug_all_value_ranges (void)
4791 dump_all_value_ranges (stderr);
4795 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4796 create a new SSA name N and return the assertion assignment
4797 'N = ASSERT_EXPR <V, V OP W>'. */
4799 static gimple *
4800 build_assert_expr_for (tree cond, tree v)
4802 tree a;
4803 gassign *assertion;
4805 gcc_assert (TREE_CODE (v) == SSA_NAME
4806 && COMPARISON_CLASS_P (cond));
4808 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
4809 assertion = gimple_build_assign (NULL_TREE, a);
4811 /* The new ASSERT_EXPR creates a new SSA name that replaces the
4812 operand of the ASSERT_EXPR. Create it so that the new name and the old
4813 one are registered in the replacement table and we can fix the SSA web
4814 after adding all the ASSERT_EXPRs. */
4815 create_new_def_for (v, assertion, NULL);
4817 return assertion;
4821 /* Return true if STMT is a GIMPLE_COND whose comparison involves
4822 floating point values. */
4824 static inline bool
4825 fp_predicate (gimple *stmt)
4827 GIMPLE_CHECK (stmt, GIMPLE_COND);
4829 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4832 /* If the range of values taken by OP can be inferred after STMT executes,
4833 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4834 describes the inferred range. Return true if a range could be
4835 inferred. */
4837 static bool
4838 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
4840 *val_p = NULL_TREE;
4841 *comp_code_p = ERROR_MARK;
4843 /* Do not attempt to infer anything in names that flow through
4844 abnormal edges. */
4845 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4846 return false;
4848 /* Similarly, don't infer anything from statements that may throw
4849 exceptions. ??? Relax this requirement? */
4850 if (stmt_could_throw_p (stmt))
4851 return false;
4853 /* If STMT is the last statement of a basic block with no normal
4854 successors, there is no point inferring anything about any of its
4855 operands. We would not be able to find a proper insertion point
4856 for the assertion, anyway. */
4857 if (stmt_ends_bb_p (stmt))
4859 edge_iterator ei;
4860 edge e;
4862 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
4863 if (!(e->flags & EDGE_ABNORMAL))
4864 break;
4865 if (e == NULL)
4866 return false;
4869 if (infer_nonnull_range (stmt, op))
4871 *val_p = build_int_cst (TREE_TYPE (op), 0);
4872 *comp_code_p = NE_EXPR;
4873 return true;
4876 return false;
4880 void dump_asserts_for (FILE *, tree);
4881 void debug_asserts_for (tree);
4882 void dump_all_asserts (FILE *);
4883 void debug_all_asserts (void);
4885 /* Dump all the registered assertions for NAME to FILE. */
4887 void
4888 dump_asserts_for (FILE *file, tree name)
4890 assert_locus *loc;
4892 fprintf (file, "Assertions to be inserted for ");
4893 print_generic_expr (file, name, 0);
4894 fprintf (file, "\n");
4896 loc = asserts_for[SSA_NAME_VERSION (name)];
4897 while (loc)
4899 fprintf (file, "\t");
4900 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4901 fprintf (file, "\n\tBB #%d", loc->bb->index);
4902 if (loc->e)
4904 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4905 loc->e->dest->index);
4906 dump_edge_info (file, loc->e, dump_flags, 0);
4908 fprintf (file, "\n\tPREDICATE: ");
4909 print_generic_expr (file, name, 0);
4910 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
4911 print_generic_expr (file, loc->val, 0);
4912 fprintf (file, "\n\n");
4913 loc = loc->next;
4916 fprintf (file, "\n");
4920 /* Dump all the registered assertions for NAME to stderr. */
4922 DEBUG_FUNCTION void
4923 debug_asserts_for (tree name)
4925 dump_asserts_for (stderr, name);
4929 /* Dump all the registered assertions for all the names to FILE. */
4931 void
4932 dump_all_asserts (FILE *file)
4934 unsigned i;
4935 bitmap_iterator bi;
4937 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4938 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4939 dump_asserts_for (file, ssa_name (i));
4940 fprintf (file, "\n");
4944 /* Dump all the registered assertions for all the names to stderr. */
4946 DEBUG_FUNCTION void
4947 debug_all_asserts (void)
4949 dump_all_asserts (stderr);
4953 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4954 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4955 E->DEST, then register this location as a possible insertion point
4956 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4958 BB, E and SI provide the exact insertion point for the new
4959 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4960 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4961 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4962 must not be NULL. */
4964 static void
4965 register_new_assert_for (tree name, tree expr,
4966 enum tree_code comp_code,
4967 tree val,
4968 basic_block bb,
4969 edge e,
4970 gimple_stmt_iterator si)
4972 assert_locus *n, *loc, *last_loc;
4973 basic_block dest_bb;
4975 gcc_checking_assert (bb == NULL || e == NULL);
4977 if (e == NULL)
4978 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4979 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4981 /* Never build an assert comparing against an integer constant with
4982 TREE_OVERFLOW set. This confuses our undefined overflow warning
4983 machinery. */
4984 if (TREE_OVERFLOW_P (val))
4985 val = drop_tree_overflow (val);
4987 /* The new assertion A will be inserted at BB or E. We need to
4988 determine if the new location is dominated by a previously
4989 registered location for A. If we are doing an edge insertion,
4990 assume that A will be inserted at E->DEST. Note that this is not
4991 necessarily true.
4993 If E is a critical edge, it will be split. But even if E is
4994 split, the new block will dominate the same set of blocks that
4995 E->DEST dominates.
4997 The reverse, however, is not true: blocks dominated by E->DEST
4998 will not be dominated by the new block created to split E. So,
4999 if the insertion location is on a critical edge, we will not use
5000 the new location to move another assertion previously registered
5001 at a block dominated by E->DEST. */
5002 dest_bb = (bb) ? bb : e->dest;
5004 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
5005 VAL at a block dominating DEST_BB, then we don't need to insert a new
5006 one. Similarly, if the same assertion already exists at a block
5007 dominated by DEST_BB and the new location is not on a critical
5008 edge, then update the existing location for the assertion (i.e.,
5009 move the assertion up in the dominance tree).
5011 Note, this is implemented as a simple linked list because there
5012 should not be more than a handful of assertions registered per
5013 name. If this becomes a performance problem, a table hashed by
5014 COMP_CODE and VAL could be implemented. */
5015 loc = asserts_for[SSA_NAME_VERSION (name)];
5016 last_loc = loc;
5017 while (loc)
5019 if (loc->comp_code == comp_code
5020 && (loc->val == val
5021 || operand_equal_p (loc->val, val, 0))
5022 && (loc->expr == expr
5023 || operand_equal_p (loc->expr, expr, 0)))
5025 /* If E is not a critical edge and DEST_BB
5026 dominates the existing location for the assertion, move
5027 the assertion up in the dominance tree by updating its
5028 location information. */
5029 if ((e == NULL || !EDGE_CRITICAL_P (e))
5030 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
5032 loc->bb = dest_bb;
5033 loc->e = e;
5034 loc->si = si;
5035 return;
5039 /* Update the last node of the list and move to the next one. */
5040 last_loc = loc;
5041 loc = loc->next;
5044 /* If we didn't find an assertion already registered for
5045 NAME COMP_CODE VAL, add a new one at the end of the list of
5046 assertions associated with NAME. */
5047 n = XNEW (struct assert_locus);
5048 n->bb = dest_bb;
5049 n->e = e;
5050 n->si = si;
5051 n->comp_code = comp_code;
5052 n->val = val;
5053 n->expr = expr;
5054 n->next = NULL;
5056 if (last_loc)
5057 last_loc->next = n;
5058 else
5059 asserts_for[SSA_NAME_VERSION (name)] = n;
5061 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
5064 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
5065 Extract a suitable test code and value and store them into *CODE_P and
5066 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
5068 If no extraction was possible, return FALSE, otherwise return TRUE.
5070 If INVERT is true, then we invert the result stored into *CODE_P. */
5072 static bool
5073 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
5074 tree cond_op0, tree cond_op1,
5075 bool invert, enum tree_code *code_p,
5076 tree *val_p)
5078 enum tree_code comp_code;
5079 tree val;
5081 /* We have a comparison of the form NAME COMP VAL
5082 or VAL COMP NAME. */
5083 if (name == cond_op1)
5085 /* If the predicate is of the form VAL COMP NAME, flip
5086 COMP around because we need to register NAME as the
5087 first operand in the predicate. */
5088 comp_code = swap_tree_comparison (cond_code);
5089 val = cond_op0;
5091 else
5093 /* The comparison is of the form NAME COMP VAL, so the
5094 comparison code remains unchanged. */
5095 comp_code = cond_code;
5096 val = cond_op1;
5099 /* Invert the comparison code as necessary. */
5100 if (invert)
5101 comp_code = invert_tree_comparison (comp_code, 0);
5103 /* VRP does not handle float types. */
5104 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
5105 return false;
5107 /* Do not register always-false predicates.
5108 FIXME: this works around a limitation in fold() when dealing with
5109 enumerations. Given 'enum { N1, N2 } x;', fold will not
5110 fold 'if (x > N2)' to 'if (0)'. */
5111 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
5112 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
5114 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
5115 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
5117 if (comp_code == GT_EXPR
5118 && (!max
5119 || compare_values (val, max) == 0))
5120 return false;
5122 if (comp_code == LT_EXPR
5123 && (!min
5124 || compare_values (val, min) == 0))
5125 return false;
5127 *code_p = comp_code;
5128 *val_p = val;
5129 return true;
5132 /* Find the smallest RES such that RES > VAL && (RES & MASK) == RES, if any
5133 (otherwise return VAL). VAL and MASK must be zero-extended for
5134 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
5135 (to transform signed values into unsigned) and at the end xor
5136 SGNBIT back. */
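/* For example, with PREC = 8, VAL = 5 (0b00000101), MASK = 0b00001010
   and SGNBIT = 0, the smallest RES > VAL with (RES & MASK) == RES is
   8 (0b00001000).  */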
5138 static wide_int
5139 masked_increment (const wide_int &val_in, const wide_int &mask,
5140 const wide_int &sgnbit, unsigned int prec)
5142 wide_int bit = wi::one (prec), res;
5143 unsigned int i;
5145 wide_int val = val_in ^ sgnbit;
5146 for (i = 0; i < prec; i++, bit += bit)
5148 res = mask;
5149 if ((res & bit) == 0)
5150 continue;
5151 res = bit - 1;
5152 res = (val + bit).and_not (res);
5153 res &= mask;
5154 if (wi::gtu_p (res, val))
5155 return res ^ sgnbit;
5157 return val ^ sgnbit;
5160 /* Try to register an edge assertion for SSA name NAME on edge E for
5161 the condition COND contributing to the conditional jump pointed to by BSI.
5162 Invert the condition COND if INVERT is true. */
5164 static void
5165 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
5166 enum tree_code cond_code,
5167 tree cond_op0, tree cond_op1, bool invert)
5169 tree val;
5170 enum tree_code comp_code;
5172 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5173 cond_op0,
5174 cond_op1,
5175 invert, &comp_code, &val))
5176 return;
5178 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
5179 reachable from E. */
5180 if (live_on_edge (e, name)
5181 && !has_single_use (name))
5182 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
5184 /* In the case of NAME <= CST and NAME being defined as
5185 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
5186 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
5187 This catches range and anti-range tests. */
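/* For example, if NAME is defined as NAME = (unsigned) NAME2 + 3 and
   the condition is NAME <= 10, we can assert NAME2 >= -3 and
   NAME2 <= 7 via a single unsigned test on NAME2 + 3.  */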
5188 if ((comp_code == LE_EXPR
5189 || comp_code == GT_EXPR)
5190 && TREE_CODE (val) == INTEGER_CST
5191 && TYPE_UNSIGNED (TREE_TYPE (val)))
5193 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5194 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
5196 /* Extract CST2 from the (optional) addition. */
5197 if (is_gimple_assign (def_stmt)
5198 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
5200 name2 = gimple_assign_rhs1 (def_stmt);
5201 cst2 = gimple_assign_rhs2 (def_stmt);
5202 if (TREE_CODE (name2) == SSA_NAME
5203 && TREE_CODE (cst2) == INTEGER_CST)
5204 def_stmt = SSA_NAME_DEF_STMT (name2);
5207 /* Extract NAME2 from the (optional) sign-changing cast. */
5208 if (gimple_assign_cast_p (def_stmt))
5210 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
5211 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5212 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
5213 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
5214 name3 = gimple_assign_rhs1 (def_stmt);
5217 /* If name3 is used later, create an ASSERT_EXPR for it. */
5218 if (name3 != NULL_TREE
5219 && TREE_CODE (name3) == SSA_NAME
5220 && (cst2 == NULL_TREE
5221 || TREE_CODE (cst2) == INTEGER_CST)
5222 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
5223 && live_on_edge (e, name3)
5224 && !has_single_use (name3))
5226 tree tmp;
5228 /* Build an expression for the range test. */
5229 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
5230 if (cst2 != NULL_TREE)
5231 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5233 if (dump_file)
5235 fprintf (dump_file, "Adding assert for ");
5236 print_generic_expr (dump_file, name3, 0);
5237 fprintf (dump_file, " from ");
5238 print_generic_expr (dump_file, tmp, 0);
5239 fprintf (dump_file, "\n");
5242 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
5245 /* If name2 is used later, create an ASSERT_EXPR for it. */
5246 if (name2 != NULL_TREE
5247 && TREE_CODE (name2) == SSA_NAME
5248 && TREE_CODE (cst2) == INTEGER_CST
5249 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5250 && live_on_edge (e, name2)
5251 && !has_single_use (name2))
5253 tree tmp;
5255 /* Build an expression for the range test. */
5256 tmp = name2;
5257 if (TREE_TYPE (name) != TREE_TYPE (name2))
5258 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
5259 if (cst2 != NULL_TREE)
5260 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5262 if (dump_file)
5264 fprintf (dump_file, "Adding assert for ");
5265 print_generic_expr (dump_file, name2, 0);
5266 fprintf (dump_file, " from ");
5267 print_generic_expr (dump_file, tmp, 0);
5268 fprintf (dump_file, "\n");
5271 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
5275 /* In the case of post-increment/decrement tests like if (i++) ..., where
5276 the incremented/decremented value is used on the edge, the extra name we
5277 want to assert for is not on the def chain of the name compared. Instead
5278 it is in the set of use stmts.
5279 Similar cases happen for conversions that were simplified through
5280 fold_{sign_changed,widened}_comparison. */
5281 if ((comp_code == NE_EXPR
5282 || comp_code == EQ_EXPR)
5283 && TREE_CODE (val) == INTEGER_CST)
5285 imm_use_iterator ui;
5286 gimple *use_stmt;
5287 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
5289 if (!is_gimple_assign (use_stmt))
5290 continue;
5292 /* Cut off to use-stmts that are dominating the predecessor. */
5293 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
5294 continue;
5296 tree name2 = gimple_assign_lhs (use_stmt);
5297 if (TREE_CODE (name2) != SSA_NAME
5298 || !live_on_edge (e, name2))
5299 continue;
5301 enum tree_code code = gimple_assign_rhs_code (use_stmt);
5302 tree cst;
5303 if (code == PLUS_EXPR
5304 || code == MINUS_EXPR)
5306 cst = gimple_assign_rhs2 (use_stmt);
5307 if (TREE_CODE (cst) != INTEGER_CST)
5308 continue;
5309 cst = int_const_binop (code, val, cst);
5311 else if (CONVERT_EXPR_CODE_P (code))
5313 /* For truncating conversions we cannot record
5314 an inequality. */
5315 if (comp_code == NE_EXPR
5316 && (TYPE_PRECISION (TREE_TYPE (name2))
5317 < TYPE_PRECISION (TREE_TYPE (name))))
5318 continue;
5319 cst = fold_convert (TREE_TYPE (name2), val);
5321 else
5322 continue;
5324 if (TREE_OVERFLOW_P (cst))
5325 cst = drop_tree_overflow (cst);
5326 register_new_assert_for (name2, name2, comp_code, cst,
5327 NULL, e, bsi);
5331 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
5332 && TREE_CODE (val) == INTEGER_CST)
5334 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5335 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
5336 tree val2 = NULL_TREE;
5337 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
5338 wide_int mask = wi::zero (prec);
5339 unsigned int nprec = prec;
5340 enum tree_code rhs_code = ERROR_MARK;
5342 if (is_gimple_assign (def_stmt))
5343 rhs_code = gimple_assign_rhs_code (def_stmt);
5345 /* Add asserts for NAME cmp CST and NAME being defined
5346 as NAME = (int) NAME2. */
5347 if (!TYPE_UNSIGNED (TREE_TYPE (val))
5348 && (comp_code == LE_EXPR || comp_code == LT_EXPR
5349 || comp_code == GT_EXPR || comp_code == GE_EXPR)
5350 && gimple_assign_cast_p (def_stmt))
5352 name2 = gimple_assign_rhs1 (def_stmt);
5353 if (CONVERT_EXPR_CODE_P (rhs_code)
5354 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5355 && TYPE_UNSIGNED (TREE_TYPE (name2))
5356 && prec == TYPE_PRECISION (TREE_TYPE (name2))
5357 && (comp_code == LE_EXPR || comp_code == GT_EXPR
5358 || !tree_int_cst_equal (val,
5359 TYPE_MIN_VALUE (TREE_TYPE (val))))
5360 && live_on_edge (e, name2)
5361 && !has_single_use (name2))
5363 tree tmp, cst;
5364 enum tree_code new_comp_code = comp_code;
5366 cst = fold_convert (TREE_TYPE (name2),
5367 TYPE_MIN_VALUE (TREE_TYPE (val)));
5368 /* Build an expression for the range test. */
5369 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
5370 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
5371 fold_convert (TREE_TYPE (name2), val));
5372 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5374 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
5375 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
5376 build_int_cst (TREE_TYPE (name2), 1));
5379 if (dump_file)
5381 fprintf (dump_file, "Adding assert for ");
5382 print_generic_expr (dump_file, name2, 0);
5383 fprintf (dump_file, " from ");
5384 print_generic_expr (dump_file, tmp, 0);
5385 fprintf (dump_file, "\n");
5388 register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
5389 e, bsi);
5393 /* Add asserts for NAME cmp CST and NAME being defined as
5394 NAME = NAME2 >> CST2.
5396 Extract CST2 from the right shift. */
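/* For example, from (x >> 4) == 3 with unsigned 32-bit x we can
   assert (x - 48) <= 15 as an unsigned test, i.e. x is in [48, 63].  */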
5397 if (rhs_code == RSHIFT_EXPR)
5399 name2 = gimple_assign_rhs1 (def_stmt);
5400 cst2 = gimple_assign_rhs2 (def_stmt);
5401 if (TREE_CODE (name2) == SSA_NAME
5402 && tree_fits_uhwi_p (cst2)
5403 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5404 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
5405 && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
5406 && live_on_edge (e, name2)
5407 && !has_single_use (name2))
5409 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
5410 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
5413 if (val2 != NULL_TREE
5414 && TREE_CODE (val2) == INTEGER_CST
5415 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
5416 TREE_TYPE (val),
5417 val2, cst2), val))
5419 enum tree_code new_comp_code = comp_code;
5420 tree tmp, new_val;
5422 tmp = name2;
5423 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
5425 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5427 tree type = build_nonstandard_integer_type (prec, 1);
5428 tmp = build1 (NOP_EXPR, type, name2);
5429 val2 = fold_convert (type, val2);
5431 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
5432 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
5433 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
5435 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5437 wide_int minval
5438 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5439 new_val = val2;
5440 if (minval == new_val)
5441 new_val = NULL_TREE;
5443 else
5445 wide_int maxval
5446 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5447 mask |= val2;
5448 if (mask == maxval)
5449 new_val = NULL_TREE;
5450 else
5451 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
5454 if (new_val)
5456 if (dump_file)
5458 fprintf (dump_file, "Adding assert for ");
5459 print_generic_expr (dump_file, name2, 0);
5460 fprintf (dump_file, " from ");
5461 print_generic_expr (dump_file, tmp, 0);
5462 fprintf (dump_file, "\n");
5465 register_new_assert_for (name2, tmp, new_comp_code, new_val,
5466 NULL, e, bsi);
5470 /* Add asserts for NAME cmp CST and NAME being defined as
5471 NAME = NAME2 & CST2.
5473 Extract CST2 from the and.
5475 Also handle
5476 NAME = (unsigned) NAME2;
5477 casts where NAME's type is unsigned and has smaller precision
5478 than NAME2's type as if it was NAME = NAME2 & MASK. */
5479 names[0] = NULL_TREE;
5480 names[1] = NULL_TREE;
5481 cst2 = NULL_TREE;
5482 if (rhs_code == BIT_AND_EXPR
5483 || (CONVERT_EXPR_CODE_P (rhs_code)
5484 && TREE_CODE (TREE_TYPE (val)) == INTEGER_TYPE
5485 && TYPE_UNSIGNED (TREE_TYPE (val))
5486 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5487 > prec))
5489 name2 = gimple_assign_rhs1 (def_stmt);
5490 if (rhs_code == BIT_AND_EXPR)
5491 cst2 = gimple_assign_rhs2 (def_stmt);
5492 else
5494 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
5495 nprec = TYPE_PRECISION (TREE_TYPE (name2));
5497 if (TREE_CODE (name2) == SSA_NAME
5498 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5499 && TREE_CODE (cst2) == INTEGER_CST
5500 && !integer_zerop (cst2)
5501 && (nprec > 1
5502 || TYPE_UNSIGNED (TREE_TYPE (val))))
5504 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
5505 if (gimple_assign_cast_p (def_stmt2))
5507 names[1] = gimple_assign_rhs1 (def_stmt2);
5508 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
5509 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
5510 || (TYPE_PRECISION (TREE_TYPE (name2))
5511 != TYPE_PRECISION (TREE_TYPE (names[1])))
5512 || !live_on_edge (e, names[1])
5513 || has_single_use (names[1]))
5514 names[1] = NULL_TREE;
5516 if (live_on_edge (e, name2)
5517 && !has_single_use (name2))
5518 names[0] = name2;
5521 if (names[0] || names[1])
5523 wide_int minv, maxv, valv, cst2v;
5524 wide_int tem, sgnbit;
5525 bool valid_p = false, valn, cst2n;
5526 enum tree_code ccode = comp_code;
5528 valv = wide_int::from (val, nprec, UNSIGNED);
5529 cst2v = wide_int::from (cst2, nprec, UNSIGNED);
5530 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
5531 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
5532 /* If CST2 doesn't have the most significant bit set,
5533 but VAL is negative, we have a comparison like
5534 if ((x & 0x123) > -4) (always true). Just give up. */
5535 if (!cst2n && valn)
5536 ccode = ERROR_MARK;
5537 if (cst2n)
5538 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5539 else
5540 sgnbit = wi::zero (nprec);
5541 minv = valv & cst2v;
5542 switch (ccode)
5544 case EQ_EXPR:
5545 /* Minimum unsigned value for equality is VAL & CST2
5546 (should be equal to VAL, otherwise we probably should
5547 have folded the comparison into false) and
5548 maximum unsigned value is VAL | ~CST2. */
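/* E.g. for (x & 0xf0) == 0x30 the admissible unsigned values of x
   form the range [0x30, 0x3f].  */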
5549 maxv = valv | ~cst2v;
5550 valid_p = true;
5551 break;
5553 case NE_EXPR:
5554 tem = valv | ~cst2v;
5555 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
5556 if (valv == 0)
5558 cst2n = false;
5559 sgnbit = wi::zero (nprec);
5560 goto gt_expr;
5562 /* If (VAL | ~CST2) is all ones, handle it as
5563 (X & CST2) < VAL. */
5564 if (tem == -1)
5566 cst2n = false;
5567 valn = false;
5568 sgnbit = wi::zero (nprec);
5569 goto lt_expr;
5571 if (!cst2n && wi::neg_p (cst2v))
5572 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5573 if (sgnbit != 0)
5575 if (valv == sgnbit)
5577 cst2n = true;
5578 valn = true;
5579 goto gt_expr;
5581 if (tem == wi::mask (nprec - 1, false, nprec))
5583 cst2n = true;
5584 goto lt_expr;
5586 if (!cst2n)
5587 sgnbit = wi::zero (nprec);
5589 break;
5591 case GE_EXPR:
5592 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5593 is VAL and maximum unsigned value is ~0. For signed
5594 comparison, if CST2 doesn't have most significant bit
5595 set, handle it similarly. If CST2 has MSB set,
5596 the minimum is the same, and maximum is ~0U/2. */
5597 if (minv != valv)
5599 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5600 VAL. */
5601 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5602 if (minv == valv)
5603 break;
5605 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5606 valid_p = true;
5607 break;
5609 case GT_EXPR:
5610 gt_expr:
5611 /* Find out smallest MINV where MINV > VAL
5612 && (MINV & CST2) == MINV, if any. If VAL is signed and
5613 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
5614 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5615 if (minv == valv)
5616 break;
5617 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5618 valid_p = true;
5619 break;
5621 case LE_EXPR:
5622 /* Minimum unsigned value for <= is 0 and maximum
5623 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5624 Otherwise, find smallest VAL2 where VAL2 > VAL
5625 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5626 as maximum.
5627 For signed comparison, if CST2 doesn't have most
5628 significant bit set, handle it similarly. If CST2 has
5629 MSB set, the maximum is the same and minimum is INT_MIN. */
5630 if (minv == valv)
5631 maxv = valv;
5632 else
5634 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5635 if (maxv == valv)
5636 break;
5637 maxv -= 1;
5639 maxv |= ~cst2v;
5640 minv = sgnbit;
5641 valid_p = true;
5642 break;
5644 case LT_EXPR:
5645 lt_expr:
5646 /* Minimum unsigned value for < is 0 and maximum
5647 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5648 Otherwise, find smallest VAL2 where VAL2 > VAL
5649 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5650 as maximum.
5651 For signed comparison, if CST2 doesn't have most
5652 significant bit set, handle it similarly. If CST2 has
5653 MSB set, the maximum is the same and minimum is INT_MIN. */
5654 if (minv == valv)
5656 if (valv == sgnbit)
5657 break;
5658 maxv = valv;
5660 else
5662 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5663 if (maxv == valv)
5664 break;
5666 maxv -= 1;
5667 maxv |= ~cst2v;
5668 minv = sgnbit;
5669 valid_p = true;
5670 break;
5672 default:
5673 break;
5675 if (valid_p
5676 && (maxv - minv) != -1)
5678 tree tmp, new_val, type;
5679 int i;
5681 for (i = 0; i < 2; i++)
5682 if (names[i])
5684 wide_int maxv2 = maxv;
5685 tmp = names[i];
5686 type = TREE_TYPE (names[i]);
5687 if (!TYPE_UNSIGNED (type))
5689 type = build_nonstandard_integer_type (nprec, 1);
5690 tmp = build1 (NOP_EXPR, type, names[i]);
5692 if (minv != 0)
5694 tmp = build2 (PLUS_EXPR, type, tmp,
5695 wide_int_to_tree (type, -minv));
5696 maxv2 = maxv - minv;
5698 new_val = wide_int_to_tree (type, maxv2);
5700 if (dump_file)
5702 fprintf (dump_file, "Adding assert for ");
5703 print_generic_expr (dump_file, names[i], 0);
5704 fprintf (dump_file, " from ");
5705 print_generic_expr (dump_file, tmp, 0);
5706 fprintf (dump_file, "\n");
5709 register_new_assert_for (names[i], tmp, LE_EXPR,
5710 new_val, NULL, e, bsi);
5717 /* OP is an operand of a truth value expression which is known to have
5718 a particular value. Register any asserts for OP and for any
5719 operands in OP's defining statement.
5721 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
5722 if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */
5724 static void
5725 register_edge_assert_for_1 (tree op, enum tree_code code,
5726 edge e, gimple_stmt_iterator bsi)
5728 gimple *op_def;
5729 tree val;
5730 enum tree_code rhs_code;
5732 /* We only care about SSA_NAMEs. */
5733 if (TREE_CODE (op) != SSA_NAME)
5734 return;
5736 /* We know that OP will have a zero or nonzero value. If OP is used
5737 more than once go ahead and register an assert for OP. */
5738 if (live_on_edge (e, op)
5739 && !has_single_use (op))
5741 val = build_int_cst (TREE_TYPE (op), 0);
5742 register_new_assert_for (op, op, code, val, NULL, e, bsi);
5745 /* Now look at how OP is set. If it's set from a comparison,
5746 a truth operation or some bit operations, then we may be able
5747 to register information about the operands of that assignment. */
5748 op_def = SSA_NAME_DEF_STMT (op);
5749 if (gimple_code (op_def) != GIMPLE_ASSIGN)
5750 return;
5752 rhs_code = gimple_assign_rhs_code (op_def);
5754 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5756 bool invert = (code == EQ_EXPR ? true : false);
5757 tree op0 = gimple_assign_rhs1 (op_def);
5758 tree op1 = gimple_assign_rhs2 (op_def);
5760 if (TREE_CODE (op0) == SSA_NAME)
5761 register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, invert);
5762 if (TREE_CODE (op1) == SSA_NAME)
5763 register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, invert);
5765 else if ((code == NE_EXPR
5766 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5767 || (code == EQ_EXPR
5768 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5770 /* Recurse on each operand. */
5771 tree op0 = gimple_assign_rhs1 (op_def);
5772 tree op1 = gimple_assign_rhs2 (op_def);
5773 if (TREE_CODE (op0) == SSA_NAME
5774 && has_single_use (op0))
5775 register_edge_assert_for_1 (op0, code, e, bsi);
5776 if (TREE_CODE (op1) == SSA_NAME
5777 && has_single_use (op1))
5778 register_edge_assert_for_1 (op1, code, e, bsi);
5780 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5781 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5783 /* Recurse, flipping CODE. */
5784 code = invert_tree_comparison (code, false);
5785 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
5787 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5789 /* Recurse through the copy. */
5790 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
5792 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5794 /* Recurse through the type conversion, unless it is a narrowing
5795 conversion or conversion from non-integral type. */
5796 tree rhs = gimple_assign_rhs1 (op_def);
5797 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
5798 && (TYPE_PRECISION (TREE_TYPE (rhs))
5799 <= TYPE_PRECISION (TREE_TYPE (op))))
5800 register_edge_assert_for_1 (rhs, code, e, bsi);
5804 /* Try to register an edge assertion for SSA name NAME on edge E for
5805 the condition COND_CODE COND_OP0 COND_OP1 contributing to the
5806 conditional jump pointed to by SI. */
5808 static void
5809 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
5810 enum tree_code cond_code, tree cond_op0,
5811 tree cond_op1)
5813 tree val;
5814 enum tree_code comp_code;
5815 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
5817 /* Do not attempt to infer anything in names that flow through
5818 abnormal edges. */
5819 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
5820 return;
5822 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5823 cond_op0, cond_op1,
5824 is_else_edge,
5825 &comp_code, &val))
5826 return;
5828 /* Register ASSERT_EXPRs for name. */
5829 register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
5830 cond_op1, is_else_edge);
5833 /* If COND is effectively an equality test of an SSA_NAME against
5834 the value zero or one, then we may be able to assert values
5835 for SSA_NAMEs which flow into COND. */
5837 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
5838 statement of NAME we can assert both operands of the BIT_AND_EXPR
5839 have nonzero value. */
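      /* For instance (illustrative, names made up), given

	   _1 = x_2 & y_3;
	   if (_1 != 0)

	 every bit set in _1 is set in both x_2 and y_3, so on the true
	 edge both operands are known to be nonzero.  */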
5840 if (((comp_code == EQ_EXPR && integer_onep (val))
5841 || (comp_code == NE_EXPR && integer_zerop (val))))
5843 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5845 if (is_gimple_assign (def_stmt)
5846 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
5848 tree op0 = gimple_assign_rhs1 (def_stmt);
5849 tree op1 = gimple_assign_rhs2 (def_stmt);
5850 register_edge_assert_for_1 (op0, NE_EXPR, e, si);
5851 register_edge_assert_for_1 (op1, NE_EXPR, e, si);
5855 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
5856 statement of NAME we can assert both operands of the BIT_IOR_EXPR
5857 have zero value. */
5858 if (((comp_code == EQ_EXPR && integer_zerop (val))
5859 || (comp_code == NE_EXPR && integer_onep (val))))
5861 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5863 /* For BIT_IOR_EXPR both operands are necessarily zero only when
5864 NAME == 0, or when the type precision is one (NAME != 1 then implies NAME == 0). */
5865 if (is_gimple_assign (def_stmt)
5866 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
5867 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
5868 || comp_code == EQ_EXPR)))
5870 tree op0 = gimple_assign_rhs1 (def_stmt);
5871 tree op1 = gimple_assign_rhs2 (def_stmt);
5872 register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
5873 register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
5879 /* Determine whether the outgoing edges of BB should receive an
5880 ASSERT_EXPR for each of the operands of BB's LAST statement.
5881 The last statement of BB must be a COND_EXPR.
5883 If any of the sub-graphs rooted at BB have an interesting use of
5884 the predicate operands, an assert location node is added to the
5885 list of assertions for the corresponding operands. */
5887 static void
5888 find_conditional_asserts (basic_block bb, gcond *last)
5890 gimple_stmt_iterator bsi;
5891 tree op;
5892 edge_iterator ei;
5893 edge e;
5894 ssa_op_iter iter;
5896 bsi = gsi_for_stmt (last);
5898 /* Look for uses of the operands in each of the sub-graphs
5899 rooted at BB. We need to check each of the outgoing edges
5900 separately, so that we know what kind of ASSERT_EXPR to
5901 insert. */
5902 FOR_EACH_EDGE (e, ei, bb->succs)
5904 if (e->dest == bb)
5905 continue;
5907 /* Register the necessary assertions for each operand in the
5908 conditional predicate. */
5909 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
5910 register_edge_assert_for (op, e, bsi,
5911 gimple_cond_code (last),
5912 gimple_cond_lhs (last),
5913 gimple_cond_rhs (last));
5917 struct case_info
5919 tree expr;
5920 basic_block bb;
5923 /* Compare two case labels sorting first by the destination bb index
5924 and then by the case value. */
5926 static int
5927 compare_case_labels (const void *p1, const void *p2)
5929 const struct case_info *ci1 = (const struct case_info *) p1;
5930 const struct case_info *ci2 = (const struct case_info *) p2;
5931 int idx1 = ci1->bb->index;
5932 int idx2 = ci2->bb->index;
5934 if (idx1 < idx2)
5935 return -1;
5936 else if (idx1 == idx2)
5938 /* Make sure the default label is first in a group. */
5939 if (!CASE_LOW (ci1->expr))
5940 return -1;
5941 else if (!CASE_LOW (ci2->expr))
5942 return 1;
5943 else
5944 return tree_int_cst_compare (CASE_LOW (ci1->expr),
5945 CASE_LOW (ci2->expr));
5947 else
5948 return 1;
5951 /* Determine whether the outgoing edges of BB should receive an
5952 ASSERT_EXPR for each of the operands of BB's LAST statement.
5953 The last statement of BB must be a SWITCH_EXPR.
5955 If any of the sub-graphs rooted at BB have an interesting use of
5956 the predicate operands, an assert location node is added to the
5957 list of assertions for the corresponding operands. */
5959 static void
5960 find_switch_asserts (basic_block bb, gswitch *last)
5962 gimple_stmt_iterator bsi;
5963 tree op;
5964 edge e;
5965 struct case_info *ci;
5966 size_t n = gimple_switch_num_labels (last);
5967 #if GCC_VERSION >= 4000
5968 unsigned int idx;
5969 #else
5970 /* Work around GCC 3.4 bug (PR 37086). */
5971 volatile unsigned int idx;
5972 #endif
5974 bsi = gsi_for_stmt (last);
5975 op = gimple_switch_index (last);
5976 if (TREE_CODE (op) != SSA_NAME)
5977 return;
5979 /* Build a vector of case labels sorted by destination label. */
5980 ci = XNEWVEC (struct case_info, n);
5981 for (idx = 0; idx < n; ++idx)
5983 ci[idx].expr = gimple_switch_label (last, idx);
5984 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
5986 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
5988 for (idx = 0; idx < n; ++idx)
5990 tree min, max;
5991 tree cl = ci[idx].expr;
5992 basic_block cbb = ci[idx].bb;
5994 min = CASE_LOW (cl);
5995 max = CASE_HIGH (cl);
5997 /* If there are multiple case labels with the same destination
5998 we need to combine them to a single value range for the edge. */
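	  /* For example (illustrative): once sorted by destination,
	     "case 3: case 4 ... 6: case 7:" all branching to the same
	     block are collapsed into the single range [3, 7] registered
	     on that edge (the low bound of the first label and the high
	     bound of the last one).  */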
5999 if (idx + 1 < n && cbb == ci[idx + 1].bb)
6001 /* Skip labels until the last of the group. */
6002 do {
6003 ++idx;
6004 } while (idx < n && cbb == ci[idx].bb);
6005 --idx;
6007 /* Pick up the maximum of the case label range. */
6008 if (CASE_HIGH (ci[idx].expr))
6009 max = CASE_HIGH (ci[idx].expr);
6010 else
6011 max = CASE_LOW (ci[idx].expr);
6014 /* Nothing to do if the range includes the default label until we
6015 can register anti-ranges. */
6016 if (min == NULL_TREE)
6017 continue;
6019 /* Find the edge to register the assert expr on. */
6020 e = find_edge (bb, cbb);
6022 /* Register the necessary assertions for the operand in the
6023 SWITCH_EXPR. */
6024 register_edge_assert_for (op, e, bsi,
6025 max ? GE_EXPR : EQ_EXPR,
6026 op, fold_convert (TREE_TYPE (op), min));
6027 if (max)
6028 register_edge_assert_for (op, e, bsi, LE_EXPR, op,
6029 fold_convert (TREE_TYPE (op), max));
6032 XDELETEVEC (ci);
6036 /* Traverse all the statements in block BB looking for statements that
6037 may generate useful assertions for the SSA names in their operands.
6038 If a statement produces a useful assertion A for name N_i, then the
6039 list of assertions already generated for N_i is scanned to
6040 determine if A is actually needed.
6042 If N_i already had the assertion A at a location dominating the
6043 current location, then nothing needs to be done. Otherwise, the
6044 new location for A is recorded instead.
6046 1- For every statement S in BB, all the variables used by S are
6047 added to bitmap FOUND_IN_SUBGRAPH.
6049 2- If statement S uses an operand N in a way that exposes a known
6050 value range for N, then if N was not already generated by an
6051 ASSERT_EXPR, create a new assert location for N. For instance,
6052 if N is a pointer and the statement dereferences it, we can
6053 assume that N is not NULL.
6055 3- COND_EXPRs are a special case of #2. We can derive range
6056 information from the predicate but need to insert different
6057 ASSERT_EXPRs for each of the sub-graphs rooted at the
6058 conditional block. If the last statement of BB is a conditional
6059 expression of the form 'X op Y', then
6061 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
6063 b) If the conditional is the only entry point to the sub-graph
6064 corresponding to the THEN_CLAUSE, recurse into it. On
6065 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
6066 an ASSERT_EXPR is added for the corresponding variable.
6068 c) Repeat step (b) on the ELSE_CLAUSE.
6070 d) Mark X and Y in FOUND_IN_SUBGRAPH.
6072 For instance,
6074 if (a == 9)
6075 b = a;
6076 else
6077 b = c + 1;
6079 In this case, an assertion on the THEN clause is useful to
6080 determine that 'a' is always 9 on that edge. However, an assertion
6081 on the ELSE clause would be unnecessary.
6083 4- If BB does not end in a conditional expression, then we recurse
6084 into BB's dominator children.
6086 At the end of the recursive traversal, every SSA name will have a
6087 list of locations where ASSERT_EXPRs should be added. When a new
6088 location for name N is found, it is registered by calling
6089 register_new_assert_for. That function keeps track of all the
6090 registered assertions to prevent adding unnecessary assertions.
6091 For instance, if a pointer P_4 is dereferenced more than once in a
6092 dominator tree, only the location dominating all the dereferences of
6093 P_4 will receive an ASSERT_EXPR. */
6095 static void
6096 find_assert_locations_1 (basic_block bb, sbitmap live)
6098 gimple *last;
6100 last = last_stmt (bb);
6102 /* If BB's last statement is a conditional statement involving integer
6103 operands, determine if we need to add ASSERT_EXPRs. */
6104 if (last
6105 && gimple_code (last) == GIMPLE_COND
6106 && !fp_predicate (last)
6107 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
6108 find_conditional_asserts (bb, as_a <gcond *> (last));
6110 /* If BB's last statement is a switch statement involving integer
6111 operands, determine if we need to add ASSERT_EXPRs. */
6112 if (last
6113 && gimple_code (last) == GIMPLE_SWITCH
6114 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
6115 find_switch_asserts (bb, as_a <gswitch *> (last));
6117 /* Traverse all the statements in BB marking used names and looking
6118 for statements that may infer assertions for their used operands. */
6119 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
6120 gsi_prev (&si))
6122 gimple *stmt;
6123 tree op;
6124 ssa_op_iter i;
6126 stmt = gsi_stmt (si);
6128 if (is_gimple_debug (stmt))
6129 continue;
6131 /* See if we can derive an assertion for any of STMT's operands. */
6132 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6134 tree value;
6135 enum tree_code comp_code;
6137 /* If op is not live beyond this stmt, do not bother to insert
6138 asserts for it. */
6139 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
6140 continue;
6142 /* If OP is used in such a way that we can infer a value
6143 range for it, and we don't find a previous assertion for
6144 it, create a new assertion location node for OP. */
6145 if (infer_value_range (stmt, op, &comp_code, &value))
6147 /* If we are able to infer a nonzero value range for OP,
6148 then walk backwards through the use-def chain to see if OP
6149 was set via a typecast.
6151 If so, then we can also infer a nonzero value range
6152 for the operand of the NOP_EXPR. */
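	      /* For instance (illustrative GIMPLE, names made up):

		   q_2 = (int *) p_1;
		   x_3 = *q_2;

		 the dereference implies q_2 != 0, and walking back
		 through the conversion lets us assert p_1 != 0 as well
		 (when p_1 has more than one use).  */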
6153 if (comp_code == NE_EXPR && integer_zerop (value))
6155 tree t = op;
6156 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
6158 while (is_gimple_assign (def_stmt)
6159 && CONVERT_EXPR_CODE_P
6160 (gimple_assign_rhs_code (def_stmt))
6161 && TREE_CODE
6162 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
6163 && POINTER_TYPE_P
6164 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
6166 t = gimple_assign_rhs1 (def_stmt);
6167 def_stmt = SSA_NAME_DEF_STMT (t);
6169 /* Note we want to register the assert for the
6170 operand of the NOP_EXPR after SI, not after the
6171 conversion. */
6172 if (! has_single_use (t))
6173 register_new_assert_for (t, t, comp_code, value,
6174 bb, NULL, si);
6178 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
6182 /* Update live. */
6183 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6184 bitmap_set_bit (live, SSA_NAME_VERSION (op));
6185 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
6186 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
6189 /* Traverse all PHI nodes in BB, updating live. */
6190 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
6191 gsi_next (&si))
6193 use_operand_p arg_p;
6194 ssa_op_iter i;
6195 gphi *phi = si.phi ();
6196 tree res = gimple_phi_result (phi);
6198 if (virtual_operand_p (res))
6199 continue;
6201 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
6203 tree arg = USE_FROM_PTR (arg_p);
6204 if (TREE_CODE (arg) == SSA_NAME)
6205 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
6208 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
6212 /* Do an RPO walk over the function computing SSA name liveness
6213 on-the-fly and deciding on assert expressions to insert. */
6215 static void
6216 find_assert_locations (void)
6218 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6219 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6220 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
6221 int rpo_cnt, i;
6223 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
6224 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
6225 for (i = 0; i < rpo_cnt; ++i)
6226 bb_rpo[rpo[i]] = i;
6228 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
6229 the order in which we compute liveness and insert asserts, we would
6230 otherwise fail to insert asserts into the loop latch. */
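  /* For example (illustrative): in a loop

       <header>:  i_1 = PHI <0 (preheader), i_2 (latch)>
       ...
       <latch>:   i_2 = i_1 + 1;

     the only use of i_2 is the header PHI.  The walk below visits the
     latch before the header and does not merge liveness across the DFS
     back edge, so without this pre-seeding i_2 would never appear live
     in the latch and no assert could be registered for it there.  */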
6231 loop_p loop;
6232 FOR_EACH_LOOP (loop, 0)
6234 i = loop->latch->index;
6235 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
6236 for (gphi_iterator gsi = gsi_start_phis (loop->header);
6237 !gsi_end_p (gsi); gsi_next (&gsi))
6239 gphi *phi = gsi.phi ();
6240 if (virtual_operand_p (gimple_phi_result (phi)))
6241 continue;
6242 tree arg = gimple_phi_arg_def (phi, j);
6243 if (TREE_CODE (arg) == SSA_NAME)
6245 if (live[i] == NULL)
6247 live[i] = sbitmap_alloc (num_ssa_names);
6248 bitmap_clear (live[i]);
6250 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
6255 for (i = rpo_cnt - 1; i >= 0; --i)
6257 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
6258 edge e;
6259 edge_iterator ei;
6261 if (!live[rpo[i]])
6263 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
6264 bitmap_clear (live[rpo[i]]);
6267 /* Process BB and update the live information with uses in
6268 this block. */
6269 find_assert_locations_1 (bb, live[rpo[i]]);
6271 /* Merge liveness into the predecessor blocks and free it. */
6272 if (!bitmap_empty_p (live[rpo[i]]))
6274 int pred_rpo = i;
6275 FOR_EACH_EDGE (e, ei, bb->preds)
6277 int pred = e->src->index;
6278 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
6279 continue;
6281 if (!live[pred])
6283 live[pred] = sbitmap_alloc (num_ssa_names);
6284 bitmap_clear (live[pred]);
6286 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
6288 if (bb_rpo[pred] < pred_rpo)
6289 pred_rpo = bb_rpo[pred];
6292 /* Record the RPO number of the last visited block that needs
6293 live information from this block. */
6294 last_rpo[rpo[i]] = pred_rpo;
6296 else
6298 sbitmap_free (live[rpo[i]]);
6299 live[rpo[i]] = NULL;
6302 /* We can free all successors' live bitmaps if all their
6303 predecessors have been visited already. */
6304 FOR_EACH_EDGE (e, ei, bb->succs)
6305 if (last_rpo[e->dest->index] == i
6306 && live[e->dest->index])
6308 sbitmap_free (live[e->dest->index]);
6309 live[e->dest->index] = NULL;
6313 XDELETEVEC (rpo);
6314 XDELETEVEC (bb_rpo);
6315 XDELETEVEC (last_rpo);
6316 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
6317 if (live[i])
6318 sbitmap_free (live[i]);
6319 XDELETEVEC (live);
6322 /* Create an ASSERT_EXPR for NAME and insert it in the location
6323 indicated by LOC. Return true if we made any edge insertions. */
6325 static bool
6326 process_assert_insertions_for (tree name, assert_locus *loc)
6328 /* Build the comparison expression NAME_i COMP_CODE VAL. */
6329 gimple *stmt;
6330 tree cond;
6331 gimple *assert_stmt;
6332 edge_iterator ei;
6333 edge e;
6335 /* If we have X <=> X do not insert an assert expr for that. */
6336 if (loc->expr == loc->val)
6337 return false;
6339 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
6340 assert_stmt = build_assert_expr_for (cond, name);
6341 if (loc->e)
6343 /* We have been asked to insert the assertion on an edge. This
6344 is used only by COND_EXPR and SWITCH_EXPR assertions. */
6345 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
6346 || (gimple_code (gsi_stmt (loc->si))
6347 == GIMPLE_SWITCH));
6349 gsi_insert_on_edge (loc->e, assert_stmt);
6350 return true;
6353 /* Otherwise, we can insert right after LOC->SI, provided the
6354 statement does not end the basic block. */
6355 stmt = gsi_stmt (loc->si);
6356 if (!stmt_ends_bb_p (stmt))
6358 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
6359 return false;
6362 /* If STMT must be the last statement in BB, we can only insert new
6363 assertions on the non-abnormal edge out of BB. Note that since
6364 STMT is not control flow, there may only be one non-abnormal edge
6365 out of BB. */
6366 FOR_EACH_EDGE (e, ei, loc->bb->succs)
6367 if (!(e->flags & EDGE_ABNORMAL))
6369 gsi_insert_on_edge (e, assert_stmt);
6370 return true;
6373 gcc_unreachable ();
6377 /* Process all the insertions registered for every name N_i registered
6378 in NEED_ASSERT_FOR. The list of assertions to be inserted are
6379 found in ASSERTS_FOR[i]. */
6381 static void
6382 process_assert_insertions (void)
6384 unsigned i;
6385 bitmap_iterator bi;
6386 bool update_edges_p = false;
6387 int num_asserts = 0;
6389 if (dump_file && (dump_flags & TDF_DETAILS))
6390 dump_all_asserts (dump_file);
6392 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
6394 assert_locus *loc = asserts_for[i];
6395 gcc_assert (loc);
6397 while (loc)
6399 assert_locus *next = loc->next;
6400 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
6401 free (loc);
6402 loc = next;
6403 num_asserts++;
6407 if (update_edges_p)
6408 gsi_commit_edge_inserts ();
6410 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
6411 num_asserts);
6415 /* Traverse the flowgraph looking for conditional jumps to insert range
6416 expressions. These range expressions are meant to provide information
6417 to optimizations that need to reason in terms of value ranges. They
6418 will not be expanded into RTL. For instance, given:
6420 x = ...
6421 y = ...
6422 if (x < y)
6423 y = x - 2;
6424 else
6425 x = y + 3;
6427 this pass will transform the code into:
6429 x = ...
6430 y = ...
6431 if (x < y)
6433 x = ASSERT_EXPR <x, x < y>
6434 y = x - 2
6436 else
6438 y = ASSERT_EXPR <y, x >= y>
6439 x = y + 3
6442 The idea is that once copy and constant propagation have run, other
6443 optimizations will be able to determine what ranges of values can 'x'
6444 take in different paths of the code, simply by checking the reaching
6445 definition of 'x'. */
6447 static void
6448 insert_range_assertions (void)
6450 need_assert_for = BITMAP_ALLOC (NULL);
6451 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
6453 calculate_dominance_info (CDI_DOMINATORS);
6455 find_assert_locations ();
6456 if (!bitmap_empty_p (need_assert_for))
6458 process_assert_insertions ();
6459 update_ssa (TODO_update_ssa_no_phi);
6462 if (dump_file && (dump_flags & TDF_DETAILS))
6464 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
6465 dump_function_to_file (current_function_decl, dump_file, dump_flags);
6468 free (asserts_for);
6469 BITMAP_FREE (need_assert_for);
6472 /* Checks one ARRAY_REF in REF, located at LOCATION. Ignores flexible arrays
6473 and "struct" hacks. If VRP can determine that the
6474 array subscript is a constant, check if it is outside the valid
6475 range. If the array subscript is a RANGE, warn if it is
6476 non-overlapping with the valid range.
6477 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
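/* For instance (illustrative only), with -Warray-bounds enabled and

     int a[10];

   a constant subscript such as a[10] is diagnosed as above the bounds,
   &a[10] is accepted because IGNORE_OFF_BY_ONE allows the one-past-the-end
   address, and a[i] with i known by VRP to lie entirely in [10, 20] is
   diagnosed because the subscript range does not overlap the valid
   range [0, 9].  */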
6479 static void
6480 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
6482 value_range_t* vr = NULL;
6483 tree low_sub, up_sub;
6484 tree low_bound, up_bound, up_bound_p1;
6485 tree base;
6487 if (TREE_NO_WARNING (ref))
6488 return;
6490 low_sub = up_sub = TREE_OPERAND (ref, 1);
6491 up_bound = array_ref_up_bound (ref);
6493 /* Cannot check flexible arrays. */
6494 if (!up_bound
6495 || TREE_CODE (up_bound) != INTEGER_CST)
6496 return;
6498 /* Accesses to trailing arrays via pointers may access storage
6499 beyond the type's array bounds. */
6500 base = get_base_address (ref);
6501 if ((warn_array_bounds < 2)
6502 && base && TREE_CODE (base) == MEM_REF)
6504 tree cref, next = NULL_TREE;
6506 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
6507 return;
6509 cref = TREE_OPERAND (ref, 0);
6510 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
6511 for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
6512 next && TREE_CODE (next) != FIELD_DECL;
6513 next = DECL_CHAIN (next))
6516 /* If this is the last field in a struct type or a field in a
6517 union type do not warn. */
6518 if (!next)
6519 return;
6522 low_bound = array_ref_low_bound (ref);
6523 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
6524 build_int_cst (TREE_TYPE (up_bound), 1));
6526 /* Empty array. */
6527 if (tree_int_cst_equal (low_bound, up_bound_p1))
6529 warning_at (location, OPT_Warray_bounds,
6530 "array subscript is above array bounds");
6531 TREE_NO_WARNING (ref) = 1;
6534 if (TREE_CODE (low_sub) == SSA_NAME)
6536 vr = get_value_range (low_sub);
6537 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
6539 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
6540 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
6544 if (vr && vr->type == VR_ANTI_RANGE)
6546 if (TREE_CODE (up_sub) == INTEGER_CST
6547 && (ignore_off_by_one
6548 ? tree_int_cst_lt (up_bound, up_sub)
6549 : tree_int_cst_le (up_bound, up_sub))
6550 && TREE_CODE (low_sub) == INTEGER_CST
6551 && tree_int_cst_le (low_sub, low_bound))
6553 warning_at (location, OPT_Warray_bounds,
6554 "array subscript is outside array bounds");
6555 TREE_NO_WARNING (ref) = 1;
6558 else if (TREE_CODE (up_sub) == INTEGER_CST
6559 && (ignore_off_by_one
6560 ? !tree_int_cst_le (up_sub, up_bound_p1)
6561 : !tree_int_cst_le (up_sub, up_bound)))
6563 if (dump_file && (dump_flags & TDF_DETAILS))
6565 fprintf (dump_file, "Array bound warning for ");
6566 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6567 fprintf (dump_file, "\n");
6569 warning_at (location, OPT_Warray_bounds,
6570 "array subscript is above array bounds");
6571 TREE_NO_WARNING (ref) = 1;
6573 else if (TREE_CODE (low_sub) == INTEGER_CST
6574 && tree_int_cst_lt (low_sub, low_bound))
6576 if (dump_file && (dump_flags & TDF_DETAILS))
6578 fprintf (dump_file, "Array bound warning for ");
6579 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6580 fprintf (dump_file, "\n");
6582 warning_at (location, OPT_Warray_bounds,
6583 "array subscript is below array bounds");
6584 TREE_NO_WARNING (ref) = 1;
6588 /* Search the expression T, located at LOCATION, for ARRAY_REFs, and
6589 call check_array_ref on each one found. */
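/* A sketch of the MEM_REF case handled below (illustrative only): for

     int a[10];

   an access represented as MEM_REF [&a, 48] has element size 4, so the
   implied index is 48 / 4 = 12, which is above the valid bounds [0, 9]
   and is diagnosed.  */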
6591 static void
6592 search_for_addr_array (tree t, location_t location)
6594 /* Check each ARRAY_REF in the reference chain. */
6597 if (TREE_CODE (t) == ARRAY_REF)
6598 check_array_ref (location, t, true /*ignore_off_by_one*/);
6600 t = TREE_OPERAND (t, 0);
6602 while (handled_component_p (t));
6604 if (TREE_CODE (t) == MEM_REF
6605 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
6606 && !TREE_NO_WARNING (t))
6608 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
6609 tree low_bound, up_bound, el_sz;
6610 offset_int idx;
6611 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
6612 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
6613 || !TYPE_DOMAIN (TREE_TYPE (tem)))
6614 return;
6616 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6617 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6618 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
6619 if (!low_bound
6620 || TREE_CODE (low_bound) != INTEGER_CST
6621 || !up_bound
6622 || TREE_CODE (up_bound) != INTEGER_CST
6623 || !el_sz
6624 || TREE_CODE (el_sz) != INTEGER_CST)
6625 return;
6627 idx = mem_ref_offset (t);
6628 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
6629 if (wi::lts_p (idx, 0))
6631 if (dump_file && (dump_flags & TDF_DETAILS))
6633 fprintf (dump_file, "Array bound warning for ");
6634 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6635 fprintf (dump_file, "\n");
6637 warning_at (location, OPT_Warray_bounds,
6638 "array subscript is below array bounds");
6639 TREE_NO_WARNING (t) = 1;
6641 else if (wi::gts_p (idx, (wi::to_offset (up_bound)
6642 - wi::to_offset (low_bound) + 1)))
6644 if (dump_file && (dump_flags & TDF_DETAILS))
6646 fprintf (dump_file, "Array bound warning for ");
6647 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6648 fprintf (dump_file, "\n");
6650 warning_at (location, OPT_Warray_bounds,
6651 "array subscript is above array bounds");
6652 TREE_NO_WARNING (t) = 1;
6657 /* walk_tree() callback that checks if *TP is
6658 an ARRAY_REF inside an ADDR_EXPR (in which an array
6659 subscript one outside the valid range is allowed). Call
6660 check_array_ref for each ARRAY_REF found. The location is
6661 passed in DATA. */
6663 static tree
6664 check_array_bounds (tree *tp, int *walk_subtree, void *data)
6666 tree t = *tp;
6667 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6668 location_t location;
6670 if (EXPR_HAS_LOCATION (t))
6671 location = EXPR_LOCATION (t);
6672 else
6674 location_t *locp = (location_t *) wi->info;
6675 location = *locp;
6678 *walk_subtree = TRUE;
6680 if (TREE_CODE (t) == ARRAY_REF)
6681 check_array_ref (location, t, false /*ignore_off_by_one*/);
6683 else if (TREE_CODE (t) == ADDR_EXPR)
6685 search_for_addr_array (t, location);
6686 *walk_subtree = FALSE;
6689 return NULL_TREE;
6692 /* Walk over all statements of all reachable BBs and call check_array_bounds
6693 on them. */
6695 static void
6696 check_all_array_refs (void)
6698 basic_block bb;
6699 gimple_stmt_iterator si;
6701 FOR_EACH_BB_FN (bb, cfun)
6703 edge_iterator ei;
6704 edge e;
6705 bool executable = false;
6707 /* Skip blocks that were found to be unreachable. */
6708 FOR_EACH_EDGE (e, ei, bb->preds)
6709 executable |= !!(e->flags & EDGE_EXECUTABLE);
6710 if (!executable)
6711 continue;
6713 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6715 gimple *stmt = gsi_stmt (si);
6716 struct walk_stmt_info wi;
6717 if (!gimple_has_location (stmt)
6718 || is_gimple_debug (stmt))
6719 continue;
6721 memset (&wi, 0, sizeof (wi));
6722 wi.info = CONST_CAST (void *, (const void *)
6723 gimple_location_ptr (stmt));
6725 walk_gimple_op (gsi_stmt (si),
6726 check_array_bounds,
6727 &wi);
6732 /* Return true if all imm uses of VAR are either in STMT, or
6733 feed (optionally through a chain of single imm uses) GIMPLE_COND
6734 in basic block COND_BB. */
6736 static bool
6737 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
6739 use_operand_p use_p, use2_p;
6740 imm_use_iterator iter;
6742 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
6743 if (USE_STMT (use_p) != stmt)
6745 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
6746 if (is_gimple_debug (use_stmt))
6747 continue;
6748 while (is_gimple_assign (use_stmt)
6749 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
6750 && single_imm_use (gimple_assign_lhs (use_stmt),
6751 &use2_p, &use_stmt2))
6752 use_stmt = use_stmt2;
6753 if (gimple_code (use_stmt) != GIMPLE_COND
6754 || gimple_bb (use_stmt) != cond_bb)
6755 return false;
6757 return true;
6760 /* Handle
6761 _4 = x_3 & 31;
6762 if (_4 != 0)
6763 goto <bb 6>;
6764 else
6765 goto <bb 7>;
6766 <bb 6>:
6767 __builtin_unreachable ();
6768 <bb 7>:
6769 x_5 = ASSERT_EXPR <x_3, ...>;
6770 If x_3 has no other immediate uses (checked by the caller) and
6771 VAR is the x_3 from the ASSERT_EXPR, we can clear the low 5 bits
6772 from x_3's non-zero bitmask. */
6774 static void
6775 maybe_set_nonzero_bits (basic_block bb, tree var)
6777 edge e = single_pred_edge (bb);
6778 basic_block cond_bb = e->src;
6779 gimple *stmt = last_stmt (cond_bb);
6780 tree cst;
6782 if (stmt == NULL
6783 || gimple_code (stmt) != GIMPLE_COND
6784 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
6785 ? EQ_EXPR : NE_EXPR)
6786 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
6787 || !integer_zerop (gimple_cond_rhs (stmt)))
6788 return;
6790 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
6791 if (!is_gimple_assign (stmt)
6792 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
6793 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
6794 return;
6795 if (gimple_assign_rhs1 (stmt) != var)
6797 gimple *stmt2;
6799 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
6800 return;
6801 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
6802 if (!gimple_assign_cast_p (stmt2)
6803 || gimple_assign_rhs1 (stmt2) != var
6804 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
6805 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
6806 != TYPE_PRECISION (TREE_TYPE (var))))
6807 return;
6809 cst = gimple_assign_rhs2 (stmt);
6810 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
6813 /* Convert range assertion expressions into the implied copies and
6814 copy propagate away the copies. Doing the trivial copy propagation
6815 here avoids the need to run the full copy propagation pass after
6816 VRP.
6818 FIXME, this will eventually lead to copy propagation removing the
6819 names that had useful range information attached to them. For
6820 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
6821 then N_i will have the range [3, +INF].
6823 However, by converting the assertion into the implied copy
6824 operation N_i = N_j, we will then copy-propagate N_j into the uses
6825 of N_i and lose the range information. We may want to hold on to
6826 ASSERT_EXPRs a little while longer as the ranges could be used in
6827 things like jump threading.
6829 The problem with keeping ASSERT_EXPRs around is that passes after
6830 VRP need to handle them appropriately.
6832 Another approach would be to make the range information a first
6833 class property of the SSA_NAME so that it can be queried from
6834 any pass. This is made somewhat more complex by the need for
6835 multiple ranges to be associated with one SSA_NAME. */
6837 static void
6838 remove_range_assertions (void)
6840 basic_block bb;
6841 gimple_stmt_iterator si;
6842 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
6843 a basic block preceded by GIMPLE_COND branching to it and
6844 __builtin_trap, -1 if not yet checked, 0 otherwise. */
6845 int is_unreachable;
6847 /* Note that the BSI iterator bump happens at the bottom of the
6848 loop and no bump is necessary if we're removing the statement
6849 referenced by the current BSI. */
6850 FOR_EACH_BB_FN (bb, cfun)
6851 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
6853 gimple *stmt = gsi_stmt (si);
6854 gimple *use_stmt;
6856 if (is_gimple_assign (stmt)
6857 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
6859 tree lhs = gimple_assign_lhs (stmt);
6860 tree rhs = gimple_assign_rhs1 (stmt);
6861 tree var;
6862 tree cond = fold (ASSERT_EXPR_COND (rhs));
6863 use_operand_p use_p;
6864 imm_use_iterator iter;
6866 gcc_assert (cond != boolean_false_node);
6868 var = ASSERT_EXPR_VAR (rhs);
6869 gcc_assert (TREE_CODE (var) == SSA_NAME);
6871 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
6872 && SSA_NAME_RANGE_INFO (lhs))
6874 if (is_unreachable == -1)
6876 is_unreachable = 0;
6877 if (single_pred_p (bb)
6878 && assert_unreachable_fallthru_edge_p
6879 (single_pred_edge (bb)))
6880 is_unreachable = 1;
6882 /* Handle
6883 if (x_7 >= 10 && x_7 < 20)
6884 __builtin_unreachable ();
6885 x_8 = ASSERT_EXPR <x_7, ...>;
6886 if the only uses of x_7 are in the ASSERT_EXPR and
6887 in the condition. In that case, we can copy the
6888 range info from x_8 computed in this pass also
6889 for x_7. */
6890 if (is_unreachable
6891 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
6892 single_pred (bb)))
6894 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
6895 SSA_NAME_RANGE_INFO (lhs)->get_min (),
6896 SSA_NAME_RANGE_INFO (lhs)->get_max ());
6897 maybe_set_nonzero_bits (bb, var);
6901 /* Propagate the RHS into every use of the LHS. */
6902 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
6903 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
6904 SET_USE (use_p, var);
6906 /* And finally, remove the copy; it is not needed. */
6907 gsi_remove (&si, true);
6908 release_defs (stmt);
6910 else
6912 if (!is_gimple_debug (gsi_stmt (si)))
6913 is_unreachable = 0;
6914 gsi_next (&si);
6920 /* Return true if STMT is interesting for VRP. */
6922 static bool
6923 stmt_interesting_for_vrp (gimple *stmt)
6925 if (gimple_code (stmt) == GIMPLE_PHI)
6927 tree res = gimple_phi_result (stmt);
6928 return (!virtual_operand_p (res)
6929 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
6930 || POINTER_TYPE_P (TREE_TYPE (res))));
6932 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6934 tree lhs = gimple_get_lhs (stmt);
6936 /* In general, assignments with virtual operands are not useful
6937 for deriving ranges, with the obvious exception of calls to
6938 builtin functions. */
6939 if (lhs && TREE_CODE (lhs) == SSA_NAME
6940 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6941 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6942 && (is_gimple_call (stmt)
6943 || !gimple_vuse (stmt)))
6944 return true;
6945 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
6946 switch (gimple_call_internal_fn (stmt))
6948 case IFN_ADD_OVERFLOW:
6949 case IFN_SUB_OVERFLOW:
6950 case IFN_MUL_OVERFLOW:
6951 /* These internal calls return _Complex integer type,
6952 but are interesting to VRP nevertheless. */
6953 if (lhs && TREE_CODE (lhs) == SSA_NAME)
6954 return true;
6955 break;
6956 default:
6957 break;
6960 else if (gimple_code (stmt) == GIMPLE_COND
6961 || gimple_code (stmt) == GIMPLE_SWITCH)
6962 return true;
6964 return false;
6968 /* Initialize local data structures for VRP. */
6970 static void
6971 vrp_initialize (void)
6973 basic_block bb;
6975 values_propagated = false;
6976 num_vr_values = num_ssa_names;
6977 vr_value = XCNEWVEC (value_range_t *, num_vr_values);
6978 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
6980 FOR_EACH_BB_FN (bb, cfun)
6982 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
6983 gsi_next (&si))
6985 gphi *phi = si.phi ();
6986 if (!stmt_interesting_for_vrp (phi))
6988 tree lhs = PHI_RESULT (phi);
6989 set_value_range_to_varying (get_value_range (lhs));
6990 prop_set_simulate_again (phi, false);
6992 else
6993 prop_set_simulate_again (phi, true);
6996 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
6997 gsi_next (&si))
6999 gimple *stmt = gsi_stmt (si);
7001 /* If the statement is a control insn, then it must be
7002 simulated at least once. Failing to do so means the
7003 outgoing edges it controls will never get added. */
7004 if (stmt_ends_bb_p (stmt))
7005 prop_set_simulate_again (stmt, true);
7006 else if (!stmt_interesting_for_vrp (stmt))
7008 ssa_op_iter i;
7009 tree def;
7010 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
7011 set_value_range_to_varying (get_value_range (def));
7012 prop_set_simulate_again (stmt, false);
7014 else
7015 prop_set_simulate_again (stmt, true);
7020 /* Return the singleton value-range for NAME, or NAME itself if its range is not a singleton constant. */
7022 static inline tree
7023 vrp_valueize (tree name)
7025 if (TREE_CODE (name) == SSA_NAME)
7027 value_range_t *vr = get_value_range (name);
7028 if (vr->type == VR_RANGE
7029 && (vr->min == vr->max
7030 || operand_equal_p (vr->min, vr->max, 0)))
7031 return vr->min;
7033 return name;
7036 /* Return the singleton value-range for NAME if that is a constant
7037 but signal to not follow SSA edges. */
7039 static inline tree
7040 vrp_valueize_1 (tree name)
7042 if (TREE_CODE (name) == SSA_NAME)
7044 /* If the definition may be simulated again we cannot follow
7045 this SSA edge as the SSA propagator does not necessarily
7046 re-visit the use. */
7047 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
7048 if (!gimple_nop_p (def_stmt)
7049 && prop_simulate_again_p (def_stmt))
7050 return NULL_TREE;
7051 value_range_t *vr = get_value_range (name);
7052 if (range_int_cst_singleton_p (vr))
7053 return vr->min;
7055 return name;
7058 /* Visit assignment STMT. If it produces an interesting range, record
7059 the SSA name in *OUTPUT_P. */
7061 static enum ssa_prop_result
7062 vrp_visit_assignment_or_call (gimple *stmt, tree *output_p)
7064 tree def, lhs;
7065 ssa_op_iter iter;
7066 enum gimple_code code = gimple_code (stmt);
7067 lhs = gimple_get_lhs (stmt);
7069 /* We only keep track of ranges in integral and pointer types. */
7070 if (TREE_CODE (lhs) == SSA_NAME
7071 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
7072 /* It is valid to have NULL MIN/MAX values on a type. See
7073 build_range_type. */
7074 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
7075 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
7076 || POINTER_TYPE_P (TREE_TYPE (lhs))))
7078 value_range_t new_vr = VR_INITIALIZER;
7080 /* Try folding the statement to a constant first. */
7081 tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
7082 vrp_valueize_1);
7083 if (tem && is_gimple_min_invariant (tem))
7084 set_value_range_to_value (&new_vr, tem, NULL);
7085 /* Then dispatch to value-range extracting functions. */
7086 else if (code == GIMPLE_CALL)
7087 extract_range_basic (&new_vr, stmt);
7088 else
7089 extract_range_from_assignment (&new_vr, as_a <gassign *> (stmt));
7091 if (update_value_range (lhs, &new_vr))
7093 *output_p = lhs;
7095 if (dump_file && (dump_flags & TDF_DETAILS))
7097 fprintf (dump_file, "Found new range for ");
7098 print_generic_expr (dump_file, lhs, 0);
7099 fprintf (dump_file, ": ");
7100 dump_value_range (dump_file, &new_vr);
7101 fprintf (dump_file, "\n");
7104 if (new_vr.type == VR_VARYING)
7105 return SSA_PROP_VARYING;
7107 return SSA_PROP_INTERESTING;
7110 return SSA_PROP_NOT_INTERESTING;
7112 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
7113 switch (gimple_call_internal_fn (stmt))
7115 case IFN_ADD_OVERFLOW:
7116 case IFN_SUB_OVERFLOW:
7117 case IFN_MUL_OVERFLOW:
7118 /* These internal calls return _Complex integer type,
7119 which VRP does not track, but the immediate uses
7120 thereof might be interesting. */
7121 if (lhs && TREE_CODE (lhs) == SSA_NAME)
7123 imm_use_iterator iter;
7124 use_operand_p use_p;
7125 enum ssa_prop_result res = SSA_PROP_VARYING;
7127 set_value_range_to_varying (get_value_range (lhs));
7129 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
7131 gimple *use_stmt = USE_STMT (use_p);
7132 if (!is_gimple_assign (use_stmt))
7133 continue;
7134 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
7135 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
7136 continue;
7137 tree rhs1 = gimple_assign_rhs1 (use_stmt);
7138 tree use_lhs = gimple_assign_lhs (use_stmt);
7139 if (TREE_CODE (rhs1) != rhs_code
7140 || TREE_OPERAND (rhs1, 0) != lhs
7141 || TREE_CODE (use_lhs) != SSA_NAME
7142 || !stmt_interesting_for_vrp (use_stmt)
7143 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
7144 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
7145 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
7146 continue;
7148 /* If there is a change in the value range for any of the
7149 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
7150 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
7151 or IMAGPART_EXPR immediate uses, but none of them have
7152 a change in their value ranges, return
7153 SSA_PROP_NOT_INTERESTING. If there are no
7154 {REAL,IMAG}PART_EXPR uses at all,
7155 return SSA_PROP_VARYING. */
7156 value_range_t new_vr = VR_INITIALIZER;
7157 extract_range_basic (&new_vr, use_stmt);
7158 value_range_t *old_vr = get_value_range (use_lhs);
7159 if (old_vr->type != new_vr.type
7160 || !vrp_operand_equal_p (old_vr->min, new_vr.min)
7161 || !vrp_operand_equal_p (old_vr->max, new_vr.max)
7162 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
7163 res = SSA_PROP_INTERESTING;
7164 else
7165 res = SSA_PROP_NOT_INTERESTING;
7166 BITMAP_FREE (new_vr.equiv);
7167 if (res == SSA_PROP_INTERESTING)
7169 *output_p = lhs;
7170 return res;
7174 return res;
7176 break;
7177 default:
7178 break;
7181 /* Every other statement produces no useful ranges. */
7182 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
7183 set_value_range_to_varying (get_value_range (def));
7185 return SSA_PROP_VARYING;
7188 /* Helper that gets the value range of the SSA_NAME with version I,
7189 or, if that value range is varying or undefined, a symbolic range
7190 containing just the SSA_NAME itself. */
7192 static inline value_range_t
7193 get_vr_for_comparison (int i)
7195 value_range_t vr = *get_value_range (ssa_name (i));
7197 /* If name N_i does not have a valid range, use N_i as its own
7198 range. This allows us to compare against names that may
7199 have N_i in their ranges. */
7200 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
7202 vr.type = VR_RANGE;
7203 vr.min = ssa_name (i);
7204 vr.max = ssa_name (i);
7207 return vr;
7210 /* Compare all the value ranges for names equivalent to VAR with VAL
7211 using comparison code COMP. Return the same value returned by
7212 compare_range_with_value, including the setting of
7213 *STRICT_OVERFLOW_P. */
7215 static tree
7216 compare_name_with_value (enum tree_code comp, tree var, tree val,
7217 bool *strict_overflow_p)
7219 bitmap_iterator bi;
7220 unsigned i;
7221 bitmap e;
7222 tree retval, t;
7223 int used_strict_overflow;
7224 bool sop;
7225 value_range_t equiv_vr;
7227 /* Get the set of equivalences for VAR. */
7228 e = get_value_range (var)->equiv;
7230 /* Start at -1. Set it to 0 if we do a comparison without relying
7231 on overflow, or 1 if all comparisons rely on overflow. */
7232 used_strict_overflow = -1;
7234 /* Compare vars' value range with val. */
7235 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
7236 sop = false;
7237 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
7238 if (retval)
7239 used_strict_overflow = sop ? 1 : 0;
7241 /* If the equiv set is empty we have done all work we need to do. */
7242 if (e == NULL)
7244 if (retval
7245 && used_strict_overflow > 0)
7246 *strict_overflow_p = true;
7247 return retval;
7250 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
7252 equiv_vr = get_vr_for_comparison (i);
7253 sop = false;
7254 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
7255 if (t)
7257 /* If we get different answers from different members
7258 of the equivalence set this check must be in a dead
7259 code region. Folding it to a trap representation
7260 would be correct here. For now just return don't-know. */
7261 if (retval != NULL
7262 && t != retval)
7264 retval = NULL_TREE;
7265 break;
7267 retval = t;
7269 if (!sop)
7270 used_strict_overflow = 0;
7271 else if (used_strict_overflow < 0)
7272 used_strict_overflow = 1;
7276 if (retval
7277 && used_strict_overflow > 0)
7278 *strict_overflow_p = true;
7280 return retval;
7284 /* Given a comparison code COMP and names N1 and N2, compare all the
7285 ranges equivalent to N1 against all the ranges equivalent to N2
7286 to determine the value of N1 COMP N2. Return the same value
7287 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
7288 whether we relied on an overflow infinity in the comparison. */
7291 static tree
7292 compare_names (enum tree_code comp, tree n1, tree n2,
7293 bool *strict_overflow_p)
7295 tree t, retval;
7296 bitmap e1, e2;
7297 bitmap_iterator bi1, bi2;
7298 unsigned i1, i2;
7299 int used_strict_overflow;
7300 static bitmap_obstack *s_obstack = NULL;
7301 static bitmap s_e1 = NULL, s_e2 = NULL;
7303 /* Compare the ranges of every name equivalent to N1 against the
7304 ranges of every name equivalent to N2. */
7305 e1 = get_value_range (n1)->equiv;
7306 e2 = get_value_range (n2)->equiv;
7308 /* Use the fake bitmaps if e1 or e2 are not available. */
7309 if (s_obstack == NULL)
7311 s_obstack = XNEW (bitmap_obstack);
7312 bitmap_obstack_initialize (s_obstack);
7313 s_e1 = BITMAP_ALLOC (s_obstack);
7314 s_e2 = BITMAP_ALLOC (s_obstack);
7316 if (e1 == NULL)
7317 e1 = s_e1;
7318 if (e2 == NULL)
7319 e2 = s_e2;
7321 /* Add N1 and N2 to their own set of equivalences to avoid
7322 duplicating the body of the loop just to check N1 and N2
7323 ranges. */
7324 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
7325 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
7327 /* If the equivalence sets have a common intersection, then the two
7328 names can be compared without checking their ranges. */
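  /* E.g. (illustrative): if N1 and N2 are both equivalent to some
     common name x_1 (say both were created by ASSERT_EXPRs on x_1),
     they must hold the same value, so EQ_EXPR, GE_EXPR and LE_EXPR
     fold to true and every other comparison folds to false.  */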
7329 if (bitmap_intersect_p (e1, e2))
7331 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7332 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7334 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
7335 ? boolean_true_node
7336 : boolean_false_node;
7339 /* Start at -1. Set it to 0 if we do a comparison without relying
7340 on overflow, or 1 if all comparisons rely on overflow. */
7341 used_strict_overflow = -1;
7343 /* Otherwise, compare all the equivalent ranges. N1 and N2 were
7344 already added to their own equivalence sets above, so their own
7345 ranges are covered by the loop as well. */
7346 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
7348 value_range_t vr1 = get_vr_for_comparison (i1);
7350 t = retval = NULL_TREE;
7351 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
7353 bool sop = false;
7355 value_range_t vr2 = get_vr_for_comparison (i2);
7357 t = compare_ranges (comp, &vr1, &vr2, &sop);
7358 if (t)
7360 /* If we get different answers from different members
7361 of the equivalence set this check must be in a dead
7362 code region. Folding it to a trap representation
7363 would be correct here. For now just return don't-know. */
7364 if (retval != NULL
7365 && t != retval)
7367 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7368 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7369 return NULL_TREE;
7371 retval = t;
7373 if (!sop)
7374 used_strict_overflow = 0;
7375 else if (used_strict_overflow < 0)
7376 used_strict_overflow = 1;
7380 if (retval)
7382 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7383 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7384 if (used_strict_overflow > 0)
7385 *strict_overflow_p = true;
7386 return retval;
7390 /* None of the equivalent ranges are useful in computing this
7391 comparison. */
7392 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7393 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7394 return NULL_TREE;
7397 /* Helper function for vrp_evaluate_conditional_warnv & other
7398 optimizers. */
7400 static tree
7401 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
7402 tree op0, tree op1,
7403 bool * strict_overflow_p)
7405 value_range_t *vr0, *vr1;
7407 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
7408 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
7410 tree res = NULL_TREE;
7411 if (vr0 && vr1)
7412 res = compare_ranges (code, vr0, vr1, strict_overflow_p);
7413 if (!res && vr0)
7414 res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
7415 if (!res && vr1)
7416 res = (compare_range_with_value
7417 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
7418 return res;
7421 /* Helper function for vrp_evaluate_conditional_warnv. */
7423 static tree
7424 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
7425 tree op1, bool use_equiv_p,
7426 bool *strict_overflow_p, bool *only_ranges)
7428 tree ret;
7429 if (only_ranges)
7430 *only_ranges = true;
7432 /* We only deal with integral and pointer types. */
7433 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
7434 && !POINTER_TYPE_P (TREE_TYPE (op0)))
7435 return NULL_TREE;
7437 if (use_equiv_p)
7439 if (only_ranges
7440 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
7441 (code, op0, op1, strict_overflow_p)))
7442 return ret;
7443 *only_ranges = false;
7444 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
7445 return compare_names (code, op0, op1, strict_overflow_p);
7446 else if (TREE_CODE (op0) == SSA_NAME)
7447 return compare_name_with_value (code, op0, op1, strict_overflow_p);
7448 else if (TREE_CODE (op1) == SSA_NAME)
7449 return (compare_name_with_value
7450 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
7452 else
7453 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
7454 strict_overflow_p);
7455 return NULL_TREE;
7458 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
7459 information. Return NULL if the conditional cannot be evaluated.
7460 The ranges of all the names equivalent with the operands in COND
7461 will be used when trying to compute the value. If the result is
7462 based on undefined signed overflow, issue a warning if
7463 appropriate. */
7465 static tree
7466 vrp_evaluate_conditional (tree_code code, tree op0, tree op1, gimple *stmt)
7468 bool sop;
7469 tree ret;
7470 bool only_ranges;
7472 /* Some passes and foldings leak constants with overflow flag set
7473 into the IL. Avoid doing wrong things with these and bail out. */
7474 if ((TREE_CODE (op0) == INTEGER_CST
7475 && TREE_OVERFLOW (op0))
7476 || (TREE_CODE (op1) == INTEGER_CST
7477 && TREE_OVERFLOW (op1)))
7478 return NULL_TREE;
7480 sop = false;
7481 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
7482 &only_ranges);
7484 if (ret && sop)
7486 enum warn_strict_overflow_code wc;
7487 const char* warnmsg;
7489 if (is_gimple_min_invariant (ret))
7491 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
7492 warnmsg = G_("assuming signed overflow does not occur when "
7493 "simplifying conditional to constant");
7495 else
7497 wc = WARN_STRICT_OVERFLOW_COMPARISON;
7498 warnmsg = G_("assuming signed overflow does not occur when "
7499 "simplifying conditional");
7502 if (issue_strict_overflow_warning (wc))
7504 location_t location;
7506 if (!gimple_has_location (stmt))
7507 location = input_location;
7508 else
7509 location = gimple_location (stmt);
7510 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
7514 if (warn_type_limits
7515 && ret && only_ranges
7516 && TREE_CODE_CLASS (code) == tcc_comparison
7517 && TREE_CODE (op0) == SSA_NAME)
7519 /* If the comparison is being folded and the operand on the LHS
7520 is being compared against a constant value that is outside of
7521 the natural range of OP0's type, then the predicate will
7522 always fold regardless of the value of OP0. If -Wtype-limits
7523 was specified, emit a warning. */
7524 tree type = TREE_TYPE (op0);
7525 value_range_t *vr0 = get_value_range (op0);
7527 if (vr0->type == VR_RANGE
7528 && INTEGRAL_TYPE_P (type)
7529 && vrp_val_is_min (vr0->min)
7530 && vrp_val_is_max (vr0->max)
7531 && is_gimple_min_invariant (op1))
7533 location_t location;
7535 if (!gimple_has_location (stmt))
7536 location = input_location;
7537 else
7538 location = gimple_location (stmt);
7540 warning_at (location, OPT_Wtype_limits,
7541 integer_zerop (ret)
7542 ? G_("comparison always false "
7543 "due to limited range of data type")
7544 : G_("comparison always true "
7545 "due to limited range of data type"));
7549 return ret;
7553 /* Visit conditional statement STMT. If we can determine which edge
7554 will be taken out of STMT's basic block, record it in
7555 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
7556 SSA_PROP_VARYING. */
7558 static enum ssa_prop_result
7559 vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
7561 tree val;
7562 bool sop;
7564 *taken_edge_p = NULL;
7566 if (dump_file && (dump_flags & TDF_DETAILS))
7568 tree use;
7569 ssa_op_iter i;
7571 fprintf (dump_file, "\nVisiting conditional with predicate: ");
7572 print_gimple_stmt (dump_file, stmt, 0, 0);
7573 fprintf (dump_file, "\nWith known ranges\n");
7575 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
7577 fprintf (dump_file, "\t");
7578 print_generic_expr (dump_file, use, 0);
7579 fprintf (dump_file, ": ");
7580 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
7583 fprintf (dump_file, "\n");
7586 /* Compute the value of the predicate COND by checking the known
7587 ranges of each of its operands.
7589 Note that we cannot evaluate all the equivalent ranges here
7590 because those ranges may not yet be final and with the current
7591 propagation strategy, we cannot determine when the value ranges
7592 of the names in the equivalence set have changed.
7594 For instance, given the following code fragment
7596 i_5 = PHI <8, i_13>
7598 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
7599 if (i_14 == 1)
7602 Assume that on the first visit to i_14, i_5 has the temporary
7603 range [8, 8] because the second argument to the PHI function is
7604 not yet executable. We derive the range ~[0, 0] for i_14 and the
7605 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
7606 the first time, since i_14 is equivalent to the range [8, 8], we
7607 determine that the predicate is always false.
7609 On the next round of propagation, i_13 is determined to be
7610 VARYING, which causes i_5 to drop down to VARYING. So, another
7611 visit to i_14 is scheduled. In this second visit, we compute the
7612 exact same range and equivalence set for i_14, namely ~[0, 0] and
7613 { i_5 }. But we did not have the previous range for i_5
7614 registered, so vrp_visit_assignment thinks that the range for
7615 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
7616 is not visited again, which stops propagation from visiting
7617 statements in the THEN clause of that if().
7619 To properly fix this we would need to keep the previous range
7620 value for the names in the equivalence set. This way we would've
7621 discovered that from one visit to the other i_5 changed from
7622 range [8, 8] to VR_VARYING.
7624 However, fixing this apparent limitation may not be worth the
7625 additional checking. Testing on several code bases (GCC, DLV,
7626 MICO, TRAMP3D and SPEC2000) showed that doing this results in
7627 4 more predicates folded in SPEC. */
7628 sop = false;
7630 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
7631 gimple_cond_lhs (stmt),
7632 gimple_cond_rhs (stmt),
7633 false, &sop, NULL);
7634 if (val)
7636 if (!sop)
7637 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
7638 else
7640 if (dump_file && (dump_flags & TDF_DETAILS))
7641 fprintf (dump_file,
7642 "\nIgnoring predicate evaluation because "
7643 "it assumes that signed overflow is undefined");
7644 val = NULL_TREE;
7648 if (dump_file && (dump_flags & TDF_DETAILS))
7650 fprintf (dump_file, "\nPredicate evaluates to: ");
7651 if (val == NULL_TREE)
7652 fprintf (dump_file, "DON'T KNOW\n");
7653 else
7654 print_generic_stmt (dump_file, val, 0);
7657 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
7660 /* Searches the case label vector of the switch statement STMT for the index
7661 *IDX of the CASE_LABEL that includes the value VAL. The search is restricted
7662 to the range [START_IDX, n - 1] where n is the number of case labels.
7664 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
7665 returned.
7667 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
7668 it is placed in IDX and false is returned.
7670 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
7671 returned. */
7673 static bool
7674 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
7676 size_t n = gimple_switch_num_labels (stmt);
7677 size_t low, high;
7679 /* Find case label for minimum of the value range or the next one.
7680 At each iteration we are searching in [low, high - 1]. */
7682 for (low = start_idx, high = n; high != low; )
7684 tree t;
7685 int cmp;
7686 /* Note that i != high, so we never ask for n. */
7687 size_t i = (high + low) / 2;
7688 t = gimple_switch_label (stmt, i);
7690 /* Cache the result of comparing CASE_LOW and val. */
7691 cmp = tree_int_cst_compare (CASE_LOW (t), val);
7693 if (cmp == 0)
7695 /* Ranges cannot be empty. */
7696 *idx = i;
7697 return true;
7699 else if (cmp > 0)
7700 high = i;
7701 else
7703 low = i + 1;
7704 if (CASE_HIGH (t) != NULL
7705 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
7707 *idx = i;
7708 return true;
7713 *idx = high;
7714 return false;
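/* A hypothetical example, assuming a switch whose non-default labels are
   1, 5 ... 7 and 10 (indices 1 to 3, index 0 being the default label):
   looking up the value 6 returns true with *IDX = 2 because 6 falls into
   the range case 5 ... 7; looking up 8 returns false with *IDX = 3, the
   next larger label; and looking up 11 returns false with *IDX = 4,
   i.e. the number of labels.  */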
7717 /* Searches the case label vector of STMT for the range of CASE_LABELs that is used
7718 for values between MIN and MAX. The first index is placed in MIN_IDX. The
7719 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
7720 then MAX_IDX < MIN_IDX.
7721 Returns true if the default label is not needed. */
7723 static bool
7724 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
7725 size_t *max_idx)
7727 size_t i, j;
7728 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
7729 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
7731 if (i == j
7732 && min_take_default
7733 && max_take_default)
7735 /* Only the default case label is reached.
7736 Return an empty range. */
7737 *min_idx = 1;
7738 *max_idx = 0;
7739 return false;
7741 else
7743 bool take_default = min_take_default || max_take_default;
7744 tree low, high;
7745 size_t k;
7747 if (max_take_default)
7748 j--;
7750 /* If the case label range is continuous, we do not need
7751 the default case label. Verify that. */
7752 high = CASE_LOW (gimple_switch_label (stmt, i));
7753 if (CASE_HIGH (gimple_switch_label (stmt, i)))
7754 high = CASE_HIGH (gimple_switch_label (stmt, i));
7755 for (k = i + 1; k <= j; ++k)
7757 low = CASE_LOW (gimple_switch_label (stmt, k));
7758 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
7760 take_default = true;
7761 break;
7763 high = low;
7764 if (CASE_HIGH (gimple_switch_label (stmt, k)))
7765 high = CASE_HIGH (gimple_switch_label (stmt, k));
7768 *min_idx = i;
7769 *max_idx = j;
7770 return !take_default;
7774 /* Searches the case label vector of STMT for the ranges of CASE_LABELs that are
7775 used in range VR. The indices are placed in MIN_IDX1, MAX_IDX1, MIN_IDX2 and
7776 MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
7777 Returns true if the default label is not needed. */
7779 static bool
7780 find_case_label_ranges (gswitch *stmt, value_range_t *vr, size_t *min_idx1,
7781 size_t *max_idx1, size_t *min_idx2,
7782 size_t *max_idx2)
7784 size_t i, j, k, l;
7785 unsigned int n = gimple_switch_num_labels (stmt);
7786 bool take_default;
7787 tree case_low, case_high;
7788 tree min = vr->min, max = vr->max;
7790 gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
7792 take_default = !find_case_label_range (stmt, min, max, &i, &j);
7794 /* Set second range to empty. */
7795 *min_idx2 = 1;
7796 *max_idx2 = 0;
7798 if (vr->type == VR_RANGE)
7800 *min_idx1 = i;
7801 *max_idx1 = j;
7802 return !take_default;
7805 /* Set first range to all case labels. */
7806 *min_idx1 = 1;
7807 *max_idx1 = n - 1;
7809 if (i > j)
7810 return false;
7812 /* Make sure all the values of case labels [i , j] are contained in
7813 range [MIN, MAX]. */
7814 case_low = CASE_LOW (gimple_switch_label (stmt, i));
7815 case_high = CASE_HIGH (gimple_switch_label (stmt, j));
7816 if (tree_int_cst_compare (case_low, min) < 0)
7817 i += 1;
7818 if (case_high != NULL_TREE
7819 && tree_int_cst_compare (max, case_high) < 0)
7820 j -= 1;
7822 if (i > j)
7823 return false;
7825 /* If the range spans case labels [i, j], the corresponding anti-range spans
7826 the labels [1, i - 1] and [j + 1, n - 1]. */
7827 k = j + 1;
7828 l = n - 1;
7829 if (k > l)
7831 k = 1;
7832 l = 0;
7835 j = i - 1;
7836 i = 1;
7837 if (i > j)
7839 i = k;
7840 j = l;
7841 k = 1;
7842 l = 0;
7845 *min_idx1 = i;
7846 *max_idx1 = j;
7847 *min_idx2 = k;
7848 *max_idx2 = l;
7849 return false;
7852 /* Visit switch statement STMT. If we can determine which edge
7853 will be taken out of STMT's basic block, record it in
7854 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
7855 SSA_PROP_VARYING. */
7857 static enum ssa_prop_result
7858 vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
7860 tree op, val;
7861 value_range_t *vr;
7862 size_t i = 0, j = 0, k, l;
7863 bool take_default;
7865 *taken_edge_p = NULL;
7866 op = gimple_switch_index (stmt);
7867 if (TREE_CODE (op) != SSA_NAME)
7868 return SSA_PROP_VARYING;
7870 vr = get_value_range (op);
7871 if (dump_file && (dump_flags & TDF_DETAILS))
7873 fprintf (dump_file, "\nVisiting switch expression with operand ");
7874 print_generic_expr (dump_file, op, 0);
7875 fprintf (dump_file, " with known range ");
7876 dump_value_range (dump_file, vr);
7877 fprintf (dump_file, "\n");
7880 if ((vr->type != VR_RANGE
7881 && vr->type != VR_ANTI_RANGE)
7882 || symbolic_range_p (vr))
7883 return SSA_PROP_VARYING;
7885 /* Find the single edge that is taken from the switch expression. */
7886 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
7888 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
7889 label. */
7890 if (j < i)
7892 gcc_assert (take_default);
7893 val = gimple_switch_default_label (stmt);
7895 else
7897 /* Check if labels with index i to j and maybe the default label
7898 are all reaching the same label. */
7900 val = gimple_switch_label (stmt, i);
7901 if (take_default
7902 && CASE_LABEL (gimple_switch_default_label (stmt))
7903 != CASE_LABEL (val))
7905 if (dump_file && (dump_flags & TDF_DETAILS))
7906 fprintf (dump_file, " not a single destination for this "
7907 "range\n");
7908 return SSA_PROP_VARYING;
7910 for (++i; i <= j; ++i)
7912 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
7914 if (dump_file && (dump_flags & TDF_DETAILS))
7915 fprintf (dump_file, " not a single destination for this "
7916 "range\n");
7917 return SSA_PROP_VARYING;
7920 for (; k <= l; ++k)
7922 if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
7924 if (dump_file && (dump_flags & TDF_DETAILS))
7925 fprintf (dump_file, " not a single destination for this "
7926 "range\n");
7927 return SSA_PROP_VARYING;
7932 *taken_edge_p = find_edge (gimple_bb (stmt),
7933 label_to_block (CASE_LABEL (val)));
7935 if (dump_file && (dump_flags & TDF_DETAILS))
7937 fprintf (dump_file, " will take edge to ");
7938 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
7941 return SSA_PROP_INTERESTING;
7945 /* Evaluate statement STMT. If the statement produces a useful range,
7946 return SSA_PROP_INTERESTING and record the SSA name with the
7947 interesting range into *OUTPUT_P.
7949 If STMT is a conditional branch and we can determine its truth
7950 value, the taken edge is recorded in *TAKEN_EDGE_P.
7952 If STMT produces a varying value, return SSA_PROP_VARYING. */
7954 static enum ssa_prop_result
7955 vrp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
7957 tree def;
7958 ssa_op_iter iter;
7960 if (dump_file && (dump_flags & TDF_DETAILS))
7962 fprintf (dump_file, "\nVisiting statement:\n");
7963 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
7966 if (!stmt_interesting_for_vrp (stmt))
7967 gcc_assert (stmt_ends_bb_p (stmt));
7968 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7969 return vrp_visit_assignment_or_call (stmt, output_p);
7970 else if (gimple_code (stmt) == GIMPLE_COND)
7971 return vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
7972 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7973 return vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
7975 /* All other statements produce nothing of interest for VRP, so mark
7976 their outputs varying and prevent further simulation. */
7977 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
7978 set_value_range_to_varying (get_value_range (def));
7980 return SSA_PROP_VARYING;
7983 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7984 { VR1TYPE, VR1MIN, VR1MAX } and store the result
7985 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
7986 possible such range. The resulting range is not canonicalized. */
7988 static void
7989 union_ranges (enum value_range_type *vr0type,
7990 tree *vr0min, tree *vr0max,
7991 enum value_range_type vr1type,
7992 tree vr1min, tree vr1max)
7994 bool mineq = operand_equal_p (*vr0min, vr1min, 0);
7995 bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
7997 /* [] is vr0, () is vr1 in the following classification comments. */
7998 if (mineq && maxeq)
8000 /* [( )] */
8001 if (*vr0type == vr1type)
8002 /* Nothing to do for equal ranges. */
8004 else if ((*vr0type == VR_RANGE
8005 && vr1type == VR_ANTI_RANGE)
8006 || (*vr0type == VR_ANTI_RANGE
8007 && vr1type == VR_RANGE))
8009 /* For anti-range with range union the result is varying. */
8010 goto give_up;
8012 else
8013 gcc_unreachable ();
8015 else if (operand_less_p (*vr0max, vr1min) == 1
8016 || operand_less_p (vr1max, *vr0min) == 1)
8018 /* [ ] ( ) or ( ) [ ]
8019 If the ranges have an empty intersection, the result of the union
8020 operation is the anti-range; if both are anti-ranges
8021 the union covers all values. */
8022 if (*vr0type == VR_ANTI_RANGE
8023 && vr1type == VR_ANTI_RANGE)
8024 goto give_up;
8025 else if (*vr0type == VR_ANTI_RANGE
8026 && vr1type == VR_RANGE)
8028 else if (*vr0type == VR_RANGE
8029 && vr1type == VR_ANTI_RANGE)
8031 *vr0type = vr1type;
8032 *vr0min = vr1min;
8033 *vr0max = vr1max;
8035 else if (*vr0type == VR_RANGE
8036 && vr1type == VR_RANGE)
8038 /* The result is the convex hull of both ranges. */
8039 if (operand_less_p (*vr0max, vr1min) == 1)
8041 /* If the result can be an anti-range, create one. */
8042 if (TREE_CODE (*vr0max) == INTEGER_CST
8043 && TREE_CODE (vr1min) == INTEGER_CST
8044 && vrp_val_is_min (*vr0min)
8045 && vrp_val_is_max (vr1max))
8047 tree min = int_const_binop (PLUS_EXPR,
8048 *vr0max,
8049 build_int_cst (TREE_TYPE (*vr0max), 1));
8050 tree max = int_const_binop (MINUS_EXPR,
8051 vr1min,
8052 build_int_cst (TREE_TYPE (vr1min), 1));
8053 if (!operand_less_p (max, min))
8055 *vr0type = VR_ANTI_RANGE;
8056 *vr0min = min;
8057 *vr0max = max;
8059 else
8060 *vr0max = vr1max;
8062 else
8063 *vr0max = vr1max;
8065 else
8067 /* If the result can be an anti-range, create one. */
8068 if (TREE_CODE (vr1max) == INTEGER_CST
8069 && TREE_CODE (*vr0min) == INTEGER_CST
8070 && vrp_val_is_min (vr1min)
8071 && vrp_val_is_max (*vr0max))
8073 tree min = int_const_binop (PLUS_EXPR,
8074 vr1max,
8075 build_int_cst (TREE_TYPE (vr1max), 1));
8076 tree max = int_const_binop (MINUS_EXPR,
8077 *vr0min,
8078 build_int_cst (TREE_TYPE (*vr0min), 1));
8079 if (!operand_less_p (max, min))
8081 *vr0type = VR_ANTI_RANGE;
8082 *vr0min = min;
8083 *vr0max = max;
8085 else
8086 *vr0min = vr1min;
8088 else
8089 *vr0min = vr1min;
8092 else
8093 gcc_unreachable ();
8095 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
8096 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
8098 /* [ ( ) ] or [( ) ] or [ ( )] */
8099 if (*vr0type == VR_RANGE
8100 && vr1type == VR_RANGE)
8102 else if (*vr0type == VR_ANTI_RANGE
8103 && vr1type == VR_ANTI_RANGE)
8105 *vr0type = vr1type;
8106 *vr0min = vr1min;
8107 *vr0max = vr1max;
8109 else if (*vr0type == VR_ANTI_RANGE
8110 && vr1type == VR_RANGE)
8112 /* Arbitrarily choose the right or left gap. */
8113 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
8114 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8115 build_int_cst (TREE_TYPE (vr1min), 1));
8116 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
8117 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8118 build_int_cst (TREE_TYPE (vr1max), 1));
8119 else
8120 goto give_up;
8122 else if (*vr0type == VR_RANGE
8123 && vr1type == VR_ANTI_RANGE)
8124 /* The result covers everything. */
8125 goto give_up;
8126 else
8127 gcc_unreachable ();
8129 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
8130 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
8132 /* ( [ ] ) or ([ ] ) or ( [ ]) */
8133 if (*vr0type == VR_RANGE
8134 && vr1type == VR_RANGE)
8136 *vr0type = vr1type;
8137 *vr0min = vr1min;
8138 *vr0max = vr1max;
8140 else if (*vr0type == VR_ANTI_RANGE
8141 && vr1type == VR_ANTI_RANGE)
8143 else if (*vr0type == VR_RANGE
8144 && vr1type == VR_ANTI_RANGE)
8146 *vr0type = VR_ANTI_RANGE;
8147 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
8149 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8150 build_int_cst (TREE_TYPE (*vr0min), 1));
8151 *vr0min = vr1min;
8153 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
8155 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8156 build_int_cst (TREE_TYPE (*vr0max), 1));
8157 *vr0max = vr1max;
8159 else
8160 goto give_up;
8162 else if (*vr0type == VR_ANTI_RANGE
8163 && vr1type == VR_RANGE)
8164 /* The result covers everything. */
8165 goto give_up;
8166 else
8167 gcc_unreachable ();
8169 else if ((operand_less_p (vr1min, *vr0max) == 1
8170 || operand_equal_p (vr1min, *vr0max, 0))
8171 && operand_less_p (*vr0min, vr1min) == 1
8172 && operand_less_p (*vr0max, vr1max) == 1)
8174 /* [ ( ] ) or [ ]( ) */
8175 if (*vr0type == VR_RANGE
8176 && vr1type == VR_RANGE)
8177 *vr0max = vr1max;
8178 else if (*vr0type == VR_ANTI_RANGE
8179 && vr1type == VR_ANTI_RANGE)
8180 *vr0min = vr1min;
8181 else if (*vr0type == VR_ANTI_RANGE
8182 && vr1type == VR_RANGE)
8184 if (TREE_CODE (vr1min) == INTEGER_CST)
8185 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8186 build_int_cst (TREE_TYPE (vr1min), 1));
8187 else
8188 goto give_up;
8190 else if (*vr0type == VR_RANGE
8191 && vr1type == VR_ANTI_RANGE)
8193 if (TREE_CODE (*vr0max) == INTEGER_CST)
8195 *vr0type = vr1type;
8196 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8197 build_int_cst (TREE_TYPE (*vr0max), 1));
8198 *vr0max = vr1max;
8200 else
8201 goto give_up;
8203 else
8204 gcc_unreachable ();
8206 else if ((operand_less_p (*vr0min, vr1max) == 1
8207 || operand_equal_p (*vr0min, vr1max, 0))
8208 && operand_less_p (vr1min, *vr0min) == 1
8209 && operand_less_p (vr1max, *vr0max) == 1)
8211 /* ( [ ) ] or ( )[ ] */
8212 if (*vr0type == VR_RANGE
8213 && vr1type == VR_RANGE)
8214 *vr0min = vr1min;
8215 else if (*vr0type == VR_ANTI_RANGE
8216 && vr1type == VR_ANTI_RANGE)
8217 *vr0max = vr1max;
8218 else if (*vr0type == VR_ANTI_RANGE
8219 && vr1type == VR_RANGE)
8221 if (TREE_CODE (vr1max) == INTEGER_CST)
8222 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8223 build_int_cst (TREE_TYPE (vr1max), 1));
8224 else
8225 goto give_up;
8227 else if (*vr0type == VR_RANGE
8228 && vr1type == VR_ANTI_RANGE)
8230 if (TREE_CODE (*vr0min) == INTEGER_CST)
8232 *vr0type = vr1type;
8233 *vr0min = vr1min;
8234 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8235 build_int_cst (TREE_TYPE (*vr0min), 1));
8237 else
8238 goto give_up;
8240 else
8241 gcc_unreachable ();
8243 else
8244 goto give_up;
8246 return;
8248 give_up:
8249 *vr0type = VR_VARYING;
8250 *vr0min = NULL_TREE;
8251 *vr0max = NULL_TREE;
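/* Illustrative sketch of the convex-hull case above, assuming 32-bit int
   constants; the snippet is intentionally left un-compiled.  Unioning the
   disjoint ranges [1, 5] and [10, 20] cannot produce an anti-range because
   neither bound touches the extreme values of the type, so the result is
   the convex hull [1, 20].  */
#if 0
static void
union_ranges_example (void)
{
  enum value_range_type type = VR_RANGE;
  tree min = build_int_cst (integer_type_node, 1);
  tree max = build_int_cst (integer_type_node, 5);
  /* Union [1, 5] with [10, 20].  */
  union_ranges (&type, &min, &max, VR_RANGE,
		build_int_cst (integer_type_node, 10),
		build_int_cst (integer_type_node, 20));
  /* Now TYPE == VR_RANGE and [MIN, MAX] == [1, 20].  */
}
#endif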
8254 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
8255 { VR1TYPE, VR1MIN, VR1MAX } and store the result
8256 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
8257 possible such range. The resulting range is not canonicalized. */
8259 static void
8260 intersect_ranges (enum value_range_type *vr0type,
8261 tree *vr0min, tree *vr0max,
8262 enum value_range_type vr1type,
8263 tree vr1min, tree vr1max)
8265 bool mineq = operand_equal_p (*vr0min, vr1min, 0);
8266 bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
8268 /* [] is vr0, () is vr1 in the following classification comments. */
8269 if (mineq && maxeq)
8271 /* [( )] */
8272 if (*vr0type == vr1type)
8273 /* Nothing to do for equal ranges. */
8275 else if ((*vr0type == VR_RANGE
8276 && vr1type == VR_ANTI_RANGE)
8277 || (*vr0type == VR_ANTI_RANGE
8278 && vr1type == VR_RANGE))
8280 /* For anti-range with range intersection the result is empty. */
8281 *vr0type = VR_UNDEFINED;
8282 *vr0min = NULL_TREE;
8283 *vr0max = NULL_TREE;
8285 else
8286 gcc_unreachable ();
8288 else if (operand_less_p (*vr0max, vr1min) == 1
8289 || operand_less_p (vr1max, *vr0min) == 1)
8291 /* [ ] ( ) or ( ) [ ]
8292 If the ranges have an empty intersection, the result of the
8293 intersect operation is the plain range when intersecting a range
8294 with an anti-range, or empty when intersecting two ranges. */
8295 if (*vr0type == VR_RANGE
8296 && vr1type == VR_ANTI_RANGE)
8298 else if (*vr0type == VR_ANTI_RANGE
8299 && vr1type == VR_RANGE)
8301 *vr0type = vr1type;
8302 *vr0min = vr1min;
8303 *vr0max = vr1max;
8305 else if (*vr0type == VR_RANGE
8306 && vr1type == VR_RANGE)
8308 *vr0type = VR_UNDEFINED;
8309 *vr0min = NULL_TREE;
8310 *vr0max = NULL_TREE;
8312 else if (*vr0type == VR_ANTI_RANGE
8313 && vr1type == VR_ANTI_RANGE)
8315 /* If the anti-ranges are adjacent to each other merge them. */
8316 if (TREE_CODE (*vr0max) == INTEGER_CST
8317 && TREE_CODE (vr1min) == INTEGER_CST
8318 && operand_less_p (*vr0max, vr1min) == 1
8319 && integer_onep (int_const_binop (MINUS_EXPR,
8320 vr1min, *vr0max)))
8321 *vr0max = vr1max;
8322 else if (TREE_CODE (vr1max) == INTEGER_CST
8323 && TREE_CODE (*vr0min) == INTEGER_CST
8324 && operand_less_p (vr1max, *vr0min) == 1
8325 && integer_onep (int_const_binop (MINUS_EXPR,
8326 *vr0min, vr1max)))
8327 *vr0min = vr1min;
8328 /* Else arbitrarily take VR0. */
8331 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
8332 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
8334 /* [ ( ) ] or [( ) ] or [ ( )] */
8335 if (*vr0type == VR_RANGE
8336 && vr1type == VR_RANGE)
8338 /* If both are ranges the result is the inner one. */
8339 *vr0type = vr1type;
8340 *vr0min = vr1min;
8341 *vr0max = vr1max;
8343 else if (*vr0type == VR_RANGE
8344 && vr1type == VR_ANTI_RANGE)
8346 /* Choose the right gap if the left one is empty. */
8347 if (mineq)
8349 if (TREE_CODE (vr1max) == INTEGER_CST)
8350 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8351 build_int_cst (TREE_TYPE (vr1max), 1));
8352 else
8353 *vr0min = vr1max;
8355 /* Choose the left gap if the right one is empty. */
8356 else if (maxeq)
8358 if (TREE_CODE (vr1min) == INTEGER_CST)
8359 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8360 build_int_cst (TREE_TYPE (vr1min), 1));
8361 else
8362 *vr0max = vr1min;
8364 /* Choose the anti-range if the range is effectively varying. */
8365 else if (vrp_val_is_min (*vr0min)
8366 && vrp_val_is_max (*vr0max))
8368 *vr0type = vr1type;
8369 *vr0min = vr1min;
8370 *vr0max = vr1max;
8372 /* Else choose the range. */
8374 else if (*vr0type == VR_ANTI_RANGE
8375 && vr1type == VR_ANTI_RANGE)
8376 /* If both are anti-ranges the result is the outer one. */
8378 else if (*vr0type == VR_ANTI_RANGE
8379 && vr1type == VR_RANGE)
8381 /* The intersection is empty. */
8382 *vr0type = VR_UNDEFINED;
8383 *vr0min = NULL_TREE;
8384 *vr0max = NULL_TREE;
8386 else
8387 gcc_unreachable ();
8389 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
8390 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
8392 /* ( [ ] ) or ([ ] ) or ( [ ]) */
8393 if (*vr0type == VR_RANGE
8394 && vr1type == VR_RANGE)
8395 /* Choose the inner range. */
8397 else if (*vr0type == VR_ANTI_RANGE
8398 && vr1type == VR_RANGE)
8400 /* Choose the right gap if the left is empty. */
8401 if (mineq)
8403 *vr0type = VR_RANGE;
8404 if (TREE_CODE (*vr0max) == INTEGER_CST)
8405 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8406 build_int_cst (TREE_TYPE (*vr0max), 1));
8407 else
8408 *vr0min = *vr0max;
8409 *vr0max = vr1max;
8411 /* Choose the left gap if the right is empty. */
8412 else if (maxeq)
8414 *vr0type = VR_RANGE;
8415 if (TREE_CODE (*vr0min) == INTEGER_CST)
8416 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8417 build_int_cst (TREE_TYPE (*vr0min), 1));
8418 else
8419 *vr0max = *vr0min;
8420 *vr0min = vr1min;
8422 /* Choose the anti-range if the range is effectively varying. */
8423 else if (vrp_val_is_min (vr1min)
8424 && vrp_val_is_max (vr1max))
8426 /* Else choose the range. */
8427 else
8429 *vr0type = vr1type;
8430 *vr0min = vr1min;
8431 *vr0max = vr1max;
8434 else if (*vr0type == VR_ANTI_RANGE
8435 && vr1type == VR_ANTI_RANGE)
8437 /* If both are anti-ranges the result is the outer one. */
8438 *vr0type = vr1type;
8439 *vr0min = vr1min;
8440 *vr0max = vr1max;
8442 else if (vr1type == VR_ANTI_RANGE
8443 && *vr0type == VR_RANGE)
8445 /* The intersection is empty. */
8446 *vr0type = VR_UNDEFINED;
8447 *vr0min = NULL_TREE;
8448 *vr0max = NULL_TREE;
8450 else
8451 gcc_unreachable ();
8453 else if ((operand_less_p (vr1min, *vr0max) == 1
8454 || operand_equal_p (vr1min, *vr0max, 0))
8455 && operand_less_p (*vr0min, vr1min) == 1)
8457 /* [ ( ] ) or [ ]( ) */
8458 if (*vr0type == VR_ANTI_RANGE
8459 && vr1type == VR_ANTI_RANGE)
8460 *vr0max = vr1max;
8461 else if (*vr0type == VR_RANGE
8462 && vr1type == VR_RANGE)
8463 *vr0min = vr1min;
8464 else if (*vr0type == VR_RANGE
8465 && vr1type == VR_ANTI_RANGE)
8467 if (TREE_CODE (vr1min) == INTEGER_CST)
8468 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8469 build_int_cst (TREE_TYPE (vr1min), 1));
8470 else
8471 *vr0max = vr1min;
8473 else if (*vr0type == VR_ANTI_RANGE
8474 && vr1type == VR_RANGE)
8476 *vr0type = VR_RANGE;
8477 if (TREE_CODE (*vr0max) == INTEGER_CST)
8478 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8479 build_int_cst (TREE_TYPE (*vr0max), 1));
8480 else
8481 *vr0min = *vr0max;
8482 *vr0max = vr1max;
8484 else
8485 gcc_unreachable ();
8487 else if ((operand_less_p (*vr0min, vr1max) == 1
8488 || operand_equal_p (*vr0min, vr1max, 0))
8489 && operand_less_p (vr1min, *vr0min) == 1)
8491 /* ( [ ) ] or ( )[ ] */
8492 if (*vr0type == VR_ANTI_RANGE
8493 && vr1type == VR_ANTI_RANGE)
8494 *vr0min = vr1min;
8495 else if (*vr0type == VR_RANGE
8496 && vr1type == VR_RANGE)
8497 *vr0max = vr1max;
8498 else if (*vr0type == VR_RANGE
8499 && vr1type == VR_ANTI_RANGE)
8501 if (TREE_CODE (vr1max) == INTEGER_CST)
8502 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8503 build_int_cst (TREE_TYPE (vr1max), 1));
8504 else
8505 *vr0min = vr1max;
8507 else if (*vr0type == VR_ANTI_RANGE
8508 && vr1type == VR_RANGE)
8510 *vr0type = VR_RANGE;
8511 if (TREE_CODE (*vr0min) == INTEGER_CST)
8512 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8513 build_int_cst (TREE_TYPE (*vr0min), 1));
8514 else
8515 *vr0max = *vr0min;
8516 *vr0min = vr1min;
8518 else
8519 gcc_unreachable ();
8522 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
8523 result for the intersection. That's always a conservative
8524 correct estimate. */
8526 return;
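/* Illustrative sketch of the "empty left gap" case above, assuming 32-bit
   int constants; the snippet is intentionally left un-compiled.
   Intersecting the range [0, 100] with the anti-range ~[0, 50] keeps the
   upper bound and moves the lower bound just past the excluded values,
   yielding [51, 100].  */
#if 0
static void
intersect_ranges_example (void)
{
  enum value_range_type type = VR_RANGE;
  tree min = build_int_cst (integer_type_node, 0);
  tree max = build_int_cst (integer_type_node, 100);
  /* Intersect [0, 100] with ~[0, 50].  */
  intersect_ranges (&type, &min, &max, VR_ANTI_RANGE,
		    build_int_cst (integer_type_node, 0),
		    build_int_cst (integer_type_node, 50));
  /* Now TYPE == VR_RANGE and [MIN, MAX] == [51, 100].  */
}
#endif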
8530 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
8531 in *VR0. This may not be the smallest possible such range. */
8533 static void
8534 vrp_intersect_ranges_1 (value_range_t *vr0, value_range_t *vr1)
8536 value_range_t saved;
8538 /* If either range is VR_VARYING the other one wins. */
8539 if (vr1->type == VR_VARYING)
8540 return;
8541 if (vr0->type == VR_VARYING)
8543 copy_value_range (vr0, vr1);
8544 return;
8547 /* When either range is VR_UNDEFINED the resulting range is
8548 VR_UNDEFINED, too. */
8549 if (vr0->type == VR_UNDEFINED)
8550 return;
8551 if (vr1->type == VR_UNDEFINED)
8553 set_value_range_to_undefined (vr0);
8554 return;
8557 /* Save the original vr0 so we can return it as conservative intersection
8558 result when our worker turns things to varying. */
8559 saved = *vr0;
8560 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
8561 vr1->type, vr1->min, vr1->max);
8562 /* Make sure to canonicalize the result though as the inversion of a
8563 VR_RANGE can still be a VR_RANGE. */
8564 set_and_canonicalize_value_range (vr0, vr0->type,
8565 vr0->min, vr0->max, vr0->equiv);
8566 /* If that failed, use the saved original VR0. */
8567 if (vr0->type == VR_VARYING)
8569 *vr0 = saved;
8570 return;
8572 /* If the result is VR_UNDEFINED there is no need to mess with
8573 the equivalencies. */
8574 if (vr0->type == VR_UNDEFINED)
8575 return;
8577 /* The resulting set of equivalences for range intersection is the union of
8578 the two sets. */
8579 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8580 bitmap_ior_into (vr0->equiv, vr1->equiv);
8581 else if (vr1->equiv && !vr0->equiv)
8582 bitmap_copy (vr0->equiv, vr1->equiv);
8585 static void
8586 vrp_intersect_ranges (value_range_t *vr0, value_range_t *vr1)
8588 if (dump_file && (dump_flags & TDF_DETAILS))
8590 fprintf (dump_file, "Intersecting\n ");
8591 dump_value_range (dump_file, vr0);
8592 fprintf (dump_file, "\nand\n ");
8593 dump_value_range (dump_file, vr1);
8594 fprintf (dump_file, "\n");
8596 vrp_intersect_ranges_1 (vr0, vr1);
8597 if (dump_file && (dump_flags & TDF_DETAILS))
8599 fprintf (dump_file, "to\n ");
8600 dump_value_range (dump_file, vr0);
8601 fprintf (dump_file, "\n");
8605 /* Meet operation for value ranges. Given two value ranges VR0 and
8606 VR1, store in VR0 a range that contains both VR0 and VR1. This
8607 may not be the smallest possible such range. */
8609 static void
8610 vrp_meet_1 (value_range_t *vr0, value_range_t *vr1)
8612 value_range_t saved;
8614 if (vr0->type == VR_UNDEFINED)
8616 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
8617 return;
8620 if (vr1->type == VR_UNDEFINED)
8622 /* VR0 already has the resulting range. */
8623 return;
8626 if (vr0->type == VR_VARYING)
8628 /* Nothing to do. VR0 already has the resulting range. */
8629 return;
8632 if (vr1->type == VR_VARYING)
8634 set_value_range_to_varying (vr0);
8635 return;
8638 saved = *vr0;
8639 union_ranges (&vr0->type, &vr0->min, &vr0->max,
8640 vr1->type, vr1->min, vr1->max);
8641 if (vr0->type == VR_VARYING)
8643 /* Failed to find an efficient meet. Before giving up and setting
8644 the result to VARYING, see if we can at least derive a useful
8645 anti-range. FIXME, all this nonsense about distinguishing
8646 anti-ranges from ranges is necessary because of the odd
8647 semantics of range_includes_zero_p and friends. */
8648 if (((saved.type == VR_RANGE
8649 && range_includes_zero_p (saved.min, saved.max) == 0)
8650 || (saved.type == VR_ANTI_RANGE
8651 && range_includes_zero_p (saved.min, saved.max) == 1))
8652 && ((vr1->type == VR_RANGE
8653 && range_includes_zero_p (vr1->min, vr1->max) == 0)
8654 || (vr1->type == VR_ANTI_RANGE
8655 && range_includes_zero_p (vr1->min, vr1->max) == 1)))
8657 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
8659 /* Since this meet operation did not result from the meeting of
8660 two equivalent names, VR0 cannot have any equivalences. */
8661 if (vr0->equiv)
8662 bitmap_clear (vr0->equiv);
8663 return;
8666 set_value_range_to_varying (vr0);
8667 return;
8669 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
8670 vr0->equiv);
8671 if (vr0->type == VR_VARYING)
8672 return;
8674 /* The resulting set of equivalences is always the intersection of
8675 the two sets. */
8676 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8677 bitmap_and_into (vr0->equiv, vr1->equiv);
8678 else if (vr0->equiv && !vr1->equiv)
8679 bitmap_clear (vr0->equiv);
8682 static void
8683 vrp_meet (value_range_t *vr0, value_range_t *vr1)
8685 if (dump_file && (dump_flags & TDF_DETAILS))
8687 fprintf (dump_file, "Meeting\n ");
8688 dump_value_range (dump_file, vr0);
8689 fprintf (dump_file, "\nand\n ");
8690 dump_value_range (dump_file, vr1);
8691 fprintf (dump_file, "\n");
8693 vrp_meet_1 (vr0, vr1);
8694 if (dump_file && (dump_flags & TDF_DETAILS))
8696 fprintf (dump_file, "to\n ");
8697 dump_value_range (dump_file, vr0);
8698 fprintf (dump_file, "\n");
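/* Two hypothetical meets, assuming int operands: meeting [0, 5] with
   [10, 20] goes through the convex-hull case of union_ranges and gives
   [0, 20], intersecting the equivalence sets of the two operands; meeting
   [1, 5] with ~[0, 0] keeps the anti-range ~[0, 0], which is what lets a
   "known non-zero" property survive a PHI node.  */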
8703 /* Visit all arguments for PHI node PHI that flow through executable
8704 edges. If a valid value range can be derived from all the incoming
8705 value ranges, set a new range for the LHS of PHI. */
8707 static enum ssa_prop_result
8708 vrp_visit_phi_node (gphi *phi)
8710 size_t i;
8711 tree lhs = PHI_RESULT (phi);
8712 value_range_t *lhs_vr = get_value_range (lhs);
8713 value_range_t vr_result = VR_INITIALIZER;
8714 bool first = true;
8715 int edges, old_edges;
8716 struct loop *l;
8718 if (dump_file && (dump_flags & TDF_DETAILS))
8720 fprintf (dump_file, "\nVisiting PHI node: ");
8721 print_gimple_stmt (dump_file, phi, 0, dump_flags);
8724 edges = 0;
8725 for (i = 0; i < gimple_phi_num_args (phi); i++)
8727 edge e = gimple_phi_arg_edge (phi, i);
8729 if (dump_file && (dump_flags & TDF_DETAILS))
8731 fprintf (dump_file,
8732 " Argument #%d (%d -> %d %sexecutable)\n",
8733 (int) i, e->src->index, e->dest->index,
8734 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
8737 if (e->flags & EDGE_EXECUTABLE)
8739 tree arg = PHI_ARG_DEF (phi, i);
8740 value_range_t vr_arg;
8742 ++edges;
8744 if (TREE_CODE (arg) == SSA_NAME)
8746 vr_arg = *(get_value_range (arg));
8747 /* Do not allow equivalences or symbolic ranges to leak in from
8748 backedges. That creates invalid equivalencies.
8749 See PR53465 and PR54767. */
8750 if (e->flags & EDGE_DFS_BACK)
8752 if (vr_arg.type == VR_RANGE
8753 || vr_arg.type == VR_ANTI_RANGE)
8755 vr_arg.equiv = NULL;
8756 if (symbolic_range_p (&vr_arg))
8758 vr_arg.type = VR_VARYING;
8759 vr_arg.min = NULL_TREE;
8760 vr_arg.max = NULL_TREE;
8764 else
8766 /* If the non-backedge argument's range is VR_VARYING then
8767 we can still try recording a simple equivalence. */
8768 if (vr_arg.type == VR_VARYING)
8770 vr_arg.type = VR_RANGE;
8771 vr_arg.min = arg;
8772 vr_arg.max = arg;
8773 vr_arg.equiv = NULL;
8777 else
8779 if (TREE_OVERFLOW_P (arg))
8780 arg = drop_tree_overflow (arg);
8782 vr_arg.type = VR_RANGE;
8783 vr_arg.min = arg;
8784 vr_arg.max = arg;
8785 vr_arg.equiv = NULL;
8788 if (dump_file && (dump_flags & TDF_DETAILS))
8790 fprintf (dump_file, "\t");
8791 print_generic_expr (dump_file, arg, dump_flags);
8792 fprintf (dump_file, ": ");
8793 dump_value_range (dump_file, &vr_arg);
8794 fprintf (dump_file, "\n");
8797 if (first)
8798 copy_value_range (&vr_result, &vr_arg);
8799 else
8800 vrp_meet (&vr_result, &vr_arg);
8801 first = false;
8803 if (vr_result.type == VR_VARYING)
8804 break;
8808 if (vr_result.type == VR_VARYING)
8809 goto varying;
8810 else if (vr_result.type == VR_UNDEFINED)
8811 goto update_range;
8813 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
8814 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
8816 /* To prevent infinite iterations in the algorithm, derive ranges
8817 when the new value is slightly bigger or smaller than the
8818 previous one. We don't do this if we have seen a new executable
8819 edge; this helps us avoid an overflow infinity for conditionals
8820 which are not in a loop. If the old value-range was VR_UNDEFINED
8821 use the updated range and iterate one more time. */
8822 if (edges > 0
8823 && gimple_phi_num_args (phi) > 1
8824 && edges == old_edges
8825 && lhs_vr->type != VR_UNDEFINED)
8827 /* Compare old and new ranges, fall back to varying if the
8828 values are not comparable. */
8829 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
8830 if (cmp_min == -2)
8831 goto varying;
8832 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
8833 if (cmp_max == -2)
8834 goto varying;
8836 /* For non VR_RANGE or for pointers fall back to varying if
8837 the range changed. */
8838 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
8839 || POINTER_TYPE_P (TREE_TYPE (lhs)))
8840 && (cmp_min != 0 || cmp_max != 0))
8841 goto varying;
8843 /* If the new minimum is larger than the previous one
8844 retain the old value. If the new minimum value is smaller
8845 than the previous one and not -INF go all the way to -INF + 1.
8846 In the first case, to avoid infinite bouncing between different
8847 minimums, and in the other case to avoid iterating millions of
8848 times to reach -INF. Going to -INF + 1 also lets the following
8849 iteration compute whether there will be any overflow, at the
8850 expense of one additional iteration. */
8851 if (cmp_min < 0)
8852 vr_result.min = lhs_vr->min;
8853 else if (cmp_min > 0
8854 && !vrp_val_is_min (vr_result.min))
8855 vr_result.min
8856 = int_const_binop (PLUS_EXPR,
8857 vrp_val_min (TREE_TYPE (vr_result.min)),
8858 build_int_cst (TREE_TYPE (vr_result.min), 1));
8860 /* Similarly for the maximum value. */
8861 if (cmp_max > 0)
8862 vr_result.max = lhs_vr->max;
8863 else if (cmp_max < 0
8864 && !vrp_val_is_max (vr_result.max))
8865 vr_result.max
8866 = int_const_binop (MINUS_EXPR,
8867 vrp_val_max (TREE_TYPE (vr_result.min)),
8868 build_int_cst (TREE_TYPE (vr_result.min), 1));
8870 /* If we dropped either bound to +-INF then, if this is a loop
8871 PHI node, SCEV may know more about its value-range. */
8872 if ((cmp_min > 0 || cmp_min < 0
8873 || cmp_max < 0 || cmp_max > 0)
8874 && (l = loop_containing_stmt (phi))
8875 && l->header == gimple_bb (phi))
8876 adjust_range_with_scev (&vr_result, l, phi, lhs);
8878 /* If we will end up with a (-INF, +INF) range, set it to
8879 VARYING. Same if the previous max value was invalid for
8880 the type and we end up with vr_result.min > vr_result.max. */
8881 if ((vrp_val_is_max (vr_result.max)
8882 && vrp_val_is_min (vr_result.min))
8883 || compare_values (vr_result.min,
8884 vr_result.max) > 0)
8885 goto varying;
8888 /* If the new range is different than the previous value, keep
8889 iterating. */
8890 update_range:
8891 if (update_value_range (lhs, &vr_result))
8893 if (dump_file && (dump_flags & TDF_DETAILS))
8895 fprintf (dump_file, "Found new range for ");
8896 print_generic_expr (dump_file, lhs, 0);
8897 fprintf (dump_file, ": ");
8898 dump_value_range (dump_file, &vr_result);
8899 fprintf (dump_file, "\n");
8902 if (vr_result.type == VR_VARYING)
8903 return SSA_PROP_VARYING;
8905 return SSA_PROP_INTERESTING;
8908 /* Nothing changed, don't add outgoing edges. */
8909 return SSA_PROP_NOT_INTERESTING;
8911 /* No match found. Set the LHS to VARYING. */
8912 varying:
8913 set_value_range_to_varying (lhs_vr);
8914 return SSA_PROP_VARYING;
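/* A hypothetical illustration of the widening step above: if the previous
   range of the PHI result was [0, 7] and this visit computes [0, 8],
   CMP_MAX is negative, so the maximum is dropped to +INF - 1 at once
   instead of creeping upwards one unit per iteration; the following
   iteration (and possibly SCEV) then decides whether the bound really
   reaches +INF.  */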
8917 /* Simplify boolean operations if the source is known
8918 to be already a boolean. */
8919 static bool
8920 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
8922 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8923 tree lhs, op0, op1;
8924 bool need_conversion;
8926 /* We handle only !=/== case here. */
8927 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
8929 op0 = gimple_assign_rhs1 (stmt);
8930 if (!op_with_boolean_value_range_p (op0))
8931 return false;
8933 op1 = gimple_assign_rhs2 (stmt);
8934 if (!op_with_boolean_value_range_p (op1))
8935 return false;
8937 /* Reduce number of cases to handle to NE_EXPR. As there is no
8938 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
8939 if (rhs_code == EQ_EXPR)
8941 if (TREE_CODE (op1) == INTEGER_CST)
8942 op1 = int_const_binop (BIT_XOR_EXPR, op1,
8943 build_int_cst (TREE_TYPE (op1), 1));
8944 else
8945 return false;
8948 lhs = gimple_assign_lhs (stmt);
8949 need_conversion
8950 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
8952 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
8953 if (need_conversion
8954 && !TYPE_UNSIGNED (TREE_TYPE (op0))
8955 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
8956 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
8957 return false;
8959 /* For A != 0 we can substitute A itself. */
8960 if (integer_zerop (op1))
8961 gimple_assign_set_rhs_with_ops (gsi,
8962 need_conversion
8963 ? NOP_EXPR : TREE_CODE (op0), op0);
8964 /* For A != B we substitute A ^ B. Either with conversion. */
8965 else if (need_conversion)
8967 tree tem = make_ssa_name (TREE_TYPE (op0));
8968 gassign *newop
8969 = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
8970 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
8971 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
8973 /* Or without. */
8974 else
8975 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
8976 update_stmt (gsi_stmt (*gsi));
8978 return true;
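/* Hypothetical rewrites performed above, assuming _1 and _2 have the
   boolean range [0, 1] and compatible types:
     _3 = _1 != 0    becomes    _3 = _1
     _3 = _1 == 1    becomes    _3 = _1
     _3 = _1 == 0    becomes    _3 = _1 ^ 1
     _3 = _1 != _2   becomes    _3 = _1 ^ _2  */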
8981 /* Simplify a division or modulo operator to a right shift or
8982 bitwise and if the first operand is unsigned or is greater
8983 than zero and the second operand is an exact power of two.
8984 For TRUNC_MOD_EXPR op0 % op1 with constant op1, optimize it
8985 into just op0 if op0's range is known to be a subset of
8986 [-op1 + 1, op1 - 1] for signed and [0, op1 - 1] for unsigned
8987 modulo. */
8989 static bool
8990 simplify_div_or_mod_using_ranges (gimple *stmt)
8992 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8993 tree val = NULL;
8994 tree op0 = gimple_assign_rhs1 (stmt);
8995 tree op1 = gimple_assign_rhs2 (stmt);
8996 value_range_t *vr = get_value_range (op0);
8998 if (rhs_code == TRUNC_MOD_EXPR
8999 && TREE_CODE (op1) == INTEGER_CST
9000 && tree_int_cst_sgn (op1) == 1
9001 && range_int_cst_p (vr)
9002 && tree_int_cst_lt (vr->max, op1))
9004 if (TYPE_UNSIGNED (TREE_TYPE (op0))
9005 || tree_int_cst_sgn (vr->min) >= 0
9006 || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1), op1),
9007 vr->min))
9009 /* If op0 already has the range op0 % op1 has,
9010 then TRUNC_MOD_EXPR won't change anything. */
9011 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
9012 gimple_assign_set_rhs_from_tree (&gsi, op0);
9013 update_stmt (stmt);
9014 return true;
9018 if (!integer_pow2p (op1))
9019 return false;
9021 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
9023 val = integer_one_node;
9025 else
9027 bool sop = false;
9029 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
9031 if (val
9032 && sop
9033 && integer_onep (val)
9034 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9036 location_t location;
9038 if (!gimple_has_location (stmt))
9039 location = input_location;
9040 else
9041 location = gimple_location (stmt);
9042 warning_at (location, OPT_Wstrict_overflow,
9043 "assuming signed overflow does not occur when "
9044 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
9048 if (val && integer_onep (val))
9050 tree t;
9052 if (rhs_code == TRUNC_DIV_EXPR)
9054 t = build_int_cst (integer_type_node, tree_log2 (op1));
9055 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
9056 gimple_assign_set_rhs1 (stmt, op0);
9057 gimple_assign_set_rhs2 (stmt, t);
9059 else
9061 t = build_int_cst (TREE_TYPE (op1), 1);
9062 t = int_const_binop (MINUS_EXPR, op1, t);
9063 t = fold_convert (TREE_TYPE (op0), t);
9065 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
9066 gimple_assign_set_rhs1 (stmt, op0);
9067 gimple_assign_set_rhs2 (stmt, t);
9070 update_stmt (stmt);
9071 return true;
9074 return false;
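/* Hypothetical rewrites performed above, assuming x is unsigned or known
   to be non-negative:
     x / 8    becomes    x >> 3
     x % 8    becomes    x & 7
   and if the range of x is already a subset of [0, 7], x % 8 is replaced
   by x itself before the power-of-two path is even reached.  */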
9077 /* Simplify a min or max if the ranges of the two operands are
9078 disjoint. Return true if we do simplify. */
9080 static bool
9081 simplify_min_or_max_using_ranges (gimple *stmt)
9083 tree op0 = gimple_assign_rhs1 (stmt);
9084 tree op1 = gimple_assign_rhs2 (stmt);
9085 bool sop = false;
9086 tree val;
9088 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9089 (LE_EXPR, op0, op1, &sop));
9090 if (!val)
9092 sop = false;
9093 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9094 (LT_EXPR, op0, op1, &sop));
9097 if (val)
9099 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9101 location_t location;
9103 if (!gimple_has_location (stmt))
9104 location = input_location;
9105 else
9106 location = gimple_location (stmt);
9107 warning_at (location, OPT_Wstrict_overflow,
9108 "assuming signed overflow does not occur when "
9109 "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
9112 /* VAL == TRUE -> OP0 < or <= op1
9113 VAL == FALSE -> OP0 > or >= op1. */
9114 tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
9115 == integer_zerop (val)) ? op0 : op1;
9116 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
9117 gimple_assign_set_rhs_from_tree (&gsi, res);
9118 update_stmt (stmt);
9119 return true;
9122 return false;
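/* A hypothetical example: for MIN_EXPR <a_1, b_2> with a_1 in [0, 5] and
   b_2 in [10, 20], a_1 <= b_2 follows from the ranges alone, so the
   statement is simplified to a copy of a_1.  */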
9125 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
9126 ABS_EXPR. If the operand is <= 0, then simplify the
9127 ABS_EXPR into a NEGATE_EXPR. */
9129 static bool
9130 simplify_abs_using_ranges (gimple *stmt)
9132 tree op = gimple_assign_rhs1 (stmt);
9133 value_range_t *vr = get_value_range (op);
9135 if (vr)
9137 tree val = NULL;
9138 bool sop = false;
9140 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
9141 if (!val)
9143 /* The range is neither <= 0 nor > 0. Now see if it is
9144 either < 0 or >= 0. */
9145 sop = false;
9146 val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
9147 &sop);
9150 if (val)
9152 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9154 location_t location;
9156 if (!gimple_has_location (stmt))
9157 location = input_location;
9158 else
9159 location = gimple_location (stmt);
9160 warning_at (location, OPT_Wstrict_overflow,
9161 "assuming signed overflow does not occur when "
9162 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
9165 gimple_assign_set_rhs1 (stmt, op);
9166 if (integer_zerop (val))
9167 gimple_assign_set_rhs_code (stmt, SSA_NAME);
9168 else
9169 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
9170 update_stmt (stmt);
9171 return true;
9175 return false;
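/* A hypothetical example: for x_1 = ABS_EXPR <y_2> with y_2 in [-10, -1]
   the operand is known to be non-positive, so the statement becomes
   x_1 = -y_2; with y_2 in [0, 10] it becomes the plain copy x_1 = y_2.  */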
9178 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
9179 If all the bits that are being cleared by & are already
9180 known to be zero from VR, or all the bits that are being
9181 set by | are already known to be one from VR, the bit
9182 operation is redundant. */
9184 static bool
9185 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9187 tree op0 = gimple_assign_rhs1 (stmt);
9188 tree op1 = gimple_assign_rhs2 (stmt);
9189 tree op = NULL_TREE;
9190 value_range_t vr0 = VR_INITIALIZER;
9191 value_range_t vr1 = VR_INITIALIZER;
9192 wide_int may_be_nonzero0, may_be_nonzero1;
9193 wide_int must_be_nonzero0, must_be_nonzero1;
9194 wide_int mask;
9196 if (TREE_CODE (op0) == SSA_NAME)
9197 vr0 = *(get_value_range (op0));
9198 else if (is_gimple_min_invariant (op0))
9199 set_value_range_to_value (&vr0, op0, NULL);
9200 else
9201 return false;
9203 if (TREE_CODE (op1) == SSA_NAME)
9204 vr1 = *(get_value_range (op1));
9205 else if (is_gimple_min_invariant (op1))
9206 set_value_range_to_value (&vr1, op1, NULL);
9207 else
9208 return false;
9210 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
9211 &must_be_nonzero0))
9212 return false;
9213 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
9214 &must_be_nonzero1))
9215 return false;
9217 switch (gimple_assign_rhs_code (stmt))
9219 case BIT_AND_EXPR:
9220 mask = may_be_nonzero0.and_not (must_be_nonzero1);
9221 if (mask == 0)
9223 op = op0;
9224 break;
9226 mask = may_be_nonzero1.and_not (must_be_nonzero0);
9227 if (mask == 0)
9229 op = op1;
9230 break;
9232 break;
9233 case BIT_IOR_EXPR:
9234 mask = may_be_nonzero0.and_not (must_be_nonzero1);
9235 if (mask == 0)
9237 op = op1;
9238 break;
9240 mask = may_be_nonzero1.and_not (must_be_nonzero0);
9241 if (mask == 0)
9243 op = op0;
9244 break;
9246 break;
9247 default:
9248 gcc_unreachable ();
9251 if (op == NULL_TREE)
9252 return false;
9254 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
9255 update_stmt (gsi_stmt (*gsi));
9256 return true;
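/* A hypothetical example: for x_1 = y_2 & 255 with y_2 known to lie in
   [0, 15], every bit the mask could clear is already zero, so the
   statement is reduced to the copy x_1 = y_2; similarly y_2 | 0 reduces
   to y_2.  */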
9259 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
9260 a known value range VR.
9262 If there is one and only one value which will satisfy the
9263 conditional, then return that value. Else return NULL.
9265 If signed overflow must be undefined for the value to satisfy
9266 the conditional, then set *STRICT_OVERFLOW_P to true. */
9268 static tree
9269 test_for_singularity (enum tree_code cond_code, tree op0,
9270 tree op1, value_range_t *vr,
9271 bool *strict_overflow_p)
9273 tree min = NULL;
9274 tree max = NULL;
9276 /* Extract minimum/maximum values which satisfy the
9277 conditional as it was written. */
9278 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
9280 /* This should not be negative infinity; there is no overflow
9281 here. */
9282 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
9284 max = op1;
9285 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
9287 tree one = build_int_cst (TREE_TYPE (op0), 1);
9288 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
9289 if (EXPR_P (max))
9290 TREE_NO_WARNING (max) = 1;
9293 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
9295 /* This should not be positive infinity; there is no overflow
9296 here. */
9297 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
9299 min = op1;
9300 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
9302 tree one = build_int_cst (TREE_TYPE (op0), 1);
9303 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
9304 if (EXPR_P (min))
9305 TREE_NO_WARNING (min) = 1;
9309 /* Now refine the minimum and maximum values using any
9310 value range information we have for op0. */
9311 if (min && max)
9313 if (compare_values (vr->min, min) == 1)
9314 min = vr->min;
9315 if (compare_values (vr->max, max) == -1)
9316 max = vr->max;
9318 /* If the new min/max values have converged to a single value,
9319 then there is only one value which can satisfy the condition,
9320 return that value. */
9321 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
9323 if ((cond_code == LE_EXPR || cond_code == LT_EXPR)
9324 && is_overflow_infinity (vr->max))
9325 *strict_overflow_p = true;
9326 if ((cond_code == GE_EXPR || cond_code == GT_EXPR)
9327 && is_overflow_infinity (vr->min))
9328 *strict_overflow_p = true;
9330 return min;
9333 return NULL;
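/* A hypothetical example: testing i_1 < 1 with i_1 known to be in [0, 10]
   gives MAX = 0 from the condition and MIN = 0 from the range, so the only
   satisfying value is 0 and the caller may rewrite the test as i_1 == 0.  */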
9336 /* Return whether the value range *VR fits in an integer type specified
9337 by DEST_PRECISION and DEST_SGN. */
9339 static bool
9340 range_fits_type_p (value_range_t *vr, unsigned dest_precision, signop dest_sgn)
9342 tree src_type;
9343 unsigned src_precision;
9344 widest_int tem;
9345 signop src_sgn;
9347 /* We can only handle integral and pointer types. */
9348 src_type = TREE_TYPE (vr->min);
9349 if (!INTEGRAL_TYPE_P (src_type)
9350 && !POINTER_TYPE_P (src_type))
9351 return false;
9353 /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
9354 and so is an identity transform. */
9355 src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
9356 src_sgn = TYPE_SIGN (src_type);
9357 if ((src_precision < dest_precision
9358 && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
9359 || (src_precision == dest_precision && src_sgn == dest_sgn))
9360 return true;
9362 /* Now we can only handle ranges with constant bounds. */
9363 if (vr->type != VR_RANGE
9364 || TREE_CODE (vr->min) != INTEGER_CST
9365 || TREE_CODE (vr->max) != INTEGER_CST)
9366 return false;
9368 /* For sign changes, the MSB of the wide_int has to be clear.
9369 An unsigned value with its MSB set cannot be represented by
9370 a signed wide_int, while a negative value cannot be represented
9371 by an unsigned wide_int. */
9372 if (src_sgn != dest_sgn
9373 && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
9374 return false;
9376 /* Then we can perform the conversion on both ends and compare
9377 the result for equality. */
9378 tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
9379 if (tem != wi::to_widest (vr->min))
9380 return false;
9381 tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
9382 if (tem != wi::to_widest (vr->max))
9383 return false;
9385 return true;
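/* A hypothetical example: the int range [0, 200] fits an 8-bit unsigned
   type because reducing both bounds to 8 bits and extending them back
   reproduces the original values, whereas [0, 300] does not, since 300
   reduced to 8 bits is 44.  */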
9388 /* Simplify a conditional using a relational operator to an equality
9389 test if the range information indicates only one value can satisfy
9390 the original conditional. */
9392 static bool
9393 simplify_cond_using_ranges (gcond *stmt)
9395 tree op0 = gimple_cond_lhs (stmt);
9396 tree op1 = gimple_cond_rhs (stmt);
9397 enum tree_code cond_code = gimple_cond_code (stmt);
9399 if (cond_code != NE_EXPR
9400 && cond_code != EQ_EXPR
9401 && TREE_CODE (op0) == SSA_NAME
9402 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
9403 && is_gimple_min_invariant (op1))
9405 value_range_t *vr = get_value_range (op0);
9407 /* If we have range information for OP0, then we might be
9408 able to simplify this conditional. */
9409 if (vr->type == VR_RANGE)
9411 enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
9412 bool sop = false;
9413 tree new_tree = test_for_singularity (cond_code, op0, op1, vr, &sop);
9415 if (new_tree
9416 && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
9418 if (dump_file)
9420 fprintf (dump_file, "Simplified relational ");
9421 print_gimple_stmt (dump_file, stmt, 0, 0);
9422 fprintf (dump_file, " into ");
9425 gimple_cond_set_code (stmt, EQ_EXPR);
9426 gimple_cond_set_lhs (stmt, op0);
9427 gimple_cond_set_rhs (stmt, new_tree);
9429 update_stmt (stmt);
9431 if (dump_file)
9433 print_gimple_stmt (dump_file, stmt, 0, 0);
9434 fprintf (dump_file, "\n");
9437 if (sop && issue_strict_overflow_warning (wc))
9439 location_t location = input_location;
9440 if (gimple_has_location (stmt))
9441 location = gimple_location (stmt);
9443 warning_at (location, OPT_Wstrict_overflow,
9444 "assuming signed overflow does not occur when "
9445 "simplifying conditional");
9448 return true;
9451 /* Try again after inverting the condition. We only deal
9452 with integral types here, so no need to worry about
9453 issues with inverting FP comparisons. */
9454 sop = false;
9455 new_tree = test_for_singularity
9456 (invert_tree_comparison (cond_code, false),
9457 op0, op1, vr, &sop);
9459 if (new_tree
9460 && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
9462 if (dump_file)
9464 fprintf (dump_file, "Simplified relational ");
9465 print_gimple_stmt (dump_file, stmt, 0, 0);
9466 fprintf (dump_file, " into ");
9469 gimple_cond_set_code (stmt, NE_EXPR);
9470 gimple_cond_set_lhs (stmt, op0);
9471 gimple_cond_set_rhs (stmt, new_tree);
9473 update_stmt (stmt);
9475 if (dump_file)
9477 print_gimple_stmt (dump_file, stmt, 0, 0);
9478 fprintf (dump_file, "\n");
9481 if (sop && issue_strict_overflow_warning (wc))
9483 location_t location = input_location;
9484 if (gimple_has_location (stmt))
9485 location = gimple_location (stmt);
9487 warning_at (location, OPT_Wstrict_overflow,
9488 "assuming signed overflow does not occur when "
9489 "simplifying conditional");
9492 return true;
9497 /* If we have a comparison of an SSA_NAME (OP0) against a constant,
9498 see if OP0 was set by a type conversion where the source of
9499 the conversion is another SSA_NAME with a range that fits
9500 into the range of OP0's type.
9502 If so, the conversion is redundant as the earlier SSA_NAME can be
9503 used for the comparison directly if we just massage the constant in the
9504 comparison. */
9505 if (TREE_CODE (op0) == SSA_NAME
9506 && TREE_CODE (op1) == INTEGER_CST)
9508 gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
9509 tree innerop;
9511 if (!is_gimple_assign (def_stmt)
9512 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
9513 return false;
9515 innerop = gimple_assign_rhs1 (def_stmt);
9517 if (TREE_CODE (innerop) == SSA_NAME
9518 && !POINTER_TYPE_P (TREE_TYPE (innerop)))
9520 value_range_t *vr = get_value_range (innerop);
9522 if (range_int_cst_p (vr)
9523 && range_fits_type_p (vr,
9524 TYPE_PRECISION (TREE_TYPE (op0)),
9525 TYPE_SIGN (TREE_TYPE (op0)))
9526 && int_fits_type_p (op1, TREE_TYPE (innerop))
9527 /* The range must not have overflowed, or if it did overflow
9528 we must not be wrapping/trapping overflow and optimizing
9529 with strict overflow semantics. */
9530 && ((!is_negative_overflow_infinity (vr->min)
9531 && !is_positive_overflow_infinity (vr->max))
9532 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
9534 /* If the range overflowed and the user has asked for warnings
9535 when strict overflow semantics were used to optimize code,
9536 issue an appropriate warning. */
9537 if (cond_code != EQ_EXPR && cond_code != NE_EXPR
9538 && (is_negative_overflow_infinity (vr->min)
9539 || is_positive_overflow_infinity (vr->max))
9540 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
9542 location_t location;
9544 if (!gimple_has_location (stmt))
9545 location = input_location;
9546 else
9547 location = gimple_location (stmt);
9548 warning_at (location, OPT_Wstrict_overflow,
9549 "assuming signed overflow does not occur when "
9550 "simplifying conditional");
9553 tree newconst = fold_convert (TREE_TYPE (innerop), op1);
9554 gimple_cond_set_lhs (stmt, innerop);
9555 gimple_cond_set_rhs (stmt, newconst);
9556 return true;
9561 return false;
9564 /* Simplify a switch statement using the value range of the switch
9565 argument. */
9567 static bool
9568 simplify_switch_using_ranges (gswitch *stmt)
9570 tree op = gimple_switch_index (stmt);
9571 value_range_t *vr;
9572 bool take_default;
9573 edge e;
9574 edge_iterator ei;
9575 size_t i = 0, j = 0, n, n2;
9576 tree vec2;
9577 switch_update su;
9578 size_t k = 1, l = 0;
9580 if (TREE_CODE (op) == SSA_NAME)
9582 vr = get_value_range (op);
9584 /* We can only handle integer ranges. */
9585 if ((vr->type != VR_RANGE
9586 && vr->type != VR_ANTI_RANGE)
9587 || symbolic_range_p (vr))
9588 return false;
9590 /* Find case label for min/max of the value range. */
9591 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
9593 else if (TREE_CODE (op) == INTEGER_CST)
9595 take_default = !find_case_label_index (stmt, 1, op, &i);
9596 if (take_default)
9598 i = 1;
9599 j = 0;
9601 else
9603 j = i;
9606 else
9607 return false;
9609 n = gimple_switch_num_labels (stmt);
9611 /* Bail out if this is just all edges taken. */
9612 if (i == 1
9613 && j == n - 1
9614 && take_default)
9615 return false;
9617 /* Build a new vector of taken case labels. */
9618 vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
9619 n2 = 0;
9621 /* Add the default edge, if necessary. */
9622 if (take_default)
9623 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
9625 for (; i <= j; ++i, ++n2)
9626 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
9628 for (; k <= l; ++k, ++n2)
9629 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
9631 /* Mark needed edges. */
9632 for (i = 0; i < n2; ++i)
9634 e = find_edge (gimple_bb (stmt),
9635 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
9636 e->aux = (void *)-1;
9639 /* Queue not needed edges for later removal. */
9640 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
9642 if (e->aux == (void *)-1)
9644 e->aux = NULL;
9645 continue;
9648 if (dump_file && (dump_flags & TDF_DETAILS))
9650 fprintf (dump_file, "removing unreachable case label\n");
9652 to_remove_edges.safe_push (e);
9653 e->flags &= ~EDGE_EXECUTABLE;
9656 /* And queue an update for the stmt. */
9657 su.stmt = stmt;
9658 su.vec = vec2;
9659 to_update_switch_stmts.safe_push (su);
9660 return false;
9663 /* Simplify an integral conversion from an SSA name in STMT. */
9665 static bool
9666 simplify_conversion_using_ranges (gimple *stmt)
9668 tree innerop, middleop, finaltype;
9669 gimple *def_stmt;
9670 value_range_t *innervr;
9671 signop inner_sgn, middle_sgn, final_sgn;
9672 unsigned inner_prec, middle_prec, final_prec;
9673 widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
9675 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
9676 if (!INTEGRAL_TYPE_P (finaltype))
9677 return false;
9678 middleop = gimple_assign_rhs1 (stmt);
9679 def_stmt = SSA_NAME_DEF_STMT (middleop);
9680 if (!is_gimple_assign (def_stmt)
9681 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
9682 return false;
9683 innerop = gimple_assign_rhs1 (def_stmt);
9684 if (TREE_CODE (innerop) != SSA_NAME
9685 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
9686 return false;
9688 /* Get the value-range of the inner operand. */
9689 innervr = get_value_range (innerop);
9690 if (innervr->type != VR_RANGE
9691 || TREE_CODE (innervr->min) != INTEGER_CST
9692 || TREE_CODE (innervr->max) != INTEGER_CST)
9693 return false;
9695 /* Simulate the conversion chain to check whether the result stays the
9696 same when the middle conversion is removed. */
9697 innermin = wi::to_widest (innervr->min);
9698 innermax = wi::to_widest (innervr->max);
9700 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
9701 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
9702 final_prec = TYPE_PRECISION (finaltype);
9704 /* If the first conversion is not injective, the second must not
9705 be widening. */
9706 if (wi::gtu_p (innermax - innermin,
9707 wi::mask <widest_int> (middle_prec, false))
9708 && middle_prec < final_prec)
9709 return false;
9710 /* We also want a middle value so that we can track the effect that
9711 narrowing conversions with sign change have. */
9712 inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
9713 if (inner_sgn == UNSIGNED)
9714 innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
9715 else
9716 innermed = 0;
9717 if (wi::cmp (innermin, innermed, inner_sgn) >= 0
9718 || wi::cmp (innermed, innermax, inner_sgn) >= 0)
9719 innermed = innermin;
9721 middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
9722 middlemin = wi::ext (innermin, middle_prec, middle_sgn);
9723 middlemed = wi::ext (innermed, middle_prec, middle_sgn);
9724 middlemax = wi::ext (innermax, middle_prec, middle_sgn);
9726 /* Require that the final conversion applied to both the original
9727 and the intermediate range produces the same result. */
9728 final_sgn = TYPE_SIGN (finaltype);
9729 if (wi::ext (middlemin, final_prec, final_sgn)
9730 != wi::ext (innermin, final_prec, final_sgn)
9731 || wi::ext (middlemed, final_prec, final_sgn)
9732 != wi::ext (innermed, final_prec, final_sgn)
9733 || wi::ext (middlemax, final_prec, final_sgn)
9734 != wi::ext (innermax, final_prec, final_sgn))
9735 return false;
9737 gimple_assign_set_rhs1 (stmt, innerop);
9738 update_stmt (stmt);
9739 return true;
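/* A minimal sketch of the chain handled above (hypothetical types and
   range):

     unsigned char c;        c known to be in [0, 100]
     short s = (short) c;    inner -> middle conversion (def_stmt)
     int   i = (int) s;      middle -> final conversion (stmt)

   Converting c directly to int yields the same value for every element of
   [0, 100], so the final statement is rewritten to i = (int) c and the
   intermediate conversion becomes dead.  If the inner range spanned more
   values than the middle type can represent while the final type was
   wider than the middle one, the first conversion would not be injective
   and the rewrite would be rejected above.  */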
9742 /* Simplify a conversion from integral SSA name to float in STMT. */
9744 static bool
9745 simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
9746 gimple *stmt)
9748 tree rhs1 = gimple_assign_rhs1 (stmt);
9749 value_range_t *vr = get_value_range (rhs1);
9750 machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
9751 machine_mode mode;
9752 tree tem;
9753 gassign *conv;
9755 /* We can only handle constant ranges. */
9756 if (vr->type != VR_RANGE
9757 || TREE_CODE (vr->min) != INTEGER_CST
9758 || TREE_CODE (vr->max) != INTEGER_CST)
9759 return false;
9761 /* First check if we can use a signed type in place of an unsigned. */
9762 if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
9763 && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
9764 != CODE_FOR_nothing)
9765 && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
9766 mode = TYPE_MODE (TREE_TYPE (rhs1));
9767 /* If we can do the conversion in the current input mode do nothing. */
9768 else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
9769 TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
9770 return false;
9771 /* Otherwise search for a mode we can use, starting from the narrowest
9772 integer mode available. */
9773 else
9775 mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
9778 /* If we cannot do a signed conversion to float from mode
9779 or if the value-range does not fit in the signed type,
9780 try with a wider mode. */
9781 if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
9782 && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
9783 break;
9785 mode = GET_MODE_WIDER_MODE (mode);
9786 /* But do not widen the input. Instead leave that to the
9787 optabs expansion code. */
9788 if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
9789 return false;
9791 while (mode != VOIDmode);
9792 if (mode == VOIDmode)
9793 return false;
9796 /* It works; insert a truncation or sign-change before the
9797 float conversion. */
9798 tem = make_ssa_name (build_nonstandard_integer_type
9799 (GET_MODE_PRECISION (mode), 0));
9800 conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
9801 gsi_insert_before (gsi, conv, GSI_SAME_STMT);
9802 gimple_assign_set_rhs1 (stmt, tem);
9803 update_stmt (stmt);
9805 return true;
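/* A sketch of the first case above, on a hypothetical target that can
   expand signed-long-to-double directly but has no unsigned-long-to-double
   pattern:

     unsigned long x;        x known to be in [0, 1000]
     double d = (double) x;

   Because [0, 1000] also fits the signed variant of the type, a
   sign-change copy is inserted and the float conversion reads it instead:

     long tmp = (long) x;
     double d = (double) tmp;

   so the cheaper signed conversion is used at expansion time.  The loop
   further above searches for the narrowest integer mode, never wider than
   the input, for which a signed conversion exists and the range still
   fits, when even the signed conversion from the original mode is
   unavailable.  */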
9808 /* Simplify an internal fn call using ranges if possible. */
9810 static bool
9811 simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9813 enum tree_code subcode;
9814 bool is_ubsan = false;
9815 bool ovf = false;
9816 switch (gimple_call_internal_fn (stmt))
9818 case IFN_UBSAN_CHECK_ADD:
9819 subcode = PLUS_EXPR;
9820 is_ubsan = true;
9821 break;
9822 case IFN_UBSAN_CHECK_SUB:
9823 subcode = MINUS_EXPR;
9824 is_ubsan = true;
9825 break;
9826 case IFN_UBSAN_CHECK_MUL:
9827 subcode = MULT_EXPR;
9828 is_ubsan = true;
9829 break;
9830 case IFN_ADD_OVERFLOW:
9831 subcode = PLUS_EXPR;
9832 break;
9833 case IFN_SUB_OVERFLOW:
9834 subcode = MINUS_EXPR;
9835 break;
9836 case IFN_MUL_OVERFLOW:
9837 subcode = MULT_EXPR;
9838 break;
9839 default:
9840 return false;
9843 tree op0 = gimple_call_arg (stmt, 0);
9844 tree op1 = gimple_call_arg (stmt, 1);
9845 tree type;
9846 if (is_ubsan)
9847 type = TREE_TYPE (op0);
9848 else if (gimple_call_lhs (stmt) == NULL_TREE)
9849 return false;
9850 else
9851 type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
9852 if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
9853 || (is_ubsan && ovf))
9854 return false;
9856 gimple *g;
9857 location_t loc = gimple_location (stmt);
9858 if (is_ubsan)
9859 g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
9860 else
9862 int prec = TYPE_PRECISION (type);
9863 tree utype = type;
9864 if (ovf
9865 || !useless_type_conversion_p (type, TREE_TYPE (op0))
9866 || !useless_type_conversion_p (type, TREE_TYPE (op1)))
9867 utype = build_nonstandard_integer_type (prec, 1);
9868 if (TREE_CODE (op0) == INTEGER_CST)
9869 op0 = fold_convert (utype, op0);
9870 else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
9872 g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
9873 gimple_set_location (g, loc);
9874 gsi_insert_before (gsi, g, GSI_SAME_STMT);
9875 op0 = gimple_assign_lhs (g);
9877 if (TREE_CODE (op1) == INTEGER_CST)
9878 op1 = fold_convert (utype, op1);
9879 else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
9881 g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
9882 gimple_set_location (g, loc);
9883 gsi_insert_before (gsi, g, GSI_SAME_STMT);
9884 op1 = gimple_assign_lhs (g);
9886 g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
9887 gimple_set_location (g, loc);
9888 gsi_insert_before (gsi, g, GSI_SAME_STMT);
9889 if (utype != type)
9891 g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
9892 gimple_assign_lhs (g));
9893 gimple_set_location (g, loc);
9894 gsi_insert_before (gsi, g, GSI_SAME_STMT);
9896 g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
9897 gimple_assign_lhs (g),
9898 build_int_cst (type, ovf));
9900 gimple_set_location (g, loc);
9901 gsi_replace (gsi, g, false);
9902 return true;
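/* A rough example of the non-ubsan path (made-up ranges, GIMPLE spelling
   approximate): for

     _res = ADD_OVERFLOW (a_1, b_2);        IFN_ADD_OVERFLOW on int

   where a_1 and b_2 are both known to lie in [0, 100], the addition can
   never overflow, so the call is replaced by plain arithmetic and a
   constant overflow flag:

     _tmp = a_1 + b_2;
     _res = COMPLEX_EXPR <_tmp, 0>;

   The IFN_UBSAN_CHECK_* variants are instead replaced by the arithmetic
   statement alone, and nothing is changed when the operand ranges do not
   determine whether the operation overflows.  */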
9905 /* Simplify STMT using ranges if possible. */
9907 static bool
9908 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
9910 gimple *stmt = gsi_stmt (*gsi);
9911 if (is_gimple_assign (stmt))
9913 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9914 tree rhs1 = gimple_assign_rhs1 (stmt);
9916 switch (rhs_code)
9918 case EQ_EXPR:
9919 case NE_EXPR:
9920 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
9921 if the RHS is zero or one, and the LHS is known to be a boolean
9922 value. */
9923 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9924 return simplify_truth_ops_using_ranges (gsi, stmt);
9925 break;
9927 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
9928 and BIT_AND_EXPR respectively if the first operand is greater
9929 than zero and the second operand is an exact power of two.
9930 Also optimize TRUNC_MOD_EXPR away if the second operand is
9931 constant and the first operand already has the right value
9932 range. */
9933 case TRUNC_DIV_EXPR:
9934 case TRUNC_MOD_EXPR:
9935 if (TREE_CODE (rhs1) == SSA_NAME
9936 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9937 return simplify_div_or_mod_using_ranges (stmt);
9938 break;
9940 /* Transform ABS (X) into X or -X as appropriate. */
9941 case ABS_EXPR:
9942 if (TREE_CODE (rhs1) == SSA_NAME
9943 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9944 return simplify_abs_using_ranges (stmt);
9945 break;
9947 case BIT_AND_EXPR:
9948 case BIT_IOR_EXPR:
9949 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
9950 if all the bits being cleared are already cleared or
9951 all the bits being set are already set. */
9952 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9953 return simplify_bit_ops_using_ranges (gsi, stmt);
9954 break;
9956 CASE_CONVERT:
9957 if (TREE_CODE (rhs1) == SSA_NAME
9958 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9959 return simplify_conversion_using_ranges (stmt);
9960 break;
9962 case FLOAT_EXPR:
9963 if (TREE_CODE (rhs1) == SSA_NAME
9964 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9965 return simplify_float_conversion_using_ranges (gsi, stmt);
9966 break;
9968 case MIN_EXPR:
9969 case MAX_EXPR:
9970 return simplify_min_or_max_using_ranges (stmt);
9971 break;
9973 default:
9974 break;
9977 else if (gimple_code (stmt) == GIMPLE_COND)
9978 return simplify_cond_using_ranges (as_a <gcond *> (stmt));
9979 else if (gimple_code (stmt) == GIMPLE_SWITCH)
9980 return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
9981 else if (is_gimple_call (stmt)
9982 && gimple_call_internal_p (stmt))
9983 return simplify_internal_call_using_ranges (gsi, stmt);
9985 return false;
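/* As an informal example of the TRUNC_DIV_EXPR/TRUNC_MOD_EXPR case
   dispatched above (made-up range): with x_1 known to lie in [0, 1000],

     _2 = x_1 / 8;        becomes  _2 = x_1 >> 3
     _3 = x_1 % 8;        becomes  _3 = x_1 & 7

   because the first operand is known to be non-negative and the divisor
   is an exact power of two.  */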
9988 /* If the statement pointed to by SI has a predicate whose value can be
9989 computed using the value range information computed by VRP, compute
9990 its value and return true. Otherwise, return false. */
9992 static bool
9993 fold_predicate_in (gimple_stmt_iterator *si)
9995 bool assignment_p = false;
9996 tree val;
9997 gimple *stmt = gsi_stmt (*si);
9999 if (is_gimple_assign (stmt)
10000 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
10002 assignment_p = true;
10003 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
10004 gimple_assign_rhs1 (stmt),
10005 gimple_assign_rhs2 (stmt),
10006 stmt);
10008 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
10009 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
10010 gimple_cond_lhs (cond_stmt),
10011 gimple_cond_rhs (cond_stmt),
10012 stmt);
10013 else
10014 return false;
10016 if (val)
10018 if (assignment_p)
10019 val = fold_convert (gimple_expr_type (stmt), val);
10021 if (dump_file)
10023 fprintf (dump_file, "Folding predicate ");
10024 print_gimple_expr (dump_file, stmt, 0, 0);
10025 fprintf (dump_file, " to ");
10026 print_generic_expr (dump_file, val, 0);
10027 fprintf (dump_file, "\n");
10030 if (is_gimple_assign (stmt))
10031 gimple_assign_set_rhs_from_tree (si, val);
10032 else
10034 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
10035 gcond *cond_stmt = as_a <gcond *> (stmt);
10036 if (integer_zerop (val))
10037 gimple_cond_make_false (cond_stmt);
10038 else if (integer_onep (val))
10039 gimple_cond_make_true (cond_stmt);
10040 else
10041 gcc_unreachable ();
10044 return true;
10047 return false;
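/* Sketch of the two shapes folded here (hypothetical range): with a_1
   known to lie in [5, 10],

     if (a_1 > 0)         condition is rewritten via gimple_cond_make_true
     b_2 = a_1 <= 4;      comparison RHS is replaced by the constant 0

   In both cases vrp_evaluate_conditional supplies the constant result; if
   it cannot decide, the statement is left untouched.  */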
10050 /* Callback for substitute_and_fold folding the stmt at *SI. */
10052 static bool
10053 vrp_fold_stmt (gimple_stmt_iterator *si)
10055 if (fold_predicate_in (si))
10056 return true;
10058 return simplify_stmt_using_ranges (si);
10061 /* Unwindable const/copy equivalences. */
10062 const_and_copies *equiv_stack;
10064 /* A trivial wrapper so that we can present the generic jump threading
10065 code with a simple API for simplifying statements. STMT is the
10066 statement we want to simplify, WITHIN_STMT provides the location
10067 for any overflow warnings. */
10069 static tree
10070 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
10071 class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED)
10073 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
10074 return vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
10075 gimple_cond_lhs (cond_stmt),
10076 gimple_cond_rhs (cond_stmt),
10077 within_stmt);
10079 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
10081 value_range_t new_vr = VR_INITIALIZER;
10082 tree lhs = gimple_assign_lhs (assign_stmt);
10084 if (TREE_CODE (lhs) == SSA_NAME
10085 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
10086 || POINTER_TYPE_P (TREE_TYPE (lhs))))
10088 extract_range_from_assignment (&new_vr, assign_stmt);
10089 if (range_int_cst_singleton_p (&new_vr))
10090 return new_vr.min;
10094 return NULL_TREE;
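/* Sketch of what this callback yields (hypothetical names): asked about
   "if (i_4 < 10)" on a path where i_4 is known to lie in [0, 3], it
   returns the true constant; asked about an assignment whose computed
   range collapses to a single value, it returns that value; otherwise it
   returns NULL_TREE and the statement does not help the threader.  */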
10097 /* Blocks which have more than one predecessor and more than
10098 one successor present jump threading opportunities, i.e.,
10099 when the block is reached from a specific predecessor, we
10100 may be able to determine which of the outgoing edges will
10101 be traversed. When this optimization applies, we are able
10102 to avoid conditionals at runtime and we may expose secondary
10103 optimization opportunities.
10105 This routine is effectively a driver for the generic jump
10106 threading code. It basically just presents the generic code
10107 with edges that may be suitable for jump threading.
10109 Unlike DOM, we do not iterate VRP if jump threading was successful.
10110 While iterating may expose new opportunities for VRP, it is expected
10111 that those opportunities would be very limited and the compile-time cost
10112 to expose those opportunities would be significant.
10114 As jump threading opportunities are discovered, they are registered
10115 for later realization. */
10117 static void
10118 identify_jump_threads (void)
10120 basic_block bb;
10121 gcond *dummy;
10122 int i;
10123 edge e;
10125 /* Ugh. When substituting values earlier in this pass we can
10126 wipe the dominance information. So rebuild the dominator
10127 information as we need it within the jump threading code. */
10128 calculate_dominance_info (CDI_DOMINATORS);
10130 /* We do not allow VRP information to be used for jump threading
10131 across a back edge in the CFG. Otherwise it becomes too
10132 difficult to avoid eliminating loop exit tests. Of course
10133 EDGE_DFS_BACK is not accurate at this time so we have to
10134 recompute it. */
10135 mark_dfs_back_edges ();
10137 /* Do not thread across edges we are about to remove. Just marking
10138 them as EDGE_DFS_BACK will do. */
10139 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10140 e->flags |= EDGE_DFS_BACK;
10142 /* Allocate our unwinder stack to unwind any temporary equivalences
10143 that might be recorded. */
10144 equiv_stack = new const_and_copies ();
10146 /* To avoid lots of silly node creation, we create a single
10147 conditional and just modify it in-place when attempting to
10148 thread jumps. */
10149 dummy = gimple_build_cond (EQ_EXPR,
10150 integer_zero_node, integer_zero_node,
10151 NULL, NULL);
10153 /* Walk through all the blocks finding those which present a
10154 potential jump threading opportunity. We could set this up
10155 as a dominator walker and record data during the walk, but
10156 I doubt it's worth the effort for the classes of jump
10157 threading opportunities we are trying to identify at this
10158 point in compilation. */
10159 FOR_EACH_BB_FN (bb, cfun)
10161 gimple *last;
10163 /* If the generic jump threading code does not find this block
10164 interesting, then there is nothing to do. */
10165 if (! potentially_threadable_block (bb))
10166 continue;
10168 last = last_stmt (bb);
10170 /* We're basically looking for a switch or any kind of conditional with
10171 integral or pointer type arguments. Note the type of the second
10172 argument will be the same as the first argument, so no need to
10173 check it explicitly.
10175 We also handle the case where there are no statements in the
10176 block. This comes up with forwarder blocks that are not
10177 optimized away because they lead to a loop header. But we do
10178 want to thread through them as we can sometimes thread to the
10179 loop exit which is obviously profitable. */
10180 if (!last
10181 || gimple_code (last) == GIMPLE_SWITCH
10182 || (gimple_code (last) == GIMPLE_COND
10183 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
10184 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
10185 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
10186 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
10187 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
10189 edge_iterator ei;
10191 /* We've got a block with multiple predecessors and multiple
10192 successors which also ends in a suitable conditional or
10193 switch statement. For each predecessor, see if we can thread
10194 it to a specific successor. */
10195 FOR_EACH_EDGE (e, ei, bb->preds)
10197 /* Do not thread across back edges or abnormal edges
10198 in the CFG. */
10199 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
10200 continue;
10202 thread_across_edge (dummy, e, true, equiv_stack, NULL,
10203 simplify_stmt_for_jump_threading);
10208 /* We do not actually update the CFG or SSA graphs at this point as
10209 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
10210 handle ASSERT_EXPRs gracefully. */
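/* An informal picture of the opportunity being registered (made-up CFG):

     if (x_1 > 10)          block A
       y_2 = 1;
     else
       y_2 = 0;
     ...                    block B joins both arms and tests again
     if (x_1 > 10)

   When block B is entered from the "then" arm of A, the range recorded
   for x_1 on that path already decides B's condition, so that predecessor
   edge can be threaded straight to the matching successor and the second
   runtime test is skipped on that path.  */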
10213 /* We identified all the jump threading opportunities earlier, but could
10214 not transform the CFG at that time. This routine transforms the
10215 CFG and arranges for the dominator tree to be rebuilt if necessary.
10217 Note the SSA graph update will occur during the normal TODO
10218 processing by the pass manager. */
10219 static void
10220 finalize_jump_threads (void)
10222 thread_through_all_blocks (false);
10223 delete equiv_stack;
10227 /* Traverse all the blocks folding conditionals with known ranges. */
10229 static void
10230 vrp_finalize (void)
10232 size_t i;
10234 values_propagated = true;
10236 if (dump_file)
10238 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
10239 dump_all_value_ranges (dump_file);
10240 fprintf (dump_file, "\n");
10243 substitute_and_fold (op_with_constant_singleton_value_range,
10244 vrp_fold_stmt, false);
10246 if (warn_array_bounds && first_pass_instance)
10247 check_all_array_refs ();
10249 /* We must identify jump threading opportunities before we release
10250 the datastructures built by VRP. */
10251 identify_jump_threads ();
10253 /* Set value range information on non-pointer SSA_NAMEs. */
10254 for (i = 0; i < num_vr_values; i++)
10255 if (vr_value[i])
10257 tree name = ssa_name (i);
10259 if (!name
10260 || POINTER_TYPE_P (TREE_TYPE (name))
10261 || (vr_value[i]->type == VR_VARYING)
10262 || (vr_value[i]->type == VR_UNDEFINED))
10263 continue;
10265 if ((TREE_CODE (vr_value[i]->min) == INTEGER_CST)
10266 && (TREE_CODE (vr_value[i]->max) == INTEGER_CST)
10267 && (vr_value[i]->type == VR_RANGE
10268 || vr_value[i]->type == VR_ANTI_RANGE))
10269 set_range_info (name, vr_value[i]->type, vr_value[i]->min,
10270 vr_value[i]->max);
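/* As a concrete (made-up) instance of the bookkeeping above: if the
   lattice holds VR_RANGE [0, 255] for a non-pointer name x_2,
   set_range_info records [0, 255] on x_2 itself, so later passes can
   still consult it after vr_value is freed below; pointer names and
   VR_VARYING/VR_UNDEFINED entries are skipped.  */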
10273 /* Free allocated memory. */
10274 for (i = 0; i < num_vr_values; i++)
10275 if (vr_value[i])
10277 BITMAP_FREE (vr_value[i]->equiv);
10278 free (vr_value[i]);
10281 free (vr_value);
10282 free (vr_phi_edge_counts);
10284 /* So that we can distinguish between VRP data being available
10285 and not available. */
10286 vr_value = NULL;
10287 vr_phi_edge_counts = NULL;
10291 /* Main entry point to VRP (Value Range Propagation). This pass is
10292 loosely based on J. R. C. Patterson, ``Accurate Static Branch
10293 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
10294 Programming Language Design and Implementation, pp. 67-78, 1995.
10295 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
10297 This is essentially an SSA-CCP pass modified to deal with ranges
10298 instead of constants.
10300 While propagating ranges, we may find that two or more SSA names
10301 have equivalent, though distinct, ranges. For instance,
10303 1 x_9 = p_3->a;
10304 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
10305 3 if (p_4 == q_2)
10306 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
10307 5 endif
10308 6 if (q_2)
10310 In the code above, pointer p_5 has range [q_2, q_2], but from the
10311 code we can also determine that p_5 cannot be NULL and, if q_2 had
10312 a non-varying range, p_5's range should also be compatible with it.
10314 These equivalences are created by two expressions: ASSERT_EXPR and
10315 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
10316 result of another assertion, then we can use the fact that p_5 and
10317 p_4 are equivalent when evaluating p_5's range.
10319 Together with value ranges, we also propagate these equivalences
10320 between names so that we can take advantage of information from
10321 multiple ranges when doing final replacement. Note that this
10322 equivalency relation is transitive but not symmetric.
10324 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
10325 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
10326 in contexts where that assertion does not hold (e.g., in line 6).
10328 TODO: the main difference between this pass and Patterson's is that
10329 we do not propagate edge probabilities. We only compute whether
10330 edges can be taken or not. That is, instead of having a spectrum
10331 of jump probabilities between 0 and 1, we only deal with 0, 1 and
10332 DON'T KNOW. In the future, it may be worthwhile to propagate
10333 probabilities to aid branch prediction. */
10335 static unsigned int
10336 execute_vrp (void)
10338 int i;
10339 edge e;
10340 switch_update *su;
10342 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
10343 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
10344 scev_initialize ();
10346 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
10347 Inserting assertions may split edges which will invalidate
10348 EDGE_DFS_BACK. */
10349 insert_range_assertions ();
10351 to_remove_edges.create (10);
10352 to_update_switch_stmts.create (5);
10353 threadedge_initialize_values ();
10355 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
10356 mark_dfs_back_edges ();
10358 vrp_initialize ();
10359 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
10360 vrp_finalize ();
10362 free_numbers_of_iterations_estimates ();
10364 /* ASSERT_EXPRs must be removed before finalizing jump threads
10365 as finalizing jump threads calls the CFG cleanup code which
10366 does not properly handle ASSERT_EXPRs. */
10367 remove_range_assertions ();
10369 /* If we exposed any new variables, go ahead and put them into
10370 SSA form now, before we handle jump threading. This simplifies
10371 interactions between rewriting of _DECL nodes into SSA form
10372 and rewriting SSA_NAME nodes into SSA form after block
10373 duplication and CFG manipulation. */
10374 update_ssa (TODO_update_ssa);
10376 finalize_jump_threads ();
10378 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
10379 CFG in a broken state and requires a cfg_cleanup run. */
10380 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10381 remove_edge (e);
10382 /* Update SWITCH_EXPR case label vector. */
10383 FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
10385 size_t j;
10386 size_t n = TREE_VEC_LENGTH (su->vec);
10387 tree label;
10388 gimple_switch_set_num_labels (su->stmt, n);
10389 for (j = 0; j < n; j++)
10390 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
10391 /* As we may have replaced the default label with a regular one,
10392 make sure to make it a real default label again. This ensures
10393 optimal expansion. */
10394 label = gimple_switch_label (su->stmt, 0);
10395 CASE_LOW (label) = NULL_TREE;
10396 CASE_HIGH (label) = NULL_TREE;
10399 if (to_remove_edges.length () > 0)
10401 free_dominance_info (CDI_DOMINATORS);
10402 loops_state_set (LOOPS_NEED_FIXUP);
10405 to_remove_edges.release ();
10406 to_update_switch_stmts.release ();
10407 threadedge_finalize_values ();
10409 scev_finalize ();
10410 loop_optimizer_finalize ();
10411 return 0;
10414 namespace {
10416 const pass_data pass_data_vrp =
10418 GIMPLE_PASS, /* type */
10419 "vrp", /* name */
10420 OPTGROUP_NONE, /* optinfo_flags */
10421 TV_TREE_VRP, /* tv_id */
10422 PROP_ssa, /* properties_required */
10423 0, /* properties_provided */
10424 0, /* properties_destroyed */
10425 0, /* todo_flags_start */
10426 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
10429 class pass_vrp : public gimple_opt_pass
10431 public:
10432 pass_vrp (gcc::context *ctxt)
10433 : gimple_opt_pass (pass_data_vrp, ctxt)
10436 /* opt_pass methods: */
10437 opt_pass * clone () { return new pass_vrp (m_ctxt); }
10438 virtual bool gate (function *) { return flag_tree_vrp != 0; }
10439 virtual unsigned int execute (function *) { return execute_vrp (); }
10441 }; // class pass_vrp
10443 } // anon namespace
10445 gimple_opt_pass *
10446 make_pass_vrp (gcc::context *ctxt)
10448 return new pass_vrp (ctxt);