PR middle-end/86864
[official-gcc.git] / gcc / tree-vrp.c
blob: 1adb919a6dfdea20fe4e3043573b60cc4d8dca9c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2018 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimple-walk.h"
44 #include "tree-cfg.h"
45 #include "tree-dfa.h"
46 #include "tree-ssa-loop-manip.h"
47 #include "tree-ssa-loop-niter.h"
48 #include "tree-ssa-loop.h"
49 #include "tree-into-ssa.h"
50 #include "tree-ssa.h"
51 #include "intl.h"
52 #include "cfgloop.h"
53 #include "tree-scalar-evolution.h"
54 #include "tree-ssa-propagate.h"
55 #include "tree-chrec.h"
56 #include "tree-ssa-threadupdate.h"
57 #include "tree-ssa-scopedtables.h"
58 #include "tree-ssa-threadedge.h"
59 #include "omp-general.h"
60 #include "target.h"
61 #include "case-cfn-macros.h"
62 #include "params.h"
63 #include "alloc-pool.h"
64 #include "domwalk.h"
65 #include "tree-cfgcleanup.h"
66 #include "stringpool.h"
67 #include "attribs.h"
68 #include "vr-values.h"
69 #include "builtins.h"
70 #include "wide-int-range.h"
72 /* Set of SSA names found live during the RPO traversal of the function
73 for still active basic-blocks. */
74 static sbitmap *live;
76 /* Return true if the SSA name NAME is live on the edge E. */
78 static bool
79 live_on_edge (edge e, tree name)
81 return (live[e->dest->index]
82 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
85 /* Location information for ASSERT_EXPRs. Each instance of this
86 structure describes an ASSERT_EXPR for an SSA name. Since a single
87 SSA name may have more than one assertion associated with it, these
88 locations are kept in a linked list attached to the corresponding
89 SSA name. */
90 struct assert_locus
92 /* Basic block where the assertion would be inserted. */
93 basic_block bb;
95 /* Some assertions need to be inserted on an edge (e.g., assertions
96 generated by COND_EXPRs). In those cases, BB will be NULL. */
97 edge e;
99 /* Pointer to the statement that generated this assertion. */
100 gimple_stmt_iterator si;
102 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
103 enum tree_code comp_code;
105 /* Value being compared against. */
106 tree val;
108 /* Expression to compare. */
109 tree expr;
111 /* Next node in the linked list. */
112 assert_locus *next;
115 /* If bit I is present, it means that SSA name N_i has a list of
116 assertions that should be inserted in the IL. */
117 static bitmap need_assert_for;
119 /* Array of location lists describing where to insert assertions. ASSERTS_FOR[I]
120 holds a list of ASSERT_LOCUS_T nodes that describe where
121 ASSERT_EXPRs for SSA name N_I should be inserted. */
122 static assert_locus **asserts_for;
124 vec<edge> to_remove_edges;
125 vec<switch_update> to_update_switch_stmts;
128 /* Return the maximum value for TYPE. */
130 tree
131 vrp_val_max (const_tree type)
133 if (!INTEGRAL_TYPE_P (type))
134 return NULL_TREE;
136 return TYPE_MAX_VALUE (type);
139 /* Return the minimum value for TYPE. */
141 tree
142 vrp_val_min (const_tree type)
144 if (!INTEGRAL_TYPE_P (type))
145 return NULL_TREE;
147 return TYPE_MIN_VALUE (type);
150 /* Return whether VAL is equal to the maximum value of its type.
151 We can't do a simple equality comparison with TYPE_MAX_VALUE because
152 C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
153 is not == to the integer constant with the same value in the type. */
155 bool
156 vrp_val_is_max (const_tree val)
158 tree type_max = vrp_val_max (TREE_TYPE (val));
159 return (val == type_max
160 || (type_max != NULL_TREE
161 && operand_equal_p (val, type_max, 0)));
164 /* Return whether VAL is equal to the minimum value of its type. */
166 bool
167 vrp_val_is_min (const_tree val)
169 tree type_min = vrp_val_min (TREE_TYPE (val));
170 return (val == type_min
171 || (type_min != NULL_TREE
172 && operand_equal_p (val, type_min, 0)));
175 /* VR_TYPE describes a range with minimum value *MIN and maximum
176 value *MAX. Restrict the range to the set of values that have
177 no bits set outside NONZERO_BITS. Update *MIN and *MAX and
178 return the new range type.
180 SGN gives the sign of the values described by the range. */
182 enum value_range_type
183 intersect_range_with_nonzero_bits (enum value_range_type vr_type,
184 wide_int *min, wide_int *max,
185 const wide_int &nonzero_bits,
186 signop sgn)
188 if (vr_type == VR_ANTI_RANGE)
190 /* The VR_ANTI_RANGE is equivalent to the union of the ranges
191 A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS
192 to create an inclusive upper bound for A and an inclusive lower
193 bound for B. */
194 wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
195 wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);
197 /* If the calculation of A_MAX wrapped, A is effectively empty
198 and A_MAX is the highest value that satisfies NONZERO_BITS.
199 Likewise if the calculation of B_MIN wrapped, B is effectively
200 empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */
201 bool a_empty = wi::ge_p (a_max, *min, sgn);
202 bool b_empty = wi::le_p (b_min, *max, sgn);
204 /* If both A and B are empty, there are no valid values. */
205 if (a_empty && b_empty)
206 return VR_UNDEFINED;
208 /* If exactly one of A or B is empty, return a VR_RANGE for the
209 other one. */
210 if (a_empty || b_empty)
212 *min = b_min;
213 *max = a_max;
214 gcc_checking_assert (wi::le_p (*min, *max, sgn));
215 return VR_RANGE;
218 /* Update the VR_ANTI_RANGE bounds. */
219 *min = a_max + 1;
220 *max = b_min - 1;
221 gcc_checking_assert (wi::le_p (*min, *max, sgn));
223 /* Now check whether the excluded range includes any values that
224 satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */
225 if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
227 unsigned int precision = min->get_precision ();
228 *min = wi::min_value (precision, sgn);
229 *max = wi::max_value (precision, sgn);
230 vr_type = VR_RANGE;
233 if (vr_type == VR_RANGE)
235 *max = wi::round_down_for_mask (*max, nonzero_bits);
237 /* Check that the range contains at least one valid value. */
238 if (wi::gt_p (*min, *max, sgn))
239 return VR_UNDEFINED;
241 *min = wi::round_up_for_mask (*min, nonzero_bits);
242 gcc_checking_assert (wi::le_p (*min, *max, sgn));
244 return vr_type;
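/* Worked example (added for illustration; the concrete numbers are not
   from the original source): take an unsigned 8-bit type and
   NONZERO_BITS == 0x0c, so the only values with no bits outside the mask
   are 0, 4, 8 and 12.
   - VR_RANGE [3, 9] is tightened to [4, 8]: 3 rounds up to 4 and 9 rounds
     down to 8.
   - VR_ANTI_RANGE ~[5, 7] becomes VR_RANGE [0, 255]: A_MAX = 4, B_MIN = 8,
     and rounding the new *MIN (5) up for the mask already reaches B_MIN,
     so the excluded range contains no value satisfying NONZERO_BITS.  */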
247 /* Set value range VR to VR_UNDEFINED. */
249 static inline void
250 set_value_range_to_undefined (value_range *vr)
252 vr->type = VR_UNDEFINED;
253 vr->min = vr->max = NULL_TREE;
254 if (vr->equiv)
255 bitmap_clear (vr->equiv);
258 /* Set value range VR to VR_VARYING. */
260 void
261 set_value_range_to_varying (value_range *vr)
263 vr->type = VR_VARYING;
264 vr->min = vr->max = NULL_TREE;
265 if (vr->equiv)
266 bitmap_clear (vr->equiv);
269 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
271 void
272 set_value_range (value_range *vr, enum value_range_type t, tree min,
273 tree max, bitmap equiv)
275 /* Check the validity of the range. */
276 if (flag_checking
277 && (t == VR_RANGE || t == VR_ANTI_RANGE))
279 int cmp;
281 gcc_assert (min && max);
283 gcc_assert (!TREE_OVERFLOW_P (min) && !TREE_OVERFLOW_P (max));
285 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
286 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
288 cmp = compare_values (min, max);
289 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
292 if (flag_checking
293 && (t == VR_UNDEFINED || t == VR_VARYING))
295 gcc_assert (min == NULL_TREE && max == NULL_TREE);
296 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
299 vr->type = t;
300 vr->min = min;
301 vr->max = max;
303 /* Since updating the equivalence set involves deep copying the
304 bitmaps, only do it if absolutely necessary.
306 All equivalence bitmaps are allocated from the same obstack. So
307 we can use the obstack associated with EQUIV to allocate vr->equiv. */
308 if (vr->equiv == NULL
309 && equiv != NULL)
310 vr->equiv = BITMAP_ALLOC (equiv->obstack);
312 if (equiv != vr->equiv)
314 if (equiv && !bitmap_empty_p (equiv))
315 bitmap_copy (vr->equiv, equiv);
316 else
317 bitmap_clear (vr->equiv);
322 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
323 This means adjusting T, MIN and MAX representing the case of a
324 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
325 as anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
326 In corner cases where MAX+1 or MIN-1 wraps this will fall back
327 to varying.
328 This routine exists to ease canonicalization in the case where we
329 extract ranges from var + CST op limit. */
331 void
332 set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
333 tree min, tree max, bitmap equiv)
335 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
336 if (t == VR_UNDEFINED)
338 set_value_range_to_undefined (vr);
339 return;
341 else if (t == VR_VARYING)
343 set_value_range_to_varying (vr);
344 return;
347 /* Nothing to canonicalize for symbolic ranges. */
348 if (TREE_CODE (min) != INTEGER_CST
349 || TREE_CODE (max) != INTEGER_CST)
351 set_value_range (vr, t, min, max, equiv);
352 return;
355 /* MIN and MAX are in the wrong order; to swap them we also have to
356 adjust the VR type. */
357 if (tree_int_cst_lt (max, min))
359 tree one, tmp;
361 /* For one bit precision if max < min, then the swapped
362 range covers all values, so for VR_RANGE it is varying and
363 for VR_ANTI_RANGE empty range, so drop to varying as well. */
364 if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
366 set_value_range_to_varying (vr);
367 return;
370 one = build_int_cst (TREE_TYPE (min), 1);
371 tmp = int_const_binop (PLUS_EXPR, max, one);
372 max = int_const_binop (MINUS_EXPR, min, one);
373 min = tmp;
375 /* There's one corner case: if we had [C+1, C] before, we now have
376 that again. But this represents an empty value range, so drop
377 to varying in this case. */
378 if (tree_int_cst_lt (max, min))
380 set_value_range_to_varying (vr);
381 return;
384 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
387 /* Anti-ranges that can be represented as ranges should be so. */
388 if (t == VR_ANTI_RANGE)
390 /* For -fstrict-enums we may receive out-of-range ranges so consider
391 values < -INF and values > INF as -INF/INF as well. */
392 tree type = TREE_TYPE (min);
393 bool is_min = (INTEGRAL_TYPE_P (type)
394 && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
395 bool is_max = (INTEGRAL_TYPE_P (type)
396 && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);
398 if (is_min && is_max)
400 /* We cannot deal with empty ranges, drop to varying.
401 ??? This could be VR_UNDEFINED instead. */
402 set_value_range_to_varying (vr);
403 return;
405 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
406 && (is_min || is_max))
408 /* Non-empty boolean ranges can always be represented
409 as a singleton range. */
410 if (is_min)
411 min = max = vrp_val_max (TREE_TYPE (min));
412 else
413 min = max = vrp_val_min (TREE_TYPE (min));
414 t = VR_RANGE;
416 else if (is_min
417 /* As a special exception preserve non-null ranges. */
418 && !(TYPE_UNSIGNED (TREE_TYPE (min))
419 && integer_zerop (max)))
421 tree one = build_int_cst (TREE_TYPE (max), 1);
422 min = int_const_binop (PLUS_EXPR, max, one);
423 max = vrp_val_max (TREE_TYPE (max));
424 t = VR_RANGE;
426 else if (is_max)
428 tree one = build_int_cst (TREE_TYPE (min), 1);
429 max = int_const_binop (MINUS_EXPR, min, one);
430 min = vrp_val_min (TREE_TYPE (min));
431 t = VR_RANGE;
435 /* Do not drop [-INF(OVF), +INF(OVF)] to varying. (OVF) has to be sticky
436 to make sure VRP iteration terminates, otherwise we can get into
437 oscillations. */
439 set_value_range (vr, t, min, max, equiv);
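/* Worked examples of the canonicalization above (illustrative only, for an
   unsigned 8-bit type):
   - VR_RANGE [200, 10] has MAX < MIN, so it becomes the anti-range
     ~[11, 199], which describes the same set [200, 255] U [0, 10].
   - VR_ANTI_RANGE ~[0, 10] excludes a prefix starting at the type minimum,
     so it is rewritten as the plain range [11, 255].
   - VR_ANTI_RANGE ~[0, 0] is kept as-is: non-null ranges are preserved by
     the special exception above.  */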
442 /* Copy value range FROM into value range TO. */
444 void
445 copy_value_range (value_range *to, const value_range *from)
447 set_value_range (to, from->type, from->min, from->max, from->equiv);
450 /* Set value range VR to a single value. This function is only called
451 with values we get from statements, and exists to clear the
452 TREE_OVERFLOW flag. */
454 void
455 set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
457 gcc_assert (is_gimple_min_invariant (val));
458 if (TREE_OVERFLOW_P (val))
459 val = drop_tree_overflow (val);
460 set_value_range (vr, VR_RANGE, val, val, equiv);
463 /* Set value range VR to a non-NULL range of type TYPE. */
465 void
466 set_value_range_to_nonnull (value_range *vr, tree type)
468 tree zero = build_int_cst (type, 0);
469 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
473 /* Set value range VR to a NULL range of type TYPE. */
475 void
476 set_value_range_to_null (value_range *vr, tree type)
478 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
481 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
483 bool
484 vrp_operand_equal_p (const_tree val1, const_tree val2)
486 if (val1 == val2)
487 return true;
488 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
489 return false;
490 return true;
493 /* Return true, if the bitmaps B1 and B2 are equal. */
495 bool
496 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
498 return (b1 == b2
499 || ((!b1 || bitmap_empty_p (b1))
500 && (!b2 || bitmap_empty_p (b2)))
501 || (b1 && b2
502 && bitmap_equal_p (b1, b2)));
505 /* Return true if VR is [0, 0]. */
507 static inline bool
508 range_is_null (const value_range *vr)
510 return vr->type == VR_RANGE
511 && integer_zerop (vr->min)
512 && integer_zerop (vr->max);
515 /* Return true if max and min of VR are INTEGER_CST. It's not necessarily
516 a singleton. */
518 bool
519 range_int_cst_p (const value_range *vr)
521 return (vr->type == VR_RANGE
522 && TREE_CODE (vr->max) == INTEGER_CST
523 && TREE_CODE (vr->min) == INTEGER_CST);
526 /* Return true if VR is an INTEGER_CST singleton. */
528 bool
529 range_int_cst_singleton_p (const value_range *vr)
531 return (range_int_cst_p (vr)
532 && tree_int_cst_equal (vr->min, vr->max));
535 /* Return true if value range VR involves at least one symbol. */
537 bool
538 symbolic_range_p (const value_range *vr)
540 return (!is_gimple_min_invariant (vr->min)
541 || !is_gimple_min_invariant (vr->max));
544 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
545 otherwise. We only handle additive operations and set NEG to true if the
546 symbol is negated and INV to the invariant part, if any. */
548 tree
549 get_single_symbol (tree t, bool *neg, tree *inv)
551 bool neg_;
552 tree inv_;
554 *inv = NULL_TREE;
555 *neg = false;
557 if (TREE_CODE (t) == PLUS_EXPR
558 || TREE_CODE (t) == POINTER_PLUS_EXPR
559 || TREE_CODE (t) == MINUS_EXPR)
561 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
563 neg_ = (TREE_CODE (t) == MINUS_EXPR);
564 inv_ = TREE_OPERAND (t, 0);
565 t = TREE_OPERAND (t, 1);
567 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
569 neg_ = false;
570 inv_ = TREE_OPERAND (t, 1);
571 t = TREE_OPERAND (t, 0);
573 else
574 return NULL_TREE;
576 else
578 neg_ = false;
579 inv_ = NULL_TREE;
582 if (TREE_CODE (t) == NEGATE_EXPR)
584 t = TREE_OPERAND (t, 0);
585 neg_ = !neg_;
588 if (TREE_CODE (t) != SSA_NAME)
589 return NULL_TREE;
591 if (inv_ && TREE_OVERFLOW_P (inv_))
592 inv_ = drop_tree_overflow (inv_);
594 *neg = neg_;
595 *inv = inv_;
596 return t;
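/* Examples of the decomposition performed above (illustrative only):
     x_1 + 16  -> symbol x_1, *NEG = false, *INV = 16
     10 - x_1  -> symbol x_1, *NEG = true,  *INV = 10
     -x_1      -> symbol x_1, *NEG = true,  *INV = NULL_TREE
     x_1 * 2   -> NULL_TREE (not an additive expression)
   i.e. the input is viewed as INV plus or minus the single symbol.  */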
599 /* The reverse operation: build a symbolic expression with TYPE
600 from symbol SYM, negated according to NEG, and invariant INV. */
602 static tree
603 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
605 const bool pointer_p = POINTER_TYPE_P (type);
606 tree t = sym;
608 if (neg)
609 t = build1 (NEGATE_EXPR, type, t);
611 if (integer_zerop (inv))
612 return t;
614 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
617 /* Return
618 1 if VAL < VAL2
619 0 if !(VAL < VAL2)
620 -2 if those are incomparable. */
622 operand_less_p (tree val, tree val2)
624 /* LT is folded faster than GE and others. Inline the common case. */
625 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
626 return tree_int_cst_lt (val, val2);
627 else
629 tree tcmp;
631 fold_defer_overflow_warnings ();
633 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
635 fold_undefer_and_ignore_overflow_warnings ();
637 if (!tcmp
638 || TREE_CODE (tcmp) != INTEGER_CST)
639 return -2;
641 if (!integer_zerop (tcmp))
642 return 1;
645 return 0;
648 /* Compare two values VAL1 and VAL2. Return
650 -2 if VAL1 and VAL2 cannot be compared at compile-time,
651 -1 if VAL1 < VAL2,
652 0 if VAL1 == VAL2,
653 +1 if VAL1 > VAL2, and
654 +2 if VAL1 != VAL2
656 This is similar to tree_int_cst_compare but supports pointer values
657 and values that cannot be compared at compile time.
659 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
660 true if the return value is only valid if we assume that signed
661 overflow is undefined. */
664 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
666 if (val1 == val2)
667 return 0;
669 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
670 both integers. */
671 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
672 == POINTER_TYPE_P (TREE_TYPE (val2)));
674 /* Convert the two values into the same type. This is needed because
675 sizetype causes sign extension even for unsigned types. */
676 val2 = fold_convert (TREE_TYPE (val1), val2);
677 STRIP_USELESS_TYPE_CONVERSION (val2);
679 const bool overflow_undefined
680 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
681 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
682 tree inv1, inv2;
683 bool neg1, neg2;
684 tree sym1 = get_single_symbol (val1, &neg1, &inv1);
685 tree sym2 = get_single_symbol (val2, &neg2, &inv2);
687 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
688 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
689 if (sym1 && sym2)
691 /* Both values must use the same name with the same sign. */
692 if (sym1 != sym2 || neg1 != neg2)
693 return -2;
695 /* [-]NAME + CST == [-]NAME + CST. */
696 if (inv1 == inv2)
697 return 0;
699 /* If overflow is defined we cannot simplify more. */
700 if (!overflow_undefined)
701 return -2;
703 if (strict_overflow_p != NULL
704 /* Symbolic range building sets TREE_NO_WARNING to declare
705 that overflow doesn't happen. */
706 && (!inv1 || !TREE_NO_WARNING (val1))
707 && (!inv2 || !TREE_NO_WARNING (val2)))
708 *strict_overflow_p = true;
710 if (!inv1)
711 inv1 = build_int_cst (TREE_TYPE (val1), 0);
712 if (!inv2)
713 inv2 = build_int_cst (TREE_TYPE (val2), 0);
715 return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
716 TYPE_SIGN (TREE_TYPE (val1)));
719 const bool cst1 = is_gimple_min_invariant (val1);
720 const bool cst2 = is_gimple_min_invariant (val2);
722 /* If one is of the form '[-]NAME + CST' and the other is constant, then
723 it might be possible to say something depending on the constants. */
724 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
726 if (!overflow_undefined)
727 return -2;
729 if (strict_overflow_p != NULL
730 /* Symbolic range building sets TREE_NO_WARNING to declare
731 that overflow doesn't happen. */
732 && (!sym1 || !TREE_NO_WARNING (val1))
733 && (!sym2 || !TREE_NO_WARNING (val2)))
734 *strict_overflow_p = true;
736 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
737 tree cst = cst1 ? val1 : val2;
738 tree inv = cst1 ? inv2 : inv1;
740 /* Compute the difference between the constants. If it overflows or
741 underflows, this means that we can trivially compare the NAME with
742 it and, consequently, the two values with each other. */
743 wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
744 if (wi::cmp (0, wi::to_wide (inv), sgn)
745 != wi::cmp (diff, wi::to_wide (cst), sgn))
747 const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
748 return cst1 ? res : -res;
751 return -2;
754 /* We cannot say anything more for non-constants. */
755 if (!cst1 || !cst2)
756 return -2;
758 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
760 /* We cannot compare overflowed values. */
761 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
762 return -2;
764 if (TREE_CODE (val1) == INTEGER_CST
765 && TREE_CODE (val2) == INTEGER_CST)
766 return tree_int_cst_compare (val1, val2);
768 if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
770 if (known_eq (wi::to_poly_widest (val1),
771 wi::to_poly_widest (val2)))
772 return 0;
773 if (known_lt (wi::to_poly_widest (val1),
774 wi::to_poly_widest (val2)))
775 return -1;
776 if (known_gt (wi::to_poly_widest (val1),
777 wi::to_poly_widest (val2)))
778 return 1;
781 return -2;
783 else
785 tree t;
787 /* First see if VAL1 and VAL2 are not the same. */
788 if (val1 == val2 || operand_equal_p (val1, val2, 0))
789 return 0;
791 /* If VAL1 is a lower address than VAL2, return -1. */
792 if (operand_less_p (val1, val2) == 1)
793 return -1;
795 /* If VAL1 is a higher address than VAL2, return +1. */
796 if (operand_less_p (val2, val1) == 1)
797 return 1;
799 /* If VAL1 is different than VAL2, return +2.
800 For integer constants we either have already returned -1 or 1
801 or they are equivalent. We still might succeed in proving
802 something about non-trivial operands. */
803 if (TREE_CODE (val1) != INTEGER_CST
804 || TREE_CODE (val2) != INTEGER_CST)
806 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
807 if (t && integer_onep (t))
808 return 2;
811 return -2;
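/* Example of the symbolic case above (illustrative only): comparing
   n_5 + 1 with n_5 + 3 finds the same symbol with the same sign on both
   sides, so the result is wi::cmp (1, 3) == -1, i.e. VAL1 < VAL2, and
   *STRICT_OVERFLOW_P is set because the conclusion relies on signed
   overflow being undefined.  Comparing n_5 + 1 with m_7 + 3 instead
   returns -2, since the two names are unrelated.  */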
815 /* Compare values like compare_values_warnv. */
818 compare_values (tree val1, tree val2)
820 bool sop;
821 return compare_values_warnv (val1, val2, &sop);
825 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
826 0 if VAL is not inside [MIN, MAX],
827 -2 if we cannot tell either way.
829 Benchmark compile/20001226-1.c compilation time after changing this
830 function. */
833 value_inside_range (tree val, tree min, tree max)
835 int cmp1, cmp2;
837 cmp1 = operand_less_p (val, min);
838 if (cmp1 == -2)
839 return -2;
840 if (cmp1 == 1)
841 return 0;
843 cmp2 = operand_less_p (max, val);
844 if (cmp2 == -2)
845 return -2;
847 return !cmp2;
851 /* Return true if value ranges VR0 and VR1 have a non-empty
852 intersection.
854 Benchmark compile/20001226-1.c compilation time after changing this
855 function.
858 static inline bool
859 value_ranges_intersect_p (const value_range *vr0, const value_range *vr1)
861 /* The value ranges do not intersect if the maximum of the first range is
862 less than the minimum of the second range or vice versa.
863 When those relations are unknown, we can't do any better. */
864 if (operand_less_p (vr0->max, vr1->min) != 0)
865 return false;
866 if (operand_less_p (vr1->max, vr0->min) != 0)
867 return false;
868 return true;
872 /* Return TRUE if *VR includes the value zero. */
874 bool
875 range_includes_zero_p (const value_range *vr)
877 if (vr->type == VR_VARYING)
878 return true;
880 /* Ughh, we don't know. We choose not to optimize. */
881 if (vr->type == VR_UNDEFINED)
882 return true;
884 tree zero = build_int_cst (TREE_TYPE (vr->min), 0);
885 if (vr->type == VR_ANTI_RANGE)
887 int res = value_inside_range (zero, vr->min, vr->max);
888 return res == 0 || res == -2;
890 return value_inside_range (zero, vr->min, vr->max) != 0;
893 /* Return true if *VR is known to contain only nonnegative values. */
895 static inline bool
896 value_range_nonnegative_p (const value_range *vr)
898 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
899 which would return a useful value should be encoded as a
900 VR_RANGE. */
901 if (vr->type == VR_RANGE)
903 int result = compare_values (vr->min, integer_zero_node);
904 return (result == 0 || result == 1);
907 return false;
910 /* If *VR has a value range that is a single constant value return that,
911 otherwise return NULL_TREE. */
913 tree
914 value_range_constant_singleton (const value_range *vr)
916 if (vr->type == VR_RANGE
917 && vrp_operand_equal_p (vr->min, vr->max)
918 && is_gimple_min_invariant (vr->min))
919 return vr->min;
921 return NULL_TREE;
924 /* Value range wrapper for wide_int_range_set_zero_nonzero_bits.
926 Compute MAY_BE_NONZERO and MUST_BE_NONZERO bit masks for range in VR.
928 Return TRUE if VR was a constant range and we were able to compute
929 the bit masks. */
931 bool
932 vrp_set_zero_nonzero_bits (const tree expr_type,
933 const value_range *vr,
934 wide_int *may_be_nonzero,
935 wide_int *must_be_nonzero)
937 if (!range_int_cst_p (vr))
939 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
940 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
941 return false;
943 wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
944 wi::to_wide (vr->min),
945 wi::to_wide (vr->max),
946 *may_be_nonzero, *must_be_nonzero);
947 return true;
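/* Example (illustrative only): for the unsigned range [4, 7] the values are
   0b100 .. 0b111, so *MAY_BE_NONZERO becomes 0b111 (any of the low three
   bits may be set) and *MUST_BE_NONZERO becomes 0b100 (bit 2 is set in
   every value).  For anything that is not a constant range the masks fall
   back to all-ones and zero and the function returns false.  */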
950 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
951 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
952 false otherwise. If *AR can be represented with a single range
953 *VR1 will be VR_UNDEFINED. */
955 static bool
956 ranges_from_anti_range (const value_range *ar,
957 value_range *vr0, value_range *vr1)
959 tree type = TREE_TYPE (ar->min);
961 vr0->type = VR_UNDEFINED;
962 vr1->type = VR_UNDEFINED;
964 /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
965 [A+1, +INF]. Not sure if this helps in practice, though. */
967 if (ar->type != VR_ANTI_RANGE
968 || TREE_CODE (ar->min) != INTEGER_CST
969 || TREE_CODE (ar->max) != INTEGER_CST
970 || !vrp_val_min (type)
971 || !vrp_val_max (type))
972 return false;
974 if (!vrp_val_is_min (ar->min))
976 vr0->type = VR_RANGE;
977 vr0->min = vrp_val_min (type);
978 vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1);
980 if (!vrp_val_is_max (ar->max))
982 vr1->type = VR_RANGE;
983 vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1);
984 vr1->max = vrp_val_max (type);
986 if (vr0->type == VR_UNDEFINED)
988 *vr0 = *vr1;
989 vr1->type = VR_UNDEFINED;
992 return vr0->type != VR_UNDEFINED;
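/* Examples (illustrative only), for a 32-bit int:
   - ~[3, 10] splits into VR0 = [INT_MIN, 2] and VR1 = [11, INT_MAX].
   - ~[INT_MIN, 10] is representable as a single range, so VR0 becomes
     [11, INT_MAX] and VR1 stays VR_UNDEFINED.  */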
995 /* Extract the components of a value range into a pair of wide ints in
996 [WMIN, WMAX].
998 If the value range is anything but a VR_*RANGE of constants, the
999 resulting wide ints are set to [TYPE_MIN, TYPE_MAX] for the type.
1001 static void inline
1002 extract_range_into_wide_ints (const value_range *vr,
1003 signop sign, unsigned prec,
1004 wide_int &wmin, wide_int &wmax)
1006 if ((vr->type == VR_RANGE
1007 || vr->type == VR_ANTI_RANGE)
1008 && TREE_CODE (vr->min) == INTEGER_CST
1009 && TREE_CODE (vr->max) == INTEGER_CST)
1011 wmin = wi::to_wide (vr->min);
1012 wmax = wi::to_wide (vr->max);
1014 else
1016 wmin = wi::min_value (prec, sign);
1017 wmax = wi::max_value (prec, sign);
1021 /* Value range wrapper for wide_int_range_multiplicative_op:
1023 *VR = *VR0 .CODE. *VR1. */
1025 static void
1026 extract_range_from_multiplicative_op (value_range *vr,
1027 enum tree_code code,
1028 const value_range *vr0,
1029 const value_range *vr1)
1031 gcc_assert (code == MULT_EXPR
1032 || code == TRUNC_DIV_EXPR
1033 || code == FLOOR_DIV_EXPR
1034 || code == CEIL_DIV_EXPR
1035 || code == EXACT_DIV_EXPR
1036 || code == ROUND_DIV_EXPR
1037 || code == RSHIFT_EXPR
1038 || code == LSHIFT_EXPR);
1039 gcc_assert (vr0->type == VR_RANGE && vr0->type == vr1->type);
1041 tree type = TREE_TYPE (vr0->min);
1042 wide_int res_lb, res_ub;
1043 wide_int vr0_lb = wi::to_wide (vr0->min);
1044 wide_int vr0_ub = wi::to_wide (vr0->max);
1045 wide_int vr1_lb = wi::to_wide (vr1->min);
1046 wide_int vr1_ub = wi::to_wide (vr1->max);
1047 bool overflow_undefined = TYPE_OVERFLOW_UNDEFINED (type);
1048 bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
1049 unsigned prec = TYPE_PRECISION (type);
1051 if (wide_int_range_multiplicative_op (res_lb, res_ub,
1052 code, TYPE_SIGN (type), prec,
1053 vr0_lb, vr0_ub, vr1_lb, vr1_ub,
1054 overflow_undefined, overflow_wraps))
1055 set_and_canonicalize_value_range (vr, VR_RANGE,
1056 wide_int_to_tree (type, res_lb),
1057 wide_int_to_tree (type, res_ub), NULL);
1058 else
1059 set_value_range_to_varying (vr);
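/* Example (illustrative only): for MULT_EXPR with *VR0 = [2, 3] and
   *VR1 = [10, 11] the helper computes the extremes of the cross products
   of the bounds and yields [20, 33].  If it cannot compute a result (for
   instance because a product may wrap), the range is dropped to
   VR_VARYING instead.  */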
1062 /* If BOUND will include a symbolic bound, adjust it accordingly,
1063 otherwise leave it as is.
1065 CODE is the original operation that combined the bounds (PLUS_EXPR
1066 or MINUS_EXPR).
1068 TYPE is the type of the original operation.
1070 SYM_OPn is the symbolic part of OPn, if it has one.
1072 NEG_OPn is TRUE if the OPn was negated. */
1074 static void
1075 adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
1076 tree sym_op0, tree sym_op1,
1077 bool neg_op0, bool neg_op1)
1079 bool minus_p = (code == MINUS_EXPR);
1080 /* If the result bound is constant, we're done; otherwise, build the
1081 symbolic bound. */
1082 if (sym_op0 == sym_op1)
1084 else if (sym_op0)
1085 bound = build_symbolic_expr (type, sym_op0,
1086 neg_op0, bound);
1087 else if (sym_op1)
1089 /* We may not negate if that might introduce
1090 undefined overflow. */
1091 if (!minus_p
1092 || neg_op1
1093 || TYPE_OVERFLOW_WRAPS (type))
1094 bound = build_symbolic_expr (type, sym_op1,
1095 neg_op1 ^ minus_p, bound);
1096 else
1097 bound = NULL_TREE;
1101 /* Combine OP0 and OP1, which are two parts of a bound, into one wide
1102 int bound according to CODE. CODE is the operation combining the
1103 bound (either a PLUS_EXPR or a MINUS_EXPR).
1105 TYPE is the type of the combine operation.
1107 WI is the wide int to store the result.
1109 OVF is set to wi::OVF_UNDERFLOW if an underflow occurred, to wi::OVF_OVERFLOW
1110 if an overflow occurred, or to wi::OVF_NONE if neither occurred. */
1112 static void
1113 combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
1114 tree type, tree op0, tree op1)
1116 bool minus_p = (code == MINUS_EXPR);
1117 const signop sgn = TYPE_SIGN (type);
1118 const unsigned int prec = TYPE_PRECISION (type);
1120 /* Combine the bounds, if any. */
1121 if (op0 && op1)
1123 if (minus_p)
1124 wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
1125 else
1126 wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
1128 else if (op0)
1129 wi = wi::to_wide (op0);
1130 else if (op1)
1132 if (minus_p)
1133 wi = wi::neg (wi::to_wide (op1), &ovf);
1134 else
1135 wi = wi::to_wide (op1);
1137 else
1138 wi = wi::shwi (0, prec);
1141 /* Given a range in [WMIN, WMAX], adjust it for possible overflow and
1142 put the result in VR.
1144 TYPE is the type of the range.
1146 MIN_OVF and MAX_OVF indicate what type of overflow, if any,
1147 occurred while originally calculating WMIN or WMAX. wi::OVF_UNDERFLOW indicates
1148 underflow, wi::OVF_OVERFLOW indicates overflow, and wi::OVF_NONE indicates neither. */
1150 static void
1151 set_value_range_with_overflow (value_range &vr,
1152 tree type,
1153 const wide_int &wmin, const wide_int &wmax,
1154 wi::overflow_type min_ovf,
1155 wi::overflow_type max_ovf)
1157 const signop sgn = TYPE_SIGN (type);
1158 const unsigned int prec = TYPE_PRECISION (type);
1159 vr.type = VR_RANGE;
1160 vr.equiv = NULL;
1161 if (TYPE_OVERFLOW_WRAPS (type))
1163 /* If overflow wraps, truncate the values and adjust the
1164 range kind and bounds appropriately. */
1165 wide_int tmin = wide_int::from (wmin, prec, sgn);
1166 wide_int tmax = wide_int::from (wmax, prec, sgn);
1167 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
1169 /* No overflow or both overflow or underflow. The
1170 range kind stays VR_RANGE. */
1171 vr.min = wide_int_to_tree (type, tmin);
1172 vr.max = wide_int_to_tree (type, tmax);
1174 else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
1175 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
1177 /* Min underflow or max overflow. The range kind
1178 changes to VR_ANTI_RANGE. */
1179 bool covers = false;
1180 wide_int tem = tmin;
1181 vr.type = VR_ANTI_RANGE;
1182 tmin = tmax + 1;
1183 if (wi::cmp (tmin, tmax, sgn) < 0)
1184 covers = true;
1185 tmax = tem - 1;
1186 if (wi::cmp (tmax, tem, sgn) > 0)
1187 covers = true;
1188 /* If the anti-range would cover nothing, drop to varying.
1189 Likewise if the anti-range bounds are outside of the
1190 type's values. */
1191 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
1193 set_value_range_to_varying (&vr);
1194 return;
1196 vr.min = wide_int_to_tree (type, tmin);
1197 vr.max = wide_int_to_tree (type, tmax);
1199 else
1201 /* Other underflow and/or overflow, drop to VR_VARYING. */
1202 set_value_range_to_varying (&vr);
1203 return;
1206 else
1208 /* If overflow does not wrap, saturate to the type's min/max
1209 value. */
1210 wide_int type_min = wi::min_value (prec, sgn);
1211 wide_int type_max = wi::max_value (prec, sgn);
1212 if (min_ovf == wi::OVF_UNDERFLOW)
1213 vr.min = wide_int_to_tree (type, type_min);
1214 else if (min_ovf == wi::OVF_OVERFLOW)
1215 vr.min = wide_int_to_tree (type, type_max);
1216 else
1217 vr.min = wide_int_to_tree (type, wmin);
1219 if (max_ovf == wi::OVF_UNDERFLOW)
1220 vr.max = wide_int_to_tree (type, type_min);
1221 else if (max_ovf == wi::OVF_OVERFLOW)
1222 vr.max = wide_int_to_tree (type, type_max);
1223 else
1224 vr.max = wide_int_to_tree (type, wmax);
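/* Worked example (illustrative only), for an unsigned 8-bit wrapping type:
   if the bounds were computed as WMIN = 250 (no overflow) and WMAX = 260
   (MAX_OVF == wi::OVF_OVERFLOW), only the upper bound wrapped, so the
   result becomes the anti-range ~[5, 249]; the true set of values is
   [250, 255] U [0, 4].  For a type whose overflow does not wrap (e.g. a
   signed type with undefined overflow), an overflowed bound simply
   saturates to the type's minimum or maximum value.  */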
1228 /* Extract range information from a binary operation CODE based on
1229 the ranges of each of its operands *VR0 and *VR1 with resulting
1230 type EXPR_TYPE. The resulting range is stored in *VR. */
1232 void
1233 extract_range_from_binary_expr_1 (value_range *vr,
1234 enum tree_code code, tree expr_type,
1235 const value_range *vr0_,
1236 const value_range *vr1_)
1238 signop sign = TYPE_SIGN (expr_type);
1239 unsigned int prec = TYPE_PRECISION (expr_type);
1240 value_range vr0 = *vr0_, vr1 = *vr1_;
1241 value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
1242 enum value_range_type type;
1243 tree min = NULL_TREE, max = NULL_TREE;
1244 int cmp;
1246 if (!INTEGRAL_TYPE_P (expr_type)
1247 && !POINTER_TYPE_P (expr_type))
1249 set_value_range_to_varying (vr);
1250 return;
1253 /* Not all binary expressions can be applied to ranges in a
1254 meaningful way. Handle only arithmetic operations. */
1255 if (code != PLUS_EXPR
1256 && code != MINUS_EXPR
1257 && code != POINTER_PLUS_EXPR
1258 && code != MULT_EXPR
1259 && code != TRUNC_DIV_EXPR
1260 && code != FLOOR_DIV_EXPR
1261 && code != CEIL_DIV_EXPR
1262 && code != EXACT_DIV_EXPR
1263 && code != ROUND_DIV_EXPR
1264 && code != TRUNC_MOD_EXPR
1265 && code != RSHIFT_EXPR
1266 && code != LSHIFT_EXPR
1267 && code != MIN_EXPR
1268 && code != MAX_EXPR
1269 && code != BIT_AND_EXPR
1270 && code != BIT_IOR_EXPR
1271 && code != BIT_XOR_EXPR)
1273 set_value_range_to_varying (vr);
1274 return;
1277 /* If both ranges are UNDEFINED, so is the result. */
1278 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
1280 set_value_range_to_undefined (vr);
1281 return;
1283 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
1284 code. At some point we may want to special-case operations that
1285 have UNDEFINED result for all or some value-ranges of the not UNDEFINED
1286 operand. */
1287 else if (vr0.type == VR_UNDEFINED)
1288 set_value_range_to_varying (&vr0);
1289 else if (vr1.type == VR_UNDEFINED)
1290 set_value_range_to_varying (&vr1);
1292 /* We get imprecise results from ranges_from_anti_range when
1293 code is EXACT_DIV_EXPR. We could mask out bits in the resulting
1294 range, but then we also need to hack up vrp_meet. It's just
1295 easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR. */
1296 if (code == EXACT_DIV_EXPR
1297 && vr0.type == VR_ANTI_RANGE
1298 && vr0.min == vr0.max
1299 && integer_zerop (vr0.min))
1301 set_value_range_to_nonnull (vr, expr_type);
1302 return;
1305 /* Now canonicalize anti-ranges to ranges when they are not symbolic
1306 and express ~[] op X as ([]' op X) U ([]'' op X). */
1307 if (vr0.type == VR_ANTI_RANGE
1308 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
1310 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
1311 if (vrtem1.type != VR_UNDEFINED)
1313 value_range vrres = VR_INITIALIZER;
1314 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
1315 &vrtem1, vr1_);
1316 vrp_meet (vr, &vrres);
1318 return;
1320 /* Likewise for X op ~[]. */
1321 if (vr1.type == VR_ANTI_RANGE
1322 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
1324 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
1325 if (vrtem1.type != VR_UNDEFINED)
1327 value_range vrres = VR_INITIALIZER;
1328 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
1329 vr0_, &vrtem1);
1330 vrp_meet (vr, &vrres);
1332 return;
1335 /* The type of the resulting value range defaults to VR0.TYPE. */
1336 type = vr0.type;
1338 /* Refuse to operate on VARYING ranges, ranges of different kinds
1339 and symbolic ranges. As an exception, we allow BIT_{AND,IOR}
1340 because we may be able to derive a useful range even if one of
1341 the operands is VR_VARYING or symbolic range. Similarly for
1342 divisions, MIN/MAX and PLUS/MINUS.
1344 TODO, we may be able to derive anti-ranges in some cases. */
1345 if (code != BIT_AND_EXPR
1346 && code != BIT_IOR_EXPR
1347 && code != TRUNC_DIV_EXPR
1348 && code != FLOOR_DIV_EXPR
1349 && code != CEIL_DIV_EXPR
1350 && code != EXACT_DIV_EXPR
1351 && code != ROUND_DIV_EXPR
1352 && code != TRUNC_MOD_EXPR
1353 && code != MIN_EXPR
1354 && code != MAX_EXPR
1355 && code != PLUS_EXPR
1356 && code != MINUS_EXPR
1357 && code != RSHIFT_EXPR
1358 && code != POINTER_PLUS_EXPR
1359 && (vr0.type == VR_VARYING
1360 || vr1.type == VR_VARYING
1361 || vr0.type != vr1.type
1362 || symbolic_range_p (&vr0)
1363 || symbolic_range_p (&vr1)))
1365 set_value_range_to_varying (vr);
1366 return;
1369 /* Now evaluate the expression to determine the new range. */
1370 if (POINTER_TYPE_P (expr_type))
1372 if (code == MIN_EXPR || code == MAX_EXPR)
1374 /* For MIN/MAX expressions with pointers, we only care about
1375 nullness: if both are non-null, then the result is non-null.
1376 If both are null, then the result is null. Otherwise it
1377 is varying. */
1378 if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
1379 set_value_range_to_nonnull (vr, expr_type);
1380 else if (range_is_null (&vr0) && range_is_null (&vr1))
1381 set_value_range_to_null (vr, expr_type);
1382 else
1383 set_value_range_to_varying (vr);
1385 else if (code == POINTER_PLUS_EXPR)
1387 /* For pointer types, we are really only interested in asserting
1388 whether the expression evaluates to non-NULL. */
1389 if (!range_includes_zero_p (&vr0)
1390 || !range_includes_zero_p (&vr1))
1391 set_value_range_to_nonnull (vr, expr_type);
1392 else if (range_is_null (&vr0) && range_is_null (&vr1))
1393 set_value_range_to_null (vr, expr_type);
1394 else
1395 set_value_range_to_varying (vr);
1397 else if (code == BIT_AND_EXPR)
1399 /* For pointer types, we are really only interested in asserting
1400 whether the expression evaluates to non-NULL. */
1401 if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
1402 set_value_range_to_nonnull (vr, expr_type);
1403 else if (range_is_null (&vr0) || range_is_null (&vr1))
1404 set_value_range_to_null (vr, expr_type);
1405 else
1406 set_value_range_to_varying (vr);
1408 else
1409 set_value_range_to_varying (vr);
1411 return;
1414 /* For integer ranges, apply the operation to each end of the
1415 range and see what we end up with. */
1416 if (code == PLUS_EXPR || code == MINUS_EXPR)
1418 /* This will normalize things such that calculating
1419 [0,0] - VR_VARYING is not dropped to varying, but is
1420 calculated as [MIN+1, MAX]. */
1421 if (vr0.type == VR_VARYING)
1423 vr0.type = VR_RANGE;
1424 vr0.min = vrp_val_min (expr_type);
1425 vr0.max = vrp_val_max (expr_type);
1427 if (vr1.type == VR_VARYING)
1429 vr1.type = VR_RANGE;
1430 vr1.min = vrp_val_min (expr_type);
1431 vr1.max = vrp_val_max (expr_type);
1434 const bool minus_p = (code == MINUS_EXPR);
1435 tree min_op0 = vr0.min;
1436 tree min_op1 = minus_p ? vr1.max : vr1.min;
1437 tree max_op0 = vr0.max;
1438 tree max_op1 = minus_p ? vr1.min : vr1.max;
1439 tree sym_min_op0 = NULL_TREE;
1440 tree sym_min_op1 = NULL_TREE;
1441 tree sym_max_op0 = NULL_TREE;
1442 tree sym_max_op1 = NULL_TREE;
1443 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
1445 neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
1447 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
1448 single-symbolic ranges, try to compute the precise resulting range,
1449 but only if we know that this resulting range will also be constant
1450 or single-symbolic. */
1451 if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
1452 && (TREE_CODE (min_op0) == INTEGER_CST
1453 || (sym_min_op0
1454 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
1455 && (TREE_CODE (min_op1) == INTEGER_CST
1456 || (sym_min_op1
1457 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
1458 && (!(sym_min_op0 && sym_min_op1)
1459 || (sym_min_op0 == sym_min_op1
1460 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
1461 && (TREE_CODE (max_op0) == INTEGER_CST
1462 || (sym_max_op0
1463 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
1464 && (TREE_CODE (max_op1) == INTEGER_CST
1465 || (sym_max_op1
1466 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
1467 && (!(sym_max_op0 && sym_max_op1)
1468 || (sym_max_op0 == sym_max_op1
1469 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
1471 wide_int wmin, wmax;
1472 wi::overflow_type min_ovf = wi::OVF_NONE;
1473 wi::overflow_type max_ovf = wi::OVF_NONE;
1475 /* Build the bounds. */
1476 combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
1477 combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
1479 /* If we have overflow for the constant part and the resulting
1480 range will be symbolic, drop to VR_VARYING. */
1481 if (((bool)min_ovf && sym_min_op0 != sym_min_op1)
1482 || ((bool)max_ovf && sym_max_op0 != sym_max_op1))
1484 set_value_range_to_varying (vr);
1485 return;
1488 /* Adjust the range for possible overflow. */
1489 set_value_range_with_overflow (*vr, expr_type,
1490 wmin, wmax, min_ovf, max_ovf);
1491 if (vr->type == VR_VARYING)
1492 return;
1494 /* Build the symbolic bounds if needed. */
1495 adjust_symbolic_bound (vr->min, code, expr_type,
1496 sym_min_op0, sym_min_op1,
1497 neg_min_op0, neg_min_op1);
1498 adjust_symbolic_bound (vr->max, code, expr_type,
1499 sym_max_op0, sym_max_op1,
1500 neg_max_op0, neg_max_op1);
1501 /* ?? It would probably be cleaner to eliminate min/max/type
1502 entirely and hold these values in VR directly. */
1503 min = vr->min;
1504 max = vr->max;
1505 type = vr->type;
1507 else
1509 /* For other cases, for example if we have a PLUS_EXPR with two
1510 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
1511 to compute a precise range for such a case.
1512 ??? General even mixed range kind operations can be expressed
1513 by for example transforming ~[3, 4] + [1, 2] to range-only
1514 operations and a union primitive:
1515 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
1516 [-INF+1, 4] U [6, +INF(OVF)]
1517 though usually the union is not exactly representable with
1518 a single range or anti-range as the above is
1519 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
1520 but one could use a scheme similar to equivalences for this. */
1521 set_value_range_to_varying (vr);
1522 return;
1525 else if (code == MIN_EXPR
1526 || code == MAX_EXPR)
1528 wide_int wmin, wmax;
1529 wide_int vr0_min, vr0_max;
1530 wide_int vr1_min, vr1_max;
1531 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1532 extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
1533 if (wide_int_range_min_max (wmin, wmax, code, sign, prec,
1534 vr0_min, vr0_max, vr1_min, vr1_max))
1535 set_value_range (vr, VR_RANGE,
1536 wide_int_to_tree (expr_type, wmin),
1537 wide_int_to_tree (expr_type, wmax), NULL);
1538 else
1539 set_value_range_to_varying (vr);
1540 return;
1542 else if (code == MULT_EXPR)
1544 if (!range_int_cst_p (&vr0)
1545 || !range_int_cst_p (&vr1))
1547 set_value_range_to_varying (vr);
1548 return;
1550 extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
1551 return;
1553 else if (code == RSHIFT_EXPR
1554 || code == LSHIFT_EXPR)
1556 if (range_int_cst_p (&vr1)
1557 && !wide_int_range_shift_undefined_p (prec,
1558 wi::to_wide (vr1.min),
1559 wi::to_wide (vr1.max)))
1561 if (code == RSHIFT_EXPR)
1563 /* Even if vr0 is VARYING or otherwise not usable, we can derive
1564 useful ranges just from the shift count. E.g.
1565 x >> 63 for signed 64-bit x is always [-1, 0]. */
1566 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
1568 vr0.type = type = VR_RANGE;
1569 vr0.min = vrp_val_min (expr_type);
1570 vr0.max = vrp_val_max (expr_type);
1572 extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
1573 return;
1575 else if (code == LSHIFT_EXPR
1576 && range_int_cst_p (&vr0))
1578 wide_int res_lb, res_ub;
1579 if (wide_int_range_lshift (res_lb, res_ub, sign, prec,
1580 wi::to_wide (vr0.min),
1581 wi::to_wide (vr0.max),
1582 wi::to_wide (vr1.min),
1583 wi::to_wide (vr1.max),
1584 TYPE_OVERFLOW_UNDEFINED (expr_type),
1585 TYPE_OVERFLOW_WRAPS (expr_type)))
1587 min = wide_int_to_tree (expr_type, res_lb);
1588 max = wide_int_to_tree (expr_type, res_ub);
1589 set_and_canonicalize_value_range (vr, VR_RANGE,
1590 min, max, NULL);
1591 return;
1595 set_value_range_to_varying (vr);
1596 return;
1598 else if (code == TRUNC_DIV_EXPR
1599 || code == FLOOR_DIV_EXPR
1600 || code == CEIL_DIV_EXPR
1601 || code == EXACT_DIV_EXPR
1602 || code == ROUND_DIV_EXPR)
1604 wide_int dividend_min, dividend_max, divisor_min, divisor_max;
1605 wide_int wmin, wmax, extra_min, extra_max;
1606 bool extra_range_p;
1608 /* Special case explicit division by zero as undefined. */
1609 if (range_is_null (&vr1))
1611 set_value_range_to_undefined (vr);
1612 return;
1615 /* First, normalize ranges into constants we can handle. Note
1616 that VR_ANTI_RANGE's of constants were already normalized
1617 before arriving here.
1619 NOTE: As a future improvement, we may be able to do better
1620 with mixed symbolic (anti-)ranges like [0, A]. See note in
1621 ranges_from_anti_range. */
1622 extract_range_into_wide_ints (&vr0, sign, prec,
1623 dividend_min, dividend_max);
1624 extract_range_into_wide_ints (&vr1, sign, prec,
1625 divisor_min, divisor_max);
1626 if (!wide_int_range_div (wmin, wmax, code, sign, prec,
1627 dividend_min, dividend_max,
1628 divisor_min, divisor_max,
1629 TYPE_OVERFLOW_UNDEFINED (expr_type),
1630 TYPE_OVERFLOW_WRAPS (expr_type),
1631 extra_range_p, extra_min, extra_max))
1633 set_value_range_to_varying (vr);
1634 return;
1636 set_value_range (vr, VR_RANGE,
1637 wide_int_to_tree (expr_type, wmin),
1638 wide_int_to_tree (expr_type, wmax), NULL);
1639 if (extra_range_p)
1641 value_range extra_range = VR_INITIALIZER;
1642 set_value_range (&extra_range, VR_RANGE,
1643 wide_int_to_tree (expr_type, extra_min),
1644 wide_int_to_tree (expr_type, extra_max), NULL);
1645 vrp_meet (vr, &extra_range);
1647 return;
1649 else if (code == TRUNC_MOD_EXPR)
1651 if (range_is_null (&vr1))
1653 set_value_range_to_undefined (vr);
1654 return;
1656 wide_int wmin, wmax, tmp;
1657 wide_int vr0_min, vr0_max, vr1_min, vr1_max;
1658 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1659 extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
1660 wide_int_range_trunc_mod (wmin, wmax, sign, prec,
1661 vr0_min, vr0_max, vr1_min, vr1_max);
1662 min = wide_int_to_tree (expr_type, wmin);
1663 max = wide_int_to_tree (expr_type, wmax);
1664 set_value_range (vr, VR_RANGE, min, max, NULL);
1665 return;
1667 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
1669 wide_int may_be_nonzero0, may_be_nonzero1;
1670 wide_int must_be_nonzero0, must_be_nonzero1;
1671 wide_int wmin, wmax;
1672 wide_int vr0_min, vr0_max, vr1_min, vr1_max;
1673 vrp_set_zero_nonzero_bits (expr_type, &vr0,
1674 &may_be_nonzero0, &must_be_nonzero0);
1675 vrp_set_zero_nonzero_bits (expr_type, &vr1,
1676 &may_be_nonzero1, &must_be_nonzero1);
1677 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1678 extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
1679 if (code == BIT_AND_EXPR)
1681 if (wide_int_range_bit_and (wmin, wmax, sign, prec,
1682 vr0_min, vr0_max,
1683 vr1_min, vr1_max,
1684 must_be_nonzero0,
1685 may_be_nonzero0,
1686 must_be_nonzero1,
1687 may_be_nonzero1))
1689 min = wide_int_to_tree (expr_type, wmin);
1690 max = wide_int_to_tree (expr_type, wmax);
1691 set_value_range (vr, VR_RANGE, min, max, NULL);
1693 else
1694 set_value_range_to_varying (vr);
1695 return;
1697 else if (code == BIT_IOR_EXPR)
1699 if (wide_int_range_bit_ior (wmin, wmax, sign,
1700 vr0_min, vr0_max,
1701 vr1_min, vr1_max,
1702 must_be_nonzero0,
1703 may_be_nonzero0,
1704 must_be_nonzero1,
1705 may_be_nonzero1))
1707 min = wide_int_to_tree (expr_type, wmin);
1708 max = wide_int_to_tree (expr_type, wmax);
1709 set_value_range (vr, VR_RANGE, min, max, NULL);
1711 else
1712 set_value_range_to_varying (vr);
1713 return;
1715 else if (code == BIT_XOR_EXPR)
1717 if (wide_int_range_bit_xor (wmin, wmax, sign, prec,
1718 must_be_nonzero0,
1719 may_be_nonzero0,
1720 must_be_nonzero1,
1721 may_be_nonzero1))
1723 min = wide_int_to_tree (expr_type, wmin);
1724 max = wide_int_to_tree (expr_type, wmax);
1725 set_value_range (vr, VR_RANGE, min, max, NULL);
1727 else
1728 set_value_range_to_varying (vr);
1729 return;
1732 else
1733 gcc_unreachable ();
1735 /* If either MIN or MAX overflowed, then set the resulting range to
1736 VARYING. */
1737 if (min == NULL_TREE
1738 || TREE_OVERFLOW_P (min)
1739 || max == NULL_TREE
1740 || TREE_OVERFLOW_P (max))
1742 set_value_range_to_varying (vr);
1743 return;
1746 /* We punt for [-INF, +INF].
1747 We learn nothing when we have INF on both sides.
1748 Note that we do accept [-INF, -INF] and [+INF, +INF]. */
1749 if (vrp_val_is_min (min) && vrp_val_is_max (max))
1751 set_value_range_to_varying (vr);
1752 return;
1755 cmp = compare_values (min, max);
1756 if (cmp == -2 || cmp == 1)
1758 /* If the new range has its limits swapped around (MIN > MAX),
1759 then the operation caused one of them to wrap around, mark
1760 the new range VARYING. */
1761 set_value_range_to_varying (vr);
1763 else
1764 set_value_range (vr, type, min, max, NULL);
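/* For constant operand ranges on a wrapping type, the PLUS_EXPR handling
   above boils down to adding the bounds and then choosing between a plain
   range and an anti-range based on which bounds overflowed (see
   set_value_range_with_overflow).  The guarded snippet below is a minimal
   standalone sketch of that idea for an 8-bit unsigned "type"; it is not
   part of GCC, uses no GCC internals, and omits the degenerate checks the
   real code performs (e.g. an anti-range that would cover nothing).  */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct u8_range { unsigned lo, hi; bool anti; };

static struct u8_range
u8_range_plus (unsigned a_lo, unsigned a_hi, unsigned b_lo, unsigned b_hi)
{
  unsigned lo = a_lo + b_lo, hi = a_hi + b_hi;
  bool lo_ovf = lo > 0xff, hi_ovf = hi > 0xff;
  struct u8_range r;
  if (lo_ovf == hi_ovf)
    {
      /* Neither or both bounds wrapped: a plain range after truncation.  */
      r.lo = lo & 0xff;
      r.hi = hi & 0xff;
      r.anti = false;
    }
  else
    {
      /* Exactly one bound wrapped: the reachable values are everything
	 outside [(hi + 1) mod 256, (lo - 1) mod 256].  */
      r.lo = (hi + 1) & 0xff;
      r.hi = (lo - 1) & 0xff;
      r.anti = true;
    }
  return r;
}

int
main (void)
{
  /* [100, 200] + [10, 100] wraps only at the top and prints "~[45, 109]".  */
  struct u8_range r = u8_range_plus (100, 200, 10, 100);
  printf ("%s[%u, %u]\n", r.anti ? "~" : "", r.lo, r.hi);
  return 0;
}
#endif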
1767 /* Extract range information from a unary operation CODE based on
1768 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
1769 The resulting range is stored in *VR. */
1771 void
1772 extract_range_from_unary_expr (value_range *vr,
1773 enum tree_code code, tree type,
1774 const value_range *vr0_, tree op0_type)
1776 signop sign = TYPE_SIGN (type);
1777 unsigned int prec = TYPE_PRECISION (type);
1778 value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
1780 /* VRP only operates on integral and pointer types. */
1781 if (!(INTEGRAL_TYPE_P (op0_type)
1782 || POINTER_TYPE_P (op0_type))
1783 || !(INTEGRAL_TYPE_P (type)
1784 || POINTER_TYPE_P (type)))
1786 set_value_range_to_varying (vr);
1787 return;
1790 /* If VR0 is UNDEFINED, so is the result. */
1791 if (vr0.type == VR_UNDEFINED)
1793 set_value_range_to_undefined (vr);
1794 return;
1797 /* Handle operations that we express in terms of others. */
1798 if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
1800 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */
1801 copy_value_range (vr, &vr0);
1802 return;
1804 else if (code == NEGATE_EXPR)
1806 /* -X is simply 0 - X, so re-use existing code that also handles
1807 anti-ranges fine. */
1808 value_range zero = VR_INITIALIZER;
1809 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
1810 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
1811 return;
1813 else if (code == BIT_NOT_EXPR)
1815 /* ~X is simply -1 - X, so re-use existing code that also handles
1816 anti-ranges fine. */
1817 value_range minusone = VR_INITIALIZER;
1818 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
1819 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
1820 type, &minusone, &vr0);
1821 return;
1824 /* Now canonicalize anti-ranges to ranges when they are not symbolic
1825 and express op ~[] as (op []') U (op []''). */
1826 if (vr0.type == VR_ANTI_RANGE
1827 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
1829 extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
1830 if (vrtem1.type != VR_UNDEFINED)
1832 value_range vrres = VR_INITIALIZER;
1833 extract_range_from_unary_expr (&vrres, code, type,
1834 &vrtem1, op0_type);
1835 vrp_meet (vr, &vrres);
1837 return;
1840 if (CONVERT_EXPR_CODE_P (code))
1842 tree inner_type = op0_type;
1843 tree outer_type = type;
1845 /* If the expression evaluates to a pointer, we are only interested in
1846 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
1847 if (POINTER_TYPE_P (type))
1849 if (!range_includes_zero_p (&vr0))
1850 set_value_range_to_nonnull (vr, type);
1851 else if (range_is_null (&vr0))
1852 set_value_range_to_null (vr, type);
1853 else
1854 set_value_range_to_varying (vr);
1855 return;
1858 /* We normalize everything to a VR_RANGE, but for constant
1859 anti-ranges we must handle them by leaving the final result
1860 as an anti range. This allows us to convert things like
1861 ~[0,5] seamlessly. */
1862 value_range_type vr_type = VR_RANGE;
1863 if (vr0.type == VR_ANTI_RANGE
1864 && TREE_CODE (vr0.min) == INTEGER_CST
1865 && TREE_CODE (vr0.max) == INTEGER_CST)
1866 vr_type = VR_ANTI_RANGE;
1868 /* NOTES: Previously we were returning VARYING for all symbolics, but
1869 we can do better by treating them as [TYPE_MIN, TYPE_MAX]. For
1870 example, converting [SYM, SYM] from INT to LONG UNSIGNED,
1871 we can return: ~[0x80000000, 0xffffffff7fffffff].
1873 We were also failing to convert ~[0,0] from char* to unsigned,
1874 instead choosing to return VR_VARYING. Now we return ~[0,0]. */
1875 wide_int vr0_min, vr0_max, wmin, wmax;
1876 signop inner_sign = TYPE_SIGN (inner_type);
1877 signop outer_sign = TYPE_SIGN (outer_type);
1878 unsigned inner_prec = TYPE_PRECISION (inner_type);
1879 unsigned outer_prec = TYPE_PRECISION (outer_type);
1880 extract_range_into_wide_ints (&vr0, inner_sign, inner_prec,
1881 vr0_min, vr0_max);
1882 if (wide_int_range_convert (wmin, wmax,
1883 inner_sign, inner_prec,
1884 outer_sign, outer_prec,
1885 vr0_min, vr0_max))
1887 tree min = wide_int_to_tree (outer_type, wmin);
1888 tree max = wide_int_to_tree (outer_type, wmax);
1889 set_and_canonicalize_value_range (vr, vr_type, min, max, NULL);
1891 else
1892 set_value_range_to_varying (vr);
1893 return;
1895 else if (code == ABS_EXPR)
1897 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
1899 set_value_range_to_varying (vr);
1900 return;
1902 wide_int wmin, wmax;
1903 wide_int vr0_min, vr0_max;
1904 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1905 if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max,
1906 TYPE_OVERFLOW_UNDEFINED (type)))
1907 set_value_range (vr, VR_RANGE,
1908 wide_int_to_tree (type, wmin),
1909 wide_int_to_tree (type, wmax), NULL);
1910 else
1911 set_value_range_to_varying (vr);
1912 return;
1915 /* For unhandled operations fall back to varying. */
1916 set_value_range_to_varying (vr);
1917 return;
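/* Examples for the unary cases above (illustrative only, signed 32-bit):
   - NEGATE_EXPR of [3, 7] is computed as [0, 0] - [3, 7] = [-7, -3].
   - BIT_NOT_EXPR of [0, 3] is computed as [-1, -1] - [0, 3] = [-4, -1].
   - ABS_EXPR of [-7, 3] with undefined overflow yields [0, 7].  */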
1920 /* Debugging dumps. */
1922 void dump_value_range (FILE *, const value_range *);
1923 void debug_value_range (const value_range *);
1924 void dump_all_value_ranges (FILE *);
1925 void dump_vr_equiv (FILE *, bitmap);
1926 void debug_vr_equiv (bitmap);
1929 /* Dump value range VR to FILE. */
1931 void
1932 dump_value_range (FILE *file, const value_range *vr)
1934 if (vr == NULL)
1935 fprintf (file, "[]");
1936 else if (vr->type == VR_UNDEFINED)
1937 fprintf (file, "UNDEFINED");
1938 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
1940 tree type = TREE_TYPE (vr->min);
1942 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
1944 if (INTEGRAL_TYPE_P (type)
1945 && !TYPE_UNSIGNED (type)
1946 && vrp_val_is_min (vr->min))
1947 fprintf (file, "-INF");
1948 else
1949 print_generic_expr (file, vr->min);
1951 fprintf (file, ", ");
1953 if (INTEGRAL_TYPE_P (type)
1954 && vrp_val_is_max (vr->max))
1955 fprintf (file, "+INF");
1956 else
1957 print_generic_expr (file, vr->max);
1959 fprintf (file, "]");
1961 if (vr->equiv)
1963 bitmap_iterator bi;
1964 unsigned i, c = 0;
1966 fprintf (file, " EQUIVALENCES: { ");
1968 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
1970 print_generic_expr (file, ssa_name (i));
1971 fprintf (file, " ");
1972 c++;
1975 fprintf (file, "} (%u elements)", c);
1978 else if (vr->type == VR_VARYING)
1979 fprintf (file, "VARYING");
1980 else
1981 fprintf (file, "INVALID RANGE");
1985 /* Dump value range VR to stderr. */
1987 DEBUG_FUNCTION void
1988 debug_value_range (const value_range *vr)
1990 dump_value_range (stderr, vr);
1991 fprintf (stderr, "\n");
1994 void
1995 value_range::dump () const
1997 debug_value_range (this);
2001 /* Given a comparison COND of the form 'V OP W', and an SSA name V,
2002 create a new SSA name N and return the assertion assignment
2003 'N = ASSERT_EXPR <V, V OP W>'. */
2005 static gimple *
2006 build_assert_expr_for (tree cond, tree v)
2008 tree a;
2009 gassign *assertion;
2011 gcc_assert (TREE_CODE (v) == SSA_NAME
2012 && COMPARISON_CLASS_P (cond));
2014 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2015 assertion = gimple_build_assign (NULL_TREE, a);
2017 /* The new ASSERT_EXPR creates a new SSA name that replaces the
2018 operand of the ASSERT_EXPR. Create it so the new name and the old one
2019 are registered in the replacement table so that we can fix the SSA web
2020 after adding all the ASSERT_EXPRs. */
2021 tree new_def = create_new_def_for (v, assertion, NULL);
2022 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2023 given we have to be able to fully propagate those out to re-create
2024 valid SSA when removing the asserts. */
2025 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2026 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2028 return assertion;
2032 /* Return true if STMT is a conditional jump whose comparison involves
2033 floating point values. */
2035 static inline bool
2036 fp_predicate (gimple *stmt)
2038 GIMPLE_CHECK (stmt, GIMPLE_COND);
2040 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2043 /* If the range of values taken by OP can be inferred after STMT executes,
2044 return the comparison code (COMP_CODE_P) and value (VAL_P) that
2045 describes the inferred range. Return true if a range could be
2046 inferred. */
2048 bool
2049 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2051 *val_p = NULL_TREE;
2052 *comp_code_p = ERROR_MARK;
2054 /* Do not attempt to infer anything in names that flow through
2055 abnormal edges. */
2056 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2057 return false;
2059 /* If STMT is the last statement of a basic block with no normal
2060 successors, there is no point inferring anything about any of its
2061 operands. We would not be able to find a proper insertion point
2062 for the assertion, anyway. */
2063 if (stmt_ends_bb_p (stmt))
2065 edge_iterator ei;
2066 edge e;
2068 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2069 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2070 break;
2071 if (e == NULL)
2072 return false;
2075 if (infer_nonnull_range (stmt, op))
2077 *val_p = build_int_cst (TREE_TYPE (op), 0);
2078 *comp_code_p = NE_EXPR;
2079 return true;
2082 return false;
2086 void dump_asserts_for (FILE *, tree);
2087 void debug_asserts_for (tree);
2088 void dump_all_asserts (FILE *);
2089 void debug_all_asserts (void);
2091 /* Dump all the registered assertions for NAME to FILE. */
2093 void
2094 dump_asserts_for (FILE *file, tree name)
2096 assert_locus *loc;
2098 fprintf (file, "Assertions to be inserted for ");
2099 print_generic_expr (file, name);
2100 fprintf (file, "\n");
2102 loc = asserts_for[SSA_NAME_VERSION (name)];
2103 while (loc)
2105 fprintf (file, "\t");
2106 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2107 fprintf (file, "\n\tBB #%d", loc->bb->index);
2108 if (loc->e)
2110 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2111 loc->e->dest->index);
2112 dump_edge_info (file, loc->e, dump_flags, 0);
2114 fprintf (file, "\n\tPREDICATE: ");
2115 print_generic_expr (file, loc->expr);
2116 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2117 print_generic_expr (file, loc->val);
2118 fprintf (file, "\n\n");
2119 loc = loc->next;
2122 fprintf (file, "\n");
2126 /* Dump all the registered assertions for NAME to stderr. */
2128 DEBUG_FUNCTION void
2129 debug_asserts_for (tree name)
2131 dump_asserts_for (stderr, name);
2135 /* Dump all the registered assertions for all the names to FILE. */
2137 void
2138 dump_all_asserts (FILE *file)
2140 unsigned i;
2141 bitmap_iterator bi;
2143 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2144 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2145 dump_asserts_for (file, ssa_name (i));
2146 fprintf (file, "\n");
2150 /* Dump all the registered assertions for all the names to stderr. */
2152 DEBUG_FUNCTION void
2153 debug_all_asserts (void)
2155 dump_all_asserts (stderr);
2158 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
2160 static void
2161 add_assert_info (vec<assert_info> &asserts,
2162 tree name, tree expr, enum tree_code comp_code, tree val)
2164 assert_info info;
2165 info.comp_code = comp_code;
2166 info.name = name;
2167 if (TREE_OVERFLOW_P (val))
2168 val = drop_tree_overflow (val);
2169 info.val = val;
2170 info.expr = expr;
2171 asserts.safe_push (info);
2174 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2175 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2176 E->DEST, then register this location as a possible insertion point
2177 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2179 BB, E and SI provide the exact insertion point for the new
2180 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2181 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2182 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2183 must not be NULL. */
2185 static void
2186 register_new_assert_for (tree name, tree expr,
2187 enum tree_code comp_code,
2188 tree val,
2189 basic_block bb,
2190 edge e,
2191 gimple_stmt_iterator si)
2193 assert_locus *n, *loc, *last_loc;
2194 basic_block dest_bb;
2196 gcc_checking_assert (bb == NULL || e == NULL);
2198 if (e == NULL)
2199 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2200 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2202 /* Never build an assert comparing against an integer constant with
2203 TREE_OVERFLOW set. This confuses our undefined overflow warning
2204 machinery. */
2205 if (TREE_OVERFLOW_P (val))
2206 val = drop_tree_overflow (val);
2208 /* The new assertion A will be inserted at BB or E. We need to
2209 determine if the new location is dominated by a previously
2210 registered location for A. If we are doing an edge insertion,
2211 assume that A will be inserted at E->DEST. Note that this is not
2212 necessarily true.
2214 If E is a critical edge, it will be split. But even if E is
2215 split, the new block will dominate the same set of blocks that
2216 E->DEST dominates.
2218 The reverse, however, is not true: blocks dominated by E->DEST
2219 will not be dominated by the new block created to split E. So,
2220 if the insertion location is on a critical edge, we will not use
2221 the new location to move another assertion previously registered
2222 at a block dominated by E->DEST. */
2223 dest_bb = (bb) ? bb : e->dest;
2225 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2226 VAL at a block dominating DEST_BB, then we don't need to insert a new
2227 one. Similarly, if the same assertion already exists at a block
2228 dominated by DEST_BB and the new location is not on a critical
2229 edge, then update the existing location for the assertion (i.e.,
2230 move the assertion up in the dominance tree).
2232 Note, this is implemented as a simple linked list because there
2233 should not be more than a handful of assertions registered per
2234 name. If this becomes a performance problem, a table hashed by
2235 COMP_CODE and VAL could be implemented. */
2236 loc = asserts_for[SSA_NAME_VERSION (name)];
2237 last_loc = loc;
2238 while (loc)
2240 if (loc->comp_code == comp_code
2241 && (loc->val == val
2242 || operand_equal_p (loc->val, val, 0))
2243 && (loc->expr == expr
2244 || operand_equal_p (loc->expr, expr, 0)))
2246 /* If E is not a critical edge and DEST_BB
2247 dominates the existing location for the assertion, move
2248 the assertion up in the dominance tree by updating its
2249 location information. */
2250 if ((e == NULL || !EDGE_CRITICAL_P (e))
2251 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2253 loc->bb = dest_bb;
2254 loc->e = e;
2255 loc->si = si;
2256 return;
2260 /* Update the last node of the list and move to the next one. */
2261 last_loc = loc;
2262 loc = loc->next;
2265 /* If we didn't find an assertion already registered for
2266 NAME COMP_CODE VAL, add a new one at the end of the list of
2267 assertions associated with NAME. */
2268 n = XNEW (struct assert_locus);
2269 n->bb = dest_bb;
2270 n->e = e;
2271 n->si = si;
2272 n->comp_code = comp_code;
2273 n->val = val;
2274 n->expr = expr;
2275 n->next = NULL;
2277 if (last_loc)
2278 last_loc->next = n;
2279 else
2280 asserts_for[SSA_NAME_VERSION (name)] = n;
2282 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2285 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2286 Extract a suitable test code and value and store them into *CODE_P and
2287 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2289 If no extraction was possible, return FALSE, otherwise return TRUE.
2291 If INVERT is true, then we invert the result stored into *CODE_P. */
2293 static bool
2294 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2295 tree cond_op0, tree cond_op1,
2296 bool invert, enum tree_code *code_p,
2297 tree *val_p)
2299 enum tree_code comp_code;
2300 tree val;
2302 /* We have a comparison of the form NAME COMP VAL
2303 or VAL COMP NAME. */
2304 if (name == cond_op1)
2306 /* If the predicate is of the form VAL COMP NAME, flip
2307 COMP around because we need to register NAME as the
2308 first operand in the predicate. */
2309 comp_code = swap_tree_comparison (cond_code);
2310 val = cond_op0;
2312 else if (name == cond_op0)
2314 /* The comparison is of the form NAME COMP VAL, so the
2315 comparison code remains unchanged. */
2316 comp_code = cond_code;
2317 val = cond_op1;
2319 else
2320 gcc_unreachable ();
2322 /* Invert the comparison code as necessary. */
2323 if (invert)
2324 comp_code = invert_tree_comparison (comp_code, 0);
2326 /* VRP only handles integral and pointer types. */
2327 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
2328 && ! POINTER_TYPE_P (TREE_TYPE (val)))
2329 return false;
2331 /* Do not register always-false predicates.
2332 FIXME: this works around a limitation in fold() when dealing with
2333 enumerations. Given 'enum { N1, N2 } x;', fold will not
2334 fold 'if (x > N2)' to 'if (0)'. */
2335 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
2336 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
2338 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
2339 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
2341 if (comp_code == GT_EXPR
2342 && (!max
2343 || compare_values (val, max) == 0))
2344 return false;
2346 if (comp_code == LT_EXPR
2347 && (!min
2348 || compare_values (val, min) == 0))
2349 return false;
2351 *code_p = comp_code;
2352 *val_p = val;
2353 return true;
2356 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
2357 (otherwise return VAL). VAL and MASK must be zero-extended for
2358 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
2359 (to transform signed values into unsigned) and at the end xor
2360 SGNBIT back. */
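/* Illustrative example, assuming PREC == 8: with VAL == 5 and MASK == 0x30
   the values RES satisfying (RES & MASK) == RES are 0, 0x10, 0x20 and 0x30;
   the smallest of them greater than VAL is 0x10, which is what this
   function returns.  */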
2362 static wide_int
2363 masked_increment (const wide_int &val_in, const wide_int &mask,
2364 const wide_int &sgnbit, unsigned int prec)
2366 wide_int bit = wi::one (prec), res;
2367 unsigned int i;
2369 wide_int val = val_in ^ sgnbit;
2370 for (i = 0; i < prec; i++, bit += bit)
2372 res = mask;
2373 if ((res & bit) == 0)
2374 continue;
2375 res = bit - 1;
2376 res = wi::bit_and_not (val + bit, res);
2377 res &= mask;
2378 if (wi::gtu_p (res, val))
2379 return res ^ sgnbit;
2381 return val ^ sgnbit;
2384 /* Helper for overflow_comparison_p
2386 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2387 OP1's defining statement to see if it ultimately has the form
2388 OP0 CODE (OP0 PLUS INTEGER_CST)
2390 If so, return TRUE indicating this is an overflow test and store into
2391 *NEW_CST an updated constant that can be used in a narrowed range test.
2393 REVERSED indicates if the comparison was originally:
2395 OP1 CODE' OP0.
2397 This affects how we build the updated constant. */
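/* For instance, with unsigned int i_1 and i_2 = i_1 + 1, the test
   'i_1 < i_2' fails only when the addition wraps, i.e. only for
   i_1 == UINT_MAX, so *NEW_CST becomes UINT_MAX - 1 and the caller can
   narrow the test to 'i_1 <= UINT_MAX - 1'.  */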
2399 static bool
2400 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
2401 bool follow_assert_exprs, bool reversed, tree *new_cst)
2403 /* See if this is a relational operation between two SSA_NAMES with
2404 unsigned, overflow wrapping values. If so, check it more deeply. */
2405 if ((code == LT_EXPR || code == LE_EXPR
2406 || code == GE_EXPR || code == GT_EXPR)
2407 && TREE_CODE (op0) == SSA_NAME
2408 && TREE_CODE (op1) == SSA_NAME
2409 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
2410 && TYPE_UNSIGNED (TREE_TYPE (op0))
2411 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
2413 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
2415 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
2416 if (follow_assert_exprs)
2418 while (gimple_assign_single_p (op1_def)
2419 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
2421 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
2422 if (TREE_CODE (op1) != SSA_NAME)
2423 break;
2424 op1_def = SSA_NAME_DEF_STMT (op1);
2428 /* Now look at the defining statement of OP1 to see if it adds
2429 or subtracts a nonzero constant from another operand. */
2430 if (op1_def
2431 && is_gimple_assign (op1_def)
2432 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
2433 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
2434 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
2436 tree target = gimple_assign_rhs1 (op1_def);
2438 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
2439 for one where TARGET appears on the RHS. */
2440 if (follow_assert_exprs)
2442 /* Now see if that "other operand" is op0, following the chain
2443 of ASSERT_EXPRs if necessary. */
2444 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
2445 while (op0 != target
2446 && gimple_assign_single_p (op0_def)
2447 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
2449 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
2450 if (TREE_CODE (op0) != SSA_NAME)
2451 break;
2452 op0_def = SSA_NAME_DEF_STMT (op0);
2456 /* If we did not find our target SSA_NAME, then this is not
2457 an overflow test. */
2458 if (op0 != target)
2459 return false;
2461 tree type = TREE_TYPE (op0);
2462 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
2463 tree inc = gimple_assign_rhs2 (op1_def);
2464 if (reversed)
2465 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
2466 else
2467 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
2468 return true;
2471 return false;
2474 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2475 OP1's defining statement to see if it ultimately has the form
2476 OP0 CODE (OP0 PLUS INTEGER_CST)
2478 If so, return TRUE indicating this is an overflow test and store into
2479 *NEW_CST an updated constant that can be used in a narrowed range test.
2481 These statements are left as-is in the IL to facilitate discovery of
2482 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
2483 the alternate range representation is often useful within VRP. */
2485 bool
2486 overflow_comparison_p (tree_code code, tree name, tree val,
2487 bool use_equiv_p, tree *new_cst)
2489 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
2490 return true;
2491 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
2492 use_equiv_p, true, new_cst);
2496 /* Try to register an edge assertion for SSA name NAME on edge E for
2497 the condition COND_OP0 COND_CODE COND_OP1 contributing to a conditional
2498 jump. Invert the condition if INVERT is true. Queue the asserts in ASSERTS. */
2500 static void
2501 register_edge_assert_for_2 (tree name, edge e,
2502 enum tree_code cond_code,
2503 tree cond_op0, tree cond_op1, bool invert,
2504 vec<assert_info> &asserts)
2506 tree val;
2507 enum tree_code comp_code;
2509 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2510 cond_op0,
2511 cond_op1,
2512 invert, &comp_code, &val))
2513 return;
2515 /* Queue the assert. */
2516 tree x;
2517 if (overflow_comparison_p (comp_code, name, val, false, &x))
2519 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
2520 ? GT_EXPR : LE_EXPR);
2521 add_assert_info (asserts, name, name, new_code, x);
2523 add_assert_info (asserts, name, name, comp_code, val);
2525 /* In the case of NAME <= CST and NAME being defined as
2526 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
2527 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
2528 This catches range and anti-range tests. */
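/* For instance, with CST == 10 and CST2 == 3 this lets us record that
   NAME2 >= -3 and NAME2 <= 7 on the NAME <= 10 edge.  */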
2529 if ((comp_code == LE_EXPR
2530 || comp_code == GT_EXPR)
2531 && TREE_CODE (val) == INTEGER_CST
2532 && TYPE_UNSIGNED (TREE_TYPE (val)))
2534 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2535 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
2537 /* Extract CST2 from the (optional) addition. */
2538 if (is_gimple_assign (def_stmt)
2539 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
2541 name2 = gimple_assign_rhs1 (def_stmt);
2542 cst2 = gimple_assign_rhs2 (def_stmt);
2543 if (TREE_CODE (name2) == SSA_NAME
2544 && TREE_CODE (cst2) == INTEGER_CST)
2545 def_stmt = SSA_NAME_DEF_STMT (name2);
2548 /* Extract NAME2 from the (optional) sign-changing cast. */
2549 if (gimple_assign_cast_p (def_stmt))
2551 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
2552 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2553 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
2554 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
2555 name3 = gimple_assign_rhs1 (def_stmt);
2558 /* If name3 is used later, create an ASSERT_EXPR for it. */
2559 if (name3 != NULL_TREE
2560 && TREE_CODE (name3) == SSA_NAME
2561 && (cst2 == NULL_TREE
2562 || TREE_CODE (cst2) == INTEGER_CST)
2563 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
2565 tree tmp;
2567 /* Build an expression for the range test. */
2568 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
2569 if (cst2 != NULL_TREE)
2570 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2572 if (dump_file)
2574 fprintf (dump_file, "Adding assert for ");
2575 print_generic_expr (dump_file, name3);
2576 fprintf (dump_file, " from ");
2577 print_generic_expr (dump_file, tmp);
2578 fprintf (dump_file, "\n");
2581 add_assert_info (asserts, name3, tmp, comp_code, val);
2584 /* If name2 is used later, create an ASSERT_EXPR for it. */
2585 if (name2 != NULL_TREE
2586 && TREE_CODE (name2) == SSA_NAME
2587 && TREE_CODE (cst2) == INTEGER_CST
2588 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
2590 tree tmp;
2592 /* Build an expression for the range test. */
2593 tmp = name2;
2594 if (TREE_TYPE (name) != TREE_TYPE (name2))
2595 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
2596 if (cst2 != NULL_TREE)
2597 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2599 if (dump_file)
2601 fprintf (dump_file, "Adding assert for ");
2602 print_generic_expr (dump_file, name2);
2603 fprintf (dump_file, " from ");
2604 print_generic_expr (dump_file, tmp);
2605 fprintf (dump_file, "\n");
2608 add_assert_info (asserts, name2, tmp, comp_code, val);
2612 /* In the case of post-in/decrement tests like if (i++) ... and uses
2613 of the in/decremented value on the edge, the extra name we want to
2614 assert for is not on the def chain of the name compared. Instead
2615 it is in the set of use stmts.
2616 Similar cases happen for conversions that were simplified through
2617 fold_{sign_changed,widened}_comparison. */
2618 if ((comp_code == NE_EXPR
2619 || comp_code == EQ_EXPR)
2620 && TREE_CODE (val) == INTEGER_CST)
2622 imm_use_iterator ui;
2623 gimple *use_stmt;
2624 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
2626 if (!is_gimple_assign (use_stmt))
2627 continue;
2629 /* Restrict to use-stmts in blocks that dominate the predecessor E->SRC. */
2630 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
2631 continue;
2633 tree name2 = gimple_assign_lhs (use_stmt);
2634 if (TREE_CODE (name2) != SSA_NAME)
2635 continue;
2637 enum tree_code code = gimple_assign_rhs_code (use_stmt);
2638 tree cst;
2639 if (code == PLUS_EXPR
2640 || code == MINUS_EXPR)
2642 cst = gimple_assign_rhs2 (use_stmt);
2643 if (TREE_CODE (cst) != INTEGER_CST)
2644 continue;
2645 cst = int_const_binop (code, val, cst);
2647 else if (CONVERT_EXPR_CODE_P (code))
2649 /* For truncating conversions we cannot record
2650 an inequality. */
2651 if (comp_code == NE_EXPR
2652 && (TYPE_PRECISION (TREE_TYPE (name2))
2653 < TYPE_PRECISION (TREE_TYPE (name))))
2654 continue;
2655 cst = fold_convert (TREE_TYPE (name2), val);
2657 else
2658 continue;
2660 if (TREE_OVERFLOW_P (cst))
2661 cst = drop_tree_overflow (cst);
2662 add_assert_info (asserts, name2, name2, comp_code, cst);
2666 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
2667 && TREE_CODE (val) == INTEGER_CST)
2669 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2670 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
2671 tree val2 = NULL_TREE;
2672 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
2673 wide_int mask = wi::zero (prec);
2674 unsigned int nprec = prec;
2675 enum tree_code rhs_code = ERROR_MARK;
2677 if (is_gimple_assign (def_stmt))
2678 rhs_code = gimple_assign_rhs_code (def_stmt);
2680 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
2681 assert that A != CST1 -+ CST2. */
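/* E.g. from 'a_1 + 4 != 10' we can assert a_1 != 6.  */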
2682 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2683 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
2685 tree op0 = gimple_assign_rhs1 (def_stmt);
2686 tree op1 = gimple_assign_rhs2 (def_stmt);
2687 if (TREE_CODE (op0) == SSA_NAME
2688 && TREE_CODE (op1) == INTEGER_CST)
2690 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
2691 ? MINUS_EXPR : PLUS_EXPR);
2692 op1 = int_const_binop (reverse_op, val, op1);
2693 if (TREE_OVERFLOW (op1))
2694 op1 = drop_tree_overflow (op1);
2695 add_assert_info (asserts, op0, op0, comp_code, op1);
2699 /* Add asserts for NAME cmp CST and NAME being defined
2700 as NAME = (int) NAME2. */
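/* For example, for '(int) u_2 < 100' with a 32-bit unsigned u_2 this records
   the assert 'u_2 + 0x80000000 <= 0x80000063', i.e. the signed test expressed
   as a single unsigned range test.  */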
2701 if (!TYPE_UNSIGNED (TREE_TYPE (val))
2702 && (comp_code == LE_EXPR || comp_code == LT_EXPR
2703 || comp_code == GT_EXPR || comp_code == GE_EXPR)
2704 && gimple_assign_cast_p (def_stmt))
2706 name2 = gimple_assign_rhs1 (def_stmt);
2707 if (CONVERT_EXPR_CODE_P (rhs_code)
2708 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2709 && TYPE_UNSIGNED (TREE_TYPE (name2))
2710 && prec == TYPE_PRECISION (TREE_TYPE (name2))
2711 && (comp_code == LE_EXPR || comp_code == GT_EXPR
2712 || !tree_int_cst_equal (val,
2713 TYPE_MIN_VALUE (TREE_TYPE (val)))))
2715 tree tmp, cst;
2716 enum tree_code new_comp_code = comp_code;
2718 cst = fold_convert (TREE_TYPE (name2),
2719 TYPE_MIN_VALUE (TREE_TYPE (val)));
2720 /* Build an expression for the range test. */
2721 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
2722 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
2723 fold_convert (TREE_TYPE (name2), val));
2724 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2726 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
2727 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
2728 build_int_cst (TREE_TYPE (name2), 1));
2731 if (dump_file)
2733 fprintf (dump_file, "Adding assert for ");
2734 print_generic_expr (dump_file, name2);
2735 fprintf (dump_file, " from ");
2736 print_generic_expr (dump_file, tmp);
2737 fprintf (dump_file, "\n");
2740 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
2744 /* Add asserts for NAME cmp CST and NAME being defined as
2745 NAME = NAME2 >> CST2.
2747 Extract CST2 from the right shift. */
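/* For example, for 'if ((x_1 >> 4) == 3)' with a signed 32-bit x_1 this
   records that (unsigned) x_1 - 0x30 is at most 0xf, i.e. x_1 is in
   [0x30, 0x3f].  */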
2748 if (rhs_code == RSHIFT_EXPR)
2750 name2 = gimple_assign_rhs1 (def_stmt);
2751 cst2 = gimple_assign_rhs2 (def_stmt);
2752 if (TREE_CODE (name2) == SSA_NAME
2753 && tree_fits_uhwi_p (cst2)
2754 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2755 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
2756 && type_has_mode_precision_p (TREE_TYPE (val)))
2758 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
2759 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
2762 if (val2 != NULL_TREE
2763 && TREE_CODE (val2) == INTEGER_CST
2764 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
2765 TREE_TYPE (val),
2766 val2, cst2), val))
2768 enum tree_code new_comp_code = comp_code;
2769 tree tmp, new_val;
2771 tmp = name2;
2772 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
2774 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
2776 tree type = build_nonstandard_integer_type (prec, 1);
2777 tmp = build1 (NOP_EXPR, type, name2);
2778 val2 = fold_convert (type, val2);
2780 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
2781 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
2782 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
2784 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2786 wide_int minval
2787 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2788 new_val = val2;
2789 if (minval == wi::to_wide (new_val))
2790 new_val = NULL_TREE;
2792 else
2794 wide_int maxval
2795 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2796 mask |= wi::to_wide (val2);
2797 if (wi::eq_p (mask, maxval))
2798 new_val = NULL_TREE;
2799 else
2800 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
2803 if (new_val)
2805 if (dump_file)
2807 fprintf (dump_file, "Adding assert for ");
2808 print_generic_expr (dump_file, name2);
2809 fprintf (dump_file, " from ");
2810 print_generic_expr (dump_file, tmp);
2811 fprintf (dump_file, "\n");
2814 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
2818 /* Add asserts for NAME cmp CST and NAME being defined as
2819 NAME = NAME2 & CST2.
2821 Extract CST2 from the and.
2823 Also handle
2824 NAME = (unsigned) NAME2;
2825 casts where NAME's type is unsigned and has smaller precision
2826 than NAME2's type as if it were NAME = NAME2 & MASK. */
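/* E.g. a comparison of NAME where NAME = (unsigned char) NAME2 and NAME2 has
   a 32-bit type is handled as if it were NAME = NAME2 & 0xff.  */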
2827 names[0] = NULL_TREE;
2828 names[1] = NULL_TREE;
2829 cst2 = NULL_TREE;
2830 if (rhs_code == BIT_AND_EXPR
2831 || (CONVERT_EXPR_CODE_P (rhs_code)
2832 && INTEGRAL_TYPE_P (TREE_TYPE (val))
2833 && TYPE_UNSIGNED (TREE_TYPE (val))
2834 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2835 > prec))
2837 name2 = gimple_assign_rhs1 (def_stmt);
2838 if (rhs_code == BIT_AND_EXPR)
2839 cst2 = gimple_assign_rhs2 (def_stmt);
2840 else
2842 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
2843 nprec = TYPE_PRECISION (TREE_TYPE (name2));
2845 if (TREE_CODE (name2) == SSA_NAME
2846 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2847 && TREE_CODE (cst2) == INTEGER_CST
2848 && !integer_zerop (cst2)
2849 && (nprec > 1
2850 || TYPE_UNSIGNED (TREE_TYPE (val))))
2852 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
2853 if (gimple_assign_cast_p (def_stmt2))
2855 names[1] = gimple_assign_rhs1 (def_stmt2);
2856 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
2857 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
2858 || (TYPE_PRECISION (TREE_TYPE (name2))
2859 != TYPE_PRECISION (TREE_TYPE (names[1]))))
2860 names[1] = NULL_TREE;
2862 names[0] = name2;
2865 if (names[0] || names[1])
2867 wide_int minv, maxv, valv, cst2v;
2868 wide_int tem, sgnbit;
2869 bool valid_p = false, valn, cst2n;
2870 enum tree_code ccode = comp_code;
2872 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
2873 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
2874 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
2875 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
2876 /* If CST2 doesn't have the most significant bit set,
2877 but VAL is negative, we have a comparison like
2878 if ((x & 0x123) > -4) (always true). Just give up. */
2879 if (!cst2n && valn)
2880 ccode = ERROR_MARK;
2881 if (cst2n)
2882 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2883 else
2884 sgnbit = wi::zero (nprec);
2885 minv = valv & cst2v;
2886 switch (ccode)
2888 case EQ_EXPR:
2889 /* Minimum unsigned value for equality is VAL & CST2
2890 (should be equal to VAL, otherwise we probably should
2891 have folded the comparison into false) and
2892 maximum unsigned value is VAL | ~CST2. */
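/* E.g. for an 8-bit NAME2 and (NAME2 & 0xf0) == 0x40 this gives
   MINV == 0x40 and MAXV == 0x4f: the bits covered by CST2 are fixed
   by VAL and the remaining bits are free.  */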
2893 maxv = valv | ~cst2v;
2894 valid_p = true;
2895 break;
2897 case NE_EXPR:
2898 tem = valv | ~cst2v;
2899 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
2900 if (valv == 0)
2902 cst2n = false;
2903 sgnbit = wi::zero (nprec);
2904 goto gt_expr;
2906 /* If (VAL | ~CST2) is all ones, handle it as
2907 (X & CST2) < VAL. */
2908 if (tem == -1)
2910 cst2n = false;
2911 valn = false;
2912 sgnbit = wi::zero (nprec);
2913 goto lt_expr;
2915 if (!cst2n && wi::neg_p (cst2v))
2916 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2917 if (sgnbit != 0)
2919 if (valv == sgnbit)
2921 cst2n = true;
2922 valn = true;
2923 goto gt_expr;
2925 if (tem == wi::mask (nprec - 1, false, nprec))
2927 cst2n = true;
2928 goto lt_expr;
2930 if (!cst2n)
2931 sgnbit = wi::zero (nprec);
2933 break;
2935 case GE_EXPR:
2936 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
2937 is VAL and maximum unsigned value is ~0. For signed
2938 comparison, if CST2 doesn't have the most significant bit
2939 set, handle it similarly. If CST2 has MSB set,
2940 the minimum is the same, and maximum is ~0U/2. */
2941 if (minv != valv)
2943 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
2944 VAL. */
2945 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2946 if (minv == valv)
2947 break;
2949 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2950 valid_p = true;
2951 break;
2953 case GT_EXPR:
2954 gt_expr:
2955 /* Find the smallest MINV where MINV > VAL
2956 && (MINV & CST2) == MINV, if any. If VAL is signed and
2957 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
2958 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2959 if (minv == valv)
2960 break;
2961 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2962 valid_p = true;
2963 break;
2965 case LE_EXPR:
2966 /* Minimum unsigned value for <= is 0 and maximum
2967 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
2968 Otherwise, find the smallest VAL2 where VAL2 > VAL
2969 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2970 as maximum.
2971 For signed comparison, if CST2 doesn't have the most
2972 significant bit set, handle it similarly. If CST2 has
2973 MSB set, the maximum is the same and minimum is INT_MIN. */
2974 if (minv == valv)
2975 maxv = valv;
2976 else
2978 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2979 if (maxv == valv)
2980 break;
2981 maxv -= 1;
2983 maxv |= ~cst2v;
2984 minv = sgnbit;
2985 valid_p = true;
2986 break;
2988 case LT_EXPR:
2989 lt_expr:
2990 /* Minimum unsigned value for < is 0 and maximum
2991 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
2992 Otherwise, find the smallest VAL2 where VAL2 > VAL
2993 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2994 as maximum.
2995 For signed comparison, if CST2 doesn't have the most
2996 significant bit set, handle it similarly. If CST2 has
2997 MSB set, the maximum is the same and minimum is INT_MIN. */
2998 if (minv == valv)
3000 if (valv == sgnbit)
3001 break;
3002 maxv = valv;
3004 else
3006 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3007 if (maxv == valv)
3008 break;
3010 maxv -= 1;
3011 maxv |= ~cst2v;
3012 minv = sgnbit;
3013 valid_p = true;
3014 break;
3016 default:
3017 break;
3019 if (valid_p
3020 && (maxv - minv) != -1)
3022 tree tmp, new_val, type;
3023 int i;
3025 for (i = 0; i < 2; i++)
3026 if (names[i])
3028 wide_int maxv2 = maxv;
3029 tmp = names[i];
3030 type = TREE_TYPE (names[i]);
3031 if (!TYPE_UNSIGNED (type))
3033 type = build_nonstandard_integer_type (nprec, 1);
3034 tmp = build1 (NOP_EXPR, type, names[i]);
3036 if (minv != 0)
3038 tmp = build2 (PLUS_EXPR, type, tmp,
3039 wide_int_to_tree (type, -minv));
3040 maxv2 = maxv - minv;
3042 new_val = wide_int_to_tree (type, maxv2);
3044 if (dump_file)
3046 fprintf (dump_file, "Adding assert for ");
3047 print_generic_expr (dump_file, names[i]);
3048 fprintf (dump_file, " from ");
3049 print_generic_expr (dump_file, tmp);
3050 fprintf (dump_file, "\n");
3053 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3060 /* OP is an operand of a truth value expression which is known to have
3061 a particular value. Register any asserts for OP and for any
3062 operands in OP's defining statement.
3064 If CODE is EQ_EXPR, then we want to register OP is zero (false),
3065 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
3067 static void
3068 register_edge_assert_for_1 (tree op, enum tree_code code,
3069 edge e, vec<assert_info> &asserts)
3071 gimple *op_def;
3072 tree val;
3073 enum tree_code rhs_code;
3075 /* We only care about SSA_NAMEs. */
3076 if (TREE_CODE (op) != SSA_NAME)
3077 return;
3079 /* We know that OP will have a zero or nonzero value. */
3080 val = build_int_cst (TREE_TYPE (op), 0);
3081 add_assert_info (asserts, op, op, code, val);
3083 /* Now look at how OP is set. If it's set from a comparison,
3084 a truth operation or some bit operations, then we may be able
3085 to register information about the operands of that assignment. */
3086 op_def = SSA_NAME_DEF_STMT (op);
3087 if (gimple_code (op_def) != GIMPLE_ASSIGN)
3088 return;
3090 rhs_code = gimple_assign_rhs_code (op_def);
3092 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3094 bool invert = (code == EQ_EXPR ? true : false);
3095 tree op0 = gimple_assign_rhs1 (op_def);
3096 tree op1 = gimple_assign_rhs2 (op_def);
3098 if (TREE_CODE (op0) == SSA_NAME)
3099 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3100 if (TREE_CODE (op1) == SSA_NAME)
3101 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3103 else if ((code == NE_EXPR
3104 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3105 || (code == EQ_EXPR
3106 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3108 /* Recurse on each operand. */
3109 tree op0 = gimple_assign_rhs1 (op_def);
3110 tree op1 = gimple_assign_rhs2 (op_def);
3111 if (TREE_CODE (op0) == SSA_NAME
3112 && has_single_use (op0))
3113 register_edge_assert_for_1 (op0, code, e, asserts);
3114 if (TREE_CODE (op1) == SSA_NAME
3115 && has_single_use (op1))
3116 register_edge_assert_for_1 (op1, code, e, asserts);
3118 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3119 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3121 /* Recurse, flipping CODE. */
3122 code = invert_tree_comparison (code, false);
3123 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3125 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3127 /* Recurse through the copy. */
3128 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3130 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3132 /* Recurse through the type conversion, unless it is a narrowing
3133 conversion or conversion from non-integral type. */
3134 tree rhs = gimple_assign_rhs1 (op_def);
3135 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3136 && (TYPE_PRECISION (TREE_TYPE (rhs))
3137 <= TYPE_PRECISION (TREE_TYPE (op))))
3138 register_edge_assert_for_1 (rhs, code, e, asserts);
3142 /* Check if comparison
3143 NAME COND_OP INTEGER_CST
3144 has a form of
3145 (X & 11...100..0) COND_OP XX...X00...0
3146 Such comparison can yield assertions like
3147 X >= XX...X00...0
3148 X <= XX...X11...1
3149 in case of COND_OP being EQ_EXPR or
3150 X < XX...X00...0
3151 X > XX...X11...1
3152 in case of NE_EXPR. */
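/* For an 8-bit unsigned X, for example, (X & 0xf8) == 0x20 yields
   X >= 0x20 and X <= 0x27, while (X & 0xf8) != 0 yields X > 0x07.  */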
3154 static bool
3155 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3156 tree *new_name, tree *low, enum tree_code *low_code,
3157 tree *high, enum tree_code *high_code)
3159 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3161 if (!is_gimple_assign (def_stmt)
3162 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3163 return false;
3165 tree t = gimple_assign_rhs1 (def_stmt);
3166 tree maskt = gimple_assign_rhs2 (def_stmt);
3167 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3168 return false;
3170 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3171 wide_int inv_mask = ~mask;
3172 /* A degenerate mask (all zeros or all ones) should have been folded
away by now, so don't bother optimizing. */
3173 if (mask == 0 || inv_mask == 0)
3174 return false;
3176 /* Assume VALT is INTEGER_CST. */
3177 wi::tree_to_wide_ref val = wi::to_wide (valt);
3179 if ((inv_mask & (inv_mask + 1)) != 0
3180 || (val & mask) != val)
3181 return false;
3183 bool is_range = cond_code == EQ_EXPR;
3185 tree type = TREE_TYPE (t);
3186 wide_int min = wi::min_value (type),
3187 max = wi::max_value (type);
3189 if (is_range)
3191 *low_code = val == min ? ERROR_MARK : GE_EXPR;
3192 *high_code = val == max ? ERROR_MARK : LE_EXPR;
3194 else
3196 /* We can still generate an assertion if one of the alternatives
3197 is known to always be false. */
3198 if (val == min)
3200 *low_code = (enum tree_code) 0;
3201 *high_code = GT_EXPR;
3203 else if ((val | inv_mask) == max)
3205 *low_code = LT_EXPR;
3206 *high_code = (enum tree_code) 0;
3208 else
3209 return false;
3212 *new_name = t;
3213 *low = wide_int_to_tree (type, val);
3214 *high = wide_int_to_tree (type, val | inv_mask);
3216 return true;
3219 /* Try to register an edge assertion for SSA name NAME on edge E for
3220 the condition COND contributing to the conditional jump pointed to by
3221 SI. */
3223 void
3224 register_edge_assert_for (tree name, edge e,
3225 enum tree_code cond_code, tree cond_op0,
3226 tree cond_op1, vec<assert_info> &asserts)
3228 tree val;
3229 enum tree_code comp_code;
3230 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3232 /* Do not attempt to infer anything in names that flow through
3233 abnormal edges. */
3234 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3235 return;
3237 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3238 cond_op0, cond_op1,
3239 is_else_edge,
3240 &comp_code, &val))
3241 return;
3243 /* Register ASSERT_EXPRs for name. */
3244 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3245 cond_op1, is_else_edge, asserts);
3248 /* If COND is effectively an equality test of an SSA_NAME against
3249 the value zero or one, then we may be able to assert values
3250 for SSA_NAMEs which flow into COND. */
3252 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3253 statement of NAME we can assert both operands of the BIT_AND_EXPR
3254 have nonzero value. */
3255 if (((comp_code == EQ_EXPR && integer_onep (val))
3256 || (comp_code == NE_EXPR && integer_zerop (val))))
3258 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3260 if (is_gimple_assign (def_stmt)
3261 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3263 tree op0 = gimple_assign_rhs1 (def_stmt);
3264 tree op1 = gimple_assign_rhs2 (def_stmt);
3265 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3266 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3270 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3271 statement of NAME we can assert both operands of the BIT_IOR_EXPR
3272 have zero value. */
3273 if (((comp_code == EQ_EXPR && integer_zerop (val))
3274 || (comp_code == NE_EXPR && integer_onep (val))))
3276 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3278 /* For BIT_IOR_EXPR both operands necessarily have zero value only if
3279 NAME == 0, or if the type precision is one (so NAME != 1 means NAME == 0). */
3280 if (is_gimple_assign (def_stmt)
3281 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3282 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3283 || comp_code == EQ_EXPR)))
3285 tree op0 = gimple_assign_rhs1 (def_stmt);
3286 tree op1 = gimple_assign_rhs2 (def_stmt);
3287 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3288 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3292 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
3293 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3294 && TREE_CODE (val) == INTEGER_CST)
3296 enum tree_code low_code, high_code;
3297 tree low, high;
3298 if (is_masked_range_test (name, val, comp_code, &name, &low,
3299 &low_code, &high, &high_code))
3301 if (low_code != ERROR_MARK)
3302 register_edge_assert_for_2 (name, e, low_code, name,
3303 low, /*invert*/false, asserts);
3304 if (high_code != ERROR_MARK)
3305 register_edge_assert_for_2 (name, e, high_code, name,
3306 high, /*invert*/false, asserts);
3311 /* Finish found ASSERTS for E and register them at GSI. */
3313 static void
3314 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
3315 vec<assert_info> &asserts)
3317 for (unsigned i = 0; i < asserts.length (); ++i)
3318 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3319 reachable from E. */
3320 if (live_on_edge (e, asserts[i].name))
3321 register_new_assert_for (asserts[i].name, asserts[i].expr,
3322 asserts[i].comp_code, asserts[i].val,
3323 NULL, e, gsi);
3328 /* Determine whether the outgoing edges of BB should receive an
3329 ASSERT_EXPR for each of the operands of BB's LAST statement.
3330 The last statement of BB must be a COND_EXPR.
3332 If any of the sub-graphs rooted at BB have an interesting use of
3333 the predicate operands, an assert location node is added to the
3334 list of assertions for the corresponding operands. */
3336 static void
3337 find_conditional_asserts (basic_block bb, gcond *last)
3339 gimple_stmt_iterator bsi;
3340 tree op;
3341 edge_iterator ei;
3342 edge e;
3343 ssa_op_iter iter;
3345 bsi = gsi_for_stmt (last);
3347 /* Look for uses of the operands in each of the sub-graphs
3348 rooted at BB. We need to check each of the outgoing edges
3349 separately, so that we know what kind of ASSERT_EXPR to
3350 insert. */
3351 FOR_EACH_EDGE (e, ei, bb->succs)
3353 if (e->dest == bb)
3354 continue;
3356 /* Register the necessary assertions for each operand in the
3357 conditional predicate. */
3358 auto_vec<assert_info, 8> asserts;
3359 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3360 register_edge_assert_for (op, e,
3361 gimple_cond_code (last),
3362 gimple_cond_lhs (last),
3363 gimple_cond_rhs (last), asserts);
3364 finish_register_edge_assert_for (e, bsi, asserts);
3368 struct case_info
3370 tree expr;
3371 basic_block bb;
3374 /* Compare two case labels sorting first by the destination bb index
3375 and then by the case value. */
3377 static int
3378 compare_case_labels (const void *p1, const void *p2)
3380 const struct case_info *ci1 = (const struct case_info *) p1;
3381 const struct case_info *ci2 = (const struct case_info *) p2;
3382 int idx1 = ci1->bb->index;
3383 int idx2 = ci2->bb->index;
3385 if (idx1 < idx2)
3386 return -1;
3387 else if (idx1 == idx2)
3389 /* Make sure the default label is first in a group. */
3390 if (!CASE_LOW (ci1->expr))
3391 return -1;
3392 else if (!CASE_LOW (ci2->expr))
3393 return 1;
3394 else
3395 return tree_int_cst_compare (CASE_LOW (ci1->expr),
3396 CASE_LOW (ci2->expr));
3398 else
3399 return 1;
3402 /* Determine whether the outgoing edges of BB should receive an
3403 ASSERT_EXPR for each of the operands of BB's LAST statement.
3404 The last statement of BB must be a SWITCH_EXPR.
3406 If any of the sub-graphs rooted at BB have an interesting use of
3407 the predicate operands, an assert location node is added to the
3408 list of assertions for the corresponding operands. */
3410 static void
3411 find_switch_asserts (basic_block bb, gswitch *last)
3413 gimple_stmt_iterator bsi;
3414 tree op;
3415 edge e;
3416 struct case_info *ci;
3417 size_t n = gimple_switch_num_labels (last);
3418 #if GCC_VERSION >= 4000
3419 unsigned int idx;
3420 #else
3421 /* Work around GCC 3.4 bug (PR 37086). */
3422 volatile unsigned int idx;
3423 #endif
3425 bsi = gsi_for_stmt (last);
3426 op = gimple_switch_index (last);
3427 if (TREE_CODE (op) != SSA_NAME)
3428 return;
3430 /* Build a vector of case labels sorted by destination label. */
3431 ci = XNEWVEC (struct case_info, n);
3432 for (idx = 0; idx < n; ++idx)
3434 ci[idx].expr = gimple_switch_label (last, idx);
3435 ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
3437 edge default_edge = find_edge (bb, ci[0].bb);
3438 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3440 for (idx = 0; idx < n; ++idx)
3442 tree min, max;
3443 tree cl = ci[idx].expr;
3444 basic_block cbb = ci[idx].bb;
3446 min = CASE_LOW (cl);
3447 max = CASE_HIGH (cl);
3449 /* If there are multiple case labels with the same destination
3450 we need to combine them to a single value range for the edge. */
3451 if (idx + 1 < n && cbb == ci[idx + 1].bb)
3453 /* Skip labels until the last of the group. */
3454 do {
3455 ++idx;
3456 } while (idx < n && cbb == ci[idx].bb);
3457 --idx;
3459 /* Pick up the maximum of the case label range. */
3460 if (CASE_HIGH (ci[idx].expr))
3461 max = CASE_HIGH (ci[idx].expr);
3462 else
3463 max = CASE_LOW (ci[idx].expr);
3466 /* Can't extract a useful assertion out of a range that includes the
3467 default label. */
3468 if (min == NULL_TREE)
3469 continue;
3471 /* Find the edge to register the assert expr on. */
3472 e = find_edge (bb, cbb);
3474 /* Register the necessary assertions for the operand in the
3475 SWITCH_EXPR. */
3476 auto_vec<assert_info, 8> asserts;
3477 register_edge_assert_for (op, e,
3478 max ? GE_EXPR : EQ_EXPR,
3479 op, fold_convert (TREE_TYPE (op), min),
3480 asserts);
3481 if (max)
3482 register_edge_assert_for (op, e, LE_EXPR, op,
3483 fold_convert (TREE_TYPE (op), max),
3484 asserts);
3485 finish_register_edge_assert_for (e, bsi, asserts);
3488 XDELETEVEC (ci);
3490 if (!live_on_edge (default_edge, op))
3491 return;
3493 /* Now register, along the default edge, assertions that correspond to the
3494 anti-range of each case label. */
3495 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
3496 if (insertion_limit == 0)
3497 return;
3499 /* We can't do this if the default case shares a label with another case. */
3500 tree default_cl = gimple_switch_default_label (last);
3501 for (idx = 1; idx < n; idx++)
3503 tree min, max;
3504 tree cl = gimple_switch_label (last, idx);
3505 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3506 continue;
3508 min = CASE_LOW (cl);
3509 max = CASE_HIGH (cl);
3511 /* Combine contiguous case ranges to reduce the number of assertions
3512 to insert. */
3513 for (idx = idx + 1; idx < n; idx++)
3515 tree next_min, next_max;
3516 tree next_cl = gimple_switch_label (last, idx);
3517 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3518 break;
3520 next_min = CASE_LOW (next_cl);
3521 next_max = CASE_HIGH (next_cl);
3523 wide_int difference = (wi::to_wide (next_min)
3524 - wi::to_wide (max ? max : min));
3525 if (wi::eq_p (difference, 1))
3526 max = next_max ? next_max : next_min;
3527 else
3528 break;
3530 idx--;
3532 if (max == NULL_TREE)
3534 /* Register the assertion OP != MIN. */
3535 auto_vec<assert_info, 8> asserts;
3536 min = fold_convert (TREE_TYPE (op), min);
3537 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3538 asserts);
3539 finish_register_edge_assert_for (default_edge, bsi, asserts);
3541 else
3543 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3544 which will give OP the anti-range ~[MIN,MAX]. */
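/* For a label such as 'case 3 ... 7:' this registers
   (unsigned) OP - 3 > 4 on the default edge, i.e. OP is not in [3, 7].  */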
3545 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3546 min = fold_convert (TREE_TYPE (uop), min);
3547 max = fold_convert (TREE_TYPE (uop), max);
3549 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3550 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3551 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3552 NULL, default_edge, bsi);
3555 if (--insertion_limit == 0)
3556 break;
3561 /* Traverse all the statements in block BB looking for statements that
3562 may generate useful assertions for the SSA names in their operands.
3563 If a statement produces a useful assertion A for name N_i, then the
3564 list of assertions already generated for N_i is scanned to
3565 determine if A is actually needed.
3567 If N_i already had the assertion A at a location dominating the
3568 current location, then nothing needs to be done. Otherwise, the
3569 new location for A is recorded instead.
3571 1- For every statement S in BB, all the variables used by S are
3572 added to bitmap FOUND_IN_SUBGRAPH.
3574 2- If statement S uses an operand N in a way that exposes a known
3575 value range for N, then if N was not already generated by an
3576 ASSERT_EXPR, create a new assert location for N. For instance,
3577 if N is a pointer and the statement dereferences it, we can
3578 assume that N is not NULL.
3580 3- COND_EXPRs are a special case of #2. We can derive range
3581 information from the predicate but need to insert different
3582 ASSERT_EXPRs for each of the sub-graphs rooted at the
3583 conditional block. If the last statement of BB is a conditional
3584 expression of the form 'X op Y', then
3586 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3588 b) If the conditional is the only entry point to the sub-graph
3589 corresponding to the THEN_CLAUSE, recurse into it. On
3590 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3591 an ASSERT_EXPR is added for the corresponding variable.
3593 c) Repeat step (b) on the ELSE_CLAUSE.
3595 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3597 For instance,
3599 if (a == 9)
3600 b = a;
3601 else
3602 b = c + 1;
3604 In this case, an assertion on the THEN clause is useful to
3605 determine that 'a' is always 9 on that edge. However, an assertion
3606 on the ELSE clause would be unnecessary.
3608 4- If BB does not end in a conditional expression, then we recurse
3609 into BB's dominator children.
3611 At the end of the recursive traversal, every SSA name will have a
3612 list of locations where ASSERT_EXPRs should be added. When a new
3613 location for name N is found, it is registered by calling
3614 register_new_assert_for. That function keeps track of all the
3615 registered assertions to prevent adding unnecessary assertions.
3616 For instance, if a pointer P_4 is dereferenced more than once in a
3617 dominator tree, only the location dominating all the dereference of
3618 P_4 will receive an ASSERT_EXPR. */
3620 static void
3621 find_assert_locations_1 (basic_block bb, sbitmap live)
3623 gimple *last;
3625 last = last_stmt (bb);
3627 /* If BB's last statement is a conditional statement involving integer
3628 operands, determine if we need to add ASSERT_EXPRs. */
3629 if (last
3630 && gimple_code (last) == GIMPLE_COND
3631 && !fp_predicate (last)
3632 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3633 find_conditional_asserts (bb, as_a <gcond *> (last));
3635 /* If BB's last statement is a switch statement involving integer
3636 operands, determine if we need to add ASSERT_EXPRs. */
3637 if (last
3638 && gimple_code (last) == GIMPLE_SWITCH
3639 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3640 find_switch_asserts (bb, as_a <gswitch *> (last));
3642 /* Traverse all the statements in BB marking used names and looking
3643 for statements that may infer assertions for their used operands. */
3644 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3645 gsi_prev (&si))
3647 gimple *stmt;
3648 tree op;
3649 ssa_op_iter i;
3651 stmt = gsi_stmt (si);
3653 if (is_gimple_debug (stmt))
3654 continue;
3656 /* See if we can derive an assertion for any of STMT's operands. */
3657 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3659 tree value;
3660 enum tree_code comp_code;
3662 /* If op is not live beyond this stmt, do not bother to insert
3663 asserts for it. */
3664 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
3665 continue;
3667 /* If OP is used in such a way that we can infer a value
3668 range for it, and we don't find a previous assertion for
3669 it, create a new assertion location node for OP. */
3670 if (infer_value_range (stmt, op, &comp_code, &value))
3672 /* If we are able to infer a nonzero value range for OP,
3673 then walk backwards through the use-def chain to see if OP
3674 was set via a typecast.
3676 If so, then we can also infer a nonzero value range
3677 for the operand of the NOP_EXPR. */
3678 if (comp_code == NE_EXPR && integer_zerop (value))
3680 tree t = op;
3681 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3683 while (is_gimple_assign (def_stmt)
3684 && CONVERT_EXPR_CODE_P
3685 (gimple_assign_rhs_code (def_stmt))
3686 && TREE_CODE
3687 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3688 && POINTER_TYPE_P
3689 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3691 t = gimple_assign_rhs1 (def_stmt);
3692 def_stmt = SSA_NAME_DEF_STMT (t);
3694 /* Note we want to register the assert for the
3695 operand of the NOP_EXPR after SI, not after the
3696 conversion. */
3697 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3698 register_new_assert_for (t, t, comp_code, value,
3699 bb, NULL, si);
3703 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3707 /* Update live. */
3708 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3709 bitmap_set_bit (live, SSA_NAME_VERSION (op));
3710 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3711 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3714 /* Traverse all PHI nodes in BB, updating live. */
3715 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3716 gsi_next (&si))
3718 use_operand_p arg_p;
3719 ssa_op_iter i;
3720 gphi *phi = si.phi ();
3721 tree res = gimple_phi_result (phi);
3723 if (virtual_operand_p (res))
3724 continue;
3726 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3728 tree arg = USE_FROM_PTR (arg_p);
3729 if (TREE_CODE (arg) == SSA_NAME)
3730 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3733 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3737 /* Do an RPO walk over the function computing SSA name liveness
3738 on-the-fly and deciding on assert expressions to insert. */
3740 static void
3741 find_assert_locations (void)
3743 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3744 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3745 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3746 int rpo_cnt, i;
3748 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3749 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3750 for (i = 0; i < rpo_cnt; ++i)
3751 bb_rpo[rpo[i]] = i;
3753 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3754 the order in which we compute liveness and insert asserts, we would
3755 otherwise fail to insert asserts into the loop latch. */
3756 loop_p loop;
3757 FOR_EACH_LOOP (loop, 0)
3759 i = loop->latch->index;
3760 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3761 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3762 !gsi_end_p (gsi); gsi_next (&gsi))
3764 gphi *phi = gsi.phi ();
3765 if (virtual_operand_p (gimple_phi_result (phi)))
3766 continue;
3767 tree arg = gimple_phi_arg_def (phi, j);
3768 if (TREE_CODE (arg) == SSA_NAME)
3770 if (live[i] == NULL)
3772 live[i] = sbitmap_alloc (num_ssa_names);
3773 bitmap_clear (live[i]);
3775 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
3780 for (i = rpo_cnt - 1; i >= 0; --i)
3782 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3783 edge e;
3784 edge_iterator ei;
3786 if (!live[rpo[i]])
3788 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
3789 bitmap_clear (live[rpo[i]]);
3792 /* Process BB and update the live information with uses in
3793 this block. */
3794 find_assert_locations_1 (bb, live[rpo[i]]);
3796 /* Merge liveness into the predecessor blocks and free it. */
3797 if (!bitmap_empty_p (live[rpo[i]]))
3799 int pred_rpo = i;
3800 FOR_EACH_EDGE (e, ei, bb->preds)
3802 int pred = e->src->index;
3803 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3804 continue;
3806 if (!live[pred])
3808 live[pred] = sbitmap_alloc (num_ssa_names);
3809 bitmap_clear (live[pred]);
3811 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
3813 if (bb_rpo[pred] < pred_rpo)
3814 pred_rpo = bb_rpo[pred];
3817 /* Record the RPO number of the last visited block that needs
3818 live information from this block. */
3819 last_rpo[rpo[i]] = pred_rpo;
3821 else
3823 sbitmap_free (live[rpo[i]]);
3824 live[rpo[i]] = NULL;
3827 /* We can free all successors' live bitmaps if all their
3828 predecessors have already been visited. */
3829 FOR_EACH_EDGE (e, ei, bb->succs)
3830 if (last_rpo[e->dest->index] == i
3831 && live[e->dest->index])
3833 sbitmap_free (live[e->dest->index]);
3834 live[e->dest->index] = NULL;
3838 XDELETEVEC (rpo);
3839 XDELETEVEC (bb_rpo);
3840 XDELETEVEC (last_rpo);
3841 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
3842 if (live[i])
3843 sbitmap_free (live[i]);
3844 XDELETEVEC (live);
3847 /* Create an ASSERT_EXPR for NAME and insert it in the location
3848 indicated by LOC. Return true if we made any edge insertions. */
3850 static bool
3851 process_assert_insertions_for (tree name, assert_locus *loc)
3853 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3854 gimple *stmt;
3855 tree cond;
3856 gimple *assert_stmt;
3857 edge_iterator ei;
3858 edge e;
3860 /* If we have X <=> X do not insert an assert expr for that. */
3861 if (loc->expr == loc->val)
3862 return false;
3864 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3865 assert_stmt = build_assert_expr_for (cond, name);
3866 if (loc->e)
3868 /* We have been asked to insert the assertion on an edge. This
3869 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3870 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3871 || (gimple_code (gsi_stmt (loc->si))
3872 == GIMPLE_SWITCH));
3874 gsi_insert_on_edge (loc->e, assert_stmt);
3875 return true;
3878 /* If the stmt iterator points at the end then this is an insertion
3879 at the beginning of a block. */
3880 if (gsi_end_p (loc->si))
3882 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3883 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3884 return false;
3887 /* Otherwise, we can insert right after LOC->SI, provided the
3888 statement is not one that must end the basic block. */
3889 stmt = gsi_stmt (loc->si);
3890 if (!stmt_ends_bb_p (stmt))
3892 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3893 return false;
3896 /* If STMT must be the last statement in BB, we can only insert new
3897 assertions on the non-abnormal edge out of BB. Note that since
3898 STMT is not control flow, there may only be one non-abnormal/eh edge
3899 out of BB. */
3900 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3901 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3903 gsi_insert_on_edge (e, assert_stmt);
3904 return true;
3907 gcc_unreachable ();
3910 /* Qsort helper for sorting assert locations. If stable is true, don't
3911 use iterative_hash_expr because it can be unstable for -fcompare-debug;
3912 on the other hand, some pointers might be NULL. */
3914 template <bool stable>
3915 static int
3916 compare_assert_loc (const void *pa, const void *pb)
3918 assert_locus * const a = *(assert_locus * const *)pa;
3919 assert_locus * const b = *(assert_locus * const *)pb;
3921 /* If STABLE, some asserts might have been optimized away already;
3922 sort them last. */
3923 if (stable)
3925 if (a == NULL)
3926 return b != NULL;
3927 else if (b == NULL)
3928 return -1;
3931 if (a->e == NULL && b->e != NULL)
3932 return 1;
3933 else if (a->e != NULL && b->e == NULL)
3934 return -1;
3936 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3937 no need to test both a->e and b->e. */
3939 /* Sort by destination index. */
3940 if (a->e == NULL)
3942 else if (a->e->dest->index > b->e->dest->index)
3943 return 1;
3944 else if (a->e->dest->index < b->e->dest->index)
3945 return -1;
3947 /* Sort by comp_code. */
3948 if (a->comp_code > b->comp_code)
3949 return 1;
3950 else if (a->comp_code < b->comp_code)
3951 return -1;
3953 hashval_t ha, hb;
3955 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3956 uses DECL_UID of the VAR_DECL, so sorting might differ between
3957 -g and -g0. When doing the removal of redundant assert exprs
3958 and commonization to successors, this does not matter, but
3959 the final sort needs to be stable. */
3960 if (stable)
3962 ha = 0;
3963 hb = 0;
3965 else
3967 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3968 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3971 /* Break the tie using hashing and source/bb index. */
3972 if (ha == hb)
3973 return (a->e != NULL
3974 ? a->e->src->index - b->e->src->index
3975 : a->bb->index - b->bb->index);
3976 return ha > hb ? 1 : -1;
3979 /* Process all the insertions registered for every name N_i in
3980 NEED_ASSERT_FOR. The list of assertions to be inserted is
3981 found in ASSERTS_FOR[i]. */
3983 static void
3984 process_assert_insertions (void)
3986 unsigned i;
3987 bitmap_iterator bi;
3988 bool update_edges_p = false;
3989 int num_asserts = 0;
3991 if (dump_file && (dump_flags & TDF_DETAILS))
3992 dump_all_asserts (dump_file);
3994 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3996 assert_locus *loc = asserts_for[i];
3997 gcc_assert (loc);
3999 auto_vec<assert_locus *, 16> asserts;
4000 for (; loc; loc = loc->next)
4001 asserts.safe_push (loc);
4002 asserts.qsort (compare_assert_loc<false>);
4004 /* Push down common asserts to successors and remove redundant ones. */
4005 unsigned ecnt = 0;
4006 assert_locus *common = NULL;
4007 unsigned commonj = 0;
4008 for (unsigned j = 0; j < asserts.length (); ++j)
4010 loc = asserts[j];
4011 if (! loc->e)
4012 common = NULL;
4013 else if (! common
4014 || loc->e->dest != common->e->dest
4015 || loc->comp_code != common->comp_code
4016 || ! operand_equal_p (loc->val, common->val, 0)
4017 || ! operand_equal_p (loc->expr, common->expr, 0))
4019 commonj = j;
4020 common = loc;
4021 ecnt = 1;
4023 else if (loc->e == asserts[j-1]->e)
4025 /* Remove duplicate asserts. */
4026 if (commonj == j - 1)
4028 commonj = j;
4029 common = loc;
4031 free (asserts[j-1]);
4032 asserts[j-1] = NULL;
4034 else
4036 ecnt++;
4037 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
4039 /* We have the same assertion on all incoming edges of a BB.
4040 Insert it at the beginning of that block. */
4041 loc->bb = loc->e->dest;
4042 loc->e = NULL;
4043 loc->si = gsi_none ();
4044 common = NULL;
4045 /* Clear asserts commoned. */
4046 for (; commonj != j; ++commonj)
4047 if (asserts[commonj])
4049 free (asserts[commonj]);
4050 asserts[commonj] = NULL;
4056 /* The asserts vector sorting above might be unstable for
4057 -fcompare-debug; sort again to ensure a stable sort. */
4058 asserts.qsort (compare_assert_loc<true>);
4059 for (unsigned j = 0; j < asserts.length (); ++j)
4061 loc = asserts[j];
4062 if (! loc)
4063 break;
4064 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4065 num_asserts++;
4066 free (loc);
4070 if (update_edges_p)
4071 gsi_commit_edge_inserts ();
4073 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4074 num_asserts);
4078 /* Traverse the flowgraph looking for conditional jumps to insert range
4079 expressions. These range expressions are meant to provide information
4080 to optimizations that need to reason in terms of value ranges. They
4081 will not be expanded into RTL. For instance, given:
4083 x = ...
4084 y = ...
4085 if (x < y)
4086 y = x - 2;
4087 else
4088 x = y + 3;
4090 this pass will transform the code into:
4092 x = ...
4093 y = ...
4094 if (x < y)
4096 x = ASSERT_EXPR <x, x < y>
4097 y = x - 2
4099 else
4101 y = ASSERT_EXPR <y, x >= y>
4102 x = y + 3
4105 The idea is that once copy and constant propagation have run, other
4106 optimizations will be able to determine what ranges of values 'x' can
4107 take in different paths of the code, simply by checking the reaching
4108 definition of 'x'. */
4110 static void
4111 insert_range_assertions (void)
4113 need_assert_for = BITMAP_ALLOC (NULL);
4114 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4116 calculate_dominance_info (CDI_DOMINATORS);
4118 find_assert_locations ();
4119 if (!bitmap_empty_p (need_assert_for))
4121 process_assert_insertions ();
4122 update_ssa (TODO_update_ssa_no_phi);
4125 if (dump_file && (dump_flags & TDF_DETAILS))
4127 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4128 dump_function_to_file (current_function_decl, dump_file, dump_flags);
4131 free (asserts_for);
4132 BITMAP_FREE (need_assert_for);
4135 class vrp_prop : public ssa_propagation_engine
4137 public:
4138 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4139 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4141 void vrp_initialize (void);
4142 void vrp_finalize (bool);
4143 void check_all_array_refs (void);
4144 void check_array_ref (location_t, tree, bool);
4145 void check_mem_ref (location_t, tree, bool);
4146 void search_for_addr_array (tree, location_t);
4148 class vr_values vr_values;
4149 /* Temporary delegator to minimize code churn. */
4150 value_range *get_value_range (const_tree op)
4151 { return vr_values.get_value_range (op); }
4152 void set_defs_to_varying (gimple *stmt)
4153 { return vr_values.set_defs_to_varying (stmt); }
4154 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4155 tree *output_p, value_range *vr)
4156 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4157 bool update_value_range (const_tree op, value_range *vr)
4158 { return vr_values.update_value_range (op, vr); }
4159 void extract_range_basic (value_range *vr, gimple *stmt)
4160 { vr_values.extract_range_basic (vr, stmt); }
4161 void extract_range_from_phi_node (gphi *phi, value_range *vr)
4162 { vr_values.extract_range_from_phi_node (phi, vr); }
4164 /* Checks one ARRAY_REF in REF, located at LOCATION. Ignores flexible arrays
4165 and "struct" hacks. If VRP can determine that the
4166 array subscript is a constant, check if it is outside valid
4167 range. If the array subscript is a RANGE, warn if it is
4168 non-overlapping with valid range.
4169 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
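/* As a rough illustration of the checks above (hypothetical user code,
   for exposition only):

     extern int a[10];
     ... = a[10];     <-- warned: "array subscript 10 is above array bounds"
     ... = &a[10];    <-- accepted when IGNORE_OFF_BY_ONE: only the
                          just-past-the-end address is taken

   The first access is flagged because the constant subscript exceeds the
   upper bound 9; the second is covered by the off-by-one allowance.  */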
4171 void
4172 vrp_prop::check_array_ref (location_t location, tree ref,
4173 bool ignore_off_by_one)
4175 const value_range *vr = NULL;
4176 tree low_sub, up_sub;
4177 tree low_bound, up_bound, up_bound_p1;
4179 if (TREE_NO_WARNING (ref))
4180 return;
4182 low_sub = up_sub = TREE_OPERAND (ref, 1);
4183 up_bound = array_ref_up_bound (ref);
4185 if (!up_bound
4186 || TREE_CODE (up_bound) != INTEGER_CST
4187 || (warn_array_bounds < 2
4188 && array_at_struct_end_p (ref)))
4190 /* Accesses to trailing arrays via pointers may access storage
4191 beyond the type's array bounds. For such arrays, or for flexible
4192 array members, as well as for other arrays of an unknown size,
4193 replace the upper bound with a more permissive one that assumes
4194 the size of the largest object is PTRDIFF_MAX. */
4195 tree eltsize = array_ref_element_size (ref);
4197 if (TREE_CODE (eltsize) != INTEGER_CST
4198 || integer_zerop (eltsize))
4200 up_bound = NULL_TREE;
4201 up_bound_p1 = NULL_TREE;
4203 else
4205 tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4206 tree arg = TREE_OPERAND (ref, 0);
4207 poly_int64 off;
4209 if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4210 maxbound = wide_int_to_tree (sizetype,
4211 wi::sub (wi::to_wide (maxbound),
4212 off));
4213 else
4214 maxbound = fold_convert (sizetype, maxbound);
4216 up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4218 up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4219 build_int_cst (ptrdiff_type_node, 1));
4222 else
4223 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4224 build_int_cst (TREE_TYPE (up_bound), 1));
4226 low_bound = array_ref_low_bound (ref);
4228 tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4230 bool warned = false;
4232 /* Empty array. */
4233 if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4234 warned = warning_at (location, OPT_Warray_bounds,
4235 "array subscript %E is above array bounds of %qT",
4236 low_bound, artype);
4238 if (TREE_CODE (low_sub) == SSA_NAME)
4240 vr = get_value_range (low_sub);
4241 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4243 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
4244 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
4248 if (vr && vr->type == VR_ANTI_RANGE)
4250 if (up_bound
4251 && TREE_CODE (up_sub) == INTEGER_CST
4252 && (ignore_off_by_one
4253 ? tree_int_cst_lt (up_bound, up_sub)
4254 : tree_int_cst_le (up_bound, up_sub))
4255 && TREE_CODE (low_sub) == INTEGER_CST
4256 && tree_int_cst_le (low_sub, low_bound))
4257 warned = warning_at (location, OPT_Warray_bounds,
4258 "array subscript [%E, %E] is outside "
4259 "array bounds of %qT",
4260 low_sub, up_sub, artype);
4262 else if (up_bound
4263 && TREE_CODE (up_sub) == INTEGER_CST
4264 && (ignore_off_by_one
4265 ? !tree_int_cst_le (up_sub, up_bound_p1)
4266 : !tree_int_cst_le (up_sub, up_bound)))
4268 if (dump_file && (dump_flags & TDF_DETAILS))
4270 fprintf (dump_file, "Array bound warning for ");
4271 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4272 fprintf (dump_file, "\n");
4274 warned = warning_at (location, OPT_Warray_bounds,
4275 "array subscript %E is above array bounds of %qT",
4276 up_sub, artype);
4278 else if (TREE_CODE (low_sub) == INTEGER_CST
4279 && tree_int_cst_lt (low_sub, low_bound))
4281 if (dump_file && (dump_flags & TDF_DETAILS))
4283 fprintf (dump_file, "Array bound warning for ");
4284 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4285 fprintf (dump_file, "\n");
4287 warned = warning_at (location, OPT_Warray_bounds,
4288 "array subscript %E is below array bounds of %qT",
4289 low_sub, artype);
4292 if (warned)
4294 ref = TREE_OPERAND (ref, 0);
4296 if (DECL_P (ref))
4297 inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
4299 TREE_NO_WARNING (ref) = 1;
4303 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
4304 references to string constants. If VRP can determine that the array
4305 subscript is a constant, check if it is outside valid range.
4306 If the array subscript is a RANGE, warn if it is non-overlapping
4307 with valid range.
4308 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
4309 (used to allow one-past-the-end indices for code that takes
4310 the address of the just-past-the-end element of an array). */
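/* A hypothetical example of a reference this catches (names made up,
   for exposition only):

     char a[4];
     ... = *(&a[0] + n_2);   <-- with n_2 in [4, 6]: "array subscript
                                 [4, 6] is outside array bounds of 'char[4]'"

   Taking only the just-past-the-end address &a[0] + 4 is accepted when
   IGNORE_OFF_BY_ONE is set, as the more permissive upper bound applies.  */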
4312 void
4313 vrp_prop::check_mem_ref (location_t location, tree ref,
4314 bool ignore_off_by_one)
4316 if (TREE_NO_WARNING (ref))
4317 return;
4319 tree arg = TREE_OPERAND (ref, 0);
4320 /* The constant and variable offset of the reference. */
4321 tree cstoff = TREE_OPERAND (ref, 1);
4322 tree varoff = NULL_TREE;
4324 const offset_int maxobjsize = tree_to_shwi (max_object_size ());
4326 /* The array or string constant bounds in bytes. Initially set
4327 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
4328 determined. */
4329 offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
4331 /* The minimum and maximum intermediate offset. For a reference
4332 to be valid, not only must the final offset/subscript be
4333 in bounds but all intermediate offsets should be as well.
4334 GCC may be able to deal gracefully with such out-of-bounds
4335 offsets so the checking is only enabled at -Warray-bounds=2
4336 where it may help detect bugs in uses of the intermediate
4337 offsets that could otherwise not be detectable. */
4338 offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
4339 offset_int extrema[2] = { 0, wi::abs (ioff) };
4341 /* The range of the byte offset into the reference. */
4342 offset_int offrange[2] = { 0, 0 };
4344 const value_range *vr = NULL;
4346 /* Determine the offsets and increment OFFRANGE for the bounds of each.
4347 The loop computes the range of the final offset for expressions
4348 such as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs
4349 in some range. */
4350 while (TREE_CODE (arg) == SSA_NAME)
4352 gimple *def = SSA_NAME_DEF_STMT (arg);
4353 if (!is_gimple_assign (def))
4354 break;
4356 tree_code code = gimple_assign_rhs_code (def);
4357 if (code == POINTER_PLUS_EXPR)
4359 arg = gimple_assign_rhs1 (def);
4360 varoff = gimple_assign_rhs2 (def);
4362 else if (code == ASSERT_EXPR)
4364 arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
4365 continue;
4367 else
4368 return;
4370 /* VAROFF should always be a SSA_NAME here (and not even
4371 INTEGER_CST) but there's no point in taking chances. */
4372 if (TREE_CODE (varoff) != SSA_NAME)
4373 break;
4375 vr = get_value_range (varoff);
4376 if (!vr || vr->type == VR_UNDEFINED || !vr->min || !vr->max)
4377 break;
4379 if (TREE_CODE (vr->min) != INTEGER_CST
4380 || TREE_CODE (vr->max) != INTEGER_CST)
4381 break;
4383 if (vr->type == VR_RANGE)
4385 if (tree_int_cst_lt (vr->min, vr->max))
4387 offset_int min
4388 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min));
4389 offset_int max
4390 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max));
4391 if (min < max)
4393 offrange[0] += min;
4394 offrange[1] += max;
4396 else
4398 offrange[0] += max;
4399 offrange[1] += min;
4402 else
4404 /* Conservatively add [-MAXOBJSIZE - 1, MAXOBJSIZE]
4405 to OFFRANGE. */
4406 offrange[0] += arrbounds[0];
4407 offrange[1] += arrbounds[1];
4410 else
4412 /* For an anti-range, analogously to the above, conservatively
4413 add [-MAXOBJSIZE - 1, MAXOBJSIZE] to OFFRANGE. */
4414 offrange[0] += arrbounds[0];
4415 offrange[1] += arrbounds[1];
4418 /* Keep track of the minimum and maximum offset. */
4419 if (offrange[1] < 0 && offrange[1] < extrema[0])
4420 extrema[0] = offrange[1];
4421 if (offrange[0] > 0 && offrange[0] > extrema[1])
4422 extrema[1] = offrange[0];
4424 if (offrange[0] < arrbounds[0])
4425 offrange[0] = arrbounds[0];
4427 if (offrange[1] > arrbounds[1])
4428 offrange[1] = arrbounds[1];
4431 if (TREE_CODE (arg) == ADDR_EXPR)
4433 arg = TREE_OPERAND (arg, 0);
4434 if (TREE_CODE (arg) != STRING_CST
4435 && TREE_CODE (arg) != VAR_DECL)
4436 return;
4438 else
4439 return;
4441 /* The type of the object being referred to. It can be an array,
4442 string literal, or a non-array type when the MEM_REF represents
4443 a reference/subscript via a pointer to an object that is not
4444 an element of an array. References to members of structs and
4445 unions are excluded because MEM_REF doesn't make it possible
4446 to identify the member where the reference originated.
4447 Incomplete types are excluded as well because their size is
4448 not known. */
4449 tree reftype = TREE_TYPE (arg);
4450 if (POINTER_TYPE_P (reftype)
4451 || !COMPLETE_TYPE_P (reftype)
4452 || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST
4453 || RECORD_OR_UNION_TYPE_P (reftype))
4454 return;
4456 offset_int eltsize;
4457 if (TREE_CODE (reftype) == ARRAY_TYPE)
4459 eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
4461 if (tree dom = TYPE_DOMAIN (reftype))
4463 tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
4464 if (array_at_struct_end_p (arg)
4465 || !bnds[0] || !bnds[1])
4467 arrbounds[0] = 0;
4468 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4470 else
4472 arrbounds[0] = wi::to_offset (bnds[0]) * eltsize;
4473 arrbounds[1] = (wi::to_offset (bnds[1]) + 1) * eltsize;
4476 else
4478 arrbounds[0] = 0;
4479 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4482 if (TREE_CODE (ref) == MEM_REF)
4484 /* For MEM_REF determine a tighter bound of the non-array
4485 element type. */
4486 tree eltype = TREE_TYPE (reftype);
4487 while (TREE_CODE (eltype) == ARRAY_TYPE)
4488 eltype = TREE_TYPE (eltype);
4489 eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
4492 else
4494 eltsize = 1;
4495 arrbounds[0] = 0;
4496 arrbounds[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype));
4499 offrange[0] += ioff;
4500 offrange[1] += ioff;
4502 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
4503 is set (when taking the address of the one-past-last element
4504 of an array) but always use the stricter bound in diagnostics. */
4505 offset_int ubound = arrbounds[1];
4506 if (ignore_off_by_one)
4507 ubound += 1;
4509 if (offrange[0] >= ubound || offrange[1] < arrbounds[0])
4511 /* Treat a reference to a non-array object as one to an array
4512 of a single element. */
4513 if (TREE_CODE (reftype) != ARRAY_TYPE)
4514 reftype = build_array_type_nelts (reftype, 1);
4516 if (TREE_CODE (ref) == MEM_REF)
4518 /* Extract the element type out of MEM_REF and use its size
4519 to compute the index to print in the diagnostic; arrays
4520 in MEM_REF don't mean anything. */
4521 tree type = TREE_TYPE (ref);
4522 while (TREE_CODE (type) == ARRAY_TYPE)
4523 type = TREE_TYPE (type);
4524 tree size = TYPE_SIZE_UNIT (type);
4525 offrange[0] = offrange[0] / wi::to_offset (size);
4526 offrange[1] = offrange[1] / wi::to_offset (size);
4528 else
4530 /* For anything other than MEM_REF, compute the index to
4531 print in the diagnostic as the offset over element size. */
4532 offrange[0] = offrange[0] / eltsize;
4533 offrange[1] = offrange[1] / eltsize;
4536 bool warned;
4537 if (offrange[0] == offrange[1])
4538 warned = warning_at (location, OPT_Warray_bounds,
4539 "array subscript %wi is outside array bounds "
4540 "of %qT",
4541 offrange[0].to_shwi (), reftype);
4542 else
4543 warned = warning_at (location, OPT_Warray_bounds,
4544 "array subscript [%wi, %wi] is outside "
4545 "array bounds of %qT",
4546 offrange[0].to_shwi (),
4547 offrange[1].to_shwi (), reftype);
4548 if (warned && DECL_P (arg))
4549 inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
4551 TREE_NO_WARNING (ref) = 1;
4552 return;
4555 if (warn_array_bounds < 2)
4556 return;
4558 /* At level 2, also check intermediate offsets. */
4559 int i = 0;
4560 if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
4562 HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
4564 warning_at (location, OPT_Warray_bounds,
4565 "intermediate array offset %wi is outside array bounds "
4566 "of %qT",
4567 tmpidx, reftype);
4568 TREE_NO_WARNING (ref) = 1;
4572 /* Searches whether the expr T, located at LOCATION, computes the
4573 address of an ARRAY_REF, and calls check_array_ref on it. */
4575 void
4576 vrp_prop::search_for_addr_array (tree t, location_t location)
4578 /* Check each ARRAY_REF and MEM_REF in the reference chain. */
4581 if (TREE_CODE (t) == ARRAY_REF)
4582 check_array_ref (location, t, true /*ignore_off_by_one*/);
4583 else if (TREE_CODE (t) == MEM_REF)
4584 check_mem_ref (location, t, true /*ignore_off_by_one*/);
4586 t = TREE_OPERAND (t, 0);
4588 while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
4590 if (TREE_CODE (t) != MEM_REF
4591 || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
4592 || TREE_NO_WARNING (t))
4593 return;
4595 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4596 tree low_bound, up_bound, el_sz;
4597 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
4598 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
4599 || !TYPE_DOMAIN (TREE_TYPE (tem)))
4600 return;
4602 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4603 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4604 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
4605 if (!low_bound
4606 || TREE_CODE (low_bound) != INTEGER_CST
4607 || !up_bound
4608 || TREE_CODE (up_bound) != INTEGER_CST
4609 || !el_sz
4610 || TREE_CODE (el_sz) != INTEGER_CST)
4611 return;
4613 offset_int idx;
4614 if (!mem_ref_offset (t).is_constant (&idx))
4615 return;
4617 bool warned = false;
4618 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4619 if (idx < 0)
4621 if (dump_file && (dump_flags & TDF_DETAILS))
4623 fprintf (dump_file, "Array bound warning for ");
4624 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4625 fprintf (dump_file, "\n");
4627 warned = warning_at (location, OPT_Warray_bounds,
4628 "array subscript %wi is below "
4629 "array bounds of %qT",
4630 idx.to_shwi (), TREE_TYPE (tem));
4632 else if (idx > (wi::to_offset (up_bound)
4633 - wi::to_offset (low_bound) + 1))
4635 if (dump_file && (dump_flags & TDF_DETAILS))
4637 fprintf (dump_file, "Array bound warning for ");
4638 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4639 fprintf (dump_file, "\n");
4641 warned = warning_at (location, OPT_Warray_bounds,
4642 "array subscript %wu is above "
4643 "array bounds of %qT",
4644 idx.to_uhwi (), TREE_TYPE (tem));
4647 if (warned)
4649 if (DECL_P (t))
4650 inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
4652 TREE_NO_WARNING (t) = 1;
4656 /* walk_tree() callback that checks if *TP is
4657 an ARRAY_REF inside an ADDR_EXPR (in which case an array
4658 subscript one past the valid range is allowed). Call
4659 check_array_ref for each ARRAY_REF found. The location is
4660 passed in DATA. */
4662 static tree
4663 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4665 tree t = *tp;
4666 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4667 location_t location;
4669 if (EXPR_HAS_LOCATION (t))
4670 location = EXPR_LOCATION (t);
4671 else
4672 location = gimple_location (wi->stmt);
4674 *walk_subtree = TRUE;
4676 vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4677 if (TREE_CODE (t) == ARRAY_REF)
4678 vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
4679 else if (TREE_CODE (t) == MEM_REF)
4680 vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4681 else if (TREE_CODE (t) == ADDR_EXPR)
4683 vrp_prop->search_for_addr_array (t, location);
4684 *walk_subtree = FALSE;
4687 return NULL_TREE;
4690 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4691 to walk over all statements of all reachable BBs and call
4692 check_array_bounds on them. */
4694 class check_array_bounds_dom_walker : public dom_walker
4696 public:
4697 check_array_bounds_dom_walker (vrp_prop *prop)
4698 : dom_walker (CDI_DOMINATORS,
4699 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4700 flags, so that we can merge in information on
4701 non-executable edges from vrp_folder. */
4702 REACHABLE_BLOCKS_PRESERVING_FLAGS),
4703 m_prop (prop) {}
4704 ~check_array_bounds_dom_walker () {}
4706 edge before_dom_children (basic_block) FINAL OVERRIDE;
4708 private:
4709 vrp_prop *m_prop;
4712 /* Implementation of dom_walker::before_dom_children.
4714 Walk over all statements of BB and call check_array_bounds on them,
4715 and determine if there's a unique successor edge. */
4717 edge
4718 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4720 gimple_stmt_iterator si;
4721 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4723 gimple *stmt = gsi_stmt (si);
4724 struct walk_stmt_info wi;
4725 if (!gimple_has_location (stmt)
4726 || is_gimple_debug (stmt))
4727 continue;
4729 memset (&wi, 0, sizeof (wi));
4731 wi.info = m_prop;
4733 walk_gimple_op (stmt, check_array_bounds, &wi);
4736 /* Determine if there's a unique successor edge, and if so, return
4737 that back to dom_walker, ensuring that we don't visit blocks that
4738 became unreachable during the VRP propagation
4739 (PR tree-optimization/83312). */
4740 return find_taken_edge (bb, NULL_TREE);
4743 /* Walk over all statements of all reachable BBs and call check_array_bounds
4744 on them. */
4746 void
4747 vrp_prop::check_all_array_refs ()
4749 check_array_bounds_dom_walker w (this);
4750 w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4753 /* Return true if all imm uses of VAR are either in STMT, or
4754 feed (optionally through a chain of single imm uses) a GIMPLE_COND
4755 in basic block COND_BB. */
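/* For instance, in the pattern handled by maybe_set_nonzero_bits below,
   the only uses of x_3 are the ASSERT_EXPR statement itself and the
   single-use chain _4 = x_3 & 31 feeding the GIMPLE_COND, so this
   predicate holds for x_3.  */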
4757 static bool
4758 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
4760 use_operand_p use_p, use2_p;
4761 imm_use_iterator iter;
4763 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
4764 if (USE_STMT (use_p) != stmt)
4766 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
4767 if (is_gimple_debug (use_stmt))
4768 continue;
4769 while (is_gimple_assign (use_stmt)
4770 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
4771 && single_imm_use (gimple_assign_lhs (use_stmt),
4772 &use2_p, &use_stmt2))
4773 use_stmt = use_stmt2;
4774 if (gimple_code (use_stmt) != GIMPLE_COND
4775 || gimple_bb (use_stmt) != cond_bb)
4776 return false;
4778 return true;
4781 /* Handle
4782 _4 = x_3 & 31;
4783 if (_4 != 0)
4784 goto <bb 6>;
4785 else
4786 goto <bb 7>;
4787 <bb 6>:
4788 __builtin_unreachable ();
4789 <bb 7>:
4790 x_5 = ASSERT_EXPR <x_3, ...>;
4791 If x_3 has no other immediate uses (checked by the caller)
4792 and VAR is the x_3 var from the ASSERT_EXPR, we can clear the
4793 low 5 bits from the non-zero bitmask. */
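/* E.g. if get_nonzero_bits (x_3) was 0xff before, it becomes 0xe0:
   on the edge to <bb 7> the condition _4 != 0 is false, hence
   x_3 & 31 == 0 and the low 5 bits are known to be clear.  */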
4795 void
4796 maybe_set_nonzero_bits (edge e, tree var)
4798 basic_block cond_bb = e->src;
4799 gimple *stmt = last_stmt (cond_bb);
4800 tree cst;
4802 if (stmt == NULL
4803 || gimple_code (stmt) != GIMPLE_COND
4804 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
4805 ? EQ_EXPR : NE_EXPR)
4806 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
4807 || !integer_zerop (gimple_cond_rhs (stmt)))
4808 return;
4810 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
4811 if (!is_gimple_assign (stmt)
4812 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
4813 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
4814 return;
4815 if (gimple_assign_rhs1 (stmt) != var)
4817 gimple *stmt2;
4819 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
4820 return;
4821 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
4822 if (!gimple_assign_cast_p (stmt2)
4823 || gimple_assign_rhs1 (stmt2) != var
4824 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
4825 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
4826 != TYPE_PRECISION (TREE_TYPE (var))))
4827 return;
4829 cst = gimple_assign_rhs2 (stmt);
4830 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
4831 wi::to_wide (cst)));
4834 /* Convert range assertion expressions into the implied copies and
4835 copy propagate away the copies. Doing the trivial copy propagation
4836 here avoids the need to run the full copy propagation pass after
4837 VRP.
4839 FIXME, this will eventually lead to copy propagation removing the
4840 names that had useful range information attached to them. For
4841 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
4842 then N_i will have the range [3, +INF].
4844 However, by converting the assertion into the implied copy
4845 operation N_i = N_j, we will then copy-propagate N_j into the uses
4846 of N_i and lose the range information. We may want to hold on to
4847 ASSERT_EXPRs a little while longer as the ranges could be used in
4848 things like jump threading.
4850 The problem with keeping ASSERT_EXPRs around is that passes after
4851 VRP need to handle them appropriately.
4853 Another approach would be to make the range information a first
4854 class property of the SSA_NAME so that it can be queried from
4855 any pass. This is made somewhat more complex by the need for
4856 multiple ranges to be associated with one SSA_NAME. */
4858 static void
4859 remove_range_assertions (void)
4861 basic_block bb;
4862 gimple_stmt_iterator si;
4863 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
4864 a basic block preceded by GIMPLE_COND branching to it and
4865 __builtin_trap, -1 if not yet checked, 0 otherwise. */
4866 int is_unreachable;
4868 /* Note that the BSI iterator bump happens at the bottom of the
4869 loop and no bump is necessary if we're removing the statement
4870 referenced by the current BSI. */
4871 FOR_EACH_BB_FN (bb, cfun)
4872 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
4874 gimple *stmt = gsi_stmt (si);
4876 if (is_gimple_assign (stmt)
4877 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
4879 tree lhs = gimple_assign_lhs (stmt);
4880 tree rhs = gimple_assign_rhs1 (stmt);
4881 tree var;
4883 var = ASSERT_EXPR_VAR (rhs);
4885 if (TREE_CODE (var) == SSA_NAME
4886 && !POINTER_TYPE_P (TREE_TYPE (lhs))
4887 && SSA_NAME_RANGE_INFO (lhs))
4889 if (is_unreachable == -1)
4891 is_unreachable = 0;
4892 if (single_pred_p (bb)
4893 && assert_unreachable_fallthru_edge_p
4894 (single_pred_edge (bb)))
4895 is_unreachable = 1;
4897 /* Handle
4898 if (x_7 >= 10 && x_7 < 20)
4899 __builtin_unreachable ();
4900 x_8 = ASSERT_EXPR <x_7, ...>;
4901 if the only uses of x_7 are in the ASSERT_EXPR and
4902 in the condition. In that case, we can copy the
4903 range info computed for x_8 in this pass
4904 to x_7 as well. */
4905 if (is_unreachable
4906 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
4907 single_pred (bb)))
4909 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
4910 SSA_NAME_RANGE_INFO (lhs)->get_min (),
4911 SSA_NAME_RANGE_INFO (lhs)->get_max ());
4912 maybe_set_nonzero_bits (single_pred_edge (bb), var);
4916 /* Propagate the RHS into every use of the LHS. For SSA names
4917 also propagate abnormals as it merely restores the original
4918 IL in this case (a replace_uses_by would assert). */
4919 if (TREE_CODE (var) == SSA_NAME)
4921 imm_use_iterator iter;
4922 use_operand_p use_p;
4923 gimple *use_stmt;
4924 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4925 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4926 SET_USE (use_p, var);
4928 else
4929 replace_uses_by (lhs, var);
4931 /* And finally, remove the copy; it is not needed. */
4932 gsi_remove (&si, true);
4933 release_defs (stmt);
4935 else
4937 if (!is_gimple_debug (gsi_stmt (si)))
4938 is_unreachable = 0;
4939 gsi_next (&si);
4944 /* Return true if STMT is interesting for VRP. */
4946 bool
4947 stmt_interesting_for_vrp (gimple *stmt)
4949 if (gimple_code (stmt) == GIMPLE_PHI)
4951 tree res = gimple_phi_result (stmt);
4952 return (!virtual_operand_p (res)
4953 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
4954 || POINTER_TYPE_P (TREE_TYPE (res))));
4956 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
4958 tree lhs = gimple_get_lhs (stmt);
4960 /* In general, assignments with virtual operands are not useful
4961 for deriving ranges, with the obvious exception of calls to
4962 builtin functions. */
4963 if (lhs && TREE_CODE (lhs) == SSA_NAME
4964 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4965 || POINTER_TYPE_P (TREE_TYPE (lhs)))
4966 && (is_gimple_call (stmt)
4967 || !gimple_vuse (stmt)))
4968 return true;
4969 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
4970 switch (gimple_call_internal_fn (stmt))
4972 case IFN_ADD_OVERFLOW:
4973 case IFN_SUB_OVERFLOW:
4974 case IFN_MUL_OVERFLOW:
4975 case IFN_ATOMIC_COMPARE_EXCHANGE:
4976 /* These internal calls return _Complex integer type,
4977 but are interesting to VRP nevertheless. */
4978 if (lhs && TREE_CODE (lhs) == SSA_NAME)
4979 return true;
4980 break;
4981 default:
4982 break;
4985 else if (gimple_code (stmt) == GIMPLE_COND
4986 || gimple_code (stmt) == GIMPLE_SWITCH)
4987 return true;
4989 return false;
4992 /* Initialization required by ssa_propagate engine. */
4994 void
4995 vrp_prop::vrp_initialize ()
4997 basic_block bb;
4999 FOR_EACH_BB_FN (bb, cfun)
5001 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
5002 gsi_next (&si))
5004 gphi *phi = si.phi ();
5005 if (!stmt_interesting_for_vrp (phi))
5007 tree lhs = PHI_RESULT (phi);
5008 set_value_range_to_varying (get_value_range (lhs));
5009 prop_set_simulate_again (phi, false);
5011 else
5012 prop_set_simulate_again (phi, true);
5015 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
5016 gsi_next (&si))
5018 gimple *stmt = gsi_stmt (si);
5020 /* If the statement is a control insn, we must simulate it
5021 at least once. Failing to do so means its outgoing edges
5022 will never get added. */
5023 if (stmt_ends_bb_p (stmt))
5024 prop_set_simulate_again (stmt, true);
5025 else if (!stmt_interesting_for_vrp (stmt))
5027 set_defs_to_varying (stmt);
5028 prop_set_simulate_again (stmt, false);
5030 else
5031 prop_set_simulate_again (stmt, true);
5036 /* Searches the case label vector of switch statement STMT for the index
5037 *IDX of the CASE_LABEL that includes the value VAL. The search is
5038 restricted to the range [START_IDX, n - 1] where n is the number of labels.
5040 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5041 returned.
5043 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5044 it is placed in IDX and false is returned.
5046 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5047 returned. */
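/* For example, given a (hypothetical) switch whose sorted labels after
   the default are

     1: case 1:     2: case 5 ... 8:     3: case 10:

   searching for VAL 6 stores 2 in *IDX and returns true, VAL 9 stores
   3 in *IDX and returns false, and VAL 11 stores 4 (== n) in *IDX and
   returns false.  */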
5049 bool
5050 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5052 size_t n = gimple_switch_num_labels (stmt);
5053 size_t low, high;
5055 /* Find case label for minimum of the value range or the next one.
5056 At each iteration we are searching in [low, high - 1]. */
5058 for (low = start_idx, high = n; high != low; )
5060 tree t;
5061 int cmp;
5062 /* Note that i != high, so we never ask for n. */
5063 size_t i = (high + low) / 2;
5064 t = gimple_switch_label (stmt, i);
5066 /* Cache the result of comparing CASE_LOW and val. */
5067 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5069 if (cmp == 0)
5071 /* Ranges cannot be empty. */
5072 *idx = i;
5073 return true;
5075 else if (cmp > 0)
5076 high = i;
5077 else
5079 low = i + 1;
5080 if (CASE_HIGH (t) != NULL
5081 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5083 *idx = i;
5084 return true;
5089 *idx = high;
5090 return false;
5093 /* Searches the case labels of switch statement STMT for the range of CASE_LABELs
5094 used for values between MIN and MAX. The first index is placed in MIN_IDX. The
5095 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5096 then MAX_IDX < MIN_IDX.
5097 Returns true if the default label is not needed. */
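/* Continuing the example above, MIN = 5 and MAX = 8 yields
   *MIN_IDX = *MAX_IDX = 2 and returns true, since the single label
   5 ... 8 covers the whole range; MIN = 5 and MAX = 10 yields
   *MIN_IDX = 2, *MAX_IDX = 3 and returns false, because the value 9
   is not covered and the default label is still needed.  */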
5099 bool
5100 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5101 size_t *max_idx)
5103 size_t i, j;
5104 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5105 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5107 if (i == j
5108 && min_take_default
5109 && max_take_default)
5111 /* Only the default case label reached.
5112 Return an empty range. */
5113 *min_idx = 1;
5114 *max_idx = 0;
5115 return false;
5117 else
5119 bool take_default = min_take_default || max_take_default;
5120 tree low, high;
5121 size_t k;
5123 if (max_take_default)
5124 j--;
5126 /* If the case label range is contiguous, we do not need
5127 the default case label. Verify that. */
5128 high = CASE_LOW (gimple_switch_label (stmt, i));
5129 if (CASE_HIGH (gimple_switch_label (stmt, i)))
5130 high = CASE_HIGH (gimple_switch_label (stmt, i));
5131 for (k = i + 1; k <= j; ++k)
5133 low = CASE_LOW (gimple_switch_label (stmt, k));
5134 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5136 take_default = true;
5137 break;
5139 high = low;
5140 if (CASE_HIGH (gimple_switch_label (stmt, k)))
5141 high = CASE_HIGH (gimple_switch_label (stmt, k));
5144 *min_idx = i;
5145 *max_idx = j;
5146 return !take_default;
5150 /* Evaluate statement STMT. If the statement produces a useful range,
5151 return SSA_PROP_INTERESTING and record the SSA name with the
5152 interesting range into *OUTPUT_P.
5154 If STMT is a conditional branch and we can determine its truth
5155 value, the taken edge is recorded in *TAKEN_EDGE_P.
5157 If STMT produces a varying value, return SSA_PROP_VARYING. */
5159 enum ssa_prop_result
5160 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5162 value_range vr = VR_INITIALIZER;
5163 tree lhs = gimple_get_lhs (stmt);
5164 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5166 if (*output_p)
5168 if (update_value_range (*output_p, &vr))
5170 if (dump_file && (dump_flags & TDF_DETAILS))
5172 fprintf (dump_file, "Found new range for ");
5173 print_generic_expr (dump_file, *output_p);
5174 fprintf (dump_file, ": ");
5175 dump_value_range (dump_file, &vr);
5176 fprintf (dump_file, "\n");
5179 if (vr.type == VR_VARYING)
5180 return SSA_PROP_VARYING;
5182 return SSA_PROP_INTERESTING;
5184 return SSA_PROP_NOT_INTERESTING;
5187 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5188 switch (gimple_call_internal_fn (stmt))
5190 case IFN_ADD_OVERFLOW:
5191 case IFN_SUB_OVERFLOW:
5192 case IFN_MUL_OVERFLOW:
5193 case IFN_ATOMIC_COMPARE_EXCHANGE:
5194 /* These internal calls return _Complex integer type,
5195 which VRP does not track, but the immediate uses
5196 thereof might be interesting. */
5197 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5199 imm_use_iterator iter;
5200 use_operand_p use_p;
5201 enum ssa_prop_result res = SSA_PROP_VARYING;
5203 set_value_range_to_varying (get_value_range (lhs));
5205 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5207 gimple *use_stmt = USE_STMT (use_p);
5208 if (!is_gimple_assign (use_stmt))
5209 continue;
5210 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5211 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5212 continue;
5213 tree rhs1 = gimple_assign_rhs1 (use_stmt);
5214 tree use_lhs = gimple_assign_lhs (use_stmt);
5215 if (TREE_CODE (rhs1) != rhs_code
5216 || TREE_OPERAND (rhs1, 0) != lhs
5217 || TREE_CODE (use_lhs) != SSA_NAME
5218 || !stmt_interesting_for_vrp (use_stmt)
5219 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5220 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5221 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5222 continue;
5224 /* If there is a change in the value range for any of the
5225 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5226 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
5227 or IMAGPART_EXPR immediate uses, but none of them have
5228 a change in their value ranges, return
5229 SSA_PROP_NOT_INTERESTING. If there are no
5230 {REAL,IMAG}PART_EXPR uses at all,
5231 return SSA_PROP_VARYING. */
5232 value_range new_vr = VR_INITIALIZER;
5233 extract_range_basic (&new_vr, use_stmt);
5234 const value_range *old_vr = get_value_range (use_lhs);
5235 if (old_vr->type != new_vr.type
5236 || !vrp_operand_equal_p (old_vr->min, new_vr.min)
5237 || !vrp_operand_equal_p (old_vr->max, new_vr.max)
5238 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
5239 res = SSA_PROP_INTERESTING;
5240 else
5241 res = SSA_PROP_NOT_INTERESTING;
5242 BITMAP_FREE (new_vr.equiv);
5243 if (res == SSA_PROP_INTERESTING)
5245 *output_p = lhs;
5246 return res;
5250 return res;
5252 break;
5253 default:
5254 break;
5257 /* All other statements produce nothing of interest for VRP, so mark
5258 their outputs varying and prevent further simulation. */
5259 set_defs_to_varying (stmt);
5261 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5264 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5265 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5266 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5267 possible such range. The resulting range is not canonicalized. */
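/* A few concrete instances of the classification below, using the same
   [] / () notation for VR0 / VR1 (illustrative values only):

     [0, 5]    union  [3, 9]     ->  [0, 9]       overlapping ranges
     [MIN, 2]  union  [7, MAX]   ->  ~[3, 6]      the gap becomes an anti-range
     ~[0, 0]   union  [0, 0]     ->  VR_VARYING   anti-range with an equal range  */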
5269 static void
5270 union_ranges (enum value_range_type *vr0type,
5271 tree *vr0min, tree *vr0max,
5272 enum value_range_type vr1type,
5273 tree vr1min, tree vr1max)
5275 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5276 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5278 /* [] is vr0, () is vr1 in the following classification comments. */
5279 if (mineq && maxeq)
5281 /* [( )] */
5282 if (*vr0type == vr1type)
5283 /* Nothing to do for equal ranges. */
5285 else if ((*vr0type == VR_RANGE
5286 && vr1type == VR_ANTI_RANGE)
5287 || (*vr0type == VR_ANTI_RANGE
5288 && vr1type == VR_RANGE))
5290 /* For the union of an anti-range with a range the result is varying. */
5291 goto give_up;
5293 else
5294 gcc_unreachable ();
5296 else if (operand_less_p (*vr0max, vr1min) == 1
5297 || operand_less_p (vr1max, *vr0min) == 1)
5299 /* [ ] ( ) or ( ) [ ]
5300 If the ranges have an empty intersection, the result of the union
5301 operation is the anti-range, or, if both are anti-ranges,
5302 it covers everything. */
5303 if (*vr0type == VR_ANTI_RANGE
5304 && vr1type == VR_ANTI_RANGE)
5305 goto give_up;
5306 else if (*vr0type == VR_ANTI_RANGE
5307 && vr1type == VR_RANGE)
5309 else if (*vr0type == VR_RANGE
5310 && vr1type == VR_ANTI_RANGE)
5312 *vr0type = vr1type;
5313 *vr0min = vr1min;
5314 *vr0max = vr1max;
5316 else if (*vr0type == VR_RANGE
5317 && vr1type == VR_RANGE)
5319 /* The result is the convex hull of both ranges. */
5320 if (operand_less_p (*vr0max, vr1min) == 1)
5322 /* If the result can be an anti-range, create one. */
5323 if (TREE_CODE (*vr0max) == INTEGER_CST
5324 && TREE_CODE (vr1min) == INTEGER_CST
5325 && vrp_val_is_min (*vr0min)
5326 && vrp_val_is_max (vr1max))
5328 tree min = int_const_binop (PLUS_EXPR,
5329 *vr0max,
5330 build_int_cst (TREE_TYPE (*vr0max), 1));
5331 tree max = int_const_binop (MINUS_EXPR,
5332 vr1min,
5333 build_int_cst (TREE_TYPE (vr1min), 1));
5334 if (!operand_less_p (max, min))
5336 *vr0type = VR_ANTI_RANGE;
5337 *vr0min = min;
5338 *vr0max = max;
5340 else
5341 *vr0max = vr1max;
5343 else
5344 *vr0max = vr1max;
5346 else
5348 /* If the result can be an anti-range, create one. */
5349 if (TREE_CODE (vr1max) == INTEGER_CST
5350 && TREE_CODE (*vr0min) == INTEGER_CST
5351 && vrp_val_is_min (vr1min)
5352 && vrp_val_is_max (*vr0max))
5354 tree min = int_const_binop (PLUS_EXPR,
5355 vr1max,
5356 build_int_cst (TREE_TYPE (vr1max), 1));
5357 tree max = int_const_binop (MINUS_EXPR,
5358 *vr0min,
5359 build_int_cst (TREE_TYPE (*vr0min), 1));
5360 if (!operand_less_p (max, min))
5362 *vr0type = VR_ANTI_RANGE;
5363 *vr0min = min;
5364 *vr0max = max;
5366 else
5367 *vr0min = vr1min;
5369 else
5370 *vr0min = vr1min;
5373 else
5374 gcc_unreachable ();
5376 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5377 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5379 /* [ ( ) ] or [( ) ] or [ ( )] */
5380 if (*vr0type == VR_RANGE
5381 && vr1type == VR_RANGE)
5383 else if (*vr0type == VR_ANTI_RANGE
5384 && vr1type == VR_ANTI_RANGE)
5386 *vr0type = vr1type;
5387 *vr0min = vr1min;
5388 *vr0max = vr1max;
5390 else if (*vr0type == VR_ANTI_RANGE
5391 && vr1type == VR_RANGE)
5393 /* Arbitrarily choose the right or left gap. */
5394 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5395 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5396 build_int_cst (TREE_TYPE (vr1min), 1));
5397 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5398 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5399 build_int_cst (TREE_TYPE (vr1max), 1));
5400 else
5401 goto give_up;
5403 else if (*vr0type == VR_RANGE
5404 && vr1type == VR_ANTI_RANGE)
5405 /* The result covers everything. */
5406 goto give_up;
5407 else
5408 gcc_unreachable ();
5410 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5411 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5413 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5414 if (*vr0type == VR_RANGE
5415 && vr1type == VR_RANGE)
5417 *vr0type = vr1type;
5418 *vr0min = vr1min;
5419 *vr0max = vr1max;
5421 else if (*vr0type == VR_ANTI_RANGE
5422 && vr1type == VR_ANTI_RANGE)
5424 else if (*vr0type == VR_RANGE
5425 && vr1type == VR_ANTI_RANGE)
5427 *vr0type = VR_ANTI_RANGE;
5428 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5430 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5431 build_int_cst (TREE_TYPE (*vr0min), 1));
5432 *vr0min = vr1min;
5434 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5436 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5437 build_int_cst (TREE_TYPE (*vr0max), 1));
5438 *vr0max = vr1max;
5440 else
5441 goto give_up;
5443 else if (*vr0type == VR_ANTI_RANGE
5444 && vr1type == VR_RANGE)
5445 /* The result covers everything. */
5446 goto give_up;
5447 else
5448 gcc_unreachable ();
5450 else if ((operand_less_p (vr1min, *vr0max) == 1
5451 || operand_equal_p (vr1min, *vr0max, 0))
5452 && operand_less_p (*vr0min, vr1min) == 1
5453 && operand_less_p (*vr0max, vr1max) == 1)
5455 /* [ ( ] ) or [ ]( ) */
5456 if (*vr0type == VR_RANGE
5457 && vr1type == VR_RANGE)
5458 *vr0max = vr1max;
5459 else if (*vr0type == VR_ANTI_RANGE
5460 && vr1type == VR_ANTI_RANGE)
5461 *vr0min = vr1min;
5462 else if (*vr0type == VR_ANTI_RANGE
5463 && vr1type == VR_RANGE)
5465 if (TREE_CODE (vr1min) == INTEGER_CST)
5466 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5467 build_int_cst (TREE_TYPE (vr1min), 1));
5468 else
5469 goto give_up;
5471 else if (*vr0type == VR_RANGE
5472 && vr1type == VR_ANTI_RANGE)
5474 if (TREE_CODE (*vr0max) == INTEGER_CST)
5476 *vr0type = vr1type;
5477 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5478 build_int_cst (TREE_TYPE (*vr0max), 1));
5479 *vr0max = vr1max;
5481 else
5482 goto give_up;
5484 else
5485 gcc_unreachable ();
5487 else if ((operand_less_p (*vr0min, vr1max) == 1
5488 || operand_equal_p (*vr0min, vr1max, 0))
5489 && operand_less_p (vr1min, *vr0min) == 1
5490 && operand_less_p (vr1max, *vr0max) == 1)
5492 /* ( [ ) ] or ( )[ ] */
5493 if (*vr0type == VR_RANGE
5494 && vr1type == VR_RANGE)
5495 *vr0min = vr1min;
5496 else if (*vr0type == VR_ANTI_RANGE
5497 && vr1type == VR_ANTI_RANGE)
5498 *vr0max = vr1max;
5499 else if (*vr0type == VR_ANTI_RANGE
5500 && vr1type == VR_RANGE)
5502 if (TREE_CODE (vr1max) == INTEGER_CST)
5503 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5504 build_int_cst (TREE_TYPE (vr1max), 1));
5505 else
5506 goto give_up;
5508 else if (*vr0type == VR_RANGE
5509 && vr1type == VR_ANTI_RANGE)
5511 if (TREE_CODE (*vr0min) == INTEGER_CST)
5513 *vr0type = vr1type;
5514 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5515 build_int_cst (TREE_TYPE (*vr0min), 1));
5516 *vr0min = vr1min;
5518 else
5519 goto give_up;
5521 else
5522 gcc_unreachable ();
5524 else
5525 goto give_up;
5527 return;
5529 give_up:
5530 *vr0type = VR_VARYING;
5531 *vr0min = NULL_TREE;
5532 *vr0max = NULL_TREE;
5535 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5536 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5537 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5538 possible such range. The resulting range is not canonicalized. */
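/* A few concrete instances (illustrative values only):

     [0, 9]  intersect  [5, 20]   ->  [5, 9]        overlapping ranges
     [0, 5]  intersect  [7, 9]    ->  VR_UNDEFINED  disjoint ranges
     [0, 9]  intersect  ~[0, 5]   ->  [6, 9]        the remaining right gap  */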
5540 static void
5541 intersect_ranges (enum value_range_type *vr0type,
5542 tree *vr0min, tree *vr0max,
5543 enum value_range_type vr1type,
5544 tree vr1min, tree vr1max)
5546 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5547 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5549 /* [] is vr0, () is vr1 in the following classification comments. */
5550 if (mineq && maxeq)
5552 /* [( )] */
5553 if (*vr0type == vr1type)
5554 /* Nothing to do for equal ranges. */
5556 else if ((*vr0type == VR_RANGE
5557 && vr1type == VR_ANTI_RANGE)
5558 || (*vr0type == VR_ANTI_RANGE
5559 && vr1type == VR_RANGE))
5561 /* For the intersection of an anti-range with a range the result is empty. */
5562 *vr0type = VR_UNDEFINED;
5563 *vr0min = NULL_TREE;
5564 *vr0max = NULL_TREE;
5566 else
5567 gcc_unreachable ();
5569 else if (operand_less_p (*vr0max, vr1min) == 1
5570 || operand_less_p (vr1max, *vr0min) == 1)
5572 /* [ ] ( ) or ( ) [ ]
5573 If the ranges have an empty intersection, the result of the
5574 intersect operation is the range when intersecting an
5575 anti-range with a range, or empty when intersecting two ranges. */
5576 if (*vr0type == VR_RANGE
5577 && vr1type == VR_ANTI_RANGE)
5579 else if (*vr0type == VR_ANTI_RANGE
5580 && vr1type == VR_RANGE)
5582 *vr0type = vr1type;
5583 *vr0min = vr1min;
5584 *vr0max = vr1max;
5586 else if (*vr0type == VR_RANGE
5587 && vr1type == VR_RANGE)
5589 *vr0type = VR_UNDEFINED;
5590 *vr0min = NULL_TREE;
5591 *vr0max = NULL_TREE;
5593 else if (*vr0type == VR_ANTI_RANGE
5594 && vr1type == VR_ANTI_RANGE)
5596 /* If the anti-ranges are adjacent to each other merge them. */
5597 if (TREE_CODE (*vr0max) == INTEGER_CST
5598 && TREE_CODE (vr1min) == INTEGER_CST
5599 && operand_less_p (*vr0max, vr1min) == 1
5600 && integer_onep (int_const_binop (MINUS_EXPR,
5601 vr1min, *vr0max)))
5602 *vr0max = vr1max;
5603 else if (TREE_CODE (vr1max) == INTEGER_CST
5604 && TREE_CODE (*vr0min) == INTEGER_CST
5605 && operand_less_p (vr1max, *vr0min) == 1
5606 && integer_onep (int_const_binop (MINUS_EXPR,
5607 *vr0min, vr1max)))
5608 *vr0min = vr1min;
5609 /* Else arbitrarily take VR0. */
5612 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5613 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5615 /* [ ( ) ] or [( ) ] or [ ( )] */
5616 if (*vr0type == VR_RANGE
5617 && vr1type == VR_RANGE)
5619 /* If both are ranges the result is the inner one. */
5620 *vr0type = vr1type;
5621 *vr0min = vr1min;
5622 *vr0max = vr1max;
5624 else if (*vr0type == VR_RANGE
5625 && vr1type == VR_ANTI_RANGE)
5627 /* Choose the right gap if the left one is empty. */
5628 if (mineq)
5630 if (TREE_CODE (vr1max) != INTEGER_CST)
5631 *vr0min = vr1max;
5632 else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
5633 && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
5634 *vr0min
5635 = int_const_binop (MINUS_EXPR, vr1max,
5636 build_int_cst (TREE_TYPE (vr1max), -1));
5637 else
5638 *vr0min
5639 = int_const_binop (PLUS_EXPR, vr1max,
5640 build_int_cst (TREE_TYPE (vr1max), 1));
5642 /* Choose the left gap if the right one is empty. */
5643 else if (maxeq)
5645 if (TREE_CODE (vr1min) != INTEGER_CST)
5646 *vr0max = vr1min;
5647 else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
5648 && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
5649 *vr0max
5650 = int_const_binop (PLUS_EXPR, vr1min,
5651 build_int_cst (TREE_TYPE (vr1min), -1));
5652 else
5653 *vr0max
5654 = int_const_binop (MINUS_EXPR, vr1min,
5655 build_int_cst (TREE_TYPE (vr1min), 1));
5657 /* Choose the anti-range if the range is effectively varying. */
5658 else if (vrp_val_is_min (*vr0min)
5659 && vrp_val_is_max (*vr0max))
5661 *vr0type = vr1type;
5662 *vr0min = vr1min;
5663 *vr0max = vr1max;
5665 /* Else choose the range. */
5667 else if (*vr0type == VR_ANTI_RANGE
5668 && vr1type == VR_ANTI_RANGE)
5669 /* If both are anti-ranges the result is the outer one. */
5671 else if (*vr0type == VR_ANTI_RANGE
5672 && vr1type == VR_RANGE)
5674 /* The intersection is empty. */
5675 *vr0type = VR_UNDEFINED;
5676 *vr0min = NULL_TREE;
5677 *vr0max = NULL_TREE;
5679 else
5680 gcc_unreachable ();
5682 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5683 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5685 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5686 if (*vr0type == VR_RANGE
5687 && vr1type == VR_RANGE)
5688 /* Choose the inner range. */
5690 else if (*vr0type == VR_ANTI_RANGE
5691 && vr1type == VR_RANGE)
5693 /* Choose the right gap if the left is empty. */
5694 if (mineq)
5696 *vr0type = VR_RANGE;
5697 if (TREE_CODE (*vr0max) != INTEGER_CST)
5698 *vr0min = *vr0max;
5699 else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
5700 && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
5701 *vr0min
5702 = int_const_binop (MINUS_EXPR, *vr0max,
5703 build_int_cst (TREE_TYPE (*vr0max), -1));
5704 else
5705 *vr0min
5706 = int_const_binop (PLUS_EXPR, *vr0max,
5707 build_int_cst (TREE_TYPE (*vr0max), 1));
5708 *vr0max = vr1max;
5710 /* Choose the left gap if the right is empty. */
5711 else if (maxeq)
5713 *vr0type = VR_RANGE;
5714 if (TREE_CODE (*vr0min) != INTEGER_CST)
5715 *vr0max = *vr0min;
5716 else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
5717 && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
5718 *vr0max
5719 = int_const_binop (PLUS_EXPR, *vr0min,
5720 build_int_cst (TREE_TYPE (*vr0min), -1));
5721 else
5722 *vr0max
5723 = int_const_binop (MINUS_EXPR, *vr0min,
5724 build_int_cst (TREE_TYPE (*vr0min), 1));
5725 *vr0min = vr1min;
5727 /* Choose the anti-range if the range is effectively varying. */
5728 else if (vrp_val_is_min (vr1min)
5729 && vrp_val_is_max (vr1max))
5731 /* Choose the anti-range if it is ~[0,0]; that range is special
5732 enough to special-case when VR1's range is relatively wide,
5733 at least for types as wide as int; this covers pointers
5734 and arguments to functions like ctz. */
5735 else if (*vr0min == *vr0max
5736 && integer_zerop (*vr0min)
5737 && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
5738 >= TYPE_PRECISION (integer_type_node))
5739 || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
5740 && TREE_CODE (vr1max) == INTEGER_CST
5741 && TREE_CODE (vr1min) == INTEGER_CST
5742 && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
5743 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
5745 /* Else choose the range. */
5746 else
5748 *vr0type = vr1type;
5749 *vr0min = vr1min;
5750 *vr0max = vr1max;
5753 else if (*vr0type == VR_ANTI_RANGE
5754 && vr1type == VR_ANTI_RANGE)
5756 /* If both are anti-ranges the result is the outer one. */
5757 *vr0type = vr1type;
5758 *vr0min = vr1min;
5759 *vr0max = vr1max;
5761 else if (vr1type == VR_ANTI_RANGE
5762 && *vr0type == VR_RANGE)
5764 /* The intersection is empty. */
5765 *vr0type = VR_UNDEFINED;
5766 *vr0min = NULL_TREE;
5767 *vr0max = NULL_TREE;
5769 else
5770 gcc_unreachable ();
5772 else if ((operand_less_p (vr1min, *vr0max) == 1
5773 || operand_equal_p (vr1min, *vr0max, 0))
5774 && operand_less_p (*vr0min, vr1min) == 1)
5776 /* [ ( ] ) or [ ]( ) */
5777 if (*vr0type == VR_ANTI_RANGE
5778 && vr1type == VR_ANTI_RANGE)
5779 *vr0max = vr1max;
5780 else if (*vr0type == VR_RANGE
5781 && vr1type == VR_RANGE)
5782 *vr0min = vr1min;
5783 else if (*vr0type == VR_RANGE
5784 && vr1type == VR_ANTI_RANGE)
5786 if (TREE_CODE (vr1min) == INTEGER_CST)
5787 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5788 build_int_cst (TREE_TYPE (vr1min), 1));
5789 else
5790 *vr0max = vr1min;
5792 else if (*vr0type == VR_ANTI_RANGE
5793 && vr1type == VR_RANGE)
5795 *vr0type = VR_RANGE;
5796 if (TREE_CODE (*vr0max) == INTEGER_CST)
5797 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5798 build_int_cst (TREE_TYPE (*vr0max), 1));
5799 else
5800 *vr0min = *vr0max;
5801 *vr0max = vr1max;
5802 }
5803 else
5804 gcc_unreachable ();
5805 }
5806 else if ((operand_less_p (*vr0min, vr1max) == 1
5807 || operand_equal_p (*vr0min, vr1max, 0))
5808 && operand_less_p (vr1min, *vr0min) == 1)
5809 {
5810 /* ( [ ) ] or ( )[ ] */
5811 if (*vr0type == VR_ANTI_RANGE
5812 && vr1type == VR_ANTI_RANGE)
5813 *vr0min = vr1min;
5814 else if (*vr0type == VR_RANGE
5815 && vr1type == VR_RANGE)
5816 *vr0max = vr1max;
5817 else if (*vr0type == VR_RANGE
5818 && vr1type == VR_ANTI_RANGE)
5819 {
5820 if (TREE_CODE (vr1max) == INTEGER_CST)
5821 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5822 build_int_cst (TREE_TYPE (vr1max), 1));
5823 else
5824 *vr0min = vr1max;
5825 }
5826 else if (*vr0type == VR_ANTI_RANGE
5827 && vr1type == VR_RANGE)
5828 {
5829 *vr0type = VR_RANGE;
5830 if (TREE_CODE (*vr0min) == INTEGER_CST)
5831 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5832 build_int_cst (TREE_TYPE (*vr0min), 1));
5833 else
5834 *vr0max = *vr0min;
5835 *vr0min = vr1min;
5836 }
5837 else
5838 gcc_unreachable ();
5839 }
5841 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
5842 result for the intersection. That's always a conservative
5843 correct estimate unless VR1 is a constant singleton range
5844 in which case we choose that. */
5845 if (vr1type == VR_RANGE
5846 && is_gimple_min_invariant (vr1min)
5847 && vrp_operand_equal_p (vr1min, vr1max))
5848 {
5849 *vr0type = vr1type;
5850 *vr0min = vr1min;
5851 *vr0max = vr1max;
5852 }
5854 return;
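/* For illustration (cases derived from the arms above, with small example
   bounds): intersecting the range [5, 10] with the wider range [0, 20]
   takes the "( [ ] )" arm with both operands VR_RANGE and keeps the inner
   range [5, 10]; intersecting the anti-range ~[5, 10] with the range
   [0, 20] cannot represent [0, 4] U [11, 20] as a single range, so the
   "else choose the range" case conservatively yields [0, 20]; and
   intersecting [0, 10] with [5, 20] takes the "[ ( ] )" arm and tightens
   the minimum, giving [5, 10]. */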
5858 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
5859 in *VR0. This may not be the smallest possible such range. */
5861 static void
5862 vrp_intersect_ranges_1 (value_range *vr0, const value_range *vr1)
5864 value_range saved;
5866 /* If either range is VR_VARYING the other one wins. */
5867 if (vr1->type == VR_VARYING)
5868 return;
5869 if (vr0->type == VR_VARYING)
5871 copy_value_range (vr0, vr1);
5872 return;
5875 /* When either range is VR_UNDEFINED the resulting range is
5876 VR_UNDEFINED, too. */
5877 if (vr0->type == VR_UNDEFINED)
5878 return;
5879 if (vr1->type == VR_UNDEFINED)
5881 set_value_range_to_undefined (vr0);
5882 return;
5885 /* Save the original vr0 so we can return it as conservative intersection
5886 result when our worker turns things to varying. */
5887 saved = *vr0;
5888 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
5889 vr1->type, vr1->min, vr1->max);
5890 /* Make sure to canonicalize the result though as the inversion of a
5891 VR_RANGE can still be a VR_RANGE. */
5892 set_and_canonicalize_value_range (vr0, vr0->type,
5893 vr0->min, vr0->max, vr0->equiv);
5894 /* If that failed, use the saved original VR0. */
5895 if (vr0->type == VR_VARYING)
5897 *vr0 = saved;
5898 return;
5900 /* If the result is VR_UNDEFINED there is no need to mess with
5901 the equivalencies. */
5902 if (vr0->type == VR_UNDEFINED)
5903 return;
5905 /* The resulting set of equivalences for range intersection is the union of
5906 the two sets. */
5907 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
5908 bitmap_ior_into (vr0->equiv, vr1->equiv);
5909 else if (vr1->equiv && !vr0->equiv)
5911 /* All equivalence bitmaps are allocated from the same obstack. So
5912 we can use the obstack associated with VR1->EQUIV to allocate vr0->equiv. */
5913 vr0->equiv = BITMAP_ALLOC (vr1->equiv->obstack);
5914 bitmap_copy (vr0->equiv, vr1->equiv);
5918 void
5919 vrp_intersect_ranges (value_range *vr0, const value_range *vr1)
5921 if (dump_file && (dump_flags & TDF_DETAILS))
5923 fprintf (dump_file, "Intersecting\n ");
5924 dump_value_range (dump_file, vr0);
5925 fprintf (dump_file, "\nand\n ");
5926 dump_value_range (dump_file, vr1);
5927 fprintf (dump_file, "\n");
5929 vrp_intersect_ranges_1 (vr0, vr1);
5930 if (dump_file && (dump_flags & TDF_DETAILS))
5932 fprintf (dump_file, "to\n ");
5933 dump_value_range (dump_file, vr0);
5934 fprintf (dump_file, "\n");
5938 /* Meet operation for value ranges. Given two value ranges VR0 and
5939 VR1, store in VR0 a range that contains both VR0 and VR1. This
5940 may not be the smallest possible such range. */
5942 static void
5943 vrp_meet_1 (value_range *vr0, const value_range *vr1)
5945 value_range saved;
5947 if (vr0->type == VR_UNDEFINED)
5949 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
5950 return;
5953 if (vr1->type == VR_UNDEFINED)
5955 /* VR0 already has the resulting range. */
5956 return;
5959 if (vr0->type == VR_VARYING)
5961 /* Nothing to do. VR0 already has the resulting range. */
5962 return;
5965 if (vr1->type == VR_VARYING)
5967 set_value_range_to_varying (vr0);
5968 return;
5971 saved = *vr0;
5972 union_ranges (&vr0->type, &vr0->min, &vr0->max,
5973 vr1->type, vr1->min, vr1->max);
5974 if (vr0->type == VR_VARYING)
5976 /* Failed to find an efficient meet. Before giving up and setting
5977 the result to VARYING, see if we can at least derive a useful
5978 anti-range. */
5979 if (range_includes_zero_p (&saved) == 0
5980 && range_includes_zero_p (vr1) == 0)
5982 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
5984 /* Since this meet operation did not result from the meeting of
5985 two equivalent names, VR0 cannot have any equivalences. */
5986 if (vr0->equiv)
5987 bitmap_clear (vr0->equiv);
5988 return;
5991 set_value_range_to_varying (vr0);
5992 return;
5994 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
5995 vr0->equiv);
5996 if (vr0->type == VR_VARYING)
5997 return;
5999 /* The resulting set of equivalences is always the intersection of
6000 the two sets. */
6001 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6002 bitmap_and_into (vr0->equiv, vr1->equiv);
6003 else if (vr0->equiv && !vr1->equiv)
6004 bitmap_clear (vr0->equiv);
6007 void
6008 vrp_meet (value_range *vr0, const value_range *vr1)
6010 if (dump_file && (dump_flags & TDF_DETAILS))
6012 fprintf (dump_file, "Meeting\n ");
6013 dump_value_range (dump_file, vr0);
6014 fprintf (dump_file, "\nand\n ");
6015 dump_value_range (dump_file, vr1);
6016 fprintf (dump_file, "\n");
6018 vrp_meet_1 (vr0, vr1);
6019 if (dump_file && (dump_flags & TDF_DETAILS))
6021 fprintf (dump_file, "to\n ");
6022 dump_value_range (dump_file, vr0);
6023 fprintf (dump_file, "\n");
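/* For illustration: meeting an undefined range with [1, 5] yields [1, 5],
   and meeting anything with a varying range yields varying, as handled
   directly in vrp_meet_1 above.  Meeting two disjoint ranges such as
   [1, 5] and [10, 20] relies on union_ranges (defined earlier in this
   file), which typically returns a conservative enclosing range like
   [1, 20]; failing that, vrp_meet_1 falls back to a nonnull anti-range
   or to varying. */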
6028 /* Visit all arguments for PHI node PHI that flow through executable
6029 edges. If a valid value range can be derived from all the incoming
6030 value ranges, set a new range for the LHS of PHI. */
6032 enum ssa_prop_result
6033 vrp_prop::visit_phi (gphi *phi)
6035 tree lhs = PHI_RESULT (phi);
6036 value_range vr_result = VR_INITIALIZER;
6037 extract_range_from_phi_node (phi, &vr_result);
6038 if (update_value_range (lhs, &vr_result))
6040 if (dump_file && (dump_flags & TDF_DETAILS))
6042 fprintf (dump_file, "Found new range for ");
6043 print_generic_expr (dump_file, lhs);
6044 fprintf (dump_file, ": ");
6045 dump_value_range (dump_file, &vr_result);
6046 fprintf (dump_file, "\n");
6049 if (vr_result.type == VR_VARYING)
6050 return SSA_PROP_VARYING;
6052 return SSA_PROP_INTERESTING;
6055 /* Nothing changed, don't add outgoing edges. */
6056 return SSA_PROP_NOT_INTERESTING;
6059 class vrp_folder : public substitute_and_fold_engine
6061 public:
6062 tree get_value (tree) FINAL OVERRIDE;
6063 bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
6064 bool fold_predicate_in (gimple_stmt_iterator *);
6066 class vr_values *vr_values;
6068 /* Delegators. */
6069 tree vrp_evaluate_conditional (tree_code code, tree op0,
6070 tree op1, gimple *stmt)
6071 { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
6072 bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
6073 { return vr_values->simplify_stmt_using_ranges (gsi); }
6074 tree op_with_constant_singleton_value_range (tree op)
6075 { return vr_values->op_with_constant_singleton_value_range (op); }
6078 /* If the statement pointed by SI has a predicate whose value can be
6079 computed using the value range information computed by VRP, compute
6080 its value and return true. Otherwise, return false. */
6082 bool
6083 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
6085 bool assignment_p = false;
6086 tree val;
6087 gimple *stmt = gsi_stmt (*si);
6089 if (is_gimple_assign (stmt)
6090 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
6092 assignment_p = true;
6093 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
6094 gimple_assign_rhs1 (stmt),
6095 gimple_assign_rhs2 (stmt),
6096 stmt);
6098 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6099 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6100 gimple_cond_lhs (cond_stmt),
6101 gimple_cond_rhs (cond_stmt),
6102 stmt);
6103 else
6104 return false;
6106 if (val)
6108 if (assignment_p)
6109 val = fold_convert (gimple_expr_type (stmt), val);
6111 if (dump_file)
6113 fprintf (dump_file, "Folding predicate ");
6114 print_gimple_expr (dump_file, stmt, 0);
6115 fprintf (dump_file, " to ");
6116 print_generic_expr (dump_file, val);
6117 fprintf (dump_file, "\n");
6120 if (is_gimple_assign (stmt))
6121 gimple_assign_set_rhs_from_tree (si, val);
6122 else
6124 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
6125 gcond *cond_stmt = as_a <gcond *> (stmt);
6126 if (integer_zerop (val))
6127 gimple_cond_make_false (cond_stmt);
6128 else if (integer_onep (val))
6129 gimple_cond_make_true (cond_stmt);
6130 else
6131 gcc_unreachable ();
6134 return true;
6137 return false;
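/* For example, if i_2 has the computed range [0, 9], the predicate in
   "if (i_2 > 10)" evaluates to a constant false here and the condition is
   rewritten via gimple_cond_make_false; for a comparison on the RHS of an
   assignment, the constant result is instead converted to the expression
   type and substituted with gimple_assign_set_rhs_from_tree. */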
6140 /* Callback for substitute_and_fold folding the stmt at *SI. */
6142 bool
6143 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
6145 if (fold_predicate_in (si))
6146 return true;
6148 return simplify_stmt_using_ranges (si);
6151 /* If OP has a value range with a single constant value return that,
6152 otherwise return NULL_TREE. This returns OP itself if OP is a
6153 constant.
6155 Implemented as a pure wrapper right now, but this will change. */
6157 tree
6158 vrp_folder::get_value (tree op)
6160 return op_with_constant_singleton_value_range (op);
6163 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
6164 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
6165 BB. If no such ASSERT_EXPR is found, return OP. */
6167 static tree
6168 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
6170 imm_use_iterator imm_iter;
6171 gimple *use_stmt;
6172 use_operand_p use_p;
6174 if (TREE_CODE (op) == SSA_NAME)
6176 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
6178 use_stmt = USE_STMT (use_p);
6179 if (use_stmt != stmt
6180 && gimple_assign_single_p (use_stmt)
6181 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
6182 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
6183 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
6184 return gimple_assign_lhs (use_stmt);
6187 return op;
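/* For instance, if BB is dominated by the definition
     x_7 = ASSERT_EXPR <x_3, x_3 != 0>
   then looking up x_3 here returns x_7, whose recorded range reflects the
   assertion; when no dominating ASSERT_EXPR exists, the operand itself is
   returned unchanged. */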
6190 /* A hack. */
6191 static class vr_values *x_vr_values;
6193 /* A trivial wrapper so that we can present the generic jump threading
6194 code with a simple API for simplifying statements. STMT is the
6195 statement we want to simplify; WITHIN_STMT provides the location
6196 for any overflow warnings. */
6198 static tree
6199 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
6200 class avail_exprs_stack *avail_exprs_stack,
6201 basic_block bb)
6203 /* First see if the conditional is in the hash table. */
6204 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
6205 if (cached_lhs && is_gimple_min_invariant (cached_lhs))
6206 return cached_lhs;
6208 vr_values *vr_values = x_vr_values;
6209 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6211 tree op0 = gimple_cond_lhs (cond_stmt);
6212 op0 = lhs_of_dominating_assert (op0, bb, stmt);
6214 tree op1 = gimple_cond_rhs (cond_stmt);
6215 op1 = lhs_of_dominating_assert (op1, bb, stmt);
6217 return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6218 op0, op1, within_stmt);
6221 /* We simplify a switch statement by trying to determine which case label
6222 will be taken. If we are successful then we return the corresponding
6223 CASE_LABEL_EXPR. */
6224 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
6226 tree op = gimple_switch_index (switch_stmt);
6227 if (TREE_CODE (op) != SSA_NAME)
6228 return NULL_TREE;
6230 op = lhs_of_dominating_assert (op, bb, stmt);
6232 const value_range *vr = vr_values->get_value_range (op);
6233 if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
6234 || symbolic_range_p (vr))
6235 return NULL_TREE;
6237 if (vr->type == VR_RANGE)
6239 size_t i, j;
6240 /* Get the range of labels that contain a part of the operand's
6241 value range. */
6242 find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);
6244 /* Is there only one such label? */
6245 if (i == j)
6247 tree label = gimple_switch_label (switch_stmt, i);
6249 /* The i'th label will be taken only if the value range of the
6250 operand is entirely within the bounds of this label. */
6251 if (CASE_HIGH (label) != NULL_TREE
6252 ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
6253 && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
6254 : (tree_int_cst_equal (CASE_LOW (label), vr->min)
6255 && tree_int_cst_equal (vr->min, vr->max)))
6256 return label;
6259 /* If there are no such labels then the default label will be
6260 taken. */
6261 if (i > j)
6262 return gimple_switch_label (switch_stmt, 0);
6265 if (vr->type == VR_ANTI_RANGE)
6267 unsigned n = gimple_switch_num_labels (switch_stmt);
6268 tree min_label = gimple_switch_label (switch_stmt, 1);
6269 tree max_label = gimple_switch_label (switch_stmt, n - 1);
6271 /* The default label will be taken only if the anti-range of the
6272 operand is entirely outside the bounds of all the (non-default)
6273 case labels. */
6274 if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
6275 && (CASE_HIGH (max_label) != NULL_TREE
6276 ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
6277 : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
6278 return gimple_switch_label (switch_stmt, 0);
6281 return NULL_TREE;
6284 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
6286 tree lhs = gimple_assign_lhs (assign_stmt);
6287 if (TREE_CODE (lhs) == SSA_NAME
6288 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6289 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6290 && stmt_interesting_for_vrp (stmt))
6292 edge dummy_e;
6293 tree dummy_tree;
6294 value_range new_vr = VR_INITIALIZER;
6295 vr_values->extract_range_from_stmt (stmt, &dummy_e,
6296 &dummy_tree, &new_vr);
6297 if (range_int_cst_singleton_p (&new_vr))
6298 return new_vr.min;
6302 return NULL_TREE;
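/* For example, when the switch index has the singleton range [3, 3] and a
   case label covers exactly the value 3, find_case_label_range reports a
   single matching label and that CASE_LABEL_EXPR is returned so the jump
   threader can bypass the switch; similarly, a GIMPLE_COND whose operand
   ranges determine the comparison simplifies to a constant via
   vrp_evaluate_conditional. */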
6305 class vrp_dom_walker : public dom_walker
6307 public:
6308 vrp_dom_walker (cdi_direction direction,
6309 class const_and_copies *const_and_copies,
6310 class avail_exprs_stack *avail_exprs_stack)
6311 : dom_walker (direction, REACHABLE_BLOCKS),
6312 m_const_and_copies (const_and_copies),
6313 m_avail_exprs_stack (avail_exprs_stack),
6314 m_dummy_cond (NULL) {}
6316 virtual edge before_dom_children (basic_block);
6317 virtual void after_dom_children (basic_block);
6319 class vr_values *vr_values;
6321 private:
6322 class const_and_copies *m_const_and_copies;
6323 class avail_exprs_stack *m_avail_exprs_stack;
6325 gcond *m_dummy_cond;
6329 /* Called before processing dominator children of BB. We want to look
6330 at ASSERT_EXPRs and record information from them in the appropriate
6331 tables.
6333 We could look at other statements here. It's not seen as likely
6334 to significantly increase the jump threads we discover. */
6336 edge
6337 vrp_dom_walker::before_dom_children (basic_block bb)
6339 gimple_stmt_iterator gsi;
6341 m_avail_exprs_stack->push_marker ();
6342 m_const_and_copies->push_marker ();
6343 for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6345 gimple *stmt = gsi_stmt (gsi);
6346 if (gimple_assign_single_p (stmt)
6347 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
6349 tree rhs1 = gimple_assign_rhs1 (stmt);
6350 tree cond = TREE_OPERAND (rhs1, 1);
6351 tree inverted = invert_truthvalue (cond);
6352 vec<cond_equivalence> p;
6353 p.create (3);
6354 record_conditions (&p, cond, inverted);
6355 for (unsigned int i = 0; i < p.length (); i++)
6356 m_avail_exprs_stack->record_cond (&p[i]);
6358 tree lhs = gimple_assign_lhs (stmt);
6359 m_const_and_copies->record_const_or_copy (lhs,
6360 TREE_OPERAND (rhs1, 0));
6361 p.release ();
6362 continue;
6364 break;
6366 return NULL;
6369 /* Called after processing dominator children of BB. This is where we
6370 actually call into the threader. */
6371 void
6372 vrp_dom_walker::after_dom_children (basic_block bb)
6374 if (!m_dummy_cond)
6375 m_dummy_cond = gimple_build_cond (NE_EXPR,
6376 integer_zero_node, integer_zero_node,
6377 NULL, NULL);
6379 x_vr_values = vr_values;
6380 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
6381 m_avail_exprs_stack, NULL,
6382 simplify_stmt_for_jump_threading);
6383 x_vr_values = NULL;
6385 m_avail_exprs_stack->pop_to_marker ();
6386 m_const_and_copies->pop_to_marker ();
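/* Note that x_vr_values is set only for the duration of
   thread_outgoing_edges so that the simplify_stmt_for_jump_threading
   callback can reach this walker's vr_values instance; it is cleared
   again immediately afterwards. */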
6389 /* Blocks which have more than one predecessor and more than
6390 one successor present jump threading opportunities, i.e.,
6391 when the block is reached from a specific predecessor, we
6392 may be able to determine which of the outgoing edges will
6393 be traversed. When this optimization applies, we are able
6394 to avoid conditionals at runtime and we may expose secondary
6395 optimization opportunities.
6397 This routine is effectively a driver for the generic jump
6398 threading code. It basically just presents the generic code
6399 with edges that may be suitable for jump threading.
6401 Unlike DOM, we do not iterate VRP if jump threading was successful.
6402 While iterating may expose new opportunities for VRP, it is expected
6403 those opportunities would be very limited and the compile time cost
6404 to expose those opportunities would be significant.
6406 As jump threading opportunities are discovered, they are registered
6407 for later realization. */
6409 static void
6410 identify_jump_threads (class vr_values *vr_values)
6412 int i;
6413 edge e;
6415 /* Ugh. When substituting values earlier in this pass we can
6416 wipe the dominance information. So rebuild the dominator
6417 information as we need it within the jump threading code. */
6418 calculate_dominance_info (CDI_DOMINATORS);
6420 /* We do not allow VRP information to be used for jump threading
6421 across a back edge in the CFG. Otherwise it becomes too
6422 difficult to avoid eliminating loop exit tests. Of course
6423 EDGE_DFS_BACK is not accurate at this time so we have to
6424 recompute it. */
6425 mark_dfs_back_edges ();
6427 /* Do not thread across edges we are about to remove. Just marking
6428 them as EDGE_IGNORE will do. */
6429 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
6430 e->flags |= EDGE_IGNORE;
6432 /* Allocate our unwinder stack to unwind any temporary equivalences
6433 that might be recorded. */
6434 const_and_copies *equiv_stack = new const_and_copies ();
6436 hash_table<expr_elt_hasher> *avail_exprs
6437 = new hash_table<expr_elt_hasher> (1024);
6438 avail_exprs_stack *avail_exprs_stack
6439 = new class avail_exprs_stack (avail_exprs);
6441 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
6442 walker.vr_values = vr_values;
6443 walker.walk (cfun->cfg->x_entry_block_ptr);
6445 /* Clear EDGE_IGNORE. */
6446 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
6447 e->flags &= ~EDGE_IGNORE;
6449 /* We do not actually update the CFG or SSA graphs at this point as
6450 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
6451 handle ASSERT_EXPRs gracefully. */
6452 delete equiv_stack;
6453 delete avail_exprs;
6454 delete avail_exprs_stack;
6457 /* Traverse all the blocks folding conditionals with known ranges. */
6459 void
6460 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
6462 size_t i;
6464 /* We have completed propagating through the lattice. */
6465 vr_values.set_lattice_propagation_complete ();
6467 if (dump_file)
6469 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
6470 vr_values.dump_all_value_ranges (dump_file);
6471 fprintf (dump_file, "\n");
6474 /* Set value range to non pointer SSA_NAMEs. */
6475 for (i = 0; i < num_ssa_names; i++)
6477 tree name = ssa_name (i);
6478 if (!name)
6479 continue;
6481 const value_range *vr = get_value_range (name);
6482 if (!name
6483 || (vr->type == VR_VARYING)
6484 || (vr->type == VR_UNDEFINED)
6485 || (TREE_CODE (vr->min) != INTEGER_CST)
6486 || (TREE_CODE (vr->max) != INTEGER_CST))
6487 continue;
6489 if (POINTER_TYPE_P (TREE_TYPE (name))
6490 && range_includes_zero_p (vr) == 0)
6491 set_ptr_nonnull (name);
6492 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
6493 set_range_info (name, vr->type,
6494 wi::to_wide (vr->min),
6495 wi::to_wide (vr->max));
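/* For example, an integral SSA name whose final range is [0, 255] has
   that range recorded with set_range_info for use by later passes, while
   a pointer whose range excludes zero is marked via set_ptr_nonnull. */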
6498 /* If we're checking array refs, we want to merge information on
6499 the executability of each edge between vrp_folder and the
6500 check_array_bounds_dom_walker: each can clear the
6501 EDGE_EXECUTABLE flag on edges, in different ways.
6503 Hence, if we're going to call check_all_array_refs, set
6504 the flag on every edge now, rather than in
6505 check_array_bounds_dom_walker's ctor; vrp_folder may clear
6506 it from some edges. */
6507 if (warn_array_bounds && warn_array_bounds_p)
6508 set_all_edges_as_executable (cfun);
6510 class vrp_folder vrp_folder;
6511 vrp_folder.vr_values = &vr_values;
6512 vrp_folder.substitute_and_fold ();
6514 if (warn_array_bounds && warn_array_bounds_p)
6515 check_all_array_refs ();
6518 /* Main entry point to VRP (Value Range Propagation). This pass is
6519 loosely based on J. R. C. Patterson, ``Accurate Static Branch
6520 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
6521 Programming Language Design and Implementation, pp. 67-78, 1995.
6522 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
6524 This is essentially an SSA-CCP pass modified to deal with ranges
6525 instead of constants.
6527 While propagating ranges, we may find that two or more SSA names
6528 have equivalent, though distinct ranges. For instance,
6530 1 x_9 = p_3->a;
6531 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
6532 3 if (p_4 == q_2)
6533 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
6534 5 endif
6535 6 if (q_2)
6537 In the code above, pointer p_5 has range [q_2, q_2], but from the
6538 code we can also determine that p_5 cannot be NULL and, if q_2 had
6539 a non-varying range, p_5's range should also be compatible with it.
6541 These equivalences are created by two expressions: ASSERT_EXPR and
6542 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
6543 result of another assertion, then we can use the fact that p_5 and
6544 p_4 are equivalent when evaluating p_5's range.
6546 Together with value ranges, we also propagate these equivalences
6547 between names so that we can take advantage of information from
6548 multiple ranges when doing final replacement. Note that this
6549 equivalency relation is transitive but not symmetric.
6551 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
6552 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
6553 in contexts where that assertion does not hold (e.g., in line 6).
6555 TODO, the main difference between this pass and Patterson's is that
6556 we do not propagate edge probabilities. We only compute whether
6557 edges can be taken or not. That is, instead of having a spectrum
6558 of jump probabilities between 0 and 1, we only deal with 0, 1 and
6559 DON'T KNOW. In the future, it may be worthwhile to propagate
6560 probabilities to aid branch prediction. */
6562 static unsigned int
6563 execute_vrp (bool warn_array_bounds_p)
6565 int i;
6566 edge e;
6567 switch_update *su;
6569 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
6570 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
6571 scev_initialize ();
6573 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
6574 Inserting assertions may split edges which will invalidate
6575 EDGE_DFS_BACK. */
6576 insert_range_assertions ();
6578 to_remove_edges.create (10);
6579 to_update_switch_stmts.create (5);
6580 threadedge_initialize_values ();
6582 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
6583 mark_dfs_back_edges ();
6585 class vrp_prop vrp_prop;
6586 vrp_prop.vrp_initialize ();
6587 vrp_prop.ssa_propagate ();
6588 vrp_prop.vrp_finalize (warn_array_bounds_p);
6590 /* We must identify jump threading opportunities before we release
6591 the datastructures built by VRP. */
6592 identify_jump_threads (&vrp_prop.vr_values);
6594 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
6595 was set by a type conversion can often be rewritten to use the
6596 RHS of the type conversion.
6598 However, doing so inhibits jump threading through the comparison.
6599 So that transformation is not performed until after jump threading
6600 is complete. */
6601 basic_block bb;
6602 FOR_EACH_BB_FN (bb, cfun)
6604 gimple *last = last_stmt (bb);
6605 if (last && gimple_code (last) == GIMPLE_COND)
6606 vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
6609 free_numbers_of_iterations_estimates (cfun);
6611 /* ASSERT_EXPRs must be removed before finalizing jump threads
6612 as finalizing jump threads calls the CFG cleanup code which
6613 does not properly handle ASSERT_EXPRs. */
6614 remove_range_assertions ();
6616 /* If we exposed any new variables, go ahead and put them into
6617 SSA form now, before we handle jump threading. This simplifies
6618 interactions between rewriting of _DECL nodes into SSA form
6619 and rewriting SSA_NAME nodes into SSA form after block
6620 duplication and CFG manipulation. */
6621 update_ssa (TODO_update_ssa);
6623 /* We identified all the jump threading opportunities earlier, but could
6624 not transform the CFG at that time. This routine transforms the
6625 CFG and arranges for the dominator tree to be rebuilt if necessary.
6627 Note the SSA graph update will occur during the normal TODO
6628 processing by the pass manager. */
6629 thread_through_all_blocks (false);
6631 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
6632 CFG in a broken state and requires a cfg_cleanup run. */
6633 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
6634 remove_edge (e);
6635 /* Update SWITCH_EXPR case label vector. */
6636 FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
6638 size_t j;
6639 size_t n = TREE_VEC_LENGTH (su->vec);
6640 tree label;
6641 gimple_switch_set_num_labels (su->stmt, n);
6642 for (j = 0; j < n; j++)
6643 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
6644 /* As we may have replaced the default label with a regular one,
6645 make sure to make it a real default label again. This ensures
6646 optimal expansion. */
6647 label = gimple_switch_label (su->stmt, 0);
6648 CASE_LOW (label) = NULL_TREE;
6649 CASE_HIGH (label) = NULL_TREE;
6652 if (to_remove_edges.length () > 0)
6654 free_dominance_info (CDI_DOMINATORS);
6655 loops_state_set (LOOPS_NEED_FIXUP);
6658 to_remove_edges.release ();
6659 to_update_switch_stmts.release ();
6660 threadedge_finalize_values ();
6662 scev_finalize ();
6663 loop_optimizer_finalize ();
6664 return 0;
6667 namespace {
6669 const pass_data pass_data_vrp =
6671 GIMPLE_PASS, /* type */
6672 "vrp", /* name */
6673 OPTGROUP_NONE, /* optinfo_flags */
6674 TV_TREE_VRP, /* tv_id */
6675 PROP_ssa, /* properties_required */
6676 0, /* properties_provided */
6677 0, /* properties_destroyed */
6678 0, /* todo_flags_start */
6679 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
6682 class pass_vrp : public gimple_opt_pass
6684 public:
6685 pass_vrp (gcc::context *ctxt)
6686 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
6689 /* opt_pass methods: */
6690 opt_pass * clone () { return new pass_vrp (m_ctxt); }
6691 void set_pass_param (unsigned int n, bool param)
6693 gcc_assert (n == 0);
6694 warn_array_bounds_p = param;
6696 virtual bool gate (function *) { return flag_tree_vrp != 0; }
6697 virtual unsigned int execute (function *)
6698 { return execute_vrp (warn_array_bounds_p); }
6700 private:
6701 bool warn_array_bounds_p;
6702 }; // class pass_vrp
6704 } // anon namespace
6706 gimple_opt_pass *
6707 make_pass_vrp (gcc::context *ctxt)
6709 return new pass_vrp (ctxt);
6713 /* Worker for determine_value_range. */
6715 static void
6716 determine_value_range_1 (value_range *vr, tree expr)
6718 if (BINARY_CLASS_P (expr))
6720 value_range vr0 = VR_INITIALIZER, vr1 = VR_INITIALIZER;
6721 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6722 determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
6723 extract_range_from_binary_expr_1 (vr, TREE_CODE (expr), TREE_TYPE (expr),
6724 &vr0, &vr1);
6726 else if (UNARY_CLASS_P (expr))
6728 value_range vr0 = VR_INITIALIZER;
6729 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6730 extract_range_from_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
6731 &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
6733 else if (TREE_CODE (expr) == INTEGER_CST)
6734 set_value_range_to_value (vr, expr, NULL);
6735 else
6737 value_range_type kind;
6738 wide_int min, max;
6739 /* For SSA names try to extract range info computed by VRP. Otherwise
6740 fall back to varying. */
6741 if (TREE_CODE (expr) == SSA_NAME
6742 && INTEGRAL_TYPE_P (TREE_TYPE (expr))
6743 && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
6744 set_value_range (vr, kind, wide_int_to_tree (TREE_TYPE (expr), min),
6745 wide_int_to_tree (TREE_TYPE (expr), max), NULL);
6746 else
6747 set_value_range_to_varying (vr);
6751 /* Compute a value-range for EXPR and set it in *MIN and *MAX. Return
6752 the determined range type. */
6754 value_range_type
6755 determine_value_range (tree expr, wide_int *min, wide_int *max)
6757 value_range vr = VR_INITIALIZER;
6758 determine_value_range_1 (&vr, expr);
6759 if ((vr.type == VR_RANGE
6760 || vr.type == VR_ANTI_RANGE)
6761 && !symbolic_range_p (&vr))
6763 *min = wi::to_wide (vr.min);
6764 *max = wi::to_wide (vr.max);
6765 return vr.type;
6768 return VR_VARYING;
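/* As an illustration, for an expression such as a_1 + 7 where range info
   [0, 10] was recorded for a_1, the recursion above combines [0, 10] with
   the singleton [7, 7] and this function returns VR_RANGE with *MIN = 7
   and *MAX = 17 (assuming the addition cannot overflow in its type). */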