/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "intl.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "target.h"
#include "case-cfn-macros.h"
#include "params.h"
#include "alloc-pool.h"
#include "domwalk.h"
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "attribs.h"
#include "vr-values.h"
#include "builtins.h"
#include "wide-int-range.h"

/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
          && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}

/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

/* Return the maximum value for TYPE.  */

tree
vrp_val_max (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MAX_VALUE (type);
}

/* Return the minimum value for TYPE.  */

tree
vrp_val_min (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MIN_VALUE (type);
}

/* Return whether VAL is equal to the maximum value of its type.
   We can't do a simple equality comparison with TYPE_MAX_VALUE because
   C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
   is not == to the integer constant with the same value in the type.  */

bool
vrp_val_is_max (const_tree val)
{
  tree type_max = vrp_val_max (TREE_TYPE (val));
  return (val == type_max
          || (type_max != NULL_TREE
              && operand_equal_p (val, type_max, 0)));
}

/* Return whether VAL is equal to the minimum value of its type.  */

bool
vrp_val_is_min (const_tree val)
{
  tree type_min = vrp_val_min (TREE_TYPE (val));
  return (val == type_min
          || (type_min != NULL_TREE
              && operand_equal_p (val, type_min, 0)));
}
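
/* Illustrative note (a sketch, not from the original sources): for a
   C typedef or an Ada subtype, the INTEGER_CST carrying the extreme
   value can be a distinct tree node from the type's TYPE_MAX_VALUE,
   with the same numeric value, so pointer equality alone would miss
   it.  E.g., with a hypothetical 8-bit unsigned subtype SOME_SUBTYPE:

     tree v = build_int_cst (some_subtype, 255);
     // v need not be the same node as TYPE_MAX_VALUE (some_subtype),
     // yet vrp_val_is_max (v) still returns true via operand_equal_p.  */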

/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */

enum value_range_type
intersect_range_with_nonzero_bits (enum value_range_type vr_type,
                                   wide_int *min, wide_int *max,
                                   const wide_int &nonzero_bits,
                                   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
         A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
         to create an inclusive upper bound for A and an inclusive lower
         bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
         and A_MAX is the highest value that satisfies NONZERO_BITS.
         Likewise if the calculation of B_MIN wrapped, B is effectively
         empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
        return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
         other one.  */
      if (a_empty || b_empty)
        {
          *min = b_min;
          *max = a_max;
          gcc_checking_assert (wi::le_p (*min, *max, sgn));
          return VR_RANGE;
        }

      /* Update the VR_ANTI_RANGE bounds.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
         satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
        {
          unsigned int precision = min->get_precision ();
          *min = wi::min_value (precision, sgn);
          *max = wi::max_value (precision, sgn);
          vr_type = VR_RANGE;
        }
    }
  if (vr_type == VR_RANGE)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
        return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}
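
/* Worked example (illustrative, values assumed): intersecting the
   range [0, 100] with NONZERO_BITS == 0xc (only bits 2 and 3 may be
   set, so the candidate values are {0, 4, 8, 12}):

     wi::round_up_for_mask (0, 0xc)     -> 0
     wi::round_down_for_mask (100, 0xc) -> 12

   so a VR_RANGE [0, 100] tightens to [0, 12].  */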

/* Set value range VR to VR_UNDEFINED.  */

static inline void
set_value_range_to_undefined (value_range *vr)
{
  vr->type = VR_UNDEFINED;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}

/* Set value range VR to VR_VARYING.  */

void
set_value_range_to_varying (value_range *vr)
{
  vr->type = VR_VARYING;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}

/* Set value range VR to {T, MIN, MAX, EQUIV}.  */

void
set_value_range (value_range *vr, enum value_range_type t, tree min,
                 tree max, bitmap equiv)
{
  /* Check the validity of the range.  */
  if (flag_checking
      && (t == VR_RANGE || t == VR_ANTI_RANGE))
    {
      int cmp;

      gcc_assert (min && max);

      gcc_assert (!TREE_OVERFLOW_P (min) && !TREE_OVERFLOW_P (max));

      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
        gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
    }

  if (flag_checking
      && (t == VR_UNDEFINED || t == VR_VARYING))
    {
      gcc_assert (min == NULL_TREE && max == NULL_TREE);
      gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
    }

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.

     All equivalence bitmaps are allocated from the same obstack.  So
     we can use the obstack associated with EQUIV to allocate vr->equiv.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (equiv->obstack);

  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
        bitmap_copy (vr->equiv, equiv);
      else
        bitmap_clear (vr->equiv);
    }
}

/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

void
set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
                                  tree min, tree max, bitmap equiv)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (t == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  else if (t == VR_VARYING)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* MIN and MAX are in the wrong order; to swap them we also need to
     adjust the VR type.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
         range covers all values, so for VR_RANGE it is varying and
         for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
        {
          set_value_range_to_varying (vr);
          return;
        }

      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
         that again.  But this represents an empty value range, so drop
         to varying in this case.  */
      if (tree_int_cst_lt (max, min))
        {
          set_value_range_to_varying (vr);
          return;
        }

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
    {
      /* For -fstrict-enums we may receive out-of-range ranges so consider
         values < -INF and values > INF as -INF/INF as well.  */
      tree type = TREE_TYPE (min);
      bool is_min = (INTEGRAL_TYPE_P (type)
                     && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
      bool is_max = (INTEGRAL_TYPE_P (type)
                     && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);

      if (is_min && is_max)
        {
          /* We cannot deal with empty ranges, drop to varying.
             ??? This could be VR_UNDEFINED instead.  */
          set_value_range_to_varying (vr);
          return;
        }
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
               && (is_min || is_max))
        {
          /* Non-empty boolean ranges can always be represented
             as a singleton range.  */
          if (is_min)
            min = max = vrp_val_max (TREE_TYPE (min));
          else
            min = max = vrp_val_min (TREE_TYPE (min));
          t = VR_RANGE;
        }
      else if (is_min
               /* As a special exception preserve non-null ranges.  */
               && !(TYPE_UNSIGNED (TREE_TYPE (min))
                    && integer_zerop (max)))
        {
          tree one = build_int_cst (TREE_TYPE (max), 1);
          min = int_const_binop (PLUS_EXPR, max, one);
          max = vrp_val_max (TREE_TYPE (max));
          t = VR_RANGE;
        }
      else if (is_max)
        {
          tree one = build_int_cst (TREE_TYPE (min), 1);
          max = int_const_binop (MINUS_EXPR, min, one);
          min = vrp_val_min (TREE_TYPE (min));
          t = VR_RANGE;
        }
    }

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */

  set_value_range (vr, t, min, max, equiv);
}
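
/* Illustrative sketch (values assumed): a wrapping unsigned char
   range [250, 5] covers {250..255} U {0..5}.  Canonicalization swaps
   the bounds and flips the kind, so

     set_and_canonicalize_value_range (&vr, VR_RANGE,
                                       build_int_cst (t, 250),
                                       build_int_cst (t, 5), NULL);

   with t == unsigned_char_type_node leaves vr as the anti-range
   ~[6, 249], which describes exactly the same set of values.  */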

/* Copy value range FROM into value range TO.  */

void
copy_value_range (value_range *to, const value_range *from)
{
  set_value_range (to, from->type, from->min, from->max, from->equiv);
}

/* Set value range VR to a single value.  This function is only called
   with values we get from statements, and exists to clear the
   TREE_OVERFLOW flag.  */

void
set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
{
  gcc_assert (is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set_value_range (vr, VR_RANGE, val, val, equiv);
}

/* Set value range VR to a non-NULL range of type TYPE.  */

void
set_value_range_to_nonnull (value_range *vr, tree type)
{
  tree zero = build_int_cst (type, 0);
  set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
}

/* Set value range VR to a NULL range of type TYPE.  */

void
set_value_range_to_null (value_range *vr, tree type)
{
  set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
}

/* Return true, if VAL1 and VAL2 are equal values for VRP purposes.  */

bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
  if (val1 == val2)
    return true;
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
    return false;
  return true;
}

/* Return true, if the bitmaps B1 and B2 are equal.  */

bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
          || ((!b1 || bitmap_empty_p (b1))
              && (!b2 || bitmap_empty_p (b2)))
          || (b1 && b2
              && bitmap_equal_p (b1, b2)));
}

/* Return true if VR is [0, 0].  */

static inline bool
range_is_null (const value_range *vr)
{
  return vr->type == VR_RANGE
         && integer_zerop (vr->min)
         && integer_zerop (vr->max);
}

/* Return true if max and min of VR are INTEGER_CST.  It need not be
   a singleton.  */

bool
range_int_cst_p (const value_range *vr)
{
  return (vr->type == VR_RANGE
          && TREE_CODE (vr->max) == INTEGER_CST
          && TREE_CODE (vr->min) == INTEGER_CST);
}

/* Return true if VR is an INTEGER_CST singleton.  */

bool
range_int_cst_singleton_p (const value_range *vr)
{
  return (range_int_cst_p (vr)
          && tree_int_cst_equal (vr->min, vr->max));
}

/* Return true if value range VR involves at least one symbol.  */

bool
symbolic_range_p (const value_range *vr)
{
  return (!is_gimple_min_invariant (vr->min)
          || !is_gimple_min_invariant (vr->max));
}

/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */

tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
        {
          neg_ = (TREE_CODE (t) == MINUS_EXPR);
          inv_ = TREE_OPERAND (t, 0);
          t = TREE_OPERAND (t, 1);
        }
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
        {
          neg_ = false;
          inv_ = TREE_OPERAND (t, 1);
          t = TREE_OPERAND (t, 0);
        }
      else
        return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}
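
/* Illustrative examples (assumed inputs): for T == x_1 + 16 this
   returns x_1 with *NEG == false and *INV == 16; for T == 3 - x_1 it
   returns x_1 with *NEG == true and *INV == 3; for x_1 + y_2, which
   contains two symbols, it returns NULL_TREE.  */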

/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}

/* Return
    1 if VAL < VAL2
    0 if !(VAL < VAL2)
   -2 if those are incomparable.  */

int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else
    {
      tree tcmp;

      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      if (!tcmp
          || TREE_CODE (tcmp) != INTEGER_CST)
        return -2;

      if (!integer_zerop (tcmp))
        return 1;
    }

  return 0;
}

/* Compare two values VAL1 and VAL2.  Return

        -2 if VAL1 and VAL2 cannot be compared at compile-time,
        -1 if VAL1 < VAL2,
         0 if VAL1 == VAL2,
        +1 if VAL1 > VAL2, and
        +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
              == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
        return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
        return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!inv1 || !TREE_NO_WARNING (val1))
          && (!inv2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      if (!inv1)
        inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
        inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
                      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!sym1 || !TREE_NO_WARNING (val1))
          && (!sym2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
         underflows, this means that we can trivially compare the NAME with
         it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
          != wi::cmp (diff, wi::to_wide (cst), sgn))
        {
          const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
          return cst1 ? res : -res;
        }

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
        return -2;

      if (TREE_CODE (val1) == INTEGER_CST
          && TREE_CODE (val2) == INTEGER_CST)
        return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
        {
          if (known_eq (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 0;
          if (known_lt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return -1;
          if (known_gt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 1;
        }

      return -2;
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
        return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
        return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
        return 1;

      /* If VAL1 is different than VAL2, return +2.
         For integer constants we either have already returned -1 or 1
         or they are equivalent.  We still might succeed in proving
         something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
          || TREE_CODE (val2) != INTEGER_CST)
        {
          t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
          if (t && integer_onep (t))
            return 2;
        }

      return -2;
    }
}
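
/* Illustrative example (assumed inputs): with signed int operands and
   undefined overflow semantics, comparing VAL1 == x_1 + 1 against
   VAL2 == x_1 + 3 takes the SYM1 && SYM2 path above and returns -1,
   while also setting *STRICT_OVERFLOW_P since the answer only holds
   if neither addition overflows.  */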

/* Compare values like compare_values_warnv.  */

int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}

/* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
          0 if VAL is not inside [MIN, MAX],
         -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

int
value_inside_range (tree val, tree min, tree max)
{
  int cmp1, cmp2;

  cmp1 = operand_less_p (val, min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return 0;

  cmp2 = operand_less_p (max, val);
  if (cmp2 == -2)
    return -2;

  return !cmp2;
}

/* Return true if value ranges VR0 and VR1 have a non-empty
   intersection.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

static inline bool
value_ranges_intersect_p (const value_range *vr0, const value_range *vr1)
{
  /* The value ranges do not intersect if the maximum of the first range is
     less than the minimum of the second range or vice versa.
     When those relations are unknown, we can't do any better.  */
  if (operand_less_p (vr0->max, vr1->min) != 0)
    return false;
  if (operand_less_p (vr1->max, vr0->min) != 0)
    return false;
  return true;
}

/* Return TRUE if *VR includes the value zero.  */

bool
range_includes_zero_p (const value_range *vr)
{
  if (vr->type == VR_VARYING)
    return true;

  /* Ughh, we don't know.  We choose not to optimize.  */
  if (vr->type == VR_UNDEFINED)
    return true;

  tree zero = build_int_cst (TREE_TYPE (vr->min), 0);
  if (vr->type == VR_ANTI_RANGE)
    {
      int res = value_inside_range (zero, vr->min, vr->max);
      return res == 0 || res == -2;
    }
  return value_inside_range (zero, vr->min, vr->max) != 0;
}

/* Return true if *VR is known to only contain nonnegative values.  */

static inline bool
value_range_nonnegative_p (const value_range *vr)
{
  /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
     which would return a useful value should be encoded as a
     VR_RANGE.  */
  if (vr->type == VR_RANGE)
    {
      int result = compare_values (vr->min, integer_zero_node);
      return (result == 0 || result == 1);
    }

  return false;
}

/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.  */

tree
value_range_constant_singleton (const value_range *vr)
{
  if (vr->type == VR_RANGE
      && vrp_operand_equal_p (vr->min, vr->max)
      && is_gimple_min_invariant (vr->min))
    return vr->min;

  return NULL_TREE;
}

/* Value range wrapper for wide_int_range_set_zero_nonzero_bits.

   Compute MAY_BE_NONZERO and MUST_BE_NONZERO bit masks for range in VR.

   Return TRUE if VR was a constant range and we were able to compute
   the bit masks.  */

bool
vrp_set_zero_nonzero_bits (const tree expr_type,
                           const value_range *vr,
                           wide_int *may_be_nonzero,
                           wide_int *must_be_nonzero)
{
  if (!range_int_cst_p (vr))
    {
      *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
      *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
      return false;
    }
  wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
                                        wi::to_wide (vr->min),
                                        wi::to_wide (vr->max),
                                        *may_be_nonzero, *must_be_nonzero);
  return true;
}

/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */

static bool
ranges_from_anti_range (const value_range *ar,
                        value_range *vr0, value_range *vr1)
{
  tree type = TREE_TYPE (ar->min);

  vr0->type = VR_UNDEFINED;
  vr1->type = VR_UNDEFINED;

  /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
     [A+1, +INF].  Not sure if this helps in practice, though.  */

  if (ar->type != VR_ANTI_RANGE
      || TREE_CODE (ar->min) != INTEGER_CST
      || TREE_CODE (ar->max) != INTEGER_CST
      || !vrp_val_min (type)
      || !vrp_val_max (type))
    return false;

  if (!vrp_val_is_min (ar->min))
    {
      vr0->type = VR_RANGE;
      vr0->min = vrp_val_min (type);
      vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1);
    }
  if (!vrp_val_is_max (ar->max))
    {
      vr1->type = VR_RANGE;
      vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1);
      vr1->max = vrp_val_max (type);
    }
  if (vr0->type == VR_UNDEFINED)
    {
      *vr0 = *vr1;
      vr1->type = VR_UNDEFINED;
    }

  return vr0->type != VR_UNDEFINED;
}
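
/* Illustrative example (assumed inputs): for a 32-bit signed int,
   splitting the anti-range ~[3, 5] yields

     *VR0 = [INT_MIN, 2]   and   *VR1 = [6, INT_MAX],

   whose union is exactly the original anti-range.  For ~[INT_MIN, 5]
   only one piece remains, so *VR0 = [6, INT_MAX] and *VR1 stays
   VR_UNDEFINED.  */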

/* Extract the components of a value range into a pair of wide ints in
   [WMIN, WMAX].

   If the value range is anything but a VR_*RANGE of constants, the
   resulting wide ints are set to [-MIN, +MAX] for the type.  */

static void inline
extract_range_into_wide_ints (const value_range *vr,
                              signop sign, unsigned prec,
                              wide_int &wmin, wide_int &wmax)
{
  if ((vr->type == VR_RANGE
       || vr->type == VR_ANTI_RANGE)
      && TREE_CODE (vr->min) == INTEGER_CST
      && TREE_CODE (vr->max) == INTEGER_CST)
    {
      wmin = wi::to_wide (vr->min);
      wmax = wi::to_wide (vr->max);
    }
  else
    {
      wmin = wi::min_value (prec, sign);
      wmax = wi::max_value (prec, sign);
    }
}

/* Value range wrapper for wide_int_range_multiplicative_op:

     *VR = *VR0 .CODE. *VR1.  */

static void
extract_range_from_multiplicative_op (value_range *vr,
                                      enum tree_code code,
                                      const value_range *vr0,
                                      const value_range *vr1)
{
  gcc_assert (code == MULT_EXPR
              || code == TRUNC_DIV_EXPR
              || code == FLOOR_DIV_EXPR
              || code == CEIL_DIV_EXPR
              || code == EXACT_DIV_EXPR
              || code == ROUND_DIV_EXPR
              || code == RSHIFT_EXPR
              || code == LSHIFT_EXPR);
  gcc_assert (vr0->type == VR_RANGE && vr0->type == vr1->type);

  tree type = TREE_TYPE (vr0->min);
  wide_int res_lb, res_ub;
  wide_int vr0_lb = wi::to_wide (vr0->min);
  wide_int vr0_ub = wi::to_wide (vr0->max);
  wide_int vr1_lb = wi::to_wide (vr1->min);
  wide_int vr1_ub = wi::to_wide (vr1->max);
  bool overflow_undefined = TYPE_OVERFLOW_UNDEFINED (type);
  bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
  unsigned prec = TYPE_PRECISION (type);

  if (wide_int_range_multiplicative_op (res_lb, res_ub,
                                        code, TYPE_SIGN (type), prec,
                                        vr0_lb, vr0_ub, vr1_lb, vr1_ub,
                                        overflow_undefined, overflow_wraps))
    set_and_canonicalize_value_range (vr, VR_RANGE,
                                      wide_int_to_tree (type, res_lb),
                                      wide_int_to_tree (type, res_ub), NULL);
  else
    set_value_range_to_varying (vr);
}
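
/* Illustrative example (assumed inputs): for MULT_EXPR with
   *VR0 = [2, 4] and *VR1 = [3, 5] in a signed int type, the bound
   computation cannot overflow and *VR becomes [6, 20].  When the
   helper cannot produce a usable range, *VR drops to VARYING.  */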

/* If BOUND will include a symbolic bound, adjust it accordingly,
   otherwise leave it as is.

   CODE is the original operation that combined the bounds (PLUS_EXPR
   or MINUS_EXPR).

   TYPE is the type of the original operation.

   SYM_OPn is the symbolic for OPn if it has a symbolic.

   NEG_OPn is TRUE if the OPn was negated.  */

static void
adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
                       tree sym_op0, tree sym_op1,
                       bool neg_op0, bool neg_op1)
{
  bool minus_p = (code == MINUS_EXPR);
  /* If the result bound is constant, we're done; otherwise, build the
     symbolic lower bound.  */
  if (sym_op0 == sym_op1)
    ;
  else if (sym_op0)
    bound = build_symbolic_expr (type, sym_op0,
                                 neg_op0, bound);
  else if (sym_op1)
    {
      /* We may not negate if that might introduce
         undefined overflow.  */
      if (!minus_p
          || neg_op1
          || TYPE_OVERFLOW_WRAPS (type))
        bound = build_symbolic_expr (type, sym_op1,
                                     neg_op1 ^ minus_p, bound);
      else
        bound = NULL_TREE;
    }
}

/* Combine OP0 and OP1, which are two parts of a bound, into one wide
   int bound according to CODE.  CODE is the operation combining the
   bound (either a PLUS_EXPR or a MINUS_EXPR).

   TYPE is the type of the combine operation.

   WI is the wide int to store the result.

   OVF is set to -1 if an underflow occurred, +1 if an overflow
   occurred, and 0 if neither occurred.  */

static void
combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
               tree type, tree op0, tree op1)
{
  bool minus_p = (code == MINUS_EXPR);
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* Combine the bounds, if any.  */
  if (op0 && op1)
    {
      if (minus_p)
        wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
      else
        wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
    }
  else if (op0)
    wi = wi::to_wide (op0);
  else if (op1)
    {
      if (minus_p)
        wi = wi::neg (wi::to_wide (op1), &ovf);
      else
        wi = wi::to_wide (op1);
    }
  else
    wi = wi::shwi (0, prec);
}
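
/* Illustrative example (assumed 8-bit signed bounds): combining
   OP0 == 100 and OP1 == 50 with PLUS_EXPR computes 100 + 50, which
   does not fit in [-128, 127], so WI holds the truncated sum and OVF
   is set to wi::OVF_OVERFLOW; the caller then decides whether to
   wrap, saturate, or drop to varying.  */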

/* Given a range in [WMIN, WMAX], adjust it for possible overflow and
   put the result in VR.

   TYPE is the type of the range.

   MIN_OVF and MAX_OVF indicate what type of overflow, if any,
   occurred while originally calculating WMIN or WMAX.
   wi::OVF_UNDERFLOW indicates underflow, wi::OVF_OVERFLOW indicates
   overflow, and wi::OVF_NONE indicates neither.  */

static void
set_value_range_with_overflow (value_range &vr,
                               tree type,
                               const wide_int &wmin, const wide_int &wmax,
                               wi::overflow_type min_ovf,
                               wi::overflow_type max_ovf)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);
  vr.type = VR_RANGE;
  vr.equiv = NULL;
  if (TYPE_OVERFLOW_WRAPS (type))
    {
      /* If overflow wraps, truncate the values and adjust the
         range kind and bounds appropriately.  */
      wide_int tmin = wide_int::from (wmin, prec, sgn);
      wide_int tmax = wide_int::from (wmax, prec, sgn);
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
        {
          /* No overflow or both overflow or underflow.  The
             range kind stays VR_RANGE.  */
          vr.min = wide_int_to_tree (type, tmin);
          vr.max = wide_int_to_tree (type, tmax);
        }
      else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
               || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
        {
          /* Min underflow or max overflow.  The range kind
             changes to VR_ANTI_RANGE.  */
          bool covers = false;
          wide_int tem = tmin;
          vr.type = VR_ANTI_RANGE;
          tmin = tmax + 1;
          if (wi::cmp (tmin, tmax, sgn) < 0)
            covers = true;
          tmax = tem - 1;
          if (wi::cmp (tmax, tem, sgn) > 0)
            covers = true;
          /* If the anti-range would cover nothing, drop to varying.
             Likewise if the anti-range bounds are outside of the
             type's values.  */
          if (covers || wi::cmp (tmin, tmax, sgn) > 0)
            {
              set_value_range_to_varying (&vr);
              return;
            }
          vr.min = wide_int_to_tree (type, tmin);
          vr.max = wide_int_to_tree (type, tmax);
        }
      else
        {
          /* Other underflow and/or overflow, drop to VR_VARYING.  */
          set_value_range_to_varying (&vr);
          return;
        }
    }
  else
    {
      /* If overflow does not wrap, saturate to the types min/max
         value.  */
      wide_int type_min = wi::min_value (prec, sgn);
      wide_int type_max = wi::max_value (prec, sgn);
      if (min_ovf == wi::OVF_UNDERFLOW)
        vr.min = wide_int_to_tree (type, type_min);
      else if (min_ovf == wi::OVF_OVERFLOW)
        vr.min = wide_int_to_tree (type, type_max);
      else
        vr.min = wide_int_to_tree (type, wmin);

      if (max_ovf == wi::OVF_UNDERFLOW)
        vr.max = wide_int_to_tree (type, type_min);
      else if (max_ovf == wi::OVF_OVERFLOW)
        vr.max = wide_int_to_tree (type, type_max);
      else
        vr.max = wide_int_to_tree (type, wmax);
    }
}
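
/* Illustrative example (assumed inputs): for unsigned char (wrapping,
   8 bits), suppose the max bound was computed as 250 + 10, which
   truncates to WMAX == 4 with MAX_OVF == wi::OVF_OVERFLOW, while
   WMIN == 250 with MIN_OVF == wi::OVF_NONE.  Only the max overflowed,
   so the kind flips to VR_ANTI_RANGE and the result is ~[5, 249],
   i.e. the wrapped set {250..255} U {0..4}.  */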

/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.  */

void
extract_range_from_binary_expr_1 (value_range *vr,
                                  enum tree_code code, tree expr_type,
                                  const value_range *vr0_,
                                  const value_range *vr1_)
{
  signop sign = TYPE_SIGN (expr_type);
  unsigned int prec = TYPE_PRECISION (expr_type);
  value_range vr0 = *vr0_, vr1 = *vr1_;
  value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
  enum value_range_type type;
  tree min = NULL_TREE, max = NULL_TREE;
  int cmp;

  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If both ranges are UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  /* If one of the ranges is UNDEFINED drop it to VARYING for the following
     code.  At some point we may want to special-case operations that
     have UNDEFINED result for all or some value-ranges of the not UNDEFINED
     operand.  */
  else if (vr0.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr0);
  else if (vr1.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr1);

  /* We get imprecise results from ranges_from_anti_range when
     code is EXACT_DIV_EXPR.  We could mask out bits in the resulting
     range, but then we also need to hack up vrp_meet.  It's just
     easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR.  */
  if (code == EXACT_DIV_EXPR
      && vr0.type == VR_ANTI_RANGE
      && vr0.min == vr0.max
      && integer_zerop (vr0.min))
    {
      set_value_range_to_nonnull (vr, expr_type);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
      if (vrtem1.type != VR_UNDEFINED)
        {
          value_range vrres = VR_INITIALIZER;
          extract_range_from_binary_expr_1 (&vrres, code, expr_type,
                                            &vrtem1, vr1_);
          vrp_meet (vr, &vrres);
        }
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
      if (vrtem1.type != VR_UNDEFINED)
        {
          value_range vrres = VR_INITIALIZER;
          extract_range_from_binary_expr_1 (&vrres, code, expr_type,
                                            vr0_, &vrtem1);
          vrp_meet (vr, &vrres);
        }
      return;
    }

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.type;

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or symbolic range.  Similarly for
     divisions, MIN/MAX and PLUS/MINUS.

     TODO, we may be able to derive anti-ranges in some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != RSHIFT_EXPR
      && code != POINTER_PLUS_EXPR
      && (vr0.type == VR_VARYING
          || vr1.type == VR_VARYING
          || vr0.type != vr1.type
          || symbolic_range_p (&vr0)
          || symbolic_range_p (&vr1)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Now evaluate the expression to determine the new range.  */
  if (POINTER_TYPE_P (expr_type))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
        {
          /* For MIN/MAX expressions with pointers, we only care about
             nullness; if both are non null, then the result is nonnull.
             If both are null, then the result is null.  Otherwise they
             are varying.  */
          if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
            set_value_range_to_nonnull (vr, expr_type);
          else if (range_is_null (&vr0) && range_is_null (&vr1))
            set_value_range_to_null (vr, expr_type);
          else
            set_value_range_to_varying (vr);
        }
      else if (code == POINTER_PLUS_EXPR)
        {
          /* For pointer types, we are really only interested in asserting
             whether the expression evaluates to non-NULL.  */
          if (!range_includes_zero_p (&vr0)
              || !range_includes_zero_p (&vr1))
            set_value_range_to_nonnull (vr, expr_type);
          else if (range_is_null (&vr0) && range_is_null (&vr1))
            set_value_range_to_null (vr, expr_type);
          else
            set_value_range_to_varying (vr);
        }
      else if (code == BIT_AND_EXPR)
        {
          /* For pointer types, we are really only interested in asserting
             whether the expression evaluates to non-NULL.  */
          if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
            set_value_range_to_nonnull (vr, expr_type);
          else if (range_is_null (&vr0) || range_is_null (&vr1))
            set_value_range_to_null (vr, expr_type);
          else
            set_value_range_to_varying (vr);
        }
      else
        set_value_range_to_varying (vr);

      return;
    }

  /* For integer ranges, apply the operation to each end of the
     range and see what we end up with.  */
  if (code == PLUS_EXPR || code == MINUS_EXPR)
    {
      /* This will normalize things such that calculating
         [0,0] - VR_VARYING is not dropped to varying, but is
         calculated as [MIN+1, MAX].  */
      if (vr0.type == VR_VARYING)
        {
          vr0.type = VR_RANGE;
          vr0.min = vrp_val_min (expr_type);
          vr0.max = vrp_val_max (expr_type);
        }
      if (vr1.type == VR_VARYING)
        {
          vr1.type = VR_RANGE;
          vr1.min = vrp_val_min (expr_type);
          vr1.max = vrp_val_max (expr_type);
        }

      const bool minus_p = (code == MINUS_EXPR);
      tree min_op0 = vr0.min;
      tree min_op1 = minus_p ? vr1.max : vr1.min;
      tree max_op0 = vr0.max;
      tree max_op1 = minus_p ? vr1.min : vr1.max;
      tree sym_min_op0 = NULL_TREE;
      tree sym_min_op1 = NULL_TREE;
      tree sym_max_op0 = NULL_TREE;
      tree sym_max_op1 = NULL_TREE;
      bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;

      neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;

      /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
         single-symbolic ranges, try to compute the precise resulting range,
         but only if we know that this resulting range will also be constant
         or single-symbolic.  */
      if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
          && (TREE_CODE (min_op0) == INTEGER_CST
              || (sym_min_op0
                  = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
          && (TREE_CODE (min_op1) == INTEGER_CST
              || (sym_min_op1
                  = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
          && (!(sym_min_op0 && sym_min_op1)
              || (sym_min_op0 == sym_min_op1
                  && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
          && (TREE_CODE (max_op0) == INTEGER_CST
              || (sym_max_op0
                  = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
          && (TREE_CODE (max_op1) == INTEGER_CST
              || (sym_max_op1
                  = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
          && (!(sym_max_op0 && sym_max_op1)
              || (sym_max_op0 == sym_max_op1
                  && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
        {
          wide_int wmin, wmax;
          wi::overflow_type min_ovf = wi::OVF_NONE;
          wi::overflow_type max_ovf = wi::OVF_NONE;

          /* Build the bounds.  */
          combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
          combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);

          /* If we have overflow for the constant part and the resulting
             range will be symbolic, drop to VR_VARYING.  */
          if (((bool)min_ovf && sym_min_op0 != sym_min_op1)
              || ((bool)max_ovf && sym_max_op0 != sym_max_op1))
            {
              set_value_range_to_varying (vr);
              return;
            }

          /* Adjust the range for possible overflow.  */
          set_value_range_with_overflow (*vr, expr_type,
                                         wmin, wmax, min_ovf, max_ovf);
          if (vr->type == VR_VARYING)
            return;

          /* Build the symbolic bounds if needed.  */
          adjust_symbolic_bound (vr->min, code, expr_type,
                                 sym_min_op0, sym_min_op1,
                                 neg_min_op0, neg_min_op1);
          adjust_symbolic_bound (vr->max, code, expr_type,
                                 sym_max_op0, sym_max_op1,
                                 neg_max_op0, neg_max_op1);
          /* ?? It would probably be cleaner to eliminate min/max/type
             entirely and hold these values in VR directly.  */
          min = vr->min;
          max = vr->max;
          type = vr->type;
        }
      else
        {
          /* For other cases, for example if we have a PLUS_EXPR with two
             VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
             to compute a precise range for such a case.
             ??? General even mixed range kind operations can be expressed
             by for example transforming ~[3, 5] + [1, 2] to range-only
             operations and a union primitive:
               [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
               [-INF+1, 4]  U  [6, +INF(OVF)]
             though usually the union is not exactly representable with
             a single range or anti-range as the above is
             [-INF+1, +INF(OVF)] intersected with ~[5, 5]
             but one could use a scheme similar to equivalences for this.  */
          set_value_range_to_varying (vr);
          return;
        }
    }

  else if (code == MIN_EXPR
           || code == MAX_EXPR)
    {
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max;
      wide_int vr1_min, vr1_max;
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
      if (wide_int_range_min_max (wmin, wmax, code, sign, prec,
                                  vr0_min, vr0_max, vr1_min, vr1_max))
        set_value_range (vr, VR_RANGE,
                         wide_int_to_tree (expr_type, wmin),
                         wide_int_to_tree (expr_type, wmax), NULL);
      else
        set_value_range_to_varying (vr);
      return;
    }
  else if (code == MULT_EXPR)
    {
      if (!range_int_cst_p (&vr0)
          || !range_int_cst_p (&vr1))
        {
          set_value_range_to_varying (vr);
          return;
        }
      extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
      return;
    }
  else if (code == RSHIFT_EXPR
           || code == LSHIFT_EXPR)
    {
      if (range_int_cst_p (&vr1)
          && !wide_int_range_shift_undefined_p (prec,
                                                wi::to_wide (vr1.min),
                                                wi::to_wide (vr1.max)))
        {
          if (code == RSHIFT_EXPR)
            {
              /* Even if vr0 is VARYING or otherwise not usable, we can derive
                 useful ranges just from the shift count.  E.g.
                 x >> 63 for signed 64-bit x is always [-1, 0].  */
              if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
                {
                  vr0.type = type = VR_RANGE;
                  vr0.min = vrp_val_min (expr_type);
                  vr0.max = vrp_val_max (expr_type);
                }
              extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
              return;
            }
          else if (code == LSHIFT_EXPR
                   && range_int_cst_p (&vr0))
            {
              wide_int res_lb, res_ub;
              if (wide_int_range_lshift (res_lb, res_ub, sign, prec,
                                         wi::to_wide (vr0.min),
                                         wi::to_wide (vr0.max),
                                         wi::to_wide (vr1.min),
                                         wi::to_wide (vr1.max),
                                         TYPE_OVERFLOW_UNDEFINED (expr_type),
                                         TYPE_OVERFLOW_WRAPS (expr_type)))
                {
                  min = wide_int_to_tree (expr_type, res_lb);
                  max = wide_int_to_tree (expr_type, res_ub);
                  set_and_canonicalize_value_range (vr, VR_RANGE,
                                                    min, max, NULL);
                  return;
                }
            }
        }
      set_value_range_to_varying (vr);
      return;
    }

  else if (code == TRUNC_DIV_EXPR
           || code == FLOOR_DIV_EXPR
           || code == CEIL_DIV_EXPR
           || code == EXACT_DIV_EXPR
           || code == ROUND_DIV_EXPR)
    {
      wide_int dividend_min, dividend_max, divisor_min, divisor_max;
      wide_int wmin, wmax, extra_min, extra_max;
      bool extra_range_p;

      /* Special case explicit division by zero as undefined.  */
      if (range_is_null (&vr1))
        {
          set_value_range_to_undefined (vr);
          return;
        }

      /* First, normalize ranges into constants we can handle.  Note
         that VR_ANTI_RANGE's of constants were already normalized
         before arriving here.

         NOTE: As a future improvement, we may be able to do better
         with mixed symbolic (anti-)ranges like [0, A].  See note in
         ranges_from_anti_range.  */
      extract_range_into_wide_ints (&vr0, sign, prec,
                                    dividend_min, dividend_max);
      extract_range_into_wide_ints (&vr1, sign, prec,
                                    divisor_min, divisor_max);
      if (!wide_int_range_div (wmin, wmax, code, sign, prec,
                               dividend_min, dividend_max,
                               divisor_min, divisor_max,
                               TYPE_OVERFLOW_UNDEFINED (expr_type),
                               TYPE_OVERFLOW_WRAPS (expr_type),
                               extra_range_p, extra_min, extra_max))
        {
          set_value_range_to_varying (vr);
          return;
        }
      set_value_range (vr, VR_RANGE,
                       wide_int_to_tree (expr_type, wmin),
                       wide_int_to_tree (expr_type, wmax), NULL);
      if (extra_range_p)
        {
          value_range extra_range = VR_INITIALIZER;
          set_value_range (&extra_range, VR_RANGE,
                           wide_int_to_tree (expr_type, extra_min),
                           wide_int_to_tree (expr_type, extra_max), NULL);
          vrp_meet (vr, &extra_range);
        }
      return;
    }
  else if (code == TRUNC_MOD_EXPR)
    {
      if (range_is_null (&vr1))
        {
          set_value_range_to_undefined (vr);
          return;
        }
      wide_int wmin, wmax, tmp;
      wide_int vr0_min, vr0_max, vr1_min, vr1_max;
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
      wide_int_range_trunc_mod (wmin, wmax, sign, prec,
                                vr0_min, vr0_max, vr1_min, vr1_max);
      min = wide_int_to_tree (expr_type, wmin);
      max = wide_int_to_tree (expr_type, wmax);
      set_value_range (vr, VR_RANGE, min, max, NULL);
      return;
    }

  else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
    {
      wide_int may_be_nonzero0, may_be_nonzero1;
      wide_int must_be_nonzero0, must_be_nonzero1;
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max, vr1_min, vr1_max;
      vrp_set_zero_nonzero_bits (expr_type, &vr0,
                                 &may_be_nonzero0, &must_be_nonzero0);
      vrp_set_zero_nonzero_bits (expr_type, &vr1,
                                 &may_be_nonzero1, &must_be_nonzero1);
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
      if (code == BIT_AND_EXPR)
        {
          if (wide_int_range_bit_and (wmin, wmax, sign, prec,
                                      vr0_min, vr0_max,
                                      vr1_min, vr1_max,
                                      must_be_nonzero0,
                                      may_be_nonzero0,
                                      must_be_nonzero1,
                                      may_be_nonzero1))
            {
              min = wide_int_to_tree (expr_type, wmin);
              max = wide_int_to_tree (expr_type, wmax);
              set_value_range (vr, VR_RANGE, min, max, NULL);
            }
          else
            set_value_range_to_varying (vr);
          return;
        }
      else if (code == BIT_IOR_EXPR)
        {
          if (wide_int_range_bit_ior (wmin, wmax, sign,
                                      vr0_min, vr0_max,
                                      vr1_min, vr1_max,
                                      must_be_nonzero0,
                                      may_be_nonzero0,
                                      must_be_nonzero1,
                                      may_be_nonzero1))
            {
              min = wide_int_to_tree (expr_type, wmin);
              max = wide_int_to_tree (expr_type, wmax);
              set_value_range (vr, VR_RANGE, min, max, NULL);
            }
          else
            set_value_range_to_varying (vr);
          return;
        }
      else if (code == BIT_XOR_EXPR)
        {
          if (wide_int_range_bit_xor (wmin, wmax, sign, prec,
                                      must_be_nonzero0,
                                      may_be_nonzero0,
                                      must_be_nonzero1,
                                      may_be_nonzero1))
            {
              min = wide_int_to_tree (expr_type, wmin);
              max = wide_int_to_tree (expr_type, wmax);
              set_value_range (vr, VR_RANGE, min, max, NULL);
            }
          else
            set_value_range_to_varying (vr);
          return;
        }
    }
1731 /* If either MIN or MAX overflowed, then set the resulting range to
1732 VARYING. */
1733 if (min == NULL_TREE
1734 || TREE_OVERFLOW_P (min)
1735 || max == NULL_TREE
1736 || TREE_OVERFLOW_P (max))
1738 set_value_range_to_varying (vr);
1739 return;
1742 /* We punt for [-INF, +INF].
1743 We learn nothing when we have INF on both sides.
1744 Note that we do accept [-INF, -INF] and [+INF, +INF]. */
1745 if (vrp_val_is_min (min) && vrp_val_is_max (max))
1747 set_value_range_to_varying (vr);
1748 return;
1751 cmp = compare_values (min, max);
1752 if (cmp == -2 || cmp == 1)
1754 /* If the new range has its limits swapped around (MIN > MAX),
1755 then the operation caused one of them to wrap around, mark
1756 the new range VARYING. */
1757 set_value_range_to_varying (vr);
1759 else
1760 set_value_range (vr, type, min, max, NULL);
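
/* Illustrative example (assumed inputs): for PLUS_EXPR on signed int
   with *VR0_ = [1, 10] and *VR1_ = [2, 20], combine_bound produces
   wmin == 3 and wmax == 30 with no overflow, so *VR becomes the
   VR_RANGE [3, 30].  */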

/* Extract range information from a unary operation CODE based on
   the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
   The resulting range is stored in *VR.  */

void
extract_range_from_unary_expr (value_range *vr,
                               enum tree_code code, tree type,
                               const value_range *vr0_, tree op0_type)
{
  signop sign = TYPE_SIGN (type);
  unsigned int prec = TYPE_PRECISION (type);
  value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;

  /* VRP only operates on integral and pointer types.  */
  if (!(INTEGRAL_TYPE_P (op0_type)
        || POINTER_TYPE_P (op0_type))
      || !(INTEGRAL_TYPE_P (type)
           || POINTER_TYPE_P (type)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If VR0 is UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }

  /* Handle operations that we express in terms of others.  */
  if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
    {
      /* PAREN_EXPR and OBJ_TYPE_REF are simple copies.  */
      copy_value_range (vr, &vr0);
      return;
    }
  else if (code == NEGATE_EXPR)
    {
      /* -X is simply 0 - X, so re-use existing code that also handles
         anti-ranges fine.  */
      value_range zero = VR_INITIALIZER;
      set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
      return;
    }
  else if (code == BIT_NOT_EXPR)
    {
      /* ~X is simply -1 - X, so re-use existing code that also handles
         anti-ranges fine.  */
      value_range minusone = VR_INITIALIZER;
      set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
                                        type, &minusone, &vr0);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express op ~[] as (op []') U (op []'').  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
      if (vrtem1.type != VR_UNDEFINED)
        {
          value_range vrres = VR_INITIALIZER;
          extract_range_from_unary_expr (&vrres, code, type,
                                         &vrtem1, op0_type);
          vrp_meet (vr, &vrres);
        }
      return;
    }

  if (CONVERT_EXPR_CODE_P (code))
    {
      tree inner_type = op0_type;
      tree outer_type = type;

      /* If the expression evaluates to a pointer, we are only interested in
         determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).  */
      if (POINTER_TYPE_P (type))
        {
          if (!range_includes_zero_p (&vr0))
            set_value_range_to_nonnull (vr, type);
          else if (range_is_null (&vr0))
            set_value_range_to_null (vr, type);
          else
            set_value_range_to_varying (vr);
          return;
        }

      /* We normalize everything to a VR_RANGE, but for constant
         anti-ranges we must handle them by leaving the final result
         as an anti range.  This allows us to convert things like
         ~[0,5] seamlessly.  */
      value_range_type vr_type = VR_RANGE;
      if (vr0.type == VR_ANTI_RANGE
          && TREE_CODE (vr0.min) == INTEGER_CST
          && TREE_CODE (vr0.max) == INTEGER_CST)
        vr_type = VR_ANTI_RANGE;

      /* NOTES: Previously we were returning VARYING for all symbolics, but
         we can do better by treating them as [-MIN, +MAX].  For
         example, converting [SYM, SYM] from INT to LONG UNSIGNED,
         we can return: ~[0x8000000, 0xffffffff7fffffff].

         We were also failing to convert ~[0,0] from char* to unsigned,
         instead choosing to return VR_VARYING.  Now we return ~[0,0].  */
      wide_int vr0_min, vr0_max, wmin, wmax;
      signop inner_sign = TYPE_SIGN (inner_type);
      signop outer_sign = TYPE_SIGN (outer_type);
      unsigned inner_prec = TYPE_PRECISION (inner_type);
      unsigned outer_prec = TYPE_PRECISION (outer_type);
      extract_range_into_wide_ints (&vr0, inner_sign, inner_prec,
                                    vr0_min, vr0_max);
      if (wide_int_range_convert (wmin, wmax,
                                  inner_sign, inner_prec,
                                  outer_sign, outer_prec,
                                  vr0_min, vr0_max))
        {
          tree min = wide_int_to_tree (outer_type, wmin);
          tree max = wide_int_to_tree (outer_type, wmax);
          set_and_canonicalize_value_range (vr, vr_type, min, max, NULL);
        }
      else
        set_value_range_to_varying (vr);
      return;
    }
  else if (code == ABS_EXPR)
    {
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max;
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max,
                              TYPE_OVERFLOW_UNDEFINED (type)))
        set_value_range (vr, VR_RANGE,
                         wide_int_to_tree (type, wmin),
                         wide_int_to_tree (type, wmax), NULL);
      else
        set_value_range_to_varying (vr);
      return;
    }

  /* For unhandled operations fall back to varying.  */
  set_value_range_to_varying (vr);
  return;
}
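
/* Illustrative example (assumed inputs): NEGATE_EXPR over signed int
   [1, 5] is rewritten above as 0 - [1, 5] and yields [-5, -1];
   BIT_NOT_EXPR over [1, 5] is rewritten as -1 - [1, 5] and yields
   [-6, -2].  */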

/* Debugging dumps.  */

void dump_value_range (FILE *, const value_range *);
void debug_value_range (const value_range *);
void dump_all_value_ranges (FILE *);
void dump_vr_equiv (FILE *, bitmap);
void debug_vr_equiv (bitmap);

/* Dump value range VR to FILE.  */

void
dump_value_range (FILE *file, const value_range *vr)
{
  if (vr == NULL)
    fprintf (file, "[]");
  else if (vr->type == VR_UNDEFINED)
    fprintf (file, "UNDEFINED");
  else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
    {
      tree type = TREE_TYPE (vr->min);

      fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");

      if (INTEGRAL_TYPE_P (type)
          && !TYPE_UNSIGNED (type)
          && vrp_val_is_min (vr->min))
        fprintf (file, "-INF");
      else
        print_generic_expr (file, vr->min);

      fprintf (file, ", ");

      if (INTEGRAL_TYPE_P (type)
          && vrp_val_is_max (vr->max))
        fprintf (file, "+INF");
      else
        print_generic_expr (file, vr->max);

      fprintf (file, "]");

      if (vr->equiv)
        {
          bitmap_iterator bi;
          unsigned i, c = 0;

          fprintf (file, "  EQUIVALENCES: { ");

          EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
            {
              print_generic_expr (file, ssa_name (i));
              fprintf (file, " ");
              c++;
            }

          fprintf (file, "} (%u elements)", c);
        }
    }
  else if (vr->type == VR_VARYING)
    fprintf (file, "VARYING");
  else
    fprintf (file, "INVALID RANGE");
}
1976 /* Dump value range VR to stderr. */
1978 DEBUG_FUNCTION void
1979 debug_value_range (const value_range *vr)
1981 dump_value_range (stderr, vr);
1982 fprintf (stderr, "\n");
1985 void
1986 value_range::dump () const
1988 debug_value_range (this);
1992 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
1993 create a new SSA name N and return the assertion assignment
1994 'N = ASSERT_EXPR <V, V OP W>'. */
1996 static gimple *
1997 build_assert_expr_for (tree cond, tree v)
1999 tree a;
2000 gassign *assertion;
2002 gcc_assert (TREE_CODE (v) == SSA_NAME
2003 && COMPARISON_CLASS_P (cond));
2005 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2006 assertion = gimple_build_assign (NULL_TREE, a);
2008 /* The new ASSERT_EXPR creates a new SSA name that replaces the
2009 operand of the ASSERT_EXPR. Create it so the new name and the old one
2010 are registered in the replacement table so that we can fix the SSA web
2011 after adding all the ASSERT_EXPRs. */
2012 tree new_def = create_new_def_for (v, assertion, NULL);
2013 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain,
2014 given that we have to be able to fully propagate those out to re-create
2015 valid SSA when removing the asserts. */
2016 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2017 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2019 return assertion;
2023 /* Return true if STMT is a conditional statement whose predicate
2024 involves floating point values. */
2026 static inline bool
2027 fp_predicate (gimple *stmt)
2029 GIMPLE_CHECK (stmt, GIMPLE_COND);
2031 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2034 /* If the range of values taken by OP can be inferred after STMT executes,
2035 return the comparison code (COMP_CODE_P) and value (VAL_P) that
2036 describe the inferred range. Return true if a range could be
2037 inferred. */
2039 bool
2040 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2042 *val_p = NULL_TREE;
2043 *comp_code_p = ERROR_MARK;
2045 /* Do not attempt to infer anything in names that flow through
2046 abnormal edges. */
2047 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2048 return false;
2050 /* If STMT is the last statement of a basic block with no normal
2051 successors, there is no point inferring anything about any of its
2052 operands. We would not be able to find a proper insertion point
2053 for the assertion, anyway. */
2054 if (stmt_ends_bb_p (stmt))
2056 edge_iterator ei;
2057 edge e;
2059 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2060 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2061 break;
2062 if (e == NULL)
2063 return false;
2066 if (infer_nonnull_range (stmt, op))
2068 *val_p = build_int_cst (TREE_TYPE (op), 0);
2069 *comp_code_p = NE_EXPR;
2070 return true;
2073 return false;
2077 void dump_asserts_for (FILE *, tree);
2078 void debug_asserts_for (tree);
2079 void dump_all_asserts (FILE *);
2080 void debug_all_asserts (void);
2082 /* Dump all the registered assertions for NAME to FILE. */
2084 void
2085 dump_asserts_for (FILE *file, tree name)
2087 assert_locus *loc;
2089 fprintf (file, "Assertions to be inserted for ");
2090 print_generic_expr (file, name);
2091 fprintf (file, "\n");
2093 loc = asserts_for[SSA_NAME_VERSION (name)];
2094 while (loc)
2096 fprintf (file, "\t");
2097 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2098 fprintf (file, "\n\tBB #%d", loc->bb->index);
2099 if (loc->e)
2101 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2102 loc->e->dest->index);
2103 dump_edge_info (file, loc->e, dump_flags, 0);
2105 fprintf (file, "\n\tPREDICATE: ");
2106 print_generic_expr (file, loc->expr);
2107 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2108 print_generic_expr (file, loc->val);
2109 fprintf (file, "\n\n");
2110 loc = loc->next;
2113 fprintf (file, "\n");
2117 /* Dump all the registered assertions for NAME to stderr. */
2119 DEBUG_FUNCTION void
2120 debug_asserts_for (tree name)
2122 dump_asserts_for (stderr, name);
2126 /* Dump all the registered assertions for all the names to FILE. */
2128 void
2129 dump_all_asserts (FILE *file)
2131 unsigned i;
2132 bitmap_iterator bi;
2134 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2135 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2136 dump_asserts_for (file, ssa_name (i));
2137 fprintf (file, "\n");
2141 /* Dump all the registered assertions for all the names to stderr. */
2143 DEBUG_FUNCTION void
2144 debug_all_asserts (void)
2146 dump_all_asserts (stderr);
2149 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
2151 static void
2152 add_assert_info (vec<assert_info> &asserts,
2153 tree name, tree expr, enum tree_code comp_code, tree val)
2155 assert_info info;
2156 info.comp_code = comp_code;
2157 info.name = name;
2158 if (TREE_OVERFLOW_P (val))
2159 val = drop_tree_overflow (val);
2160 info.val = val;
2161 info.expr = expr;
2162 asserts.safe_push (info);
2165 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2166 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2167 E->DEST, then register this location as a possible insertion point
2168 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2170 BB, E and SI provide the exact insertion point for the new
2171 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2172 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2173 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2174 must not be NULL. */
2176 static void
2177 register_new_assert_for (tree name, tree expr,
2178 enum tree_code comp_code,
2179 tree val,
2180 basic_block bb,
2181 edge e,
2182 gimple_stmt_iterator si)
2184 assert_locus *n, *loc, *last_loc;
2185 basic_block dest_bb;
2187 gcc_checking_assert (bb == NULL || e == NULL);
2189 if (e == NULL)
2190 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2191 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2193 /* Never build an assert comparing against an integer constant with
2194 TREE_OVERFLOW set. This confuses our undefined overflow warning
2195 machinery. */
2196 if (TREE_OVERFLOW_P (val))
2197 val = drop_tree_overflow (val);
2199 /* The new assertion A will be inserted at BB or E. We need to
2200 determine if the new location is dominated by a previously
2201 registered location for A. If we are doing an edge insertion,
2202 assume that A will be inserted at E->DEST. Note that this is not
2203 necessarily true.
2205 If E is a critical edge, it will be split. But even if E is
2206 split, the new block will dominate the same set of blocks that
2207 E->DEST dominates.
2209 The reverse, however, is not true: blocks dominated by E->DEST
2210 will not be dominated by the new block created to split E. So,
2211 if the insertion location is on a critical edge, we will not use
2212 the new location to move another assertion previously registered
2213 at a block dominated by E->DEST. */
2214 dest_bb = (bb) ? bb : e->dest;
2216 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2217 VAL at a block dominating DEST_BB, then we don't need to insert a new
2218 one. Similarly, if the same assertion already exists at a block
2219 dominated by DEST_BB and the new location is not on a critical
2220 edge, then update the existing location for the assertion (i.e.,
2221 move the assertion up in the dominance tree).
2223 Note, this is implemented as a simple linked list because there
2224 should not be more than a handful of assertions registered per
2225 name. If this becomes a performance problem, a table hashed by
2226 COMP_CODE and VAL could be implemented. */
2227 loc = asserts_for[SSA_NAME_VERSION (name)];
2228 last_loc = loc;
2229 while (loc)
2231 if (loc->comp_code == comp_code
2232 && (loc->val == val
2233 || operand_equal_p (loc->val, val, 0))
2234 && (loc->expr == expr
2235 || operand_equal_p (loc->expr, expr, 0)))
2237 /* If E is not a critical edge and DEST_BB
2238 dominates the existing location for the assertion, move
2239 the assertion up in the dominance tree by updating its
2240 location information. */
2241 if ((e == NULL || !EDGE_CRITICAL_P (e))
2242 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2244 loc->bb = dest_bb;
2245 loc->e = e;
2246 loc->si = si;
2247 return;
2251 /* Update the last node of the list and move to the next one. */
2252 last_loc = loc;
2253 loc = loc->next;
2256 /* If we didn't find an assertion already registered for
2257 NAME COMP_CODE VAL, add a new one at the end of the list of
2258 assertions associated with NAME. */
2259 n = XNEW (struct assert_locus);
2260 n->bb = dest_bb;
2261 n->e = e;
2262 n->si = si;
2263 n->comp_code = comp_code;
2264 n->val = val;
2265 n->expr = expr;
2266 n->next = NULL;
2268 if (last_loc)
2269 last_loc->next = n;
2270 else
2271 asserts_for[SSA_NAME_VERSION (name)] = n;
2273 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2276 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2277 Extract a suitable test code and value and store them into *CODE_P and
2278 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2280 If no extraction was possible, return FALSE; otherwise return TRUE.
2282 If INVERT is true, then we invert the result stored into *CODE_P. */
2284 static bool
2285 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2286 tree cond_op0, tree cond_op1,
2287 bool invert, enum tree_code *code_p,
2288 tree *val_p)
2290 enum tree_code comp_code;
2291 tree val;
2293 /* We have a comparison of the form NAME COMP VAL
2294 or VAL COMP NAME. */
2295 if (name == cond_op1)
2297 /* If the predicate is of the form VAL COMP NAME, flip
2298 COMP around because we need to register NAME as the
2299 first operand in the predicate. */
2300 comp_code = swap_tree_comparison (cond_code);
2301 val = cond_op0;
2303 else if (name == cond_op0)
2305 /* The comparison is of the form NAME COMP VAL, so the
2306 comparison code remains unchanged. */
2307 comp_code = cond_code;
2308 val = cond_op1;
2310 else
2311 gcc_unreachable ();
2313 /* Invert the comparison code as necessary. */
2314 if (invert)
2315 comp_code = invert_tree_comparison (comp_code, 0);
2317 /* VRP only handles integral and pointer types. */
2318 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
2319 && ! POINTER_TYPE_P (TREE_TYPE (val)))
2320 return false;
2322 /* Do not register always-false predicates.
2323 FIXME: this works around a limitation in fold() when dealing with
2324 enumerations. Given 'enum { N1, N2 } x;', fold will not
2325 fold 'if (x > N2)' to 'if (0)'. */
2326 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
2327 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
2329 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
2330 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
2332 if (comp_code == GT_EXPR
2333 && (!max
2334 || compare_values (val, max) == 0))
2335 return false;
2337 if (comp_code == LT_EXPR
2338 && (!min
2339 || compare_values (val, min) == 0))
2340 return false;
2342 *code_p = comp_code;
2343 *val_p = val;
2344 return true;
2347 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
2348 (otherwise return VAL). VAL and MASK must be zero-extended for
2349 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
2350 (to transform signed values into unsigned) and at the end xor
2351 SGNBIT back. */
2353 static wide_int
2354 masked_increment (const wide_int &val_in, const wide_int &mask,
2355 const wide_int &sgnbit, unsigned int prec)
2357 wide_int bit = wi::one (prec), res;
2358 unsigned int i;
2360 wide_int val = val_in ^ sgnbit;
2361 for (i = 0; i < prec; i++, bit += bit)
2363 res = mask;
2364 if ((res & bit) == 0)
2365 continue;
2366 res = bit - 1;
2367 res = wi::bit_and_not (val + bit, res);
2368 res &= mask;
2369 if (wi::gtu_p (res, val))
2370 return res ^ sgnbit;
2372 return val ^ sgnbit;
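/* A worked example (with hypothetical 4-bit values): for VAL == 5
   (0b0101), MASK == 0b1010 and SGNBIT == 0, the loop skips BIT == 1
   and BIT == 4 (not set in MASK), rejects RES == 2 for BIT == 2
   (not above VAL), and for BIT == 8 returns RES == 8 (0b1000), the
   smallest value above VAL that consists only of MASK bits.  */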
2375 /* Helper for overflow_comparison_p
2377 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2378 OP1's defining statement to see if it ultimately has the form
2379 OP0 CODE (OP0 PLUS INTEGER_CST)
2381 If so, return TRUE indicating this is an overflow test and store into
2382 *NEW_CST an updated constant that can be used in a narrowed range test.
2384 REVERSED indicates if the comparison was originally:
2386 OP1 CODE' OP0.
2388 This affects how we build the updated constant. */
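/* For instance (assuming 32-bit unsigned arithmetic), the overflow
   test X > X + 4 is true iff the addition wrapped, i.e. iff
   X >= 0xfffffffc, so for the non-reversed case *NEW_CST becomes
   UINT_MAX - 4 == 0xfffffffb and the test narrows to X > *NEW_CST.  */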
2390 static bool
2391 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
2392 bool follow_assert_exprs, bool reversed, tree *new_cst)
2394 /* See if this is a relational operation between two SSA_NAMES with
2395 unsigned, overflow wrapping values. If so, check it more deeply. */
2396 if ((code == LT_EXPR || code == LE_EXPR
2397 || code == GE_EXPR || code == GT_EXPR)
2398 && TREE_CODE (op0) == SSA_NAME
2399 && TREE_CODE (op1) == SSA_NAME
2400 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
2401 && TYPE_UNSIGNED (TREE_TYPE (op0))
2402 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
2404 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
2406 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
2407 if (follow_assert_exprs)
2409 while (gimple_assign_single_p (op1_def)
2410 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
2412 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
2413 if (TREE_CODE (op1) != SSA_NAME)
2414 break;
2415 op1_def = SSA_NAME_DEF_STMT (op1);
2419 /* Now look at the defining statement of OP1 to see if it adds
2420 or subtracts a nonzero constant from another operand. */
2421 if (op1_def
2422 && is_gimple_assign (op1_def)
2423 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
2424 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
2425 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
2427 tree target = gimple_assign_rhs1 (op1_def);
2429 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
2430 for one where TARGET appears on the RHS. */
2431 if (follow_assert_exprs)
2433 /* Now see if that "other operand" is op0, following the chain
2434 of ASSERT_EXPRs if necessary. */
2435 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
2436 while (op0 != target
2437 && gimple_assign_single_p (op0_def)
2438 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
2440 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
2441 if (TREE_CODE (op0) != SSA_NAME)
2442 break;
2443 op0_def = SSA_NAME_DEF_STMT (op0);
2447 /* If we did not find our target SSA_NAME, then this is not
2448 an overflow test. */
2449 if (op0 != target)
2450 return false;
2452 tree type = TREE_TYPE (op0);
2453 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
2454 tree inc = gimple_assign_rhs2 (op1_def);
2455 if (reversed)
2456 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
2457 else
2458 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
2459 return true;
2462 return false;
2465 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2466 OP1's defining statement to see if it ultimately has the form
2467 OP0 CODE (OP0 PLUS INTEGER_CST)
2469 If so, return TRUE indicating this is an overflow test and store into
2470 *NEW_CST an updated constant that can be used in a narrowed range test.
2472 These statements are left as-is in the IL to facilitate discovery of
2473 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
2474 the alternate range representation is often useful within VRP. */
2476 bool
2477 overflow_comparison_p (tree_code code, tree name, tree val,
2478 bool use_equiv_p, tree *new_cst)
2480 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
2481 return true;
2482 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
2483 use_equiv_p, true, new_cst);
2487 /* Try to register an edge assertion for SSA name NAME on edge E for
2488 the condition COND_OP0 COND_CODE COND_OP1, queueing the assertions
2489 into ASSERTS. Invert the condition if INVERT is true. */
2491 static void
2492 register_edge_assert_for_2 (tree name, edge e,
2493 enum tree_code cond_code,
2494 tree cond_op0, tree cond_op1, bool invert,
2495 vec<assert_info> &asserts)
2497 tree val;
2498 enum tree_code comp_code;
2500 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2501 cond_op0,
2502 cond_op1,
2503 invert, &comp_code, &val))
2504 return;
2506 /* Queue the assert. */
2507 tree x;
2508 if (overflow_comparison_p (comp_code, name, val, false, &x))
2510 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
2511 ? GT_EXPR : LE_EXPR);
2512 add_assert_info (asserts, name, name, new_code, x);
2514 add_assert_info (asserts, name, name, comp_code, val);
2516 /* In the case of NAME <= CST and NAME being defined as
2517 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
2518 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
2519 This catches range and anti-range tests. */
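/* As a worked example (hypothetical, with unsigned char arithmetic):
   NAME = NAME2 + 246 (i.e. NAME2 - 10) together with NAME <= 19
   encodes the classic range check 10 <= NAME2 <= 29, while NAME > 19
   encodes the corresponding anti-range.  */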
2520 if ((comp_code == LE_EXPR
2521 || comp_code == GT_EXPR)
2522 && TREE_CODE (val) == INTEGER_CST
2523 && TYPE_UNSIGNED (TREE_TYPE (val)))
2525 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2526 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
2528 /* Extract CST2 from the (optional) addition. */
2529 if (is_gimple_assign (def_stmt)
2530 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
2532 name2 = gimple_assign_rhs1 (def_stmt);
2533 cst2 = gimple_assign_rhs2 (def_stmt);
2534 if (TREE_CODE (name2) == SSA_NAME
2535 && TREE_CODE (cst2) == INTEGER_CST)
2536 def_stmt = SSA_NAME_DEF_STMT (name2);
2539 /* Extract NAME2 from the (optional) sign-changing cast. */
2540 if (gimple_assign_cast_p (def_stmt))
2542 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
2543 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2544 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
2545 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
2546 name3 = gimple_assign_rhs1 (def_stmt);
2549 /* If name3 is used later, create an ASSERT_EXPR for it. */
2550 if (name3 != NULL_TREE
2551 && TREE_CODE (name3) == SSA_NAME
2552 && (cst2 == NULL_TREE
2553 || TREE_CODE (cst2) == INTEGER_CST)
2554 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
2556 tree tmp;
2558 /* Build an expression for the range test. */
2559 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
2560 if (cst2 != NULL_TREE)
2561 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2563 if (dump_file)
2565 fprintf (dump_file, "Adding assert for ");
2566 print_generic_expr (dump_file, name3);
2567 fprintf (dump_file, " from ");
2568 print_generic_expr (dump_file, tmp);
2569 fprintf (dump_file, "\n");
2572 add_assert_info (asserts, name3, tmp, comp_code, val);
2575 /* If name2 is used later, create an ASSERT_EXPR for it. */
2576 if (name2 != NULL_TREE
2577 && TREE_CODE (name2) == SSA_NAME
2578 && TREE_CODE (cst2) == INTEGER_CST
2579 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
2581 tree tmp;
2583 /* Build an expression for the range test. */
2584 tmp = name2;
2585 if (TREE_TYPE (name) != TREE_TYPE (name2))
2586 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
2587 if (cst2 != NULL_TREE)
2588 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2590 if (dump_file)
2592 fprintf (dump_file, "Adding assert for ");
2593 print_generic_expr (dump_file, name2);
2594 fprintf (dump_file, " from ");
2595 print_generic_expr (dump_file, tmp);
2596 fprintf (dump_file, "\n");
2599 add_assert_info (asserts, name2, tmp, comp_code, val);
2603 /* In the case of post-in/decrement tests like if (i++) ... and uses
2604 of the in/decremented value on the edge, the extra name we want to
2605 assert for is not on the def chain of the compared name. Instead
2606 it is in the set of use stmts.
2607 Similar cases happen for conversions that were simplified through
2608 fold_{sign_changed,widened}_comparison. */
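/* For instance (with hypothetical SSA names): given if (i_1 != 0) and
   a use i_2 = i_1 + 1 computed in a block dominating the edge source,
   the loop below queues the derived assertion i_2 != 1, since adding
   1 to both sides preserves the (in)equality.  */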
2609 if ((comp_code == NE_EXPR
2610 || comp_code == EQ_EXPR)
2611 && TREE_CODE (val) == INTEGER_CST)
2613 imm_use_iterator ui;
2614 gimple *use_stmt;
2615 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
2617 if (!is_gimple_assign (use_stmt))
2618 continue;
2620 /* Cut off to use-stmts that are dominating the predecessor. */
2621 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
2622 continue;
2624 tree name2 = gimple_assign_lhs (use_stmt);
2625 if (TREE_CODE (name2) != SSA_NAME)
2626 continue;
2628 enum tree_code code = gimple_assign_rhs_code (use_stmt);
2629 tree cst;
2630 if (code == PLUS_EXPR
2631 || code == MINUS_EXPR)
2633 cst = gimple_assign_rhs2 (use_stmt);
2634 if (TREE_CODE (cst) != INTEGER_CST)
2635 continue;
2636 cst = int_const_binop (code, val, cst);
2638 else if (CONVERT_EXPR_CODE_P (code))
2640 /* For truncating conversions we cannot record
2641 an inequality. */
2642 if (comp_code == NE_EXPR
2643 && (TYPE_PRECISION (TREE_TYPE (name2))
2644 < TYPE_PRECISION (TREE_TYPE (name))))
2645 continue;
2646 cst = fold_convert (TREE_TYPE (name2), val);
2648 else
2649 continue;
2651 if (TREE_OVERFLOW_P (cst))
2652 cst = drop_tree_overflow (cst);
2653 add_assert_info (asserts, name2, name2, comp_code, cst);
2657 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
2658 && TREE_CODE (val) == INTEGER_CST)
2660 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2661 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
2662 tree val2 = NULL_TREE;
2663 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
2664 wide_int mask = wi::zero (prec);
2665 unsigned int nprec = prec;
2666 enum tree_code rhs_code = ERROR_MARK;
2668 if (is_gimple_assign (def_stmt))
2669 rhs_code = gimple_assign_rhs_code (def_stmt);
2671 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
2672 assert that A != CST1 -+ CST2. */
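/* E.g. (hypothetical SSA names): from NAME = a_1 + 3 and NAME != 10
   the code below queues a_1 != 7; from NAME = a_1 - 3 and NAME == 10
   it would queue a_1 == 13.  */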
2673 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2674 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
2676 tree op0 = gimple_assign_rhs1 (def_stmt);
2677 tree op1 = gimple_assign_rhs2 (def_stmt);
2678 if (TREE_CODE (op0) == SSA_NAME
2679 && TREE_CODE (op1) == INTEGER_CST)
2681 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
2682 ? MINUS_EXPR : PLUS_EXPR);
2683 op1 = int_const_binop (reverse_op, val, op1);
2684 if (TREE_OVERFLOW (op1))
2685 op1 = drop_tree_overflow (op1);
2686 add_assert_info (asserts, op0, op0, comp_code, op1);
2690 /* Add asserts for NAME cmp CST and NAME being defined
2691 as NAME = (int) NAME2. */
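/* A worked example (assuming 32-bit int/unsigned): for NAME = (int) u_2
   and NAME < 0, the code below builds TMP = u_2 + 0x80000000 and queues
   TMP <= 0x7fffffff, which holds exactly when u_2 >= 0x80000000, i.e.
   when (int) u_2 is negative.  */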
2692 if (!TYPE_UNSIGNED (TREE_TYPE (val))
2693 && (comp_code == LE_EXPR || comp_code == LT_EXPR
2694 || comp_code == GT_EXPR || comp_code == GE_EXPR)
2695 && gimple_assign_cast_p (def_stmt))
2697 name2 = gimple_assign_rhs1 (def_stmt);
2698 if (CONVERT_EXPR_CODE_P (rhs_code)
2699 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2700 && TYPE_UNSIGNED (TREE_TYPE (name2))
2701 && prec == TYPE_PRECISION (TREE_TYPE (name2))
2702 && (comp_code == LE_EXPR || comp_code == GT_EXPR
2703 || !tree_int_cst_equal (val,
2704 TYPE_MIN_VALUE (TREE_TYPE (val)))))
2706 tree tmp, cst;
2707 enum tree_code new_comp_code = comp_code;
2709 cst = fold_convert (TREE_TYPE (name2),
2710 TYPE_MIN_VALUE (TREE_TYPE (val)));
2711 /* Build an expression for the range test. */
2712 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
2713 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
2714 fold_convert (TREE_TYPE (name2), val));
2715 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2717 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
2718 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
2719 build_int_cst (TREE_TYPE (name2), 1));
2722 if (dump_file)
2724 fprintf (dump_file, "Adding assert for ");
2725 print_generic_expr (dump_file, name2);
2726 fprintf (dump_file, " from ");
2727 print_generic_expr (dump_file, tmp);
2728 fprintf (dump_file, "\n");
2731 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
2735 /* Add asserts for NAME cmp CST and NAME being defined as
2736 NAME = NAME2 >> CST2.
2738 Extract CST2 from the right shift. */
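/* For illustration (hypothetical 32-bit unsigned values): for
   NAME = x_1 >> 4 and NAME == 5, MASK becomes 0xf and VAL2 becomes
   0x50, so the EQ_EXPR handling below queues (x_1 - 0x50) <= 0xf,
   i.e. the range 0x50 <= x_1 <= 0x5f.  */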
2739 if (rhs_code == RSHIFT_EXPR)
2741 name2 = gimple_assign_rhs1 (def_stmt);
2742 cst2 = gimple_assign_rhs2 (def_stmt);
2743 if (TREE_CODE (name2) == SSA_NAME
2744 && tree_fits_uhwi_p (cst2)
2745 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2746 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
2747 && type_has_mode_precision_p (TREE_TYPE (val)))
2749 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
2750 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
2753 if (val2 != NULL_TREE
2754 && TREE_CODE (val2) == INTEGER_CST
2755 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
2756 TREE_TYPE (val),
2757 val2, cst2), val))
2759 enum tree_code new_comp_code = comp_code;
2760 tree tmp, new_val;
2762 tmp = name2;
2763 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
2765 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
2767 tree type = build_nonstandard_integer_type (prec, 1);
2768 tmp = build1 (NOP_EXPR, type, name2);
2769 val2 = fold_convert (type, val2);
2771 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
2772 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
2773 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
2775 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2777 wide_int minval
2778 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2779 new_val = val2;
2780 if (minval == wi::to_wide (new_val))
2781 new_val = NULL_TREE;
2783 else
2785 wide_int maxval
2786 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2787 mask |= wi::to_wide (val2);
2788 if (wi::eq_p (mask, maxval))
2789 new_val = NULL_TREE;
2790 else
2791 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
2794 if (new_val)
2796 if (dump_file)
2798 fprintf (dump_file, "Adding assert for ");
2799 print_generic_expr (dump_file, name2);
2800 fprintf (dump_file, " from ");
2801 print_generic_expr (dump_file, tmp);
2802 fprintf (dump_file, "\n");
2805 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
2809 /* Add asserts for NAME cmp CST and NAME being defined as
2810 NAME = NAME2 & CST2.
2812 Extract CST2 from the and.
2814 Also handle
2815 NAME = (unsigned) NAME2;
2816 casts where NAME's type is unsigned and has smaller precision
2817 than NAME2's type as if it were NAME = NAME2 & MASK. */
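/* For instance (illustrative): NAME = (unsigned char) x_1 with a
   32-bit x_1 is handled below exactly like NAME = x_1 & 0xff, with
   CST2 == 0xff and NPREC == 32.  */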
2818 names[0] = NULL_TREE;
2819 names[1] = NULL_TREE;
2820 cst2 = NULL_TREE;
2821 if (rhs_code == BIT_AND_EXPR
2822 || (CONVERT_EXPR_CODE_P (rhs_code)
2823 && INTEGRAL_TYPE_P (TREE_TYPE (val))
2824 && TYPE_UNSIGNED (TREE_TYPE (val))
2825 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2826 > prec))
2828 name2 = gimple_assign_rhs1 (def_stmt);
2829 if (rhs_code == BIT_AND_EXPR)
2830 cst2 = gimple_assign_rhs2 (def_stmt);
2831 else
2833 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
2834 nprec = TYPE_PRECISION (TREE_TYPE (name2));
2836 if (TREE_CODE (name2) == SSA_NAME
2837 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2838 && TREE_CODE (cst2) == INTEGER_CST
2839 && !integer_zerop (cst2)
2840 && (nprec > 1
2841 || TYPE_UNSIGNED (TREE_TYPE (val))))
2843 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
2844 if (gimple_assign_cast_p (def_stmt2))
2846 names[1] = gimple_assign_rhs1 (def_stmt2);
2847 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
2848 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
2849 || (TYPE_PRECISION (TREE_TYPE (name2))
2850 != TYPE_PRECISION (TREE_TYPE (names[1]))))
2851 names[1] = NULL_TREE;
2853 names[0] = name2;
2856 if (names[0] || names[1])
2858 wide_int minv, maxv, valv, cst2v;
2859 wide_int tem, sgnbit;
2860 bool valid_p = false, valn, cst2n;
2861 enum tree_code ccode = comp_code;
2863 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
2864 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
2865 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
2866 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
2867 /* If CST2 doesn't have most significant bit set,
2868 but VAL is negative, we have a comparison like
2869 if ((x & 0x123) > -4), which is always true. Just give up. */
2870 if (!cst2n && valn)
2871 ccode = ERROR_MARK;
2872 if (cst2n)
2873 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2874 else
2875 sgnbit = wi::zero (nprec);
2876 minv = valv & cst2v;
2877 switch (ccode)
2879 case EQ_EXPR:
2880 /* Minimum unsigned value for equality is VAL & CST2
2881 (should be equal to VAL, otherwise we probably should
2882 have folded the comparison into false) and
2883 maximum unsigned value is VAL | ~CST2. */
2884 maxv = valv | ~cst2v;
2885 valid_p = true;
2886 break;
2888 case NE_EXPR:
2889 tem = valv | ~cst2v;
2890 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
2891 if (valv == 0)
2893 cst2n = false;
2894 sgnbit = wi::zero (nprec);
2895 goto gt_expr;
2897 /* If (VAL | ~CST2) is all ones, handle it as
2898 (X & CST2) < VAL. */
2899 if (tem == -1)
2901 cst2n = false;
2902 valn = false;
2903 sgnbit = wi::zero (nprec);
2904 goto lt_expr;
2906 if (!cst2n && wi::neg_p (cst2v))
2907 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2908 if (sgnbit != 0)
2910 if (valv == sgnbit)
2912 cst2n = true;
2913 valn = true;
2914 goto gt_expr;
2916 if (tem == wi::mask (nprec - 1, false, nprec))
2918 cst2n = true;
2919 goto lt_expr;
2921 if (!cst2n)
2922 sgnbit = wi::zero (nprec);
2924 break;
2926 case GE_EXPR:
2927 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
2928 is VAL and maximum unsigned value is ~0. For signed
2929 comparison, if CST2 doesn't have most significant bit
2930 set, handle it similarly. If CST2 has MSB set,
2931 the minimum is the same, and maximum is ~0U/2. */
2932 if (minv != valv)
2934 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
2935 VAL. */
2936 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2937 if (minv == valv)
2938 break;
2940 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2941 valid_p = true;
2942 break;
2944 case GT_EXPR:
2945 gt_expr:
2946 /* Find the smallest MINV where MINV > VAL
2947 && (MINV & CST2) == MINV, if any. If VAL is signed and
2948 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
2949 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2950 if (minv == valv)
2951 break;
2952 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2953 valid_p = true;
2954 break;
2956 case LE_EXPR:
2957 /* Minimum unsigned value for <= is 0 and maximum
2958 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
2959 Otherwise, find smallest VAL2 where VAL2 > VAL
2960 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2961 as maximum.
2962 For signed comparison, if CST2 doesn't have most
2963 significant bit set, handle it similarly. If CST2 has
2964 MSB set, the maximum is the same and minimum is INT_MIN. */
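/* A small worked example (hypothetical 4-bit unsigned values): for
   (x & 0b1100) <= 5, MINV == 4 != VALV, masked_increment yields 8,
   so MAXV == (8 - 1) | ~0b1100 == 7 and we can record x <= 7, which
   indeed captures x & 0b1100 being 0 or 4.  */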
2965 if (minv == valv)
2966 maxv = valv;
2967 else
2969 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2970 if (maxv == valv)
2971 break;
2972 maxv -= 1;
2974 maxv |= ~cst2v;
2975 minv = sgnbit;
2976 valid_p = true;
2977 break;
2979 case LT_EXPR:
2980 lt_expr:
2981 /* Minimum unsigned value for < is 0 and maximum
2982 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
2983 Otherwise, find smallest VAL2 where VAL2 > VAL
2984 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2985 as maximum.
2986 For signed comparison, if CST2 doesn't have most
2987 significant bit set, handle it similarly. If CST2 has
2988 MSB set, the maximum is the same and minimum is INT_MIN. */
2989 if (minv == valv)
2991 if (valv == sgnbit)
2992 break;
2993 maxv = valv;
2995 else
2997 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2998 if (maxv == valv)
2999 break;
3001 maxv -= 1;
3002 maxv |= ~cst2v;
3003 minv = sgnbit;
3004 valid_p = true;
3005 break;
3007 default:
3008 break;
3010 if (valid_p
3011 && (maxv - minv) != -1)
3013 tree tmp, new_val, type;
3014 int i;
3016 for (i = 0; i < 2; i++)
3017 if (names[i])
3019 wide_int maxv2 = maxv;
3020 tmp = names[i];
3021 type = TREE_TYPE (names[i]);
3022 if (!TYPE_UNSIGNED (type))
3024 type = build_nonstandard_integer_type (nprec, 1);
3025 tmp = build1 (NOP_EXPR, type, names[i]);
3027 if (minv != 0)
3029 tmp = build2 (PLUS_EXPR, type, tmp,
3030 wide_int_to_tree (type, -minv));
3031 maxv2 = maxv - minv;
3033 new_val = wide_int_to_tree (type, maxv2);
3035 if (dump_file)
3037 fprintf (dump_file, "Adding assert for ");
3038 print_generic_expr (dump_file, names[i]);
3039 fprintf (dump_file, " from ");
3040 print_generic_expr (dump_file, tmp);
3041 fprintf (dump_file, "\n");
3044 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3051 /* OP is an operand of a truth value expression which is known to have
3052 a particular value. Register any asserts for OP and for any
3053 operands in OP's defining statement.
3055 If CODE is EQ_EXPR, then we want to register OP is zero (false);
3056 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
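/* For example (hypothetical SSA names): for x_3 = a_1 > 5 and CODE
   NE_EXPR, this records x_3 != 0 and then recurses into the defining
   comparison via register_edge_assert_for_2, also recording a_1 > 5.  */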
3058 static void
3059 register_edge_assert_for_1 (tree op, enum tree_code code,
3060 edge e, vec<assert_info> &asserts)
3062 gimple *op_def;
3063 tree val;
3064 enum tree_code rhs_code;
3066 /* We only care about SSA_NAMEs. */
3067 if (TREE_CODE (op) != SSA_NAME)
3068 return;
3070 /* We know that OP will have a zero or nonzero value. */
3071 val = build_int_cst (TREE_TYPE (op), 0);
3072 add_assert_info (asserts, op, op, code, val);
3074 /* Now look at how OP is set. If it's set from a comparison,
3075 a truth operation or some bit operations, then we may be able
3076 to register information about the operands of that assignment. */
3077 op_def = SSA_NAME_DEF_STMT (op);
3078 if (gimple_code (op_def) != GIMPLE_ASSIGN)
3079 return;
3081 rhs_code = gimple_assign_rhs_code (op_def);
3083 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3085 bool invert = (code == EQ_EXPR ? true : false);
3086 tree op0 = gimple_assign_rhs1 (op_def);
3087 tree op1 = gimple_assign_rhs2 (op_def);
3089 if (TREE_CODE (op0) == SSA_NAME)
3090 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3091 if (TREE_CODE (op1) == SSA_NAME)
3092 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3094 else if ((code == NE_EXPR
3095 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3096 || (code == EQ_EXPR
3097 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3099 /* Recurse on each operand. */
3100 tree op0 = gimple_assign_rhs1 (op_def);
3101 tree op1 = gimple_assign_rhs2 (op_def);
3102 if (TREE_CODE (op0) == SSA_NAME
3103 && has_single_use (op0))
3104 register_edge_assert_for_1 (op0, code, e, asserts);
3105 if (TREE_CODE (op1) == SSA_NAME
3106 && has_single_use (op1))
3107 register_edge_assert_for_1 (op1, code, e, asserts);
3109 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3110 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3112 /* Recurse, flipping CODE. */
3113 code = invert_tree_comparison (code, false);
3114 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3116 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3118 /* Recurse through the copy. */
3119 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3121 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3123 /* Recurse through the type conversion, unless it is a narrowing
3124 conversion or conversion from non-integral type. */
3125 tree rhs = gimple_assign_rhs1 (op_def);
3126 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3127 && (TYPE_PRECISION (TREE_TYPE (rhs))
3128 <= TYPE_PRECISION (TREE_TYPE (op))))
3129 register_edge_assert_for_1 (rhs, code, e, asserts);
3133 /* Check if comparison
3134 NAME COND_OP INTEGER_CST
3135 has the form
3136 (X & 11...100..0) COND_OP XX...X00...0
3137 Such a comparison can yield assertions like
3138 X >= XX...X00...0
3139 X <= XX...X11...1
3140 in case of COND_OP being EQ_EXPR or
3141 X < XX...X00...0
3142 X > XX...X11...1
3143 in case of NE_EXPR. */
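/* E.g. (illustrative): (x & 0xf0) == 0x30 yields *LOW = 0x30 with
   GE_EXPR and *HIGH = 0x3f with LE_EXPR, i.e. the range [0x30, 0x3f];
   for NE_EXPR an assertion is only produced when one of the two bound
   checks is known to always be false.  */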
3145 static bool
3146 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3147 tree *new_name, tree *low, enum tree_code *low_code,
3148 tree *high, enum tree_code *high_code)
3150 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3152 if (!is_gimple_assign (def_stmt)
3153 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3154 return false;
3156 tree t = gimple_assign_rhs1 (def_stmt);
3157 tree maskt = gimple_assign_rhs2 (def_stmt);
3158 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3159 return false;
3161 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3162 wide_int inv_mask = ~mask;
3163 /* A trivial mask (all zeros or all ones) should have been folded away by now. */
3164 if (mask == 0 || inv_mask == 0)
3165 return false;
3167 /* Assume VALT is INTEGER_CST. */
3168 wi::tree_to_wide_ref val = wi::to_wide (valt);
3170 if ((inv_mask & (inv_mask + 1)) != 0
3171 || (val & mask) != val)
3172 return false;
3174 bool is_range = cond_code == EQ_EXPR;
3176 tree type = TREE_TYPE (t);
3177 wide_int min = wi::min_value (type),
3178 max = wi::max_value (type);
3180 if (is_range)
3182 *low_code = val == min ? ERROR_MARK : GE_EXPR;
3183 *high_code = val == max ? ERROR_MARK : LE_EXPR;
3185 else
3187 /* We can still generate an assertion if one of the alternatives
3188 is known to always be false. */
3189 if (val == min)
3191 *low_code = (enum tree_code) 0;
3192 *high_code = GT_EXPR;
3194 else if ((val | inv_mask) == max)
3196 *low_code = LT_EXPR;
3197 *high_code = (enum tree_code) 0;
3199 else
3200 return false;
3203 *new_name = t;
3204 *low = wide_int_to_tree (type, val);
3205 *high = wide_int_to_tree (type, val | inv_mask);
3207 return true;
3210 /* Try to register edge assertions for SSA name NAME on edge E for
3211 the condition COND_OP0 COND_CODE COND_OP1 contributing to the
3212 conditional jump, collecting them into ASSERTS. */
3214 void
3215 register_edge_assert_for (tree name, edge e,
3216 enum tree_code cond_code, tree cond_op0,
3217 tree cond_op1, vec<assert_info> &asserts)
3219 tree val;
3220 enum tree_code comp_code;
3221 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3223 /* Do not attempt to infer anything in names that flow through
3224 abnormal edges. */
3225 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3226 return;
3228 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3229 cond_op0, cond_op1,
3230 is_else_edge,
3231 &comp_code, &val))
3232 return;
3234 /* Register ASSERT_EXPRs for name. */
3235 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3236 cond_op1, is_else_edge, asserts);
3239 /* If COND is effectively an equality test of an SSA_NAME against
3240 the value zero or one, then we may be able to assert values
3241 for SSA_NAMEs which flow into COND. */
3243 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3244 statement of NAME we can assert both operands of the BIT_AND_EXPR
3245 have nonzero value. */
3246 if (((comp_code == EQ_EXPR && integer_onep (val))
3247 || (comp_code == NE_EXPR && integer_zerop (val))))
3249 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3251 if (is_gimple_assign (def_stmt)
3252 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3254 tree op0 = gimple_assign_rhs1 (def_stmt);
3255 tree op1 = gimple_assign_rhs2 (def_stmt);
3256 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3257 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3261 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3262 statement of NAME we can assert both operands of the BIT_IOR_EXPR
3263 have zero value. */
3264 if (((comp_code == EQ_EXPR && integer_zerop (val))
3265 || (comp_code == NE_EXPR && integer_onep (val))))
3267 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3269 /* For BIT_IOR_EXPR, both operands necessarily have zero value
3270 only if NAME == 0, or if the type precision is one. */
3271 if (is_gimple_assign (def_stmt)
3272 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3273 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3274 || comp_code == EQ_EXPR)))
3276 tree op0 = gimple_assign_rhs1 (def_stmt);
3277 tree op1 = gimple_assign_rhs2 (def_stmt);
3278 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3279 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3283 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
3284 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3285 && TREE_CODE (val) == INTEGER_CST)
3287 enum tree_code low_code, high_code;
3288 tree low, high;
3289 if (is_masked_range_test (name, val, comp_code, &name, &low,
3290 &low_code, &high, &high_code))
3292 if (low_code != ERROR_MARK)
3293 register_edge_assert_for_2 (name, e, low_code, name,
3294 low, /*invert*/false, asserts);
3295 if (high_code != ERROR_MARK)
3296 register_edge_assert_for_2 (name, e, high_code, name,
3297 high, /*invert*/false, asserts);
3302 /* Finish found ASSERTS for E and register them at GSI. */
3304 static void
3305 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
3306 vec<assert_info> &asserts)
3308 for (unsigned i = 0; i < asserts.length (); ++i)
3309 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3310 reachable from E. */
3311 if (live_on_edge (e, asserts[i].name))
3312 register_new_assert_for (asserts[i].name, asserts[i].expr,
3313 asserts[i].comp_code, asserts[i].val,
3314 NULL, e, gsi);
3319 /* Determine whether the outgoing edges of BB should receive an
3320 ASSERT_EXPR for each of the operands of BB's LAST statement.
3321 The last statement of BB must be a COND_EXPR.
3323 If any of the sub-graphs rooted at BB have an interesting use of
3324 the predicate operands, an assert location node is added to the
3325 list of assertions for the corresponding operands. */
3327 static void
3328 find_conditional_asserts (basic_block bb, gcond *last)
3330 gimple_stmt_iterator bsi;
3331 tree op;
3332 edge_iterator ei;
3333 edge e;
3334 ssa_op_iter iter;
3336 bsi = gsi_for_stmt (last);
3338 /* Look for uses of the operands in each of the sub-graphs
3339 rooted at BB. We need to check each of the outgoing edges
3340 separately, so that we know what kind of ASSERT_EXPR to
3341 insert. */
3342 FOR_EACH_EDGE (e, ei, bb->succs)
3344 if (e->dest == bb)
3345 continue;
3347 /* Register the necessary assertions for each operand in the
3348 conditional predicate. */
3349 auto_vec<assert_info, 8> asserts;
3350 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3351 register_edge_assert_for (op, e,
3352 gimple_cond_code (last),
3353 gimple_cond_lhs (last),
3354 gimple_cond_rhs (last), asserts);
3355 finish_register_edge_assert_for (e, bsi, asserts);
3359 struct case_info
3361 tree expr;
3362 basic_block bb;
3365 /* Compare two case labels, sorting first by the destination bb index
3366 and then by the case value. */
3368 static int
3369 compare_case_labels (const void *p1, const void *p2)
3371 const struct case_info *ci1 = (const struct case_info *) p1;
3372 const struct case_info *ci2 = (const struct case_info *) p2;
3373 int idx1 = ci1->bb->index;
3374 int idx2 = ci2->bb->index;
3376 if (idx1 < idx2)
3377 return -1;
3378 else if (idx1 == idx2)
3380 /* Make sure the default label is first in a group. */
3381 if (!CASE_LOW (ci1->expr))
3382 return -1;
3383 else if (!CASE_LOW (ci2->expr))
3384 return 1;
3385 else
3386 return tree_int_cst_compare (CASE_LOW (ci1->expr),
3387 CASE_LOW (ci2->expr));
3389 else
3390 return 1;
3393 /* Determine whether the outgoing edges of BB should receive an
3394 ASSERT_EXPR for each of the operands of BB's LAST statement.
3395 The last statement of BB must be a SWITCH_EXPR.
3397 If any of the sub-graphs rooted at BB have an interesting use of
3398 the predicate operands, an assert location node is added to the
3399 list of assertions for the corresponding operands. */
3401 static void
3402 find_switch_asserts (basic_block bb, gswitch *last)
3404 gimple_stmt_iterator bsi;
3405 tree op;
3406 edge e;
3407 struct case_info *ci;
3408 size_t n = gimple_switch_num_labels (last);
3409 #if GCC_VERSION >= 4000
3410 unsigned int idx;
3411 #else
3412 /* Work around GCC 3.4 bug (PR 37086). */
3413 volatile unsigned int idx;
3414 #endif
3416 bsi = gsi_for_stmt (last);
3417 op = gimple_switch_index (last);
3418 if (TREE_CODE (op) != SSA_NAME)
3419 return;
3421 /* Build a vector of case labels sorted by destination label. */
3422 ci = XNEWVEC (struct case_info, n);
3423 for (idx = 0; idx < n; ++idx)
3425 ci[idx].expr = gimple_switch_label (last, idx);
3426 ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
3428 edge default_edge = find_edge (bb, ci[0].bb);
3429 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3431 for (idx = 0; idx < n; ++idx)
3433 tree min, max;
3434 tree cl = ci[idx].expr;
3435 basic_block cbb = ci[idx].bb;
3437 min = CASE_LOW (cl);
3438 max = CASE_HIGH (cl);
3440 /* If there are multiple case labels with the same destination,
3441 we need to combine them into a single value range for the edge. */
3442 if (idx + 1 < n && cbb == ci[idx + 1].bb)
3444 /* Skip labels until the last of the group. */
3445 do {
3446 ++idx;
3447 } while (idx < n && cbb == ci[idx].bb);
3448 --idx;
3450 /* Pick up the maximum of the case label range. */
3451 if (CASE_HIGH (ci[idx].expr))
3452 max = CASE_HIGH (ci[idx].expr);
3453 else
3454 max = CASE_LOW (ci[idx].expr);
3457 /* Can't extract a useful assertion out of a range that includes the
3458 default label. */
3459 if (min == NULL_TREE)
3460 continue;
3462 /* Find the edge to register the assert expr on. */
3463 e = find_edge (bb, cbb);
3465 /* Register the necessary assertions for the operand in the
3466 SWITCH_EXPR. */
3467 auto_vec<assert_info, 8> asserts;
3468 register_edge_assert_for (op, e,
3469 max ? GE_EXPR : EQ_EXPR,
3470 op, fold_convert (TREE_TYPE (op), min),
3471 asserts);
3472 if (max)
3473 register_edge_assert_for (op, e, LE_EXPR, op,
3474 fold_convert (TREE_TYPE (op), max),
3475 asserts);
3476 finish_register_edge_assert_for (e, bsi, asserts);
3479 XDELETEVEC (ci);
3481 if (!live_on_edge (default_edge, op))
3482 return;
3484 /* Now register, along the default edge, assertions that correspond to the
3485 anti-range of each label. */
3486 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
3487 if (insertion_limit == 0)
3488 return;
3490 /* We can't do this if the default case shares a label with another case. */
3491 tree default_cl = gimple_switch_default_label (last);
3492 for (idx = 1; idx < n; idx++)
3494 tree min, max;
3495 tree cl = gimple_switch_label (last, idx);
3496 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3497 continue;
3499 min = CASE_LOW (cl);
3500 max = CASE_HIGH (cl);
3502 /* Combine contiguous case ranges to reduce the number of assertions
3503 to insert. */
3504 for (idx = idx + 1; idx < n; idx++)
3506 tree next_min, next_max;
3507 tree next_cl = gimple_switch_label (last, idx);
3508 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3509 break;
3511 next_min = CASE_LOW (next_cl);
3512 next_max = CASE_HIGH (next_cl);
3514 wide_int difference = (wi::to_wide (next_min)
3515 - wi::to_wide (max ? max : min));
3516 if (wi::eq_p (difference, 1))
3517 max = next_max ? next_max : next_min;
3518 else
3519 break;
3521 idx--;
3523 if (max == NULL_TREE)
3525 /* Register the assertion OP != MIN. */
3526 auto_vec<assert_info, 8> asserts;
3527 min = fold_convert (TREE_TYPE (op), min);
3528 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3529 asserts);
3530 finish_register_edge_assert_for (default_edge, bsi, asserts);
3532 else
3534 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3535 which will give OP the anti-range ~[MIN,MAX]. */
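/* For example (illustrative): with MIN == 3 and MAX == 7 the
   registered assertion (unsigned) OP - 3 > 4 holds exactly when
   OP < 3 or OP > 7, i.e. when OP is in ~[3, 7].  */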
3536 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3537 min = fold_convert (TREE_TYPE (uop), min);
3538 max = fold_convert (TREE_TYPE (uop), max);
3540 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3541 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3542 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3543 NULL, default_edge, bsi);
3546 if (--insertion_limit == 0)
3547 break;
3552 /* Traverse all the statements in block BB looking for statements that
3553 may generate useful assertions for the SSA names in their operands.
3554 If a statement produces a useful assertion A for name N_i, then the
3555 list of assertions already generated for N_i is scanned to
3556 determine if A is actually needed.
3558 If N_i already had the assertion A at a location dominating the
3559 current location, then nothing needs to be done. Otherwise, the
3560 new location for A is recorded instead.
3562 1- For every statement S in BB, all the variables used by S are
3563 added to bitmap FOUND_IN_SUBGRAPH.
3565 2- If statement S uses an operand N in a way that exposes a known
3566 value range for N, then if N was not already generated by an
3567 ASSERT_EXPR, create a new assert location for N. For instance,
3568 if N is a pointer and the statement dereferences it, we can
3569 assume that N is not NULL.
3571 3- COND_EXPRs are a special case of #2. We can derive range
3572 information from the predicate but need to insert different
3573 ASSERT_EXPRs for each of the sub-graphs rooted at the
3574 conditional block. If the last statement of BB is a conditional
3575 expression of the form 'X op Y', then
3577 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3579 b) If the conditional is the only entry point to the sub-graph
3580 corresponding to the THEN_CLAUSE, recurse into it. On
3581 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3582 an ASSERT_EXPR is added for the corresponding variable.
3584 c) Repeat step (b) on the ELSE_CLAUSE.
3586 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3588 For instance,
3590 if (a == 9)
3591 b = a;
3592 else
3593 b = c + 1;
3595 In this case, an assertion on the THEN clause is useful to
3596 determine that 'a' is always 9 on that edge. However, an assertion
3597 on the ELSE clause would be unnecessary.
3599 4- If BB does not end in a conditional expression, then we recurse
3600 into BB's dominator children.
3602 At the end of the recursive traversal, every SSA name will have a
3603 list of locations where ASSERT_EXPRs should be added. When a new
3604 location for name N is found, it is registered by calling
3605 register_new_assert_for. That function keeps track of all the
3606 registered assertions to prevent adding unnecessary assertions.
3607 For instance, if a pointer P_4 is dereferenced more than once in a
3608 dominator tree, only the location dominating all the dereference of
3609 P_4 will receive an ASSERT_EXPR. */
3611 static void
3612 find_assert_locations_1 (basic_block bb, sbitmap live)
3614 gimple *last;
3616 last = last_stmt (bb);
3618 /* If BB's last statement is a conditional statement involving integer
3619 operands, determine if we need to add ASSERT_EXPRs. */
3620 if (last
3621 && gimple_code (last) == GIMPLE_COND
3622 && !fp_predicate (last)
3623 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3624 find_conditional_asserts (bb, as_a <gcond *> (last));
3626 /* If BB's last statement is a switch statement involving integer
3627 operands, determine if we need to add ASSERT_EXPRs. */
3628 if (last
3629 && gimple_code (last) == GIMPLE_SWITCH
3630 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3631 find_switch_asserts (bb, as_a <gswitch *> (last));
3633 /* Traverse all the statements in BB marking used names and looking
3634 for statements that may infer assertions for their used operands. */
3635 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3636 gsi_prev (&si))
3638 gimple *stmt;
3639 tree op;
3640 ssa_op_iter i;
3642 stmt = gsi_stmt (si);
3644 if (is_gimple_debug (stmt))
3645 continue;
3647 /* See if we can derive an assertion for any of STMT's operands. */
3648 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3650 tree value;
3651 enum tree_code comp_code;
3653 /* If op is not live beyond this stmt, do not bother to insert
3654 asserts for it. */
3655 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
3656 continue;
3658 /* If OP is used in such a way that we can infer a value
3659 range for it, and we don't find a previous assertion for
3660 it, create a new assertion location node for OP. */
3661 if (infer_value_range (stmt, op, &comp_code, &value))
3663 /* If we are able to infer a nonzero value range for OP,
3664 then walk backwards through the use-def chain to see if OP
3665 was set via a typecast.
3667 If so, then we can also infer a nonzero value range
3668 for the operand of the NOP_EXPR. */
3669 if (comp_code == NE_EXPR && integer_zerop (value))
3671 tree t = op;
3672 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3674 while (is_gimple_assign (def_stmt)
3675 && CONVERT_EXPR_CODE_P
3676 (gimple_assign_rhs_code (def_stmt))
3677 && TREE_CODE
3678 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3679 && POINTER_TYPE_P
3680 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3682 t = gimple_assign_rhs1 (def_stmt);
3683 def_stmt = SSA_NAME_DEF_STMT (t);
3685 /* Note we want to register the assert for the
3686 operand of the NOP_EXPR after SI, not after the
3687 conversion. */
3688 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3689 register_new_assert_for (t, t, comp_code, value,
3690 bb, NULL, si);
3694 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3698 /* Update live. */
3699 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3700 bitmap_set_bit (live, SSA_NAME_VERSION (op));
3701 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3702 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3705 /* Traverse all PHI nodes in BB, updating live. */
3706 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3707 gsi_next (&si))
3709 use_operand_p arg_p;
3710 ssa_op_iter i;
3711 gphi *phi = si.phi ();
3712 tree res = gimple_phi_result (phi);
3714 if (virtual_operand_p (res))
3715 continue;
3717 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3719 tree arg = USE_FROM_PTR (arg_p);
3720 if (TREE_CODE (arg) == SSA_NAME)
3721 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3724 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3728 /* Do an RPO walk over the function computing SSA name liveness
3729 on-the-fly and deciding on assert expressions to insert. */
3731 static void
3732 find_assert_locations (void)
3734 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3735 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3736 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3737 int rpo_cnt, i;
3739 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3740 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3741 for (i = 0; i < rpo_cnt; ++i)
3742 bb_rpo[rpo[i]] = i;
3744 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3745 the order in which we compute liveness and insert asserts, we would
3746 otherwise fail to insert asserts into the loop latch. */
3747 loop_p loop;
3748 FOR_EACH_LOOP (loop, 0)
3750 i = loop->latch->index;
3751 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3752 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3753 !gsi_end_p (gsi); gsi_next (&gsi))
3755 gphi *phi = gsi.phi ();
3756 if (virtual_operand_p (gimple_phi_result (phi)))
3757 continue;
3758 tree arg = gimple_phi_arg_def (phi, j);
3759 if (TREE_CODE (arg) == SSA_NAME)
3761 if (live[i] == NULL)
3763 live[i] = sbitmap_alloc (num_ssa_names);
3764 bitmap_clear (live[i]);
3766 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
3771 for (i = rpo_cnt - 1; i >= 0; --i)
3773 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3774 edge e;
3775 edge_iterator ei;
3777 if (!live[rpo[i]])
3779 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
3780 bitmap_clear (live[rpo[i]]);
3783 /* Process BB and update the live information with uses in
3784 this block. */
3785 find_assert_locations_1 (bb, live[rpo[i]]);
3787 /* Merge liveness into the predecessor blocks and free it. */
3788 if (!bitmap_empty_p (live[rpo[i]]))
3790 int pred_rpo = i;
3791 FOR_EACH_EDGE (e, ei, bb->preds)
3793 int pred = e->src->index;
3794 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3795 continue;
3797 if (!live[pred])
3799 live[pred] = sbitmap_alloc (num_ssa_names);
3800 bitmap_clear (live[pred]);
3802 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
3804 if (bb_rpo[pred] < pred_rpo)
3805 pred_rpo = bb_rpo[pred];
3808 /* Record the RPO number of the last visited block that needs
3809 live information from this block. */
3810 last_rpo[rpo[i]] = pred_rpo;
3812 else
3814 sbitmap_free (live[rpo[i]]);
3815 live[rpo[i]] = NULL;
3818 /* We can free all successors' live bitmaps if all their
3819 predecessors have been visited already. */
3820 FOR_EACH_EDGE (e, ei, bb->succs)
3821 if (last_rpo[e->dest->index] == i
3822 && live[e->dest->index])
3824 sbitmap_free (live[e->dest->index]);
3825 live[e->dest->index] = NULL;
3829 XDELETEVEC (rpo);
3830 XDELETEVEC (bb_rpo);
3831 XDELETEVEC (last_rpo);
3832 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
3833 if (live[i])
3834 sbitmap_free (live[i]);
3835 XDELETEVEC (live);
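/* Illustrative note (not part of the original source): for a diamond
   CFG  A -> { B, C } -> D  the RPO is A, B, C, D, so the loop above
   visits D first.  Names used in D are IOR-ed into live[B] and
   live[C], and last_rpo for D records the RPO index of B, the
   earliest predecessor in RPO:

     live[B] |= live[D];   live[C] |= live[D];
     last_rpo[D's index] = bb_rpo[B];

   live[D] is then freed only once the walk reaches B.  */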
3838 /* Create an ASSERT_EXPR for NAME and insert it in the location
3839 indicated by LOC. Return true if we made any edge insertions. */
3841 static bool
3842 process_assert_insertions_for (tree name, assert_locus *loc)
3844 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3845 gimple *stmt;
3846 tree cond;
3847 gimple *assert_stmt;
3848 edge_iterator ei;
3849 edge e;
3851 /* If we have X <=> X do not insert an assert expr for that. */
3852 if (loc->expr == loc->val)
3853 return false;
3855 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3856 assert_stmt = build_assert_expr_for (cond, name);
3857 if (loc->e)
3859 /* We have been asked to insert the assertion on an edge. This
3860 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3861 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3862 || (gimple_code (gsi_stmt (loc->si))
3863 == GIMPLE_SWITCH));
3865 gsi_insert_on_edge (loc->e, assert_stmt);
3866 return true;
3869 /* If the stmt iterator points at the end then this is an insertion
3870 at the beginning of a block. */
3871 if (gsi_end_p (loc->si))
3873 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3874 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3875 return false;
3878 /* Otherwise, we can insert right after LOC->SI, provided the
3879 statement is not the last statement in the block. */
3880 stmt = gsi_stmt (loc->si);
3881 if (!stmt_ends_bb_p (stmt))
3883 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3884 return false;
3887 /* If STMT must be the last statement in BB, we can only insert new
3888 assertions on the non-abnormal edge out of BB. Note that since
3889 STMT is not control flow, there may only be one non-abnormal/eh edge
3890 out of BB. */
3891 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3892 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3894 gsi_insert_on_edge (e, assert_stmt);
3895 return true;
3898 gcc_unreachable ();
3901 /* Qsort helper for sorting assert locations. If stable is true, don't
3902 use iterative_hash_expr because it can be unstable for -fcompare-debug;
3903 on the other hand, some pointers might then be NULL. */
3905 template <bool stable>
3906 static int
3907 compare_assert_loc (const void *pa, const void *pb)
3909 assert_locus * const a = *(assert_locus * const *)pa;
3910 assert_locus * const b = *(assert_locus * const *)pb;
3912 /* If stable, some asserts might have been optimized away already;
3913 sort them last. */
3914 if (stable)
3916 if (a == NULL)
3917 return b != NULL;
3918 else if (b == NULL)
3919 return -1;
3922 if (a->e == NULL && b->e != NULL)
3923 return 1;
3924 else if (a->e != NULL && b->e == NULL)
3925 return -1;
3927 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3928 no need to test both a->e and b->e. */
3930 /* Sort by destination index. */
3931 if (a->e == NULL)
3933 else if (a->e->dest->index > b->e->dest->index)
3934 return 1;
3935 else if (a->e->dest->index < b->e->dest->index)
3936 return -1;
3938 /* Sort by comp_code. */
3939 if (a->comp_code > b->comp_code)
3940 return 1;
3941 else if (a->comp_code < b->comp_code)
3942 return -1;
3944 hashval_t ha, hb;
3946 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3947 uses DECL_UID of the VAR_DECL, so sorting might differ between
3948 -g and -g0. When doing the removal of redundant assert exprs
3949 and commonization to successors, this does not matter, but
3950 the final sort needs to be stable. */
3951 if (stable)
3953 ha = 0;
3954 hb = 0;
3956 else
3958 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3959 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3962 /* Break the tie using hashing and source/bb index. */
3963 if (ha == hb)
3964 return (a->e != NULL
3965 ? a->e->src->index - b->e->src->index
3966 : a->bb->index - b->bb->index);
3967 return ha > hb ? 1 : -1;
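/* Illustrative note (not part of the original source): the resulting
   order puts edge asserts (loc->e != NULL) before block asserts, then
   sorts by edge destination index and by comp_code, with the
   expression/value hashes (or, in the stable variant, the source/bb
   index, NULL entries last) breaking the remaining ties.  This
   grouping is what lets process_assert_insertions below find asserts
   common to all incoming edges of a block as neighbors in the sorted
   vector.  */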
3970 /* Process all the insertions registered for every name N_i registered
3971 in NEED_ASSERT_FOR. The list of assertions to be inserted are
3972 found in ASSERTS_FOR[i]. */
3974 static void
3975 process_assert_insertions (void)
3977 unsigned i;
3978 bitmap_iterator bi;
3979 bool update_edges_p = false;
3980 int num_asserts = 0;
3982 if (dump_file && (dump_flags & TDF_DETAILS))
3983 dump_all_asserts (dump_file);
3985 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3987 assert_locus *loc = asserts_for[i];
3988 gcc_assert (loc);
3990 auto_vec<assert_locus *, 16> asserts;
3991 for (; loc; loc = loc->next)
3992 asserts.safe_push (loc);
3993 asserts.qsort (compare_assert_loc<false>);
3995 /* Push down common asserts to successors and remove redundant ones. */
3996 unsigned ecnt = 0;
3997 assert_locus *common = NULL;
3998 unsigned commonj = 0;
3999 for (unsigned j = 0; j < asserts.length (); ++j)
4001 loc = asserts[j];
4002 if (! loc->e)
4003 common = NULL;
4004 else if (! common
4005 || loc->e->dest != common->e->dest
4006 || loc->comp_code != common->comp_code
4007 || ! operand_equal_p (loc->val, common->val, 0)
4008 || ! operand_equal_p (loc->expr, common->expr, 0))
4010 commonj = j;
4011 common = loc;
4012 ecnt = 1;
4014 else if (loc->e == asserts[j-1]->e)
4016 /* Remove duplicate asserts. */
4017 if (commonj == j - 1)
4019 commonj = j;
4020 common = loc;
4022 free (asserts[j-1]);
4023 asserts[j-1] = NULL;
4025 else
4027 ecnt++;
4028 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
4030 /* We have the same assertion on all incoming edges of a BB.
4031 Insert it at the beginning of that block. */
4032 loc->bb = loc->e->dest;
4033 loc->e = NULL;
4034 loc->si = gsi_none ();
4035 common = NULL;
4036 /* Clear the asserts that were commoned. */
4037 for (; commonj != j; ++commonj)
4038 if (asserts[commonj])
4040 free (asserts[commonj]);
4041 asserts[commonj] = NULL;
4047 /* The asserts vector sorting above might be unstable for
4048 -fcompare-debug; sort again to ensure a stable sort. */
4049 asserts.qsort (compare_assert_loc<true>);
4050 for (unsigned j = 0; j < asserts.length (); ++j)
4052 loc = asserts[j];
4053 if (! loc)
4054 break;
4055 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4056 num_asserts++;
4057 free (loc);
4061 if (update_edges_p)
4062 gsi_commit_edge_inserts ();
4064 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4065 num_asserts);
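/* Illustrative example (not part of the original source): if the
   sorted vector holds the same assertion, say x_3 != 0, on both
   incoming edges of a two-predecessor block 7, ECNT reaches
   EDGE_COUNT (common->e->dest->preds) and the per-edge copies are
   replaced by a single assert inserted at the start of the block:

     <bb 7>:
     x_4 = ASSERT_EXPR <x_3, x_3 != 0>  */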
4069 /* Traverse the flowgraph looking for conditional jumps to insert range
4070 expressions. These range expressions are meant to provide information
4071 to optimizations that need to reason in terms of value ranges. They
4072 will not be expanded into RTL. For instance, given:
4074 x = ...
4075 y = ...
4076 if (x < y)
4077 y = x - 2;
4078 else
4079 x = y + 3;
4081 this pass will transform the code into:
4083 x = ...
4084 y = ...
4085 if (x < y)
4087 x = ASSERT_EXPR <x, x < y>
4088 y = x - 2
4090 else
4092 y = ASSERT_EXPR <y, x >= y>
4093 x = y + 3
4096 The idea is that once copy and constant propagation have run, other
4097 optimizations will be able to determine what ranges of values 'x'
4098 can take in different paths of the code, simply by checking the reaching
4099 definition of 'x'. */
4101 static void
4102 insert_range_assertions (void)
4104 need_assert_for = BITMAP_ALLOC (NULL);
4105 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4107 calculate_dominance_info (CDI_DOMINATORS);
4109 find_assert_locations ();
4110 if (!bitmap_empty_p (need_assert_for))
4112 process_assert_insertions ();
4113 update_ssa (TODO_update_ssa_no_phi);
4116 if (dump_file && (dump_flags & TDF_DETAILS))
4118 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4119 dump_function_to_file (current_function_decl, dump_file, dump_flags);
4122 free (asserts_for);
4123 BITMAP_FREE (need_assert_for);
4126 class vrp_prop : public ssa_propagation_engine
4128 public:
4129 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4130 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4132 void vrp_initialize (void);
4133 void vrp_finalize (bool);
4134 void check_all_array_refs (void);
4135 void check_array_ref (location_t, tree, bool);
4136 void check_mem_ref (location_t, tree, bool);
4137 void search_for_addr_array (tree, location_t);
4139 class vr_values vr_values;
4140 /* Temporary delegator to minimize code churn. */
4141 value_range *get_value_range (const_tree op)
4142 { return vr_values.get_value_range (op); }
4143 void set_defs_to_varying (gimple *stmt)
4144 { return vr_values.set_defs_to_varying (stmt); }
4145 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4146 tree *output_p, value_range *vr)
4147 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4148 bool update_value_range (const_tree op, value_range *vr)
4149 { return vr_values.update_value_range (op, vr); }
4150 void extract_range_basic (value_range *vr, gimple *stmt)
4151 { vr_values.extract_range_basic (vr, stmt); }
4152 void extract_range_from_phi_node (gphi *phi, value_range *vr)
4153 { vr_values.extract_range_from_phi_node (phi, vr); }
4155 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
4156 and "struct" hacks. If VRP can determine that the
4157 array subscript is a constant, check whether it is outside the valid
4158 range. If the array subscript is a RANGE, warn if it does not
4159 overlap the valid range.
4160 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
4162 void
4163 vrp_prop::check_array_ref (location_t location, tree ref,
4164 bool ignore_off_by_one)
4166 const value_range *vr = NULL;
4167 tree low_sub, up_sub;
4168 tree low_bound, up_bound, up_bound_p1;
4170 if (TREE_NO_WARNING (ref))
4171 return;
4173 low_sub = up_sub = TREE_OPERAND (ref, 1);
4174 up_bound = array_ref_up_bound (ref);
4176 if (!up_bound
4177 || TREE_CODE (up_bound) != INTEGER_CST
4178 || (warn_array_bounds < 2
4179 && array_at_struct_end_p (ref)))
4181 /* Accesses to trailing arrays via pointers may access storage
4182 beyond the type's array bounds. For such arrays, or for flexible
4183 array members, as well as for other arrays of an unknown size,
4184 replace the upper bound with a more permissive one that assumes
4185 the size of the largest object is PTRDIFF_MAX. */
4186 tree eltsize = array_ref_element_size (ref);
4188 if (TREE_CODE (eltsize) != INTEGER_CST
4189 || integer_zerop (eltsize))
4191 up_bound = NULL_TREE;
4192 up_bound_p1 = NULL_TREE;
4194 else
4196 tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4197 tree arg = TREE_OPERAND (ref, 0);
4198 poly_int64 off;
4200 if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4201 maxbound = wide_int_to_tree (sizetype,
4202 wi::sub (wi::to_wide (maxbound),
4203 off));
4204 else
4205 maxbound = fold_convert (sizetype, maxbound);
4207 up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4209 up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4210 build_int_cst (ptrdiff_type_node, 1));
4213 else
4214 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4215 build_int_cst (TREE_TYPE (up_bound), 1));
4217 low_bound = array_ref_low_bound (ref);
4219 tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4221 bool warned = false;
4223 /* Empty array. */
4224 if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4225 warned = warning_at (location, OPT_Warray_bounds,
4226 "array subscript %E is above array bounds of %qT",
4227 low_bound, artype);
4229 if (TREE_CODE (low_sub) == SSA_NAME)
4231 vr = get_value_range (low_sub);
4232 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4234 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
4235 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
4239 if (vr && vr->type == VR_ANTI_RANGE)
4241 if (up_bound
4242 && TREE_CODE (up_sub) == INTEGER_CST
4243 && (ignore_off_by_one
4244 ? tree_int_cst_lt (up_bound, up_sub)
4245 : tree_int_cst_le (up_bound, up_sub))
4246 && TREE_CODE (low_sub) == INTEGER_CST
4247 && tree_int_cst_le (low_sub, low_bound))
4248 warned = warning_at (location, OPT_Warray_bounds,
4249 "array subscript [%E, %E] is outside "
4250 "array bounds of %qT",
4251 low_sub, up_sub, artype);
4253 else if (up_bound
4254 && TREE_CODE (up_sub) == INTEGER_CST
4255 && (ignore_off_by_one
4256 ? !tree_int_cst_le (up_sub, up_bound_p1)
4257 : !tree_int_cst_le (up_sub, up_bound)))
4259 if (dump_file && (dump_flags & TDF_DETAILS))
4261 fprintf (dump_file, "Array bound warning for ");
4262 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4263 fprintf (dump_file, "\n");
4265 warned = warning_at (location, OPT_Warray_bounds,
4266 "array subscript %E is above array bounds of %qT",
4267 up_sub, artype);
4269 else if (TREE_CODE (low_sub) == INTEGER_CST
4270 && tree_int_cst_lt (low_sub, low_bound))
4272 if (dump_file && (dump_flags & TDF_DETAILS))
4274 fprintf (dump_file, "Array bound warning for ");
4275 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4276 fprintf (dump_file, "\n");
4278 warned = warning_at (location, OPT_Warray_bounds,
4279 "array subscript %E is below array bounds of %qT",
4280 low_sub, artype);
4283 if (warned)
4285 ref = TREE_OPERAND (ref, 0);
4287 if (DECL_P (ref))
4288 inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
4290 TREE_NO_WARNING (ref) = 1;
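/* Illustrative example (not part of the original source): for

     int a[10];
     ... = a[10];

   UP_BOUND is 9 and UP_BOUND_P1 is 10, so with IGNORE_OFF_BY_ONE
   false the test !tree_int_cst_le (up_sub, up_bound) fires and the
   function emits
     warning: array subscript 10 is above array bounds of 'int[10]'
   whereas &a[10], checked with IGNORE_OFF_BY_ONE true, is accepted
   because 10 <= UP_BOUND_P1.  */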
4294 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
4295 references to string constants. If VRP can determine that the array
4296 subscript is a constant, check whether it is outside the valid range.
4297 If the array subscript is a RANGE, warn if it does not overlap
4298 the valid range.
4299 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
4300 (used to allow one-past-the-end indices for code that takes
4301 the address of the just-past-the-end element of an array). */
4303 void
4304 vrp_prop::check_mem_ref (location_t location, tree ref,
4305 bool ignore_off_by_one)
4307 if (TREE_NO_WARNING (ref))
4308 return;
4310 tree arg = TREE_OPERAND (ref, 0);
4311 /* The constant and variable offset of the reference. */
4312 tree cstoff = TREE_OPERAND (ref, 1);
4313 tree varoff = NULL_TREE;
4315 const offset_int maxobjsize = tree_to_shwi (max_object_size ());
4317 /* The array or string constant bounds in bytes. Initially set
4318 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
4319 determined. */
4320 offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
4322 /* The minimum and maximum intermediate offset. For a reference
4323 to be valid, not only must the final offset/subscript be
4324 in bounds but all intermediate offsets must be as well.
4325 GCC may be able to deal gracefully with such out-of-bounds
4326 offsets so the checking is only enabled at -Warray-bounds=2
4327 where it may help detect bugs in uses of the intermediate
4328 offsets that would otherwise not be detectable. */
4329 offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
4330 offset_int extrema[2] = { 0, wi::abs (ioff) };
4332 /* The range of the byte offset into the reference. */
4333 offset_int offrange[2] = { 0, 0 };
4335 const value_range *vr = NULL;
4337 /* Determine the offsets and increment OFFRANGE for the bounds of each.
4338 The loop computes the range of the final offset for expressions
4339 such as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs
4340 in some range. */
4341 while (TREE_CODE (arg) == SSA_NAME)
4343 gimple *def = SSA_NAME_DEF_STMT (arg);
4344 if (!is_gimple_assign (def))
4345 break;
4347 tree_code code = gimple_assign_rhs_code (def);
4348 if (code == POINTER_PLUS_EXPR)
4350 arg = gimple_assign_rhs1 (def);
4351 varoff = gimple_assign_rhs2 (def);
4353 else if (code == ASSERT_EXPR)
4355 arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
4356 continue;
4358 else
4359 return;
4361 /* VAROFF should always be an SSA_NAME here (and not even
4362 an INTEGER_CST), but there's no point in taking chances. */
4363 if (TREE_CODE (varoff) != SSA_NAME)
4364 break;
4366 vr = get_value_range (varoff);
4367 if (!vr || vr->type == VR_UNDEFINED || !vr->min || !vr->max)
4368 break;
4370 if (TREE_CODE (vr->min) != INTEGER_CST
4371 || TREE_CODE (vr->max) != INTEGER_CST)
4372 break;
4374 if (vr->type == VR_RANGE)
4376 if (tree_int_cst_lt (vr->min, vr->max))
4378 offset_int min
4379 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min));
4380 offset_int max
4381 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max));
4382 if (min < max)
4384 offrange[0] += min;
4385 offrange[1] += max;
4387 else
4389 offrange[0] += max;
4390 offrange[1] += min;
4393 else
4395 /* Conservatively add [-MAXOBJSIZE - 1, MAXOBJSIZE]
4396 to OFFRANGE. */
4397 offrange[0] += arrbounds[0];
4398 offrange[1] += arrbounds[1];
4401 else
4403 /* For an anti-range, analogously to the above, conservatively
4404 add [-MAXOBJSIZE - 1, MAXOBJSIZE] to OFFRANGE. */
4405 offrange[0] += arrbounds[0];
4406 offrange[1] += arrbounds[1];
4409 /* Keep track of the minimum and maximum offset. */
4410 if (offrange[1] < 0 && offrange[1] < extrema[0])
4411 extrema[0] = offrange[1];
4412 if (offrange[0] > 0 && offrange[0] > extrema[1])
4413 extrema[1] = offrange[0];
4415 if (offrange[0] < arrbounds[0])
4416 offrange[0] = arrbounds[0];
4418 if (offrange[1] > arrbounds[1])
4419 offrange[1] = arrbounds[1];
4422 if (TREE_CODE (arg) == ADDR_EXPR)
4424 arg = TREE_OPERAND (arg, 0);
4425 if (TREE_CODE (arg) != STRING_CST
4426 && TREE_CODE (arg) != VAR_DECL)
4427 return;
4429 else
4430 return;
4432 /* The type of the object being referred to. It can be an array,
4433 string literal, or a non-array type when the MEM_REF represents
4434 a reference/subscript via a pointer to an object that is not
4435 an element of an array. References to members of structs and
4436 unions are excluded because MEM_REF doesn't make it possible
4437 to identify the member where the reference originated.
4438 Incomplete types are excluded as well because their size is
4439 not known. */
4440 tree reftype = TREE_TYPE (arg);
4441 if (POINTER_TYPE_P (reftype)
4442 || !COMPLETE_TYPE_P (reftype)
4443 || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST
4444 || RECORD_OR_UNION_TYPE_P (reftype))
4445 return;
4447 offset_int eltsize;
4448 if (TREE_CODE (reftype) == ARRAY_TYPE)
4450 eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
4452 if (tree dom = TYPE_DOMAIN (reftype))
4454 tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
4455 if (array_at_struct_end_p (arg)
4456 || !bnds[0] || !bnds[1])
4458 arrbounds[0] = 0;
4459 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4461 else
4463 arrbounds[0] = wi::to_offset (bnds[0]) * eltsize;
4464 arrbounds[1] = (wi::to_offset (bnds[1]) + 1) * eltsize;
4467 else
4469 arrbounds[0] = 0;
4470 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4473 if (TREE_CODE (ref) == MEM_REF)
4475 /* For MEM_REF determine a tighter bound of the non-array
4476 element type. */
4477 tree eltype = TREE_TYPE (reftype);
4478 while (TREE_CODE (eltype) == ARRAY_TYPE)
4479 eltype = TREE_TYPE (eltype);
4480 eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
4483 else
4485 eltsize = 1;
4486 arrbounds[0] = 0;
4487 arrbounds[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype));
4490 offrange[0] += ioff;
4491 offrange[1] += ioff;
4493 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
4494 is set (when taking the address of the one-past-last element
4495 of an array) but always use the stricter bound in diagnostics. */
4496 offset_int ubound = arrbounds[1];
4497 if (ignore_off_by_one)
4498 ubound += 1;
4500 if (offrange[0] >= ubound || offrange[1] < arrbounds[0])
4502 /* Treat a reference to a non-array object as one to an array
4503 of a single element. */
4504 if (TREE_CODE (reftype) != ARRAY_TYPE)
4505 reftype = build_array_type_nelts (reftype, 1);
4507 if (TREE_CODE (ref) == MEM_REF)
4509 /* Extract the element type out of MEM_REF and use its size
4510 to compute the index to print in the diagnostic; arrays
4511 in MEM_REF don't mean anything. */
4512 tree type = TREE_TYPE (ref);
4513 while (TREE_CODE (type) == ARRAY_TYPE)
4514 type = TREE_TYPE (type);
4515 tree size = TYPE_SIZE_UNIT (type);
4516 offrange[0] = offrange[0] / wi::to_offset (size);
4517 offrange[1] = offrange[1] / wi::to_offset (size);
4519 else
4521 /* For anything other than MEM_REF, compute the index to
4522 print in the diagnostic as the offset over element size. */
4523 offrange[0] = offrange[0] / eltsize;
4524 offrange[1] = offrange[1] / eltsize;
4527 bool warned;
4528 if (offrange[0] == offrange[1])
4529 warned = warning_at (location, OPT_Warray_bounds,
4530 "array subscript %wi is outside array bounds "
4531 "of %qT",
4532 offrange[0].to_shwi (), reftype);
4533 else
4534 warned = warning_at (location, OPT_Warray_bounds,
4535 "array subscript [%wi, %wi] is outside "
4536 "array bounds of %qT",
4537 offrange[0].to_shwi (),
4538 offrange[1].to_shwi (), reftype);
4539 if (warned && DECL_P (arg))
4540 inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
4542 TREE_NO_WARNING (ref) = 1;
4543 return;
4546 if (warn_array_bounds < 2)
4547 return;
4549 /* At level 2, also check intermediate offsets. */
4550 int i = 0;
4551 if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
4553 HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
4555 warning_at (location, OPT_Warray_bounds,
4556 "intermediate array offset %wi is outside array bounds "
4557 "of %qT",
4558 tmpidx, reftype);
4559 TREE_NO_WARNING (ref) = 1;
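/* Illustrative example (not part of the original source): for

     char a[10];
     ... = MEM[(char *)&a + 12];

   REFTYPE is char[10], ELTSIZE is 1, ARRBOUNDS becomes [0, 10] and
   OFFRANGE [12, 12], so OFFRANGE[0] >= UBOUND and the function emits
     warning: array subscript 12 is outside array bounds of 'char[10]'  */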
4563 /* Check whether the expression T, located at LOCATION, computes
4564 the address of an ARRAY_REF, and call check_array_ref on it. */
4566 void
4567 vrp_prop::search_for_addr_array (tree t, location_t location)
4569 /* Check each ARRAY_REF and MEM_REF in the reference chain. */
4572 if (TREE_CODE (t) == ARRAY_REF)
4573 check_array_ref (location, t, true /*ignore_off_by_one*/);
4574 else if (TREE_CODE (t) == MEM_REF)
4575 check_mem_ref (location, t, true /*ignore_off_by_one*/);
4577 t = TREE_OPERAND (t, 0);
4579 while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
4581 if (TREE_CODE (t) != MEM_REF
4582 || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
4583 || TREE_NO_WARNING (t))
4584 return;
4586 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4587 tree low_bound, up_bound, el_sz;
4588 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
4589 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
4590 || !TYPE_DOMAIN (TREE_TYPE (tem)))
4591 return;
4593 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4594 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4595 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
4596 if (!low_bound
4597 || TREE_CODE (low_bound) != INTEGER_CST
4598 || !up_bound
4599 || TREE_CODE (up_bound) != INTEGER_CST
4600 || !el_sz
4601 || TREE_CODE (el_sz) != INTEGER_CST)
4602 return;
4604 offset_int idx;
4605 if (!mem_ref_offset (t).is_constant (&idx))
4606 return;
4608 bool warned = false;
4609 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4610 if (idx < 0)
4612 if (dump_file && (dump_flags & TDF_DETAILS))
4614 fprintf (dump_file, "Array bound warning for ");
4615 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4616 fprintf (dump_file, "\n");
4618 warned = warning_at (location, OPT_Warray_bounds,
4619 "array subscript %wi is below "
4620 "array bounds of %qT",
4621 idx.to_shwi (), TREE_TYPE (tem));
4623 else if (idx > (wi::to_offset (up_bound)
4624 - wi::to_offset (low_bound) + 1))
4626 if (dump_file && (dump_flags & TDF_DETAILS))
4628 fprintf (dump_file, "Array bound warning for ");
4629 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4630 fprintf (dump_file, "\n");
4632 warned = warning_at (location, OPT_Warray_bounds,
4633 "array subscript %wu is above "
4634 "array bounds of %qT",
4635 idx.to_uhwi (), TREE_TYPE (tem));
4638 if (warned)
4640 if (DECL_P (t))
4641 inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
4643 TREE_NO_WARNING (t) = 1;
4647 /* walk_tree() callback that checks if *TP is
4648 an ARRAY_REF inside an ADDR_EXPR (in which case an array
4649 subscript one past the valid range is allowed). Call
4650 check_array_ref for each ARRAY_REF found. The location is
4651 passed in DATA. */
4653 static tree
4654 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4656 tree t = *tp;
4657 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4658 location_t location;
4660 if (EXPR_HAS_LOCATION (t))
4661 location = EXPR_LOCATION (t);
4662 else
4663 location = gimple_location (wi->stmt);
4665 *walk_subtree = TRUE;
4667 vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4668 if (TREE_CODE (t) == ARRAY_REF)
4669 vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
4670 else if (TREE_CODE (t) == MEM_REF)
4671 vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4672 else if (TREE_CODE (t) == ADDR_EXPR)
4674 vrp_prop->search_for_addr_array (t, location);
4675 *walk_subtree = FALSE;
4678 return NULL_TREE;
4681 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4682 to walk over all statements of all reachable BBs and call
4683 check_array_bounds on them. */
4685 class check_array_bounds_dom_walker : public dom_walker
4687 public:
4688 check_array_bounds_dom_walker (vrp_prop *prop)
4689 : dom_walker (CDI_DOMINATORS,
4690 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4691 flags, so that we can merge in information on
4692 non-executable edges from vrp_folder. */
4693 REACHABLE_BLOCKS_PRESERVING_FLAGS),
4694 m_prop (prop) {}
4695 ~check_array_bounds_dom_walker () {}
4697 edge before_dom_children (basic_block) FINAL OVERRIDE;
4699 private:
4700 vrp_prop *m_prop;
4703 /* Implementation of dom_walker::before_dom_children.
4705 Walk over all statements of BB and call check_array_bounds on them,
4706 and determine if there's a unique successor edge. */
4708 edge
4709 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4711 gimple_stmt_iterator si;
4712 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4714 gimple *stmt = gsi_stmt (si);
4715 struct walk_stmt_info wi;
4716 if (!gimple_has_location (stmt)
4717 || is_gimple_debug (stmt))
4718 continue;
4720 memset (&wi, 0, sizeof (wi));
4722 wi.info = m_prop;
4724 walk_gimple_op (stmt, check_array_bounds, &wi);
4727 /* Determine if there's a unique successor edge, and if so, return
4728 that back to dom_walker, ensuring that we don't visit blocks that
4729 became unreachable during the VRP propagation
4730 (PR tree-optimization/83312). */
4731 return find_taken_edge (bb, NULL_TREE);
4734 /* Walk over all statements of all reachable BBs and call check_array_bounds
4735 on them. */
4737 void
4738 vrp_prop::check_all_array_refs ()
4740 check_array_bounds_dom_walker w (this);
4741 w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4744 /* Return true if all imm uses of VAR are either in STMT, or
4745 feed (optionally through a chain of single imm uses) GIMPLE_COND
4746 in basic block COND_BB. */
4748 static bool
4749 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
4751 use_operand_p use_p, use2_p;
4752 imm_use_iterator iter;
4754 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
4755 if (USE_STMT (use_p) != stmt)
4757 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
4758 if (is_gimple_debug (use_stmt))
4759 continue;
4760 while (is_gimple_assign (use_stmt)
4761 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
4762 && single_imm_use (gimple_assign_lhs (use_stmt),
4763 &use2_p, &use_stmt2))
4764 use_stmt = use_stmt2;
4765 if (gimple_code (use_stmt) != GIMPLE_COND
4766 || gimple_bb (use_stmt) != cond_bb)
4767 return false;
4769 return true;
4772 /* Handle
4773 _4 = x_3 & 31;
4774 if (_4 != 0)
4775 goto <bb 6>;
4776 else
4777 goto <bb 7>;
4778 <bb 6>:
4779 __builtin_unreachable ();
4780 <bb 7>:
4781 x_5 = ASSERT_EXPR <x_3, ...>;
4782 If x_3 has no other immediate uses (checked by the caller)
4783 and VAR is the x_3 var from the ASSERT_EXPR, we can clear the
4784 low 5 bits from the non-zero bitmask. */
4786 void
4787 maybe_set_nonzero_bits (edge e, tree var)
4789 basic_block cond_bb = e->src;
4790 gimple *stmt = last_stmt (cond_bb);
4791 tree cst;
4793 if (stmt == NULL
4794 || gimple_code (stmt) != GIMPLE_COND
4795 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
4796 ? EQ_EXPR : NE_EXPR)
4797 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
4798 || !integer_zerop (gimple_cond_rhs (stmt)))
4799 return;
4801 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
4802 if (!is_gimple_assign (stmt)
4803 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
4804 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
4805 return;
4806 if (gimple_assign_rhs1 (stmt) != var)
4808 gimple *stmt2;
4810 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
4811 return;
4812 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
4813 if (!gimple_assign_cast_p (stmt2)
4814 || gimple_assign_rhs1 (stmt2) != var
4815 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
4816 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
4817 != TYPE_PRECISION (TREE_TYPE (var))))
4818 return;
4820 cst = gimple_assign_rhs2 (stmt);
4821 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
4822 wi::to_wide (cst)));
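/* Illustrative example (not part of the original source): with the
   shape from the comment above, if get_nonzero_bits (x_3) was 0xff
   and CST is 31, the call records 0xff & ~0x1f == 0xe0, i.e. the
   low 5 bits of x_3 are now known to be zero on this path.  */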
4825 /* Convert range assertion expressions into the implied copies and
4826 copy propagate away the copies. Doing the trivial copy propagation
4827 here avoids the need to run the full copy propagation pass after
4828 VRP.
4830 FIXME, this will eventually lead to copy propagation removing the
4831 names that had useful range information attached to them. For
4832 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
4833 then N_i will have the range [4, +INF].
4835 However, by converting the assertion into the implied copy
4836 operation N_i = N_j, we will then copy-propagate N_j into the uses
4837 of N_i and lose the range information. We may want to hold on to
4838 ASSERT_EXPRs a little while longer as the ranges could be used in
4839 things like jump threading.
4841 The problem with keeping ASSERT_EXPRs around is that passes after
4842 VRP need to handle them appropriately.
4844 Another approach would be to make the range information a first
4845 class property of the SSA_NAME so that it can be queried from
4846 any pass. This is made somewhat more complex by the need for
4847 multiple ranges to be associated with one SSA_NAME. */
4849 static void
4850 remove_range_assertions (void)
4852 basic_block bb;
4853 gimple_stmt_iterator si;
4854 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
4855 a basic block preceded by GIMPLE_COND branching to it and
4856 __builtin_trap, -1 if not yet checked, 0 otherwise. */
4857 int is_unreachable;
4859 /* Note that the BSI iterator bump happens at the bottom of the
4860 loop and no bump is necessary if we're removing the statement
4861 referenced by the current BSI. */
4862 FOR_EACH_BB_FN (bb, cfun)
4863 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
4865 gimple *stmt = gsi_stmt (si);
4867 if (is_gimple_assign (stmt)
4868 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
4870 tree lhs = gimple_assign_lhs (stmt);
4871 tree rhs = gimple_assign_rhs1 (stmt);
4872 tree var;
4874 var = ASSERT_EXPR_VAR (rhs);
4876 if (TREE_CODE (var) == SSA_NAME
4877 && !POINTER_TYPE_P (TREE_TYPE (lhs))
4878 && SSA_NAME_RANGE_INFO (lhs))
4880 if (is_unreachable == -1)
4882 is_unreachable = 0;
4883 if (single_pred_p (bb)
4884 && assert_unreachable_fallthru_edge_p
4885 (single_pred_edge (bb)))
4886 is_unreachable = 1;
4888 /* Handle
4889 if (x_7 >= 10 && x_7 < 20)
4890 __builtin_unreachable ();
4891 x_8 = ASSERT_EXPR <x_7, ...>;
4892 if the only uses of x_7 are in the ASSERT_EXPR and
4893 in the condition. In that case, we can copy the
4894 range info from x_8 computed in this pass also
4895 for x_7. */
4896 if (is_unreachable
4897 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
4898 single_pred (bb)))
4900 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
4901 SSA_NAME_RANGE_INFO (lhs)->get_min (),
4902 SSA_NAME_RANGE_INFO (lhs)->get_max ());
4903 maybe_set_nonzero_bits (single_pred_edge (bb), var);
4907 /* Propagate the RHS into every use of the LHS. For SSA names
4908 also propagate abnormals as it merely restores the original
4909 IL in this case (a replace_uses_by would assert). */
4910 if (TREE_CODE (var) == SSA_NAME)
4912 imm_use_iterator iter;
4913 use_operand_p use_p;
4914 gimple *use_stmt;
4915 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4916 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4917 SET_USE (use_p, var);
4919 else
4920 replace_uses_by (lhs, var);
4922 /* And finally, remove the copy, it is not needed. */
4923 gsi_remove (&si, true);
4924 release_defs (stmt);
4926 else
4928 if (!is_gimple_debug (gsi_stmt (si)))
4929 is_unreachable = 0;
4930 gsi_next (&si);
4935 /* Return true if STMT is interesting for VRP. */
4937 bool
4938 stmt_interesting_for_vrp (gimple *stmt)
4940 if (gimple_code (stmt) == GIMPLE_PHI)
4942 tree res = gimple_phi_result (stmt);
4943 return (!virtual_operand_p (res)
4944 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
4945 || POINTER_TYPE_P (TREE_TYPE (res))));
4947 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
4949 tree lhs = gimple_get_lhs (stmt);
4951 /* In general, assignments with virtual operands are not useful
4952 for deriving ranges, with the obvious exception of calls to
4953 builtin functions. */
4954 if (lhs && TREE_CODE (lhs) == SSA_NAME
4955 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4956 || POINTER_TYPE_P (TREE_TYPE (lhs)))
4957 && (is_gimple_call (stmt)
4958 || !gimple_vuse (stmt)))
4959 return true;
4960 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
4961 switch (gimple_call_internal_fn (stmt))
4963 case IFN_ADD_OVERFLOW:
4964 case IFN_SUB_OVERFLOW:
4965 case IFN_MUL_OVERFLOW:
4966 case IFN_ATOMIC_COMPARE_EXCHANGE:
4967 /* These internal calls return _Complex integer type,
4968 but are interesting to VRP nevertheless. */
4969 if (lhs && TREE_CODE (lhs) == SSA_NAME)
4970 return true;
4971 break;
4972 default:
4973 break;
4976 else if (gimple_code (stmt) == GIMPLE_COND
4977 || gimple_code (stmt) == GIMPLE_SWITCH)
4978 return true;
4980 return false;
4983 /* Initialization required by ssa_propagate engine. */
4985 void
4986 vrp_prop::vrp_initialize ()
4988 basic_block bb;
4990 FOR_EACH_BB_FN (bb, cfun)
4992 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
4993 gsi_next (&si))
4995 gphi *phi = si.phi ();
4996 if (!stmt_interesting_for_vrp (phi))
4998 tree lhs = PHI_RESULT (phi);
4999 set_value_range_to_varying (get_value_range (lhs));
5000 prop_set_simulate_again (phi, false);
5002 else
5003 prop_set_simulate_again (phi, true);
5006 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
5007 gsi_next (&si))
5009 gimple *stmt = gsi_stmt (si);
5011 /* If the statement is a control insn, we do not want to
5012 stop simulating it after the first visit. Failing to
5013 re-simulate it means its outgoing edges will never get added. */
5014 if (stmt_ends_bb_p (stmt))
5015 prop_set_simulate_again (stmt, true);
5016 else if (!stmt_interesting_for_vrp (stmt))
5018 set_defs_to_varying (stmt);
5019 prop_set_simulate_again (stmt, false);
5021 else
5022 prop_set_simulate_again (stmt, true);
5027 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
5028 that includes the value VAL. The search is restricted to the range
5029 [START_IDX, n - 1] where n is the size of VEC.
5031 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5032 returned.
5034 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5035 it is placed in IDX and false is returned.
5037 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5038 returned. */
5040 bool
5041 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5043 size_t n = gimple_switch_num_labels (stmt);
5044 size_t low, high;
5046 /* Find case label for minimum of the value range or the next one.
5047 At each iteration we are searching in [low, high - 1]. */
5049 for (low = start_idx, high = n; high != low; )
5051 tree t;
5052 int cmp;
5053 /* Note that i != high, so we never ask for n. */
5054 size_t i = (high + low) / 2;
5055 t = gimple_switch_label (stmt, i);
5057 /* Cache the result of comparing CASE_LOW and val. */
5058 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5060 if (cmp == 0)
5062 /* Ranges cannot be empty. */
5063 *idx = i;
5064 return true;
5066 else if (cmp > 0)
5067 high = i;
5068 else
5070 low = i + 1;
5071 if (CASE_HIGH (t) != NULL
5072 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5074 *idx = i;
5075 return true;
5080 *idx = high;
5081 return false;
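/* Illustrative example (not part of the original source): for

     switch (x) { case 1: ...; case 5: ...; case 8 ... 10: ...; }

   with the default label at index 0, find_case_label_index (stmt, 1,
   val, &idx) yields idx = 2 and true for val = 5, idx = 3 and true
   for val = 9 (covered by CASE_HIGH of the 8 ... 10 range), and
   idx = 3 and false for val = 6 (the next larger label).  */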
5084 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5085 for values between MIN and MAX. The first index is placed in MIN_IDX. The
5086 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5087 then MAX_IDX < MIN_IDX.
5088 Returns true if the default label is not needed. */
5090 bool
5091 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5092 size_t *max_idx)
5094 size_t i, j;
5095 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5096 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5098 if (i == j
5099 && min_take_default
5100 && max_take_default)
5102 /* Only the default case label reached.
5103 Return an empty range. */
5104 *min_idx = 1;
5105 *max_idx = 0;
5106 return false;
5108 else
5110 bool take_default = min_take_default || max_take_default;
5111 tree low, high;
5112 size_t k;
5114 if (max_take_default)
5115 j--;
5117 /* If the case label range is continuous, we do not need
5118 the default case label. Verify that. */
5119 high = CASE_LOW (gimple_switch_label (stmt, i));
5120 if (CASE_HIGH (gimple_switch_label (stmt, i)))
5121 high = CASE_HIGH (gimple_switch_label (stmt, i));
5122 for (k = i + 1; k <= j; ++k)
5124 low = CASE_LOW (gimple_switch_label (stmt, k));
5125 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5127 take_default = true;
5128 break;
5130 high = low;
5131 if (CASE_HIGH (gimple_switch_label (stmt, k)))
5132 high = CASE_HIGH (gimple_switch_label (stmt, k));
5135 *min_idx = i;
5136 *max_idx = j;
5137 return !take_default;
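/* Illustrative example (not part of the original source): continuing
   the switch above, find_case_label_range (stmt, 5, 9, &i, &j) sets
   i = 2 and j = 3 but returns false, since the gap between case 5
   and case 8 means the default label is still needed; for the
   contiguous range 8 ... 10 alone it would return true.  */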
5141 /* Evaluate statement STMT. If the statement produces a useful range,
5142 return SSA_PROP_INTERESTING and record the SSA name with the
5143 interesting range into *OUTPUT_P.
5145 If STMT is a conditional branch and we can determine its truth
5146 value, the taken edge is recorded in *TAKEN_EDGE_P.
5148 If STMT produces a varying value, return SSA_PROP_VARYING. */
5150 enum ssa_prop_result
5151 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5153 value_range vr = VR_INITIALIZER;
5154 tree lhs = gimple_get_lhs (stmt);
5155 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5157 if (*output_p)
5159 if (update_value_range (*output_p, &vr))
5161 if (dump_file && (dump_flags & TDF_DETAILS))
5163 fprintf (dump_file, "Found new range for ");
5164 print_generic_expr (dump_file, *output_p);
5165 fprintf (dump_file, ": ");
5166 dump_value_range (dump_file, &vr);
5167 fprintf (dump_file, "\n");
5170 if (vr.type == VR_VARYING)
5171 return SSA_PROP_VARYING;
5173 return SSA_PROP_INTERESTING;
5175 return SSA_PROP_NOT_INTERESTING;
5178 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5179 switch (gimple_call_internal_fn (stmt))
5181 case IFN_ADD_OVERFLOW:
5182 case IFN_SUB_OVERFLOW:
5183 case IFN_MUL_OVERFLOW:
5184 case IFN_ATOMIC_COMPARE_EXCHANGE:
5185 /* These internal calls return _Complex integer type,
5186 which VRP does not track, but the immediate uses
5187 thereof might be interesting. */
5188 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5190 imm_use_iterator iter;
5191 use_operand_p use_p;
5192 enum ssa_prop_result res = SSA_PROP_VARYING;
5194 set_value_range_to_varying (get_value_range (lhs));
5196 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5198 gimple *use_stmt = USE_STMT (use_p);
5199 if (!is_gimple_assign (use_stmt))
5200 continue;
5201 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5202 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5203 continue;
5204 tree rhs1 = gimple_assign_rhs1 (use_stmt);
5205 tree use_lhs = gimple_assign_lhs (use_stmt);
5206 if (TREE_CODE (rhs1) != rhs_code
5207 || TREE_OPERAND (rhs1, 0) != lhs
5208 || TREE_CODE (use_lhs) != SSA_NAME
5209 || !stmt_interesting_for_vrp (use_stmt)
5210 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5211 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5212 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5213 continue;
5215 /* If there is a change in the value range for any of the
5216 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5217 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
5218 or IMAGPART_EXPR immediate uses, but none of them have
5219 a change in their value ranges, return
5220 SSA_PROP_NOT_INTERESTING. If there are no
5221 {REAL,IMAG}PART_EXPR uses at all,
5222 return SSA_PROP_VARYING. */
5223 value_range new_vr = VR_INITIALIZER;
5224 extract_range_basic (&new_vr, use_stmt);
5225 const value_range *old_vr = get_value_range (use_lhs);
5226 if (old_vr->type != new_vr.type
5227 || !vrp_operand_equal_p (old_vr->min, new_vr.min)
5228 || !vrp_operand_equal_p (old_vr->max, new_vr.max)
5229 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
5230 res = SSA_PROP_INTERESTING;
5231 else
5232 res = SSA_PROP_NOT_INTERESTING;
5233 BITMAP_FREE (new_vr.equiv);
5234 if (res == SSA_PROP_INTERESTING)
5236 *output_p = lhs;
5237 return res;
5241 return res;
5243 break;
5244 default:
5245 break;
5248 /* All other statements produce nothing of interest for VRP, so mark
5249 their outputs varying and prevent further simulation. */
5250 set_defs_to_varying (stmt);
5252 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5255 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5256 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5257 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5258 possible such range. The resulting range is not canonicalized. */
5260 static void
5261 union_ranges (enum value_range_type *vr0type,
5262 tree *vr0min, tree *vr0max,
5263 enum value_range_type vr1type,
5264 tree vr1min, tree vr1max)
5266 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5267 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5269 /* [] is vr0, () is vr1 in the following classification comments. */
5270 if (mineq && maxeq)
5272 /* [( )] */
5273 if (*vr0type == vr1type)
5274 /* Nothing to do for equal ranges. */
5276 else if ((*vr0type == VR_RANGE
5277 && vr1type == VR_ANTI_RANGE)
5278 || (*vr0type == VR_ANTI_RANGE
5279 && vr1type == VR_RANGE))
5281 /* For anti-range with range union the result is varying. */
5282 goto give_up;
5284 else
5285 gcc_unreachable ();
5287 else if (operand_less_p (*vr0max, vr1min) == 1
5288 || operand_less_p (vr1max, *vr0min) == 1)
5290 /* [ ] ( ) or ( ) [ ]
5291 If the ranges have an empty intersection, the result of the union
5292 operation is the anti-range, or, if both are anti-ranges,
5293 it covers everything. */
5294 if (*vr0type == VR_ANTI_RANGE
5295 && vr1type == VR_ANTI_RANGE)
5296 goto give_up;
5297 else if (*vr0type == VR_ANTI_RANGE
5298 && vr1type == VR_RANGE)
5300 else if (*vr0type == VR_RANGE
5301 && vr1type == VR_ANTI_RANGE)
5303 *vr0type = vr1type;
5304 *vr0min = vr1min;
5305 *vr0max = vr1max;
5307 else if (*vr0type == VR_RANGE
5308 && vr1type == VR_RANGE)
5310 /* The result is the convex hull of both ranges. */
5311 if (operand_less_p (*vr0max, vr1min) == 1)
5313 /* If the result can be an anti-range, create one. */
5314 if (TREE_CODE (*vr0max) == INTEGER_CST
5315 && TREE_CODE (vr1min) == INTEGER_CST
5316 && vrp_val_is_min (*vr0min)
5317 && vrp_val_is_max (vr1max))
5319 tree min = int_const_binop (PLUS_EXPR,
5320 *vr0max,
5321 build_int_cst (TREE_TYPE (*vr0max), 1));
5322 tree max = int_const_binop (MINUS_EXPR,
5323 vr1min,
5324 build_int_cst (TREE_TYPE (vr1min), 1));
5325 if (!operand_less_p (max, min))
5327 *vr0type = VR_ANTI_RANGE;
5328 *vr0min = min;
5329 *vr0max = max;
5331 else
5332 *vr0max = vr1max;
5334 else
5335 *vr0max = vr1max;
5337 else
5339 /* If the result can be an anti-range, create one. */
5340 if (TREE_CODE (vr1max) == INTEGER_CST
5341 && TREE_CODE (*vr0min) == INTEGER_CST
5342 && vrp_val_is_min (vr1min)
5343 && vrp_val_is_max (*vr0max))
5345 tree min = int_const_binop (PLUS_EXPR,
5346 vr1max,
5347 build_int_cst (TREE_TYPE (vr1max), 1));
5348 tree max = int_const_binop (MINUS_EXPR,
5349 *vr0min,
5350 build_int_cst (TREE_TYPE (*vr0min), 1));
5351 if (!operand_less_p (max, min))
5353 *vr0type = VR_ANTI_RANGE;
5354 *vr0min = min;
5355 *vr0max = max;
5357 else
5358 *vr0min = vr1min;
5360 else
5361 *vr0min = vr1min;
5364 else
5365 gcc_unreachable ();
5367 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5368 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5370 /* [ ( ) ] or [( ) ] or [ ( )] */
5371 if (*vr0type == VR_RANGE
5372 && vr1type == VR_RANGE)
5374 else if (*vr0type == VR_ANTI_RANGE
5375 && vr1type == VR_ANTI_RANGE)
5377 *vr0type = vr1type;
5378 *vr0min = vr1min;
5379 *vr0max = vr1max;
5381 else if (*vr0type == VR_ANTI_RANGE
5382 && vr1type == VR_RANGE)
5384 /* Arbitrarily choose the right or left gap. */
5385 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5386 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5387 build_int_cst (TREE_TYPE (vr1min), 1));
5388 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5389 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5390 build_int_cst (TREE_TYPE (vr1max), 1));
5391 else
5392 goto give_up;
5394 else if (*vr0type == VR_RANGE
5395 && vr1type == VR_ANTI_RANGE)
5396 /* The result covers everything. */
5397 goto give_up;
5398 else
5399 gcc_unreachable ();
5401 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5402 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5404 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5405 if (*vr0type == VR_RANGE
5406 && vr1type == VR_RANGE)
5408 *vr0type = vr1type;
5409 *vr0min = vr1min;
5410 *vr0max = vr1max;
5412 else if (*vr0type == VR_ANTI_RANGE
5413 && vr1type == VR_ANTI_RANGE)
5415 else if (*vr0type == VR_RANGE
5416 && vr1type == VR_ANTI_RANGE)
5418 *vr0type = VR_ANTI_RANGE;
5419 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5421 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5422 build_int_cst (TREE_TYPE (*vr0min), 1));
5423 *vr0min = vr1min;
5425 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5427 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5428 build_int_cst (TREE_TYPE (*vr0max), 1));
5429 *vr0max = vr1max;
5431 else
5432 goto give_up;
5434 else if (*vr0type == VR_ANTI_RANGE
5435 && vr1type == VR_RANGE)
5436 /* The result covers everything. */
5437 goto give_up;
5438 else
5439 gcc_unreachable ();
5441 else if ((operand_less_p (vr1min, *vr0max) == 1
5442 || operand_equal_p (vr1min, *vr0max, 0))
5443 && operand_less_p (*vr0min, vr1min) == 1
5444 && operand_less_p (*vr0max, vr1max) == 1)
5446 /* [ ( ] ) or [ ]( ) */
5447 if (*vr0type == VR_RANGE
5448 && vr1type == VR_RANGE)
5449 *vr0max = vr1max;
5450 else if (*vr0type == VR_ANTI_RANGE
5451 && vr1type == VR_ANTI_RANGE)
5452 *vr0min = vr1min;
5453 else if (*vr0type == VR_ANTI_RANGE
5454 && vr1type == VR_RANGE)
5456 if (TREE_CODE (vr1min) == INTEGER_CST)
5457 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5458 build_int_cst (TREE_TYPE (vr1min), 1));
5459 else
5460 goto give_up;
5462 else if (*vr0type == VR_RANGE
5463 && vr1type == VR_ANTI_RANGE)
5465 if (TREE_CODE (*vr0max) == INTEGER_CST)
5467 *vr0type = vr1type;
5468 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5469 build_int_cst (TREE_TYPE (*vr0max), 1));
5470 *vr0max = vr1max;
5472 else
5473 goto give_up;
5475 else
5476 gcc_unreachable ();
5478 else if ((operand_less_p (*vr0min, vr1max) == 1
5479 || operand_equal_p (*vr0min, vr1max, 0))
5480 && operand_less_p (vr1min, *vr0min) == 1
5481 && operand_less_p (vr1max, *vr0max) == 1)
5483 /* ( [ ) ] or ( )[ ] */
5484 if (*vr0type == VR_RANGE
5485 && vr1type == VR_RANGE)
5486 *vr0min = vr1min;
5487 else if (*vr0type == VR_ANTI_RANGE
5488 && vr1type == VR_ANTI_RANGE)
5489 *vr0max = vr1max;
5490 else if (*vr0type == VR_ANTI_RANGE
5491 && vr1type == VR_RANGE)
5493 if (TREE_CODE (vr1max) == INTEGER_CST)
5494 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5495 build_int_cst (TREE_TYPE (vr1max), 1));
5496 else
5497 goto give_up;
5499 else if (*vr0type == VR_RANGE
5500 && vr1type == VR_ANTI_RANGE)
5502 if (TREE_CODE (*vr0min) == INTEGER_CST)
5504 *vr0type = vr1type;
5505 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5506 build_int_cst (TREE_TYPE (*vr0min), 1));
5507 *vr0min = vr1min;
5509 else
5510 goto give_up;
5512 else
5513 gcc_unreachable ();
5515 else
5516 goto give_up;
5518 return;
5520 give_up:
5521 *vr0type = VR_VARYING;
5522 *vr0min = NULL_TREE;
5523 *vr0max = NULL_TREE;
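/* Illustrative examples (not part of the original source) for the
   convex-hull branch above, with int-typed endpoints:

     [0, 10]  union  [20, 30]  ->  [0, 30]
       (the gap cannot be represented, so the hull is used)
     [INT_MIN, 10]  union  [20, INT_MAX]  ->  ~[11, 19]
       (vr0 starts at the type minimum and vr1 ends at the type
        maximum, so the union is exactly the anti-range of the gap)  */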
5526 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5527 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5528 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5529 possible such range. The resulting range is not canonicalized. */
5531 static void
5532 intersect_ranges (enum value_range_type *vr0type,
5533 tree *vr0min, tree *vr0max,
5534 enum value_range_type vr1type,
5535 tree vr1min, tree vr1max)
5537 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5538 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5540 /* [] is vr0, () is vr1 in the following classification comments. */
5541 if (mineq && maxeq)
5543 /* [( )] */
5544 if (*vr0type == vr1type)
5545 /* Nothing to do for equal ranges. */
5547 else if ((*vr0type == VR_RANGE
5548 && vr1type == VR_ANTI_RANGE)
5549 || (*vr0type == VR_ANTI_RANGE
5550 && vr1type == VR_RANGE))
5552 /* For anti-range with range intersection the result is empty. */
5553 *vr0type = VR_UNDEFINED;
5554 *vr0min = NULL_TREE;
5555 *vr0max = NULL_TREE;
5557 else
5558 gcc_unreachable ();
5560 else if (operand_less_p (*vr0max, vr1min) == 1
5561 || operand_less_p (vr1max, *vr0min) == 1)
5563 /* [ ] ( ) or ( ) [ ]
5564 If the ranges have an empty intersection, the result of the
5565 intersect operation is the range when intersecting a range
5566 with an anti-range, or empty when intersecting two ranges. */
5567 if (*vr0type == VR_RANGE
5568 && vr1type == VR_ANTI_RANGE)
5570 else if (*vr0type == VR_ANTI_RANGE
5571 && vr1type == VR_RANGE)
5573 *vr0type = vr1type;
5574 *vr0min = vr1min;
5575 *vr0max = vr1max;
5577 else if (*vr0type == VR_RANGE
5578 && vr1type == VR_RANGE)
5580 *vr0type = VR_UNDEFINED;
5581 *vr0min = NULL_TREE;
5582 *vr0max = NULL_TREE;
5584 else if (*vr0type == VR_ANTI_RANGE
5585 && vr1type == VR_ANTI_RANGE)
5587 /* If the anti-ranges are adjacent to each other, merge them. */
5588 if (TREE_CODE (*vr0max) == INTEGER_CST
5589 && TREE_CODE (vr1min) == INTEGER_CST
5590 && operand_less_p (*vr0max, vr1min) == 1
5591 && integer_onep (int_const_binop (MINUS_EXPR,
5592 vr1min, *vr0max)))
5593 *vr0max = vr1max;
5594 else if (TREE_CODE (vr1max) == INTEGER_CST
5595 && TREE_CODE (*vr0min) == INTEGER_CST
5596 && operand_less_p (vr1max, *vr0min) == 1
5597 && integer_onep (int_const_binop (MINUS_EXPR,
5598 *vr0min, vr1max)))
5599 *vr0min = vr1min;
5600 /* Else arbitrarily take VR0. */
5603 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5604 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5606 /* [ ( ) ] or [( ) ] or [ ( )] */
5607 if (*vr0type == VR_RANGE
5608 && vr1type == VR_RANGE)
5610 /* If both are ranges the result is the inner one. */
5611 *vr0type = vr1type;
5612 *vr0min = vr1min;
5613 *vr0max = vr1max;
5615 else if (*vr0type == VR_RANGE
5616 && vr1type == VR_ANTI_RANGE)
5618 /* Choose the right gap if the left one is empty. */
5619 if (mineq)
5621 if (TREE_CODE (vr1max) != INTEGER_CST)
5622 *vr0min = vr1max;
5623 else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
5624 && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
5625 *vr0min
5626 = int_const_binop (MINUS_EXPR, vr1max,
5627 build_int_cst (TREE_TYPE (vr1max), -1));
5628 else
5629 *vr0min
5630 = int_const_binop (PLUS_EXPR, vr1max,
5631 build_int_cst (TREE_TYPE (vr1max), 1));
5633 /* Choose the left gap if the right one is empty. */
5634 else if (maxeq)
5636 if (TREE_CODE (vr1min) != INTEGER_CST)
5637 *vr0max = vr1min;
5638 else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
5639 && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
5640 *vr0max
5641 = int_const_binop (PLUS_EXPR, vr1min,
5642 build_int_cst (TREE_TYPE (vr1min), -1));
5643 else
5644 *vr0max
5645 = int_const_binop (MINUS_EXPR, vr1min,
5646 build_int_cst (TREE_TYPE (vr1min), 1));
5648 /* Choose the anti-range if the range is effectively varying. */
5649 else if (vrp_val_is_min (*vr0min)
5650 && vrp_val_is_max (*vr0max))
5652 *vr0type = vr1type;
5653 *vr0min = vr1min;
5654 *vr0max = vr1max;
5656 /* Else choose the range. */
5658 else if (*vr0type == VR_ANTI_RANGE
5659 && vr1type == VR_ANTI_RANGE)
5660 /* If both are anti-ranges the result is the outer one. */
5662 else if (*vr0type == VR_ANTI_RANGE
5663 && vr1type == VR_RANGE)
5665 /* The intersection is empty. */
5666 *vr0type = VR_UNDEFINED;
5667 *vr0min = NULL_TREE;
5668 *vr0max = NULL_TREE;
5670 else
5671 gcc_unreachable ();
5673 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5674 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5676 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5677 if (*vr0type == VR_RANGE
5678 && vr1type == VR_RANGE)
5679 /* Choose the inner range. */
5681 else if (*vr0type == VR_ANTI_RANGE
5682 && vr1type == VR_RANGE)
5684 /* Choose the right gap if the left is empty. */
5685 if (mineq)
5687 *vr0type = VR_RANGE;
5688 if (TREE_CODE (*vr0max) != INTEGER_CST)
5689 *vr0min = *vr0max;
5690 else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
5691 && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
5692 *vr0min
5693 = int_const_binop (MINUS_EXPR, *vr0max,
5694 build_int_cst (TREE_TYPE (*vr0max), -1));
5695 else
5696 *vr0min
5697 = int_const_binop (PLUS_EXPR, *vr0max,
5698 build_int_cst (TREE_TYPE (*vr0max), 1));
5699 *vr0max = vr1max;
5701 /* Choose the left gap if the right is empty. */
5702 else if (maxeq)
5704 *vr0type = VR_RANGE;
5705 if (TREE_CODE (*vr0min) != INTEGER_CST)
5706 *vr0max = *vr0min;
5707 else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
5708 && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
5709 *vr0max
5710 = int_const_binop (PLUS_EXPR, *vr0min,
5711 build_int_cst (TREE_TYPE (*vr0min), -1));
5712 else
5713 *vr0max
5714 = int_const_binop (MINUS_EXPR, *vr0min,
5715 build_int_cst (TREE_TYPE (*vr0min), 1));
5716 *vr0min = vr1min;
5718 /* Choose the anti-range if the range is effectively varying. */
5719 else if (vrp_val_is_min (vr1min)
5720 && vrp_val_is_max (vr1max))
5722 /* Choose the anti-range if it is ~[0,0]; that range is special
5723 enough to special-case when vr1's range is relatively wide,
5724 at least for types as wide as int or wider. This covers pointers
5725 and arguments to functions like ctz. */
5726 else if (*vr0min == *vr0max
5727 && integer_zerop (*vr0min)
5728 && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
5729 >= TYPE_PRECISION (integer_type_node))
5730 || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
5731 && TREE_CODE (vr1max) == INTEGER_CST
5732 && TREE_CODE (vr1min) == INTEGER_CST
5733 && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
5734 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
5736 /* Else choose the range. */
5737 else
5739 *vr0type = vr1type;
5740 *vr0min = vr1min;
5741 *vr0max = vr1max;
5744 else if (*vr0type == VR_ANTI_RANGE
5745 && vr1type == VR_ANTI_RANGE)
5747 /* If both are anti-ranges the result is the outer one. */
5748 *vr0type = vr1type;
5749 *vr0min = vr1min;
5750 *vr0max = vr1max;
5752 else if (vr1type == VR_ANTI_RANGE
5753 && *vr0type == VR_RANGE)
5755 /* The intersection is empty. */
5756 *vr0type = VR_UNDEFINED;
5757 *vr0min = NULL_TREE;
5758 *vr0max = NULL_TREE;
5760 else
5761 gcc_unreachable ();
5763 else if ((operand_less_p (vr1min, *vr0max) == 1
5764 || operand_equal_p (vr1min, *vr0max, 0))
5765 && operand_less_p (*vr0min, vr1min) == 1)
5767 /* [ ( ] ) or [ ]( ) */
5768 if (*vr0type == VR_ANTI_RANGE
5769 && vr1type == VR_ANTI_RANGE)
5770 *vr0max = vr1max;
5771 else if (*vr0type == VR_RANGE
5772 && vr1type == VR_RANGE)
5773 *vr0min = vr1min;
5774 else if (*vr0type == VR_RANGE
5775 && vr1type == VR_ANTI_RANGE)
5777 if (TREE_CODE (vr1min) == INTEGER_CST)
5778 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5779 build_int_cst (TREE_TYPE (vr1min), 1));
5780 else
5781 *vr0max = vr1min;
5783 else if (*vr0type == VR_ANTI_RANGE
5784 && vr1type == VR_RANGE)
5786 *vr0type = VR_RANGE;
5787 if (TREE_CODE (*vr0max) == INTEGER_CST)
5788 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5789 build_int_cst (TREE_TYPE (*vr0max), 1));
5790 else
5791 *vr0min = *vr0max;
5792 *vr0max = vr1max;
5794 else
5795 gcc_unreachable ();
5797 else if ((operand_less_p (*vr0min, vr1max) == 1
5798 || operand_equal_p (*vr0min, vr1max, 0))
5799 && operand_less_p (vr1min, *vr0min) == 1)
5801 /* ( [ ) ] or ( )[ ] */
5802 if (*vr0type == VR_ANTI_RANGE
5803 && vr1type == VR_ANTI_RANGE)
5804 *vr0min = vr1min;
5805 else if (*vr0type == VR_RANGE
5806 && vr1type == VR_RANGE)
5807 *vr0max = vr1max;
5808 else if (*vr0type == VR_RANGE
5809 && vr1type == VR_ANTI_RANGE)
5811 if (TREE_CODE (vr1max) == INTEGER_CST)
5812 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5813 build_int_cst (TREE_TYPE (vr1max), 1));
5814 else
5815 *vr0min = vr1max;
5817 else if (*vr0type == VR_ANTI_RANGE
5818 && vr1type == VR_RANGE)
5820 *vr0type = VR_RANGE;
5821 if (TREE_CODE (*vr0min) == INTEGER_CST)
5822 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5823 build_int_cst (TREE_TYPE (*vr0min), 1));
5824 else
5825 *vr0max = *vr0min;
5826 *vr0min = vr1min;
5828 else
5829 gcc_unreachable ();
5832 /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as
5833 result for the intersection. That's always a conservative
5834 correct estimate unless VR1 is a constant singleton range
5835 in which case we choose that. */
5836 if (vr1type == VR_RANGE
5837 && is_gimple_min_invariant (vr1min)
5838 && vrp_operand_equal_p (vr1min, vr1max))
5840 *vr0type = vr1type;
5841 *vr0min = vr1min;
5842 *vr0max = vr1max;
5845 return;
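
/* Two worked cases for the code above (hypothetical operands, all of
   the same integral type, shown for illustration only): intersecting
   the range [0, 100] with the anti-range ~[0, 0] clips the range to
   [1, 100], while intersecting [0, 100] with the constant singleton
   [7, 7] yields [7, 7] - the kind of result the final fallback
   prefers whenever VR1 is a singleton.  */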

/* Intersect the two value-ranges *VR0 and *VR1 and store the result
   in *VR0.  This may not be the smallest possible such range.  */

static void
vrp_intersect_ranges_1 (value_range *vr0, const value_range *vr1)
{
  value_range saved;

  /* If either range is VR_VARYING the other one wins.  */
  if (vr1->type == VR_VARYING)
    return;
  if (vr0->type == VR_VARYING)
    {
      copy_value_range (vr0, vr1);
      return;
    }

  /* When either range is VR_UNDEFINED the resulting range is
     VR_UNDEFINED, too.  */
  if (vr0->type == VR_UNDEFINED)
    return;
  if (vr1->type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr0);
      return;
    }

  /* Save the original vr0 so we can return it as conservative intersection
     result when our worker turns things to varying.  */
  saved = *vr0;
  intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
		    vr1->type, vr1->min, vr1->max);
  /* Make sure to canonicalize the result though as the inversion of a
     VR_RANGE can still be a VR_RANGE.  */
  set_and_canonicalize_value_range (vr0, vr0->type,
				    vr0->min, vr0->max, vr0->equiv);
  /* If that failed, use the saved original VR0.  */
  if (vr0->type == VR_VARYING)
    {
      *vr0 = saved;
      return;
    }

  /* If the result is VR_UNDEFINED there is no need to mess with
     the equivalencies.  */
  if (vr0->type == VR_UNDEFINED)
    return;

  /* The resulting set of equivalences for range intersection is the union of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_ior_into (vr0->equiv, vr1->equiv);
  else if (vr1->equiv && !vr0->equiv)
    {
      /* All equivalence bitmaps are allocated from the same obstack.  So
	 we can use the obstack associated with VR1 to allocate vr0->equiv.  */
      vr0->equiv = BITMAP_ALLOC (vr1->equiv->obstack);
      bitmap_copy (vr0->equiv, vr1->equiv);
    }
}

void
vrp_intersect_ranges (value_range *vr0, const value_range *vr1)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Intersecting\n  ");
      dump_value_range (dump_file, vr0);
      fprintf (dump_file, "\nand\n  ");
      dump_value_range (dump_file, vr1);
      fprintf (dump_file, "\n");
    }
  vrp_intersect_ranges_1 (vr0, vr1);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "to\n  ");
      dump_value_range (dump_file, vr0);
      fprintf (dump_file, "\n");
    }
}
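
/* When TDF_DETAILS is set (e.g. via a -details dump flag on the vrp
   dump), the wrapper brackets each intersection in the dump file.
   For hypothetical inputs [1, 10] and [5, 20] it produces output of
   roughly the form

     Intersecting
       [1, 10]
     and
       [5, 20]
     to
       [5, 10]

   which is often the quickest way to see why a range was (or was
   not) narrowed.  */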

/* Meet operation for value ranges.  Given two value ranges VR0 and
   VR1, store in VR0 a range that contains both VR0 and VR1.  This
   may not be the smallest possible such range.  */

static void
vrp_meet_1 (value_range *vr0, const value_range *vr1)
{
  value_range saved;

  if (vr0->type == VR_UNDEFINED)
    {
      set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
      return;
    }

  if (vr1->type == VR_UNDEFINED)
    {
      /* VR0 already has the resulting range.  */
      return;
    }

  if (vr0->type == VR_VARYING)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr1->type == VR_VARYING)
    {
      set_value_range_to_varying (vr0);
      return;
    }

  saved = *vr0;
  union_ranges (&vr0->type, &vr0->min, &vr0->max,
		vr1->type, vr1->min, vr1->max);
  if (vr0->type == VR_VARYING)
    {
      /* Failed to find an efficient meet.  Before giving up and setting
	 the result to VARYING, see if we can at least derive a useful
	 anti-range.  */
      if (range_includes_zero_p (&saved) == 0
	  && range_includes_zero_p (vr1) == 0)
	{
	  set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));

	  /* Since this meet operation did not result from the meeting of
	     two equivalent names, VR0 cannot have any equivalences.  */
	  if (vr0->equiv)
	    bitmap_clear (vr0->equiv);
	  return;
	}

      set_value_range_to_varying (vr0);
      return;
    }

  set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
				    vr0->equiv);
  if (vr0->type == VR_VARYING)
    return;

  /* The resulting set of equivalences is always the intersection of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_and_into (vr0->equiv, vr1->equiv);
  else if (vr0->equiv && !vr1->equiv)
    bitmap_clear (vr0->equiv);
}

void
vrp_meet (value_range *vr0, const value_range *vr1)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Meeting\n  ");
      dump_value_range (dump_file, vr0);
      fprintf (dump_file, "\nand\n  ");
      dump_value_range (dump_file, vr1);
      fprintf (dump_file, "\n");
    }
  vrp_meet_1 (vr0, vr1);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "to\n  ");
      dump_value_range (dump_file, vr0);
      fprintf (dump_file, "\n");
    }
}
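
/* A worked example for the meet (hypothetical ranges over one
   integral type): the union of [1, 3] and [10, 20] is not
   representable exactly, so union_ranges returns the conservative
   hull [1, 20].  VR_UNDEFINED is the identity of the meet and
   VR_VARYING absorbs; the nonnull salvage above fires only when the
   union degrades to VARYING while both operands are known not to
   include zero.  */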

/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.  */

enum ssa_prop_result
vrp_prop::visit_phi (gphi *phi)
{
  tree lhs = PHI_RESULT (phi);
  value_range vr_result = VR_INITIALIZER;
  extract_range_from_phi_node (phi, &vr_result);
  if (update_value_range (lhs, &vr_result))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Found new range for ");
	  print_generic_expr (dump_file, lhs);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, &vr_result);
	  fprintf (dump_file, "\n");
	}

      if (vr_result.type == VR_VARYING)
	return SSA_PROP_VARYING;

      return SSA_PROP_INTERESTING;
    }

  /* Nothing changed, don't add outgoing edges.  */
  return SSA_PROP_NOT_INTERESTING;
}
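
/* For instance, for a (hypothetical) loop-header PHI

     i_1 = PHI <0(2), i_7(3)>

   where the latch value i_7 has range [1, 10], the meet of [0, 0]
   and [1, 10] gives i_1 the range [0, 10].  If that improves on the
   previously recorded range the result is SSA_PROP_INTERESTING and
   the propagator revisits the uses of i_1.  */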

class vrp_folder : public substitute_and_fold_engine
{
 public:
  tree get_value (tree) FINAL OVERRIDE;
  bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
  bool fold_predicate_in (gimple_stmt_iterator *);

  class vr_values *vr_values;

  /* Delegators.  */
  tree vrp_evaluate_conditional (tree_code code, tree op0,
				 tree op1, gimple *stmt)
    { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
  bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
    { return vr_values->simplify_stmt_using_ranges (gsi); }
  tree op_with_constant_singleton_value_range (tree op)
    { return vr_values->op_with_constant_singleton_value_range (op); }
};

/* If the statement pointed to by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  */

bool
vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple *stmt = gsi_stmt (*si);

  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
				      gimple_assign_rhs1 (stmt),
				      gimple_assign_rhs2 (stmt),
				      stmt);
    }
  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
				    gimple_cond_lhs (cond_stmt),
				    gimple_cond_rhs (cond_stmt),
				    stmt);
  else
    return false;

  if (val)
    {
      if (assignment_p)
	val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
	{
	  fprintf (dump_file, "Folding predicate ");
	  print_gimple_expr (dump_file, stmt, 0);
	  fprintf (dump_file, " to ");
	  print_generic_expr (dump_file, val);
	  fprintf (dump_file, "\n");
	}

      if (is_gimple_assign (stmt))
	gimple_assign_set_rhs_from_tree (si, val);
      else
	{
	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
	  gcond *cond_stmt = as_a <gcond *> (stmt);
	  if (integer_zerop (val))
	    gimple_cond_make_false (cond_stmt);
	  else if (integer_onep (val))
	    gimple_cond_make_true (cond_stmt);
	  else
	    gcc_unreachable ();
	}

      return true;
    }

  return false;
}
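
/* For instance, if a_2 has the (hypothetical) range [1, 10], the
   predicate in

     if (a_2 > 0)

   evaluates to 1, so the GIMPLE_COND is rewritten via
   gimple_cond_make_true and CFG cleanup later removes the dead
   arm.  */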

/* Callback for substitute_and_fold folding the stmt at *SI.  */

bool
vrp_folder::fold_stmt (gimple_stmt_iterator *si)
{
  if (fold_predicate_in (si))
    return true;

  return simplify_stmt_using_ranges (si);
}

/* If OP has a value range with a single constant value return that,
   otherwise return NULL_TREE.  This returns OP itself if OP is a
   constant.

   Implemented as a pure wrapper right now, but this will change.  */

tree
vrp_folder::get_value (tree op)
{
  return op_with_constant_singleton_value_range (op);
}
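
/* For instance, if x_3 has the (hypothetical) singleton range
   [42, 42], get_value returns the constant 42 and the generic
   substitute_and_fold machinery replaces the uses of x_3 with it.  */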

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
{
  imm_use_iterator imm_iter;
  gimple *use_stmt;
  use_operand_p use_p;

  if (TREE_CODE (op) == SSA_NAME)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
	{
	  use_stmt = USE_STMT (use_p);
	  if (use_stmt != stmt
	      && gimple_assign_single_p (use_stmt)
	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
	      && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
	      && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
	    return gimple_assign_lhs (use_stmt);
	}
    }
  return op;
}
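
/* For instance, given the (hypothetical) assertion

     p_4 = ASSERT_EXPR <p_3, p_3 != 0>;

   in a block dominating BB, looking up p_3 returns p_4, whose range
   additionally reflects the p_3 != 0 predicate.  */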

/* A hack: pass the current vr_values instance to
   simplify_stmt_for_jump_threading, whose callback signature gives us
   no way to hand it through directly.  */
static class vr_values *x_vr_values;

/* A trivial wrapper so that we can present the generic jump threading
   code with a simple API for simplifying statements.  STMT is the
   statement we want to simplify, WITHIN_STMT provides the location
   for any overflow warnings.  */

static tree
simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
    class avail_exprs_stack *avail_exprs_stack,
    basic_block bb)
{
  /* First see if the conditional is in the hash table.  */
  tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
  if (cached_lhs && is_gimple_min_invariant (cached_lhs))
    return cached_lhs;

  vr_values *vr_values = x_vr_values;
  if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    {
      tree op0 = gimple_cond_lhs (cond_stmt);
      op0 = lhs_of_dominating_assert (op0, bb, stmt);

      tree op1 = gimple_cond_rhs (cond_stmt);
      op1 = lhs_of_dominating_assert (op1, bb, stmt);

      return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
						  op0, op1, within_stmt);
    }

  /* We simplify a switch statement by trying to determine which case label
     will be taken.  If we are successful then we return the corresponding
     CASE_LABEL_EXPR.  */
  if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
    {
      tree op = gimple_switch_index (switch_stmt);
      if (TREE_CODE (op) != SSA_NAME)
	return NULL_TREE;

      op = lhs_of_dominating_assert (op, bb, stmt);

      const value_range *vr = vr_values->get_value_range (op);
      if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return NULL_TREE;

      if (vr->type == VR_RANGE)
	{
	  size_t i, j;

	  /* Get the range of labels that contain a part of the operand's
	     value range.  */
	  find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);

	  /* Is there only one such label?  */
	  if (i == j)
	    {
	      tree label = gimple_switch_label (switch_stmt, i);

	      /* The i'th label will be taken only if the value range of the
		 operand is entirely within the bounds of this label.  */
	      if (CASE_HIGH (label) != NULL_TREE
		  ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
		     && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
		  : (tree_int_cst_equal (CASE_LOW (label), vr->min)
		     && tree_int_cst_equal (vr->min, vr->max)))
		return label;
	    }

	  /* If there are no such labels then the default label will be
	     taken.  */
	  if (i > j)
	    return gimple_switch_label (switch_stmt, 0);
	}

      if (vr->type == VR_ANTI_RANGE)
	{
	  unsigned n = gimple_switch_num_labels (switch_stmt);
	  tree min_label = gimple_switch_label (switch_stmt, 1);
	  tree max_label = gimple_switch_label (switch_stmt, n - 1);

	  /* The default label will be taken only if the anti-range of the
	     operand is entirely outside the bounds of all the (non-default)
	     case labels.  */
	  if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
	      && (CASE_HIGH (max_label) != NULL_TREE
		  ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
		  : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
	    return gimple_switch_label (switch_stmt, 0);
	}

      return NULL_TREE;
    }

  if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
    {
      tree lhs = gimple_assign_lhs (assign_stmt);
      if (TREE_CODE (lhs) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && stmt_interesting_for_vrp (stmt))
	{
	  edge dummy_e;
	  tree dummy_tree;
	  value_range new_vr = VR_INITIALIZER;
	  vr_values->extract_range_from_stmt (stmt, &dummy_e,
					      &dummy_tree, &new_vr);
	  if (range_int_cst_singleton_p (&new_vr))
	    return new_vr.min;
	}
    }

  return NULL_TREE;
}
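
/* For instance, if the switch index has the (hypothetical) range
   [3, 4] and exactly one case label spans 3 ... 5, that
   CASE_LABEL_EXPR is returned and the threader can route the
   incoming edge straight to its target; an index range that misses
   every case label selects the default label,
   gimple_switch_label (switch_stmt, 0).  */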

class vrp_dom_walker : public dom_walker
{
 public:
  vrp_dom_walker (cdi_direction direction,
		  class const_and_copies *const_and_copies,
		  class avail_exprs_stack *avail_exprs_stack)
    : dom_walker (direction, REACHABLE_BLOCKS),
      m_const_and_copies (const_and_copies),
      m_avail_exprs_stack (avail_exprs_stack),
      m_dummy_cond (NULL) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

  class vr_values *vr_values;

 private:
  class const_and_copies *m_const_and_copies;
  class avail_exprs_stack *m_avail_exprs_stack;

  gcond *m_dummy_cond;
};

/* Called before processing dominator children of BB.  We want to look
   at ASSERT_EXPRs and record information from them in the appropriate
   tables.

   We could look at other statements here.  It's not seen as likely
   to significantly increase the jump threads we discover.  */

edge
vrp_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();
  for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	{
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  tree cond = TREE_OPERAND (rhs1, 1);
	  tree inverted = invert_truthvalue (cond);
	  vec<cond_equivalence> p;
	  p.create (3);
	  record_conditions (&p, cond, inverted);
	  for (unsigned int i = 0; i < p.length (); i++)
	    m_avail_exprs_stack->record_cond (&p[i]);

	  tree lhs = gimple_assign_lhs (stmt);
	  m_const_and_copies->record_const_or_copy (lhs,
						    TREE_OPERAND (rhs1, 0));
	  p.release ();
	  continue;
	}
      break;
    }
  return NULL;
}
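
/* For instance, from the (hypothetical) assertion

     x_5 = ASSERT_EXPR <x_4, x_4 > 0>;

   we record the condition x_4 > 0 (plus the conditions derivable
   from it) in the expression table and the equivalence x_5 == x_4 in
   the const-and-copies table; both are unwound again in
   after_dom_children via pop_to_marker.  */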

/* Called after processing dominator children of BB.  This is where we
   actually call into the threader.  */

void
vrp_dom_walker::after_dom_children (basic_block bb)
{
  if (!m_dummy_cond)
    m_dummy_cond = gimple_build_cond (NE_EXPR,
				      integer_zero_node, integer_zero_node,
				      NULL, NULL);

  x_vr_values = vr_values;
  thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
			 m_avail_exprs_stack, NULL,
			 simplify_stmt_for_jump_threading);
  x_vr_values = NULL;

  m_avail_exprs_stack->pop_to_marker ();
  m_const_and_copies->pop_to_marker ();
}

/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities, i.e.,
   when the block is reached from a specific predecessor, we
   may be able to determine which of the outgoing edges will
   be traversed.  When this optimization applies, we are able
   to avoid conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */

static void
identify_jump_threads (class vr_values *vr_values)
{
  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  */
  const_and_copies *equiv_stack = new const_and_copies ();

  hash_table<expr_elt_hasher> *avail_exprs
    = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack *avail_exprs_stack
    = new class avail_exprs_stack (avail_exprs);

  vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
  walker.vr_values = vr_values;
  walker.walk (cfun->cfg->x_entry_block_ptr);

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
  delete equiv_stack;
  delete avail_exprs;
  delete avail_exprs_stack;
}

/* Traverse all the blocks folding conditionals with known ranges.  */

void
vrp_prop::vrp_finalize (bool warn_array_bounds_p)
{
  size_t i;

  /* We have completed propagating through the lattice.  */
  vr_values.set_lattice_propagation_complete ();

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      vr_values.dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  /* Export the computed ranges: mark pointer SSA_NAMEs known to be
     non-null and attach range info to non-pointer SSA_NAMEs.  */
  for (i = 0; i < num_ssa_names; i++)
    {
      tree name = ssa_name (i);
      if (!name)
	continue;

      const value_range *vr = get_value_range (name);
      if (vr->type == VR_VARYING
	  || vr->type == VR_UNDEFINED
	  || TREE_CODE (vr->min) != INTEGER_CST
	  || TREE_CODE (vr->max) != INTEGER_CST)
	continue;

      if (POINTER_TYPE_P (TREE_TYPE (name))
	  && range_includes_zero_p (vr) == 0)
	set_ptr_nonnull (name);
      else if (!POINTER_TYPE_P (TREE_TYPE (name)))
	set_range_info (name, vr->type,
			wi::to_wide (vr->min),
			wi::to_wide (vr->max));
    }

  /* If we're checking array refs, we want to merge information on
     the executability of each edge between vrp_folder and the
     check_array_bounds_dom_walker: each can clear the
     EDGE_EXECUTABLE flag on edges, in different ways.

     Hence, if we're going to call check_all_array_refs, set
     the flag on every edge now, rather than in
     check_array_bounds_dom_walker's ctor; vrp_folder may clear
     it from some edges.  */
  if (warn_array_bounds && warn_array_bounds_p)
    set_all_edges_as_executable (cfun);

  class vrp_folder vrp_folder;
  vrp_folder.vr_values = &vr_values;
  vrp_folder.substitute_and_fold ();

  if (warn_array_bounds && warn_array_bounds_p)
    check_all_array_refs ();
}
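
/* For instance, an integral SSA_NAME whose final lattice value is the
   (hypothetical) range [0, 255] keeps that fact attached via
   set_range_info, where later passes can query it with
   get_range_info long after the VRP lattice is gone; a pointer whose
   range is ~[0, 0] is marked via set_ptr_nonnull instead.  */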

/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA names
   have equivalent, though distinct, ranges.  For instance,

	1	x_9 = p_3->a;
	2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
	3	if (p_4 == q_2)
	4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
	5	endif
	6	if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO: the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */

static unsigned int
execute_vrp (bool warn_array_bounds_p)
{
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  class vrp_prop vrp_prop;
  vrp_prop.vrp_initialize ();
  vrp_prop.ssa_propagate ();
  vrp_prop.vrp_finalize (warn_array_bounds_p);

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads (&vrp_prop.vr_values);

  /* A comparison of an SSA_NAME against a constant where the SSA_NAME
     was set by a type conversion can often be rewritten to use the
     RHS of the type conversion.

     However, doing so inhibits jump threading through the comparison.
     So that transformation is not performed until after jump threading
     is complete.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple *last = last_stmt (bb);
      if (last && gimple_code (last) == GIMPLE_COND)
	vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
    }

  free_numbers_of_iterations_estimates (cfun);

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  /* We identified all the jump threading opportunities earlier, but could
     not transform the CFG at that time.  This routine transforms the
     CFG and arranges for the dominator tree to be rebuilt if necessary.

     Note the SSA graph update will occur during the normal TODO
     processing by the pass manager.  */
  thread_through_all_blocks (false);

  vrp_prop.vr_values.cleanup_edges_and_switches ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}

namespace {

const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_vrp : public gimple_opt_pass
{
public:
  pass_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_vrp (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      warn_array_bounds_p = param;
    }
  virtual bool gate (function *) { return flag_tree_vrp != 0; }
  virtual unsigned int execute (function *)
    { return execute_vrp (warn_array_bounds_p); }

 private:
  bool warn_array_bounds_p;
}; // class pass_vrp

} // anon namespace

gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt);
}

/* Worker for determine_value_range.  */

static void
determine_value_range_1 (value_range *vr, tree expr)
{
  if (BINARY_CLASS_P (expr))
    {
      value_range vr0 = VR_INITIALIZER, vr1 = VR_INITIALIZER;
      determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
      determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
      extract_range_from_binary_expr_1 (vr, TREE_CODE (expr), TREE_TYPE (expr),
					&vr0, &vr1);
    }
  else if (UNARY_CLASS_P (expr))
    {
      value_range vr0 = VR_INITIALIZER;
      determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
      extract_range_from_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
				     &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
    }
  else if (TREE_CODE (expr) == INTEGER_CST)
    set_value_range_to_value (vr, expr, NULL);
  else
    {
      value_range_type kind;
      wide_int min, max;
      /* For SSA names try to extract range info computed by VRP.  Otherwise
	 fall back to varying.  */
      if (TREE_CODE (expr) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (expr))
	  && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
	set_value_range (vr, kind, wide_int_to_tree (TREE_TYPE (expr), min),
			 wide_int_to_tree (TREE_TYPE (expr), max), NULL);
      else
	set_value_range_to_varying (vr);
    }
}
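
/* For instance, for the (hypothetical) expression a_1 + 4 where VRP
   recorded the range [0, 10] for a_1, the recursion computes
   vr0 = [0, 10] and vr1 = [4, 4], and
   extract_range_from_binary_expr_1 combines them into [4, 14].  */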

/* Compute a value-range for EXPR and set it in *MIN and *MAX.  Return
   the determined range type.  */

value_range_type
determine_value_range (tree expr, wide_int *min, wide_int *max)
{
  value_range vr = VR_INITIALIZER;
  determine_value_range_1 (&vr, expr);
  if ((vr.type == VR_RANGE
       || vr.type == VR_ANTI_RANGE)
      && !symbolic_range_p (&vr))
    {
      *min = wi::to_wide (vr.min);
      *max = wi::to_wide (vr.max);
      return vr.type;
    }

  return VR_VARYING;
}
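
/* A minimal sketch of a caller (hypothetical; not part of this file
   or of GCC) using determine_value_range as defined above to test
   whether EXPR is provably non-negative.  The helper name is made
   up for illustration.  */

static bool
expr_known_nonnegative_p (tree expr)
{
  wide_int min, max;

  /* Only a plain VR_RANGE gives directly usable endpoints; an
     anti-range excludes an interval and VARYING tells us nothing.  */
  if (determine_value_range (expr, &min, &max) != VR_RANGE)
    return false;

  /* MIN is the lower bound; compare it against zero in the sign of
     EXPR's type.  */
  return wi::ge_p (min, 0, TYPE_SIGN (TREE_TYPE (expr)));
}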