/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2017 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "intl.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "target.h"
#include "case-cfn-macros.h"
#include "params.h"
#include "alloc-pool.h"
#include "domwalk.h"
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "attribs.h"
#include "vr-values.h"
/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
	  && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}
/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

vec<edge> to_remove_edges;
vec<switch_update> to_update_switch_stmts;
/* Return the maximum value for TYPE.  */

tree
vrp_val_max (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MAX_VALUE (type);
}

/* Return the minimum value for TYPE.  */

tree
vrp_val_min (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MIN_VALUE (type);
}

/* Return whether VAL is equal to the maximum value of its type.
   We can't do a simple equality comparison with TYPE_MAX_VALUE because
   C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
   is not == to the integer constant with the same value in the type.  */

bool
vrp_val_is_max (const_tree val)
{
  tree type_max = vrp_val_max (TREE_TYPE (val));
  return (val == type_max
	  || (type_max != NULL_TREE
	      && operand_equal_p (val, type_max, 0)));
}

/* Return whether VAL is equal to the minimum value of its type.  */

bool
vrp_val_is_min (const_tree val)
{
  tree type_min = vrp_val_min (TREE_TYPE (val));
  return (val == type_min
	  || (type_min != NULL_TREE
	      && operand_equal_p (val, type_min, 0)));
}
/* Set value range VR to VR_UNDEFINED.  */

static inline void
set_value_range_to_undefined (value_range *vr)
{
  vr->type = VR_UNDEFINED;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}

/* Set value range VR to VR_VARYING.  */

void
set_value_range_to_varying (value_range *vr)
{
  vr->type = VR_VARYING;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}
/* Set value range VR to {T, MIN, MAX, EQUIV}.  */

void
set_value_range (value_range *vr, enum value_range_type t, tree min,
		 tree max, bitmap equiv)
{
  /* Check the validity of the range.  */
  if (flag_checking
      && (t == VR_RANGE || t == VR_ANTI_RANGE))
    {
      int cmp;

      gcc_assert (min && max);

      gcc_assert (!TREE_OVERFLOW_P (min) && !TREE_OVERFLOW_P (max));

      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
    }

  if (flag_checking
      && (t == VR_UNDEFINED || t == VR_VARYING))
    {
      gcc_assert (min == NULL_TREE && max == NULL_TREE);
      gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
    }

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.

     All equivalence bitmaps are allocated from the same obstack.  So
     we can use the obstack associated with EQUIV to allocate vr->equiv.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (equiv->obstack);

  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (vr->equiv, equiv);
      else
	bitmap_clear (vr->equiv);
    }
}
/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */
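/* As an illustration: for an unsigned char the wrapping range
   [250, 10], i.e. [250, 255] U [0, 10], is canonicalized to the
   anti-range ~[11, 249]; conversely, the wrapping anti-range
   ~[250, 10] becomes the range [11, 249].  */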
void
set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
				  tree min, tree max, bitmap equiv)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (t == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  else if (t == VR_VARYING)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* Wrong order for min and max, to swap them and the VR type we need
     to adjust them.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
	 range covers all values, so for VR_RANGE it is varying and
	 for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
    {
      bool is_min = vrp_val_is_min (min);
      bool is_max = vrp_val_is_max (max);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ??? This could be VR_UNDEFINED instead.  */
	  set_value_range_to_varying (vr);
	  return;
	}
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
	       && (is_min || is_max))
	{
	  /* Non-empty boolean ranges can always be represented
	     as a singleton range.  */
	  if (is_min)
	    min = max = vrp_val_max (TREE_TYPE (min));
	  else
	    min = max = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
      else if (is_min
	       /* As a special exception preserve non-null ranges.  */
	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
		    && integer_zerop (max)))
	{
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max));
	  t = VR_RANGE;
	}
      else if (is_max)
	{
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
    }

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */

  set_value_range (vr, t, min, max, equiv);
}
/* Copy value range FROM into value range TO.  */

void
copy_value_range (value_range *to, value_range *from)
{
  set_value_range (to, from->type, from->min, from->max, from->equiv);
}

/* Set value range VR to a single value.  This function is only called
   with values we get from statements, and exists to clear the
   TREE_OVERFLOW flag.  */

void
set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
{
  gcc_assert (is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set_value_range (vr, VR_RANGE, val, val, equiv);
}

/* Set value range VR to a non-NULL range of type TYPE.  */

void
set_value_range_to_nonnull (value_range *vr, tree type)
{
  tree zero = build_int_cst (type, 0);
  set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
}

/* Set value range VR to a NULL range of type TYPE.  */

void
set_value_range_to_null (value_range *vr, tree type)
{
  set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
}
/* If abs (min) < abs (max), set VR to [-max, max], if
   abs (min) >= abs (max), set VR to [-min, min].  */
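/* For instance, [-3, 5] yields [-5, 5] and [-7, 2] yields [-7, 7].  */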
static void
abs_extent_range (value_range *vr, tree min, tree max)
{
  int cmp;

  gcc_assert (TREE_CODE (min) == INTEGER_CST);
  gcc_assert (TREE_CODE (max) == INTEGER_CST);
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
  gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
  min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
  max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
  if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
    {
      set_value_range_to_varying (vr);
      return;
    }
  cmp = compare_values (min, max);
  if (cmp == -1)
    min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
  else if (cmp == 0 || cmp == 1)
    {
      max = min;
      min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
    }
  else
    {
      set_value_range_to_varying (vr);
      return;
    }
  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
}
/* Return true, if VAL1 and VAL2 are equal values for VRP purposes.  */

bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
  if (val1 == val2)
    return true;
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
    return false;
  return true;
}

/* Return true, if the bitmaps B1 and B2 are equal.  */

bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
	  || ((!b1 || bitmap_empty_p (b1))
	      && (!b2 || bitmap_empty_p (b2)))
	  || (b1 && b2
	      && bitmap_equal_p (b1, b2)));
}

/* Return true if VR is ~[0, 0].  */

bool
range_is_nonnull (value_range *vr)
{
  return vr->type == VR_ANTI_RANGE
	 && integer_zerop (vr->min)
	 && integer_zerop (vr->max);
}

/* Return true if VR is [0, 0].  */

static inline bool
range_is_null (value_range *vr)
{
  return vr->type == VR_RANGE
	 && integer_zerop (vr->min)
	 && integer_zerop (vr->max);
}

/* Return true if max and min of VR are INTEGER_CST.  It need not be a
   singleton.  */

bool
range_int_cst_p (value_range *vr)
{
  return (vr->type == VR_RANGE
	  && TREE_CODE (vr->max) == INTEGER_CST
	  && TREE_CODE (vr->min) == INTEGER_CST);
}

/* Return true if VR is an INTEGER_CST singleton.  */

bool
range_int_cst_singleton_p (value_range *vr)
{
  return (range_int_cst_p (vr)
	  && tree_int_cst_equal (vr->min, vr->max));
}

/* Return true if value range VR involves at least one symbol.  */

bool
symbolic_range_p (value_range *vr)
{
  return (!is_gimple_min_invariant (vr->min)
	  || !is_gimple_min_invariant (vr->max));
}
/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */
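/* For instance, for T = x_1 + 16 this returns x_1 with *NEG = false
   and *INV = 16, and for T = 5 - x_1 it returns x_1 with *NEG = true
   and *INV = 5.  */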
tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
	{
	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
	  inv_ = TREE_OPERAND (t, 0);
	  t = TREE_OPERAND (t, 1);
	}
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	{
	  neg_ = false;
	  inv_ = TREE_OPERAND (t, 1);
	  t = TREE_OPERAND (t, 0);
	}
      else
	return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}
/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}
/* Return
    1 if VAL < VAL2
    0 if !(VAL < VAL2)
   -2 if those are incomparable.  */

int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else
    {
      tree tcmp;

      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  return 0;
}
/* Compare two values VAL1 and VAL2.  Return

   	-2 if VAL1 and VAL2 cannot be compared at compile-time,
   	-1 if VAL1 < VAL2,
   	 0 if VAL1 == VAL2,
   	+1 if VAL1 > VAL2, and
   	+2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */
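/* For instance, if VAL1 is n_1 + 1 and VAL2 is n_1 + 3, both values
   use the same symbol and the result is the comparison of the
   invariants 1 and 3, i.e. -1; that answer is only valid if signed
   overflow is undefined, which is why *STRICT_OVERFLOW_P may be
   set.  */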
int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
	return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!inv1 || !TREE_NO_WARNING (val1))
	  && (!inv2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (!inv1)
	inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
	inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
		      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!sym1 || !TREE_NO_WARNING (val1))
	  && (!sym2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
	 underflows, this means that we can trivially compare the NAME with
	 it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
	  != wi::cmp (diff, wi::to_wide (cst), sgn))
	{
	  const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
	  return cst1 ? res : -res;
	}

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	return -2;

      return tree_int_cst_compare (val1, val2);
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
	  t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}
/* Compare values like compare_values_warnv.  */

int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}

/* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
	  0 if VAL is not inside [MIN, MAX],
	 -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

int
value_inside_range (tree val, tree min, tree max)
{
  int cmp1, cmp2;

  cmp1 = operand_less_p (val, min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return 0;

  cmp2 = operand_less_p (max, val);
  if (cmp2 == -2)
    return -2;

  return !cmp2;
}
/* Return true if value ranges VR0 and VR1 have a non-empty
   intersection.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

static inline bool
value_ranges_intersect_p (value_range *vr0, value_range *vr1)
{
  /* The value ranges do not intersect if the maximum of the first range is
     less than the minimum of the second range or vice versa.
     When those relations are unknown, we can't do any better.  */
  if (operand_less_p (vr0->max, vr1->min) != 0)
    return false;
  if (operand_less_p (vr1->max, vr0->min) != 0)
    return false;
  return true;
}

/* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
   include the value zero, -2 if we cannot tell.  */

int
range_includes_zero_p (tree min, tree max)
{
  tree zero = build_int_cst (TREE_TYPE (min), 0);
  return value_inside_range (zero, min, max);
}
/* Return true if *VR is known to only contain nonnegative values.  */

static inline bool
value_range_nonnegative_p (value_range *vr)
{
  /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
     which would return a useful value should be encoded as a
     VR_RANGE.  */
  if (vr->type == VR_RANGE)
    {
      int result = compare_values (vr->min, integer_zero_node);
      return (result == 0 || result == 1);
    }

  return false;
}

/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.  */

tree
value_range_constant_singleton (value_range *vr)
{
  if (vr->type == VR_RANGE
      && vrp_operand_equal_p (vr->min, vr->max)
      && is_gimple_min_invariant (vr->min))
    return vr->min;

  return NULL_TREE;
}
/* Wrapper around int_const_binop.  Return true if we can compute the
   result; i.e. if the operation doesn't overflow or if the overflow is
   undefined.  In the latter case (if the operation overflows and
   overflow is undefined), then adjust the result to be -INF or +INF
   depending on CODE, VAL1 and VAL2.  Return the value in *RES.

   Return false for division by zero, for which the result is
   indeterminate.  */
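/* For instance, for a signed char type with undefined overflow,
   16 * 16 overflows and *RES is saturated to +INF, i.e. 127; with
   differing operand signs, -16 * 16 saturates to -INF, i.e. -128.  */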
static bool
vrp_int_const_binop (enum tree_code code, tree val1, tree val2, wide_int *res)
{
  bool overflow = false;
  signop sign = TYPE_SIGN (TREE_TYPE (val1));

  switch (code)
    {
    case RSHIFT_EXPR:
    case LSHIFT_EXPR:
      {
	wide_int wval2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1)));
	if (wi::neg_p (wval2))
	  {
	    wval2 = -wval2;
	    if (code == RSHIFT_EXPR)
	      code = LSHIFT_EXPR;
	    else
	      code = RSHIFT_EXPR;
	  }

	if (code == RSHIFT_EXPR)
	  /* It's unclear from the C standard whether shifts can overflow.
	     The following code ignores overflow; perhaps a C standard
	     interpretation ruling is needed.  */
	  *res = wi::rshift (wi::to_wide (val1), wval2, sign);
	else
	  *res = wi::lshift (wi::to_wide (val1), wval2);
	break;
      }

    case MULT_EXPR:
      *res = wi::mul (wi::to_wide (val1),
		      wi::to_wide (val2), sign, &overflow);
      break;

    case TRUNC_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (val2 == 0)
	return false;
      else
	*res = wi::div_trunc (wi::to_wide (val1),
			      wi::to_wide (val2), sign, &overflow);
      break;

    case FLOOR_DIV_EXPR:
      if (val2 == 0)
	return false;
      *res = wi::div_floor (wi::to_wide (val1),
			    wi::to_wide (val2), sign, &overflow);
      break;

    case CEIL_DIV_EXPR:
      if (val2 == 0)
	return false;
      *res = wi::div_ceil (wi::to_wide (val1),
			   wi::to_wide (val2), sign, &overflow);
      break;

    case ROUND_DIV_EXPR:
      if (val2 == 0)
	return false;
      *res = wi::div_round (wi::to_wide (val1),
			    wi::to_wide (val2), sign, &overflow);
      break;

    default:
      gcc_unreachable ();
    }

  if (overflow
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
    {
      /* If the operation overflowed return -INF or +INF depending
	 on the operation and the combination of signs of the operands.  */
      int sgn1 = tree_int_cst_sgn (val1);
      int sgn2 = tree_int_cst_sgn (val2);

      /* Notice that we only need to handle the restricted set of
	 operations handled by extract_range_from_binary_expr.
	 Among them, only multiplication, addition and subtraction
	 can yield overflow without overflown operands because we
	 are working with integral types only... except in the
	 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
	 for division too.  */

      /* For multiplication, the sign of the overflow is given
	 by the comparison of the signs of the operands.  */
      if ((code == MULT_EXPR && sgn1 == sgn2)
	  /* For addition, the operands must be of the same sign
	     to yield an overflow.  Its sign is therefore that
	     of one of the operands, for example the first.  */
	  || (code == PLUS_EXPR && sgn1 >= 0)
	  /* For subtraction, operands must be of
	     different signs to yield an overflow.  Its sign is
	     therefore that of the first operand or the opposite of
	     that of the second operand.  A first operand of 0 counts
	     as positive here, for the corner case 0 - (-INF), which
	     overflows, but must yield +INF.  */
	  || (code == MINUS_EXPR && sgn1 >= 0)
	  /* For division, the only case is -INF / -1 = +INF.  */
	  || code == TRUNC_DIV_EXPR
	  || code == FLOOR_DIV_EXPR
	  || code == CEIL_DIV_EXPR
	  || code == EXACT_DIV_EXPR
	  || code == ROUND_DIV_EXPR)
	*res = wi::max_value (TYPE_PRECISION (TREE_TYPE (val1)),
			      TYPE_SIGN (TREE_TYPE (val1)));
      else
	*res = wi::min_value (TYPE_PRECISION (TREE_TYPE (val1)),
			      TYPE_SIGN (TREE_TYPE (val1)));
      return true;
    }

  return !overflow;
}
/* For range VR compute two wide_int bitmasks.  In the *MAY_BE_NONZERO
   bitmask, if some bit is unset it means that for all numbers in the
   range the bit is 0; otherwise it might be 0 or 1.  In the
   *MUST_BE_NONZERO bitmask, if some bit is set it means that for all
   numbers in the range the bit is 1; otherwise it might be 0 or 1.  */
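/* For instance, for the unsigned range [4, 7] (binary 100 to 111)
   *MAY_BE_NONZERO is 0b111, since the low two bits vary, and
   *MUST_BE_NONZERO is 0b100, since bit 2 is set in every value of
   the range.  */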
bool
zero_nonzero_bits_from_vr (const tree expr_type,
			   value_range *vr,
			   wide_int *may_be_nonzero,
			   wide_int *must_be_nonzero)
{
  *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
  *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
  if (!range_int_cst_p (vr))
    return false;

  if (range_int_cst_singleton_p (vr))
    {
      *may_be_nonzero = wi::to_wide (vr->min);
      *must_be_nonzero = *may_be_nonzero;
    }
  else if (tree_int_cst_sgn (vr->min) >= 0
	   || tree_int_cst_sgn (vr->max) < 0)
    {
      wide_int xor_mask = wi::to_wide (vr->min) ^ wi::to_wide (vr->max);
      *may_be_nonzero = wi::to_wide (vr->min) | wi::to_wide (vr->max);
      *must_be_nonzero = wi::to_wide (vr->min) & wi::to_wide (vr->max);
      if (xor_mask != 0)
	{
	  wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
				    may_be_nonzero->get_precision ());
	  *may_be_nonzero = *may_be_nonzero | mask;
	  *must_be_nonzero = wi::bit_and_not (*must_be_nonzero, mask);
	}
    }

  return true;
}
/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */
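/* For instance, for a signed char the anti-range ~[5, 10] splits
   into *VR0 = [-128, 4] and *VR1 = [11, 127], while ~[-128, 10]
   yields the single range *VR0 = [11, 127] and *VR1 stays
   VR_UNDEFINED.  */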
static bool
ranges_from_anti_range (value_range *ar,
			value_range *vr0, value_range *vr1)
{
  tree type = TREE_TYPE (ar->min);

  vr0->type = VR_UNDEFINED;
  vr1->type = VR_UNDEFINED;

  if (ar->type != VR_ANTI_RANGE
      || TREE_CODE (ar->min) != INTEGER_CST
      || TREE_CODE (ar->max) != INTEGER_CST
      || !vrp_val_min (type)
      || !vrp_val_max (type))
    return false;

  if (!vrp_val_is_min (ar->min))
    {
      vr0->type = VR_RANGE;
      vr0->min = vrp_val_min (type);
      vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1);
    }
  if (!vrp_val_is_max (ar->max))
    {
      vr1->type = VR_RANGE;
      vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1);
      vr1->max = vrp_val_max (type);
    }
  if (vr0->type == VR_UNDEFINED)
    {
      *vr0 = *vr1;
      vr1->type = VR_UNDEFINED;
    }

  return vr0->type != VR_UNDEFINED;
}
/* Helper to extract a value-range *VR for a multiplicative operation
   *VR0 CODE *VR1.  */

static void
extract_range_from_multiplicative_op_1 (value_range *vr,
					enum tree_code code,
					value_range *vr0, value_range *vr1)
{
  enum value_range_type rtype;
  wide_int val, min, max;
  tree type;

  /* Multiplications, divisions and shifts are a bit tricky to handle,
     depending on the mix of signs we have in the two ranges, we
     need to operate on different values to get the minimum and
     maximum values for the new range.  One approach is to figure
     out all the variations of range combinations and do the
     operations.

     However, this involves several calls to compare_values and it
     is pretty convoluted.  It's simpler to do the 4 operations
     (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
     and then figure the smallest and largest values to form the
     new range.  */
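  /* For instance, for [2, 3] * [5, 10] the four cross operations give
     10, 20, 15 and 30, so the resulting range is [10, 30].  */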
  gcc_assert (code == MULT_EXPR
	      || code == TRUNC_DIV_EXPR
	      || code == FLOOR_DIV_EXPR
	      || code == CEIL_DIV_EXPR
	      || code == EXACT_DIV_EXPR
	      || code == ROUND_DIV_EXPR
	      || code == RSHIFT_EXPR
	      || code == LSHIFT_EXPR);
  gcc_assert (vr0->type == VR_RANGE
	      && vr0->type == vr1->type);

  rtype = vr0->type;
  type = TREE_TYPE (vr0->min);
  signop sgn = TYPE_SIGN (type);

  /* Compute the 4 cross operations and their minimum and maximum value.  */
  if (!vrp_int_const_binop (code, vr0->min, vr1->min, &val))
    {
      set_value_range_to_varying (vr);
      return;
    }
  min = max = val;

  if (vr1->max != vr1->min)
    {
      if (!vrp_int_const_binop (code, vr0->min, vr1->max, &val))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      if (wi::lt_p (val, min, sgn))
	min = val;
      else if (wi::gt_p (val, max, sgn))
	max = val;
    }

  if (vr0->max != vr0->min)
    {
      if (!vrp_int_const_binop (code, vr0->max, vr1->min, &val))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      if (wi::lt_p (val, min, sgn))
	min = val;
      else if (wi::gt_p (val, max, sgn))
	max = val;
    }

  if (vr0->min != vr0->max && vr1->min != vr1->max)
    {
      if (!vrp_int_const_binop (code, vr0->max, vr1->max, &val))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      if (wi::lt_p (val, min, sgn))
	min = val;
      else if (wi::gt_p (val, max, sgn))
	max = val;
    }

  /* If the new range has its limits swapped around (MIN > MAX),
     then the operation caused one of them to wrap around, mark
     the new range VARYING.  */
  if (wi::gt_p (min, max, sgn))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt for [-INF, +INF].
     We learn nothing when we have INF on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF].  */
  if (wi::eq_p (min, wi::min_value (TYPE_PRECISION (type), sgn))
      && wi::eq_p (max, wi::max_value (TYPE_PRECISION (type), sgn)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  set_value_range (vr, rtype,
		   wide_int_to_tree (type, min),
		   wide_int_to_tree (type, max), NULL);
}
/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.  */
void
extract_range_from_binary_expr_1 (value_range *vr,
				  enum tree_code code, tree expr_type,
				  value_range *vr0_, value_range *vr1_)
{
  value_range vr0 = *vr0_, vr1 = *vr1_;
  value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
  enum value_range_type type;
  tree min = NULL_TREE, max = NULL_TREE;
  int cmp;

  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If both ranges are UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  /* If one of the ranges is UNDEFINED drop it to VARYING for the following
     code.  At some point we may want to special-case operations that
     have UNDEFINED result for all or some value-ranges of the not UNDEFINED
     operand.  */
  else if (vr0.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr0);
  else if (vr1.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr1);

  /* We get imprecise results from ranges_from_anti_range when
     code is EXACT_DIV_EXPR.  We could mask out bits in the resulting
     range, but then we also need to hack up vrp_meet.  It's just
     easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR.  */
  if (code == EXACT_DIV_EXPR
      && vr0.type == VR_ANTI_RANGE
      && vr0.min == vr0.max
      && integer_zerop (vr0.min))
    {
      set_value_range_to_nonnull (vr, expr_type);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    &vrtem1, vr1_);
	  vrp_meet (vr, &vrres);
	}
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    vr0_, &vrtem1);
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.type;
  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or symbolic range.  Similarly for
     divisions, MIN/MAX and PLUS/MINUS.

     TODO, we may be able to derive anti-ranges in some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != RSHIFT_EXPR
      && (vr0.type == VR_VARYING
	  || vr1.type == VR_VARYING
	  || vr0.type != vr1.type
	  || symbolic_range_p (&vr0)
	  || symbolic_range_p (&vr1)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Now evaluate the expression to determine the new range.  */
  if (POINTER_TYPE_P (expr_type))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
	{
	  /* For MIN/MAX expressions with pointers, we only care about
	     nullness, if both are non null, then the result is nonnull.
	     If both are null, then the result is null.  Otherwise they
	     are varying.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == POINTER_PLUS_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == BIT_AND_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) || range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else
	set_value_range_to_varying (vr);

      return;
    }
  /* For integer ranges, apply the operation to each end of the
     range and see what we end up with.  */
  if (code == PLUS_EXPR || code == MINUS_EXPR)
    {
      const bool minus_p = (code == MINUS_EXPR);
      tree min_op0 = vr0.min;
      tree min_op1 = minus_p ? vr1.max : vr1.min;
      tree max_op0 = vr0.max;
      tree max_op1 = minus_p ? vr1.min : vr1.max;
      tree sym_min_op0 = NULL_TREE;
      tree sym_min_op1 = NULL_TREE;
      tree sym_max_op0 = NULL_TREE;
      tree sym_max_op1 = NULL_TREE;
      bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;

      /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
	 single-symbolic ranges, try to compute the precise resulting range,
	 but only if we know that this resulting range will also be constant
	 or single-symbolic.  */
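      /* For instance, [1, 5] + [10, 20] gives [11, 25], while
	 [1, 5] - [10, 20] combines opposite ends to give
	 [1 - 20, 5 - 10] == [-19, -5].  */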
      if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
	  && (TREE_CODE (min_op0) == INTEGER_CST
	      || (sym_min_op0
		  = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
	  && (TREE_CODE (min_op1) == INTEGER_CST
	      || (sym_min_op1
		  = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
	  && (!(sym_min_op0 && sym_min_op1)
	      || (sym_min_op0 == sym_min_op1
		  && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
	  && (TREE_CODE (max_op0) == INTEGER_CST
	      || (sym_max_op0
		  = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
	  && (TREE_CODE (max_op1) == INTEGER_CST
	      || (sym_max_op1
		  = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
	  && (!(sym_max_op0 && sym_max_op1)
	      || (sym_max_op0 == sym_max_op1
		  && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
	{
	  const signop sgn = TYPE_SIGN (expr_type);
	  const unsigned int prec = TYPE_PRECISION (expr_type);
	  wide_int type_min, type_max, wmin, wmax;
	  int min_ovf = 0;
	  int max_ovf = 0;

	  /* Get the lower and upper bounds of the type.  */
	  if (TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      type_min = wi::min_value (prec, sgn);
	      type_max = wi::max_value (prec, sgn);
	    }
	  else
	    {
	      type_min = wi::to_wide (vrp_val_min (expr_type));
	      type_max = wi::to_wide (vrp_val_max (expr_type));
	    }

	  /* Combine the lower bounds, if any.  */
	  if (min_op0 && min_op1)
	    {
	      if (minus_p)
		{
		  wmin = wi::to_wide (min_op0) - wi::to_wide (min_op1);

		  /* Check for overflow.  */
		  if (wi::cmp (0, wi::to_wide (min_op1), sgn)
		      != wi::cmp (wmin, wi::to_wide (min_op0), sgn))
		    min_ovf = wi::cmp (wi::to_wide (min_op0),
				       wi::to_wide (min_op1), sgn);
		}
	      else
		{
		  wmin = wi::to_wide (min_op0) + wi::to_wide (min_op1);

		  /* Check for overflow.  */
		  if (wi::cmp (wi::to_wide (min_op1), 0, sgn)
		      != wi::cmp (wmin, wi::to_wide (min_op0), sgn))
		    min_ovf = wi::cmp (wi::to_wide (min_op0), wmin, sgn);
		}
	    }
	  else if (min_op0)
	    wmin = wi::to_wide (min_op0);
	  else if (min_op1)
	    {
	      if (minus_p)
		{
		  wmin = -wi::to_wide (min_op1);

		  /* Check for overflow.  */
		  if (sgn == SIGNED
		      && wi::neg_p (wi::to_wide (min_op1))
		      && wi::neg_p (wmin))
		    min_ovf = 1;
		  else if (sgn == UNSIGNED && wi::to_wide (min_op1) != 0)
		    min_ovf = -1;
		}
	      else
		wmin = wi::to_wide (min_op1);
	    }
	  else
	    wmin = wi::shwi (0, prec);

	  /* Combine the upper bounds, if any.  */
	  if (max_op0 && max_op1)
	    {
	      if (minus_p)
		{
		  wmax = wi::to_wide (max_op0) - wi::to_wide (max_op1);

		  /* Check for overflow.  */
		  if (wi::cmp (0, wi::to_wide (max_op1), sgn)
		      != wi::cmp (wmax, wi::to_wide (max_op0), sgn))
		    max_ovf = wi::cmp (wi::to_wide (max_op0),
				       wi::to_wide (max_op1), sgn);
		}
	      else
		{
		  wmax = wi::to_wide (max_op0) + wi::to_wide (max_op1);

		  if (wi::cmp (wi::to_wide (max_op1), 0, sgn)
		      != wi::cmp (wmax, wi::to_wide (max_op0), sgn))
		    max_ovf = wi::cmp (wi::to_wide (max_op0), wmax, sgn);
		}
	    }
	  else if (max_op0)
	    wmax = wi::to_wide (max_op0);
	  else if (max_op1)
	    {
	      if (minus_p)
		{
		  wmax = -wi::to_wide (max_op1);

		  /* Check for overflow.  */
		  if (sgn == SIGNED
		      && wi::neg_p (wi::to_wide (max_op1))
		      && wi::neg_p (wmax))
		    max_ovf = 1;
		  else if (sgn == UNSIGNED && wi::to_wide (max_op1) != 0)
		    max_ovf = -1;
		}
	      else
		wmax = wi::to_wide (max_op1);
	    }
	  else
	    wmax = wi::shwi (0, prec);

	  /* Check for type overflow.  */
	  if (min_ovf == 0)
	    {
	      if (wi::cmp (wmin, type_min, sgn) == -1)
		min_ovf = -1;
	      else if (wi::cmp (wmin, type_max, sgn) == 1)
		min_ovf = 1;
	    }
	  if (max_ovf == 0)
	    {
	      if (wi::cmp (wmax, type_min, sgn) == -1)
		max_ovf = -1;
	      else if (wi::cmp (wmax, type_max, sgn) == 1)
		max_ovf = 1;
	    }

	  /* If we have overflow for the constant part and the resulting
	     range will be symbolic, drop to VR_VARYING.  */
	  if ((min_ovf && sym_min_op0 != sym_min_op1)
	      || (max_ovf && sym_max_op0 != sym_max_op1))
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }

	  if (TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      /* If overflow wraps, truncate the values and adjust the
		 range kind and bounds appropriately.  */
	      wide_int tmin = wide_int::from (wmin, prec, sgn);
	      wide_int tmax = wide_int::from (wmax, prec, sgn);
	      if (min_ovf == max_ovf)
		{
		  /* No overflow or both overflow or underflow.  The
		     range kind stays VR_RANGE.  */
		  min = wide_int_to_tree (expr_type, tmin);
		  max = wide_int_to_tree (expr_type, tmax);
		}
	      else if ((min_ovf == -1 && max_ovf == 0)
		       || (max_ovf == 1 && min_ovf == 0))
		{
		  /* Min underflow or max overflow.  The range kind
		     changes to VR_ANTI_RANGE.  */
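		  /* For instance, for an unsigned char [250, 250] + [3, 10]
		     computes to [253, 260]; the upper bound wraps to 4, and
		     the wrapped result [253, 255] U [0, 4] is represented
		     as the anti-range ~[5, 252].  */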
		  bool covers = false;
		  wide_int tem = tmin;
		  type = VR_ANTI_RANGE;
		  tmin = tmax + 1;
		  if (wi::cmp (tmin, tmax, sgn) < 0)
		    covers = true;
		  tmax = tem - 1;
		  if (wi::cmp (tmax, tem, sgn) > 0)
		    covers = true;
		  /* If the anti-range would cover nothing, drop to varying.
		     Likewise if the anti-range bounds are outside of the
		     types values.  */
		  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
		    {
		      set_value_range_to_varying (vr);
		      return;
		    }
		  min = wide_int_to_tree (expr_type, tmin);
		  max = wide_int_to_tree (expr_type, tmax);
		}
	      else
		{
		  /* Other underflow and/or overflow, drop to VR_VARYING.  */
		  set_value_range_to_varying (vr);
		  return;
		}
	    }
	  else
	    {
	      /* If overflow does not wrap, saturate to the types min/max
		 value.  */
	      if (min_ovf == -1)
		min = wide_int_to_tree (expr_type, type_min);
	      else if (min_ovf == 1)
		min = wide_int_to_tree (expr_type, type_max);
	      else
		min = wide_int_to_tree (expr_type, wmin);

	      if (max_ovf == -1)
		max = wide_int_to_tree (expr_type, type_min);
	      else if (max_ovf == 1)
		max = wide_int_to_tree (expr_type, type_max);
	      else
		max = wide_int_to_tree (expr_type, wmax);
	    }

	  /* If the result lower bound is constant, we're done;
	     otherwise, build the symbolic lower bound.  */
	  if (sym_min_op0 == sym_min_op1)
	    ;
	  else if (sym_min_op0)
	    min = build_symbolic_expr (expr_type, sym_min_op0,
				       neg_min_op0, min);
	  else if (sym_min_op1)
	    {
	      /* We may not negate if that might introduce
		 undefined overflow.  */
	      if (! minus_p
		  || neg_min_op1
		  || TYPE_OVERFLOW_WRAPS (expr_type))
		min = build_symbolic_expr (expr_type, sym_min_op1,
					   neg_min_op1 ^ minus_p, min);
	      else
		min = NULL_TREE;
	    }

	  /* Likewise for the upper bound.  */
	  if (sym_max_op0 == sym_max_op1)
	    ;
	  else if (sym_max_op0)
	    max = build_symbolic_expr (expr_type, sym_max_op0,
				       neg_max_op0, max);
	  else if (sym_max_op1)
	    {
	      /* We may not negate if that might introduce
		 undefined overflow.  */
	      if (! minus_p
		  || neg_max_op1
		  || TYPE_OVERFLOW_WRAPS (expr_type))
		max = build_symbolic_expr (expr_type, sym_max_op1,
					   neg_max_op1 ^ minus_p, max);
	      else
		max = NULL_TREE;
	    }
	}
      else
	{
	  /* For other cases, for example if we have a PLUS_EXPR with two
	     VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
	     to compute a precise range for such a case.
	     ??? General even mixed range kind operations can be expressed
	     by for example transforming ~[3, 5] + [1, 2] to range-only
	     operations and a union primitive:
	       [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
	           [-INF+1, 4]     U    [6, +INF(OVF)]
	     though usually the union is not exactly representable with
	     a single range or anti-range as the above is
	     [-INF+1, +INF(OVF)] intersected with ~[5, 5]
	     but one could use a scheme similar to equivalences for this.  */
	  set_value_range_to_varying (vr);
	  return;
	}
    }
  else if (code == MIN_EXPR
	   || code == MAX_EXPR)
    {
      if (vr0.type == VR_RANGE
	  && !symbolic_range_p (&vr0))
	{
	  type = VR_RANGE;
	  if (vr1.type == VR_RANGE
	      && !symbolic_range_p (&vr1))
	    {
	      /* For operations that make the resulting range directly
		 proportional to the original ranges, apply the operation to
		 the same end of each range.  */
	      min = int_const_binop (code, vr0.min, vr1.min);
	      max = int_const_binop (code, vr0.max, vr1.max);
	    }
	  else if (code == MIN_EXPR)
	    {
	      min = vrp_val_min (expr_type);
	      max = vr0.max;
	    }
	  else if (code == MAX_EXPR)
	    {
	      min = vr0.min;
	      max = vrp_val_max (expr_type);
	    }
	}
      else if (vr1.type == VR_RANGE
	       && !symbolic_range_p (&vr1))
	{
	  type = VR_RANGE;
	  if (code == MIN_EXPR)
	    {
	      min = vrp_val_min (expr_type);
	      max = vr1.max;
	    }
	  else if (code == MAX_EXPR)
	    {
	      min = vr1.min;
	      max = vrp_val_max (expr_type);
	    }
	}
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}
    }
  else if (code == MULT_EXPR)
    {
      /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
	 drop to varying.  This test requires 2*prec bits if both
	 operands are signed and 2*prec + 2 bits if either is not.  */
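      /* For instance, for an unsigned char with wrapping overflow,
	 [253, 255] * [253, 255] is treated as [-3, -1] * [-3, -1]; the
	 cross products in the wider precision are 1, 3, 3 and 9, so the
	 precise result is [1, 9] rather than VR_VARYING.  */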
      signop sign = TYPE_SIGN (expr_type);
      unsigned int prec = TYPE_PRECISION (expr_type);

      if (!range_int_cst_p (&vr0)
	  || !range_int_cst_p (&vr1))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      if (TYPE_OVERFLOW_WRAPS (expr_type))
	{
	  typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
	  typedef generic_wide_int
	     <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
	  vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
	  vrp_int size = sizem1 + 1;

	  /* Extend the values using the sign of the result to PREC2.
	     From here on out, everything is just signed math no matter
	     what the input types were.  */
	  vrp_int min0 = vrp_int_cst (vr0.min);
	  vrp_int max0 = vrp_int_cst (vr0.max);
	  vrp_int min1 = vrp_int_cst (vr1.min);
	  vrp_int max1 = vrp_int_cst (vr1.max);
	  /* Canonicalize the intervals.  */
	  if (sign == UNSIGNED)
	    {
	      if (wi::ltu_p (size, min0 + max0))
		{
		  min0 -= size;
		  max0 -= size;
		}

	      if (wi::ltu_p (size, min1 + max1))
		{
		  min1 -= size;
		  max1 -= size;
		}
	    }

	  vrp_int prod0 = min0 * min1;
	  vrp_int prod1 = min0 * max1;
	  vrp_int prod2 = max0 * min1;
	  vrp_int prod3 = max0 * max1;

	  /* Sort the 4 products so that min is in prod0 and max is in
	     prod3.  */
	  /* min0min1 > max0max1 */
	  if (prod0 > prod3)
	    std::swap (prod0, prod3);

	  /* min0max1 > max0min1 */
	  if (prod1 > prod2)
	    std::swap (prod1, prod2);

	  if (prod0 > prod1)
	    std::swap (prod0, prod1);

	  if (prod2 > prod3)
	    std::swap (prod2, prod3);

	  /* diff = max - min.  */
	  prod2 = prod3 - prod0;
	  if (wi::geu_p (prod2, sizem1))
	    {
	      /* The range covers all values.  */
	      set_value_range_to_varying (vr);
	      return;
	    }

	  /* The following should handle the wrapping and selecting
	     VR_ANTI_RANGE for us.  */
	  min = wide_int_to_tree (expr_type, prod0);
	  max = wide_int_to_tree (expr_type, prod3);
	  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
	  return;
	}

      /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
	 drop to VR_VARYING.  It would take more effort to compute a
	 precise range for such a case.  For example, if we have
	 op0 == 65536 and op1 == 65536 with their ranges both being
	 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
	 we cannot claim that the product is in ~[0,0].  Note that we
	 are guaranteed to have vr0.type == vr1.type at this
	 point.  */
      if (vr0.type == VR_ANTI_RANGE
	  && !TYPE_OVERFLOW_UNDEFINED (expr_type))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
      return;
    }
  else if (code == RSHIFT_EXPR
	   || code == LSHIFT_EXPR)
    {
      /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
	 then drop to VR_VARYING.  Outside of this range we get undefined
	 behavior from the shift operation.  We cannot even trust
	 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
	 shifts, and the operation at the tree level may be widened.  */
      if (range_int_cst_p (&vr1)
	  && compare_tree_int (vr1.min, 0) >= 0
	  && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
	{
	  if (code == RSHIFT_EXPR)
	    {
	      /* Even if vr0 is VARYING or otherwise not usable, we can derive
		 useful ranges just from the shift count.  E.g.
		 x >> 63 for signed 64-bit x is always [-1, 0].  */
	      if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
		{
		  vr0.type = type = VR_RANGE;
		  vr0.min = vrp_val_min (expr_type);
		  vr0.max = vrp_val_max (expr_type);
		}
	      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
	      return;
	    }
	  /* We can map lshifts by constants to MULT_EXPR handling.  */
	  else if (code == LSHIFT_EXPR
		   && range_int_cst_singleton_p (&vr1))
	    {
	      bool saved_flag_wrapv;
	      value_range vr1p = VR_INITIALIZER;
	      vr1p.type = VR_RANGE;
	      vr1p.min = (wide_int_to_tree
			  (expr_type,
			   wi::set_bit_in_zero (tree_to_shwi (vr1.min),
						TYPE_PRECISION (expr_type))));
	      vr1p.max = vr1p.min;
	      /* We have to use a wrapping multiply though as signed overflow
		 on lshifts is implementation defined in C89.  */
	      saved_flag_wrapv = flag_wrapv;
	      flag_wrapv = 1;
	      extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
						&vr0, &vr1p);
	      flag_wrapv = saved_flag_wrapv;
	      return;
	    }
	  else if (code == LSHIFT_EXPR
		   && range_int_cst_p (&vr0))
	    {
	      int prec = TYPE_PRECISION (expr_type);
	      int overflow_pos = prec;
	      int bound_shift;
	      wide_int low_bound, high_bound;
	      bool uns = TYPE_UNSIGNED (expr_type);
	      bool in_bounds = false;

	      if (!uns)
		overflow_pos -= 1;

	      bound_shift = overflow_pos - tree_to_shwi (vr1.max);
	      /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
		 overflow.  However, for that to happen, vr1.max needs to be
		 zero, which means vr1 is a singleton range of zero, which
		 means it should be handled by the previous LSHIFT_EXPR
		 if-clause.  */
	      wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
	      wide_int complement = ~(bound - 1);

	      if (uns)
		{
		  low_bound = bound;
		  high_bound = complement;
		  if (wi::ltu_p (wi::to_wide (vr0.max), low_bound))
		    {
		      /* [5, 6] << [1, 2] == [10, 24].  */
		      /* We're shifting out only zeroes, the value increases
			 monotonically.  */
		      in_bounds = true;
		    }
		  else if (wi::ltu_p (high_bound, wi::to_wide (vr0.min)))
		    {
		      /* [0xffffff00, 0xffffffff] << [1, 2]
			 == [0xfffffc00, 0xfffffffe].  */
		      /* We're shifting out only ones, the value decreases
			 monotonically.  */
		      in_bounds = true;
		    }
		}
	      else
		{
		  /* [-1, 1] << [1, 2] == [-4, 4].  */
		  low_bound = complement;
		  high_bound = bound;
		  if (wi::lts_p (wi::to_wide (vr0.max), high_bound)
		      && wi::lts_p (low_bound, wi::to_wide (vr0.min)))
		    {
		      /* For non-negative numbers, we're shifting out only
			 zeroes, the value increases monotonically.
			 For negative numbers, we're shifting out only ones, the
			 value decreases monotonically.  */
		      in_bounds = true;
		    }
		}

	      if (in_bounds)
		{
		  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
		  return;
		}
	    }
	}
      set_value_range_to_varying (vr);
      return;
    }
  else if (code == TRUNC_DIV_EXPR
	   || code == FLOOR_DIV_EXPR
	   || code == CEIL_DIV_EXPR
	   || code == EXACT_DIV_EXPR
	   || code == ROUND_DIV_EXPR)
    {
      if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
	{
	  /* For division, if op1 has VR_RANGE but op0 does not, something
	     can be deduced just from that range.  Say [min, max] / [4, max]
	     gives [min / 4, max / 4] range.  */
	  if (vr1.type == VR_RANGE
	      && !symbolic_range_p (&vr1)
	      && range_includes_zero_p (vr1.min, vr1.max) == 0)
	    {
	      vr0.type = type = VR_RANGE;
	      vr0.min = vrp_val_min (expr_type);
	      vr0.max = vrp_val_max (expr_type);
	    }
	  else
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }
	}

      /* For divisions, if flag_non_call_exceptions is true, we must
	 not eliminate a division by zero.  */
      if (cfun->can_throw_non_call_exceptions
	  && (vr1.type != VR_RANGE
	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* For divisions, if op0 is VR_RANGE, we can deduce a range
	 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
	 include 0.  */
      if (vr0.type == VR_RANGE
	  && (vr1.type != VR_RANGE
	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
	{
	  tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
	  int cmp;

	  min = NULL_TREE;
	  max = NULL_TREE;
	  if (TYPE_UNSIGNED (expr_type)
	      || value_range_nonnegative_p (&vr1))
	    {
	      /* For unsigned division or when divisor is known
		 to be non-negative, the range has to cover
		 all numbers from 0 to max for positive max
		 and all numbers from min to 0 for negative min.  */
	      cmp = compare_values (vr0.max, zero);
	      if (cmp == -1)
		{
		  /* When vr0.max < 0, vr1.min != 0 and value
		     ranges for dividend and divisor are available.  */
		  if (vr1.type == VR_RANGE
		      && !symbolic_range_p (&vr0)
		      && !symbolic_range_p (&vr1)
		      && compare_values (vr1.min, zero) != 0)
		    max = int_const_binop (code, vr0.max, vr1.min);
		  else
		    max = zero;
		}
	      else if (cmp == 0 || cmp == 1)
		max = vr0.max;
	      else
		type = VR_VARYING;
	      cmp = compare_values (vr0.min, zero);
	      if (cmp == 1)
		{
		  /* For unsigned division when value ranges for dividend
		     and divisor are available.  */
		  if (vr1.type == VR_RANGE
		      && !symbolic_range_p (&vr0)
		      && !symbolic_range_p (&vr1)
		      && compare_values (vr1.max, zero) != 0)
		    min = int_const_binop (code, vr0.min, vr1.max);
		  else
		    min = zero;
		}
	      else if (cmp == 0 || cmp == -1)
		min = vr0.min;
	      else
		type = VR_VARYING;
	    }
	  else
	    {
	      /* Otherwise the range is -max .. max or min .. -min
		 depending on which bound is bigger in absolute value,
		 as the division can change the sign.  */
	      abs_extent_range (vr, vr0.min, vr0.max);
	      return;
	    }
	  if (type == VR_VARYING)
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }
	}
      else if (!symbolic_range_p (&vr0) && !symbolic_range_p (&vr1))
	{
	  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
	  return;
	}
    }
2032 else if (code == TRUNC_MOD_EXPR)
2034 if (range_is_null (&vr1))
2036 set_value_range_to_undefined (vr);
2037 return;
2039 /* ABS (A % B) < ABS (B) and either
2040 0 <= A % B <= A or A <= A % B <= 0. */
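/* E.g., [-100, 100] % [1, 10] is contained in [-9, 9], and
   [-5, 7] % [1, 10] is further clipped to [-5, 7], since A % B
   never exceeds A in magnitude nor changes its sign. */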
2041 type = VR_RANGE;
2042 signop sgn = TYPE_SIGN (expr_type);
2043 unsigned int prec = TYPE_PRECISION (expr_type);
2044 wide_int wmin, wmax, tmp;
2045 if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
2047 wmax = wi::to_wide (vr1.max) - 1;
2048 if (sgn == SIGNED)
2050 tmp = -1 - wi::to_wide (vr1.min);
2051 wmax = wi::smax (wmax, tmp);
2054 else
2056 wmax = wi::max_value (prec, sgn);
2057 /* X % INT_MIN may be INT_MAX. */
2058 if (sgn == UNSIGNED)
2059 wmax = wmax - 1;
2062 if (sgn == UNSIGNED)
2063 wmin = wi::zero (prec);
2064 else
2066 wmin = -wmax;
2067 if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
2069 tmp = wi::to_wide (vr0.min);
2070 if (wi::gts_p (tmp, 0))
2071 tmp = wi::zero (prec);
2072 wmin = wi::smax (wmin, tmp);
2076 if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
2078 tmp = wi::to_wide (vr0.max);
2079 if (sgn == SIGNED && wi::neg_p (tmp))
2080 tmp = wi::zero (prec);
2081 wmax = wi::min (wmax, tmp, sgn);
2084 min = wide_int_to_tree (expr_type, wmin);
2085 max = wide_int_to_tree (expr_type, wmax);
2087 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
2089 bool int_cst_range0, int_cst_range1;
2090 wide_int may_be_nonzero0, may_be_nonzero1;
2091 wide_int must_be_nonzero0, must_be_nonzero1;
2093 int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
2094 &may_be_nonzero0,
2095 &must_be_nonzero0);
2096 int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
2097 &may_be_nonzero1,
2098 &must_be_nonzero1);
2100 if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR)
2102 value_range *vr0p = NULL, *vr1p = NULL;
2103 if (range_int_cst_singleton_p (&vr1))
2105 vr0p = &vr0;
2106 vr1p = &vr1;
2108 else if (range_int_cst_singleton_p (&vr0))
2110 vr0p = &vr1;
2111 vr1p = &vr0;
2113 /* For op & or | attempt to optimize:
2114 [x, y] op z into [x op z, y op z]
2115 if z is a constant which (for op | its bitwise not) has n
2116 consecutive least significant bits cleared followed by m
2117 consecutive 1 bits set immediately above it and either
2118 m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
2119 The least significant n bits of all the values in the range are
2120 cleared or set, the m bits above it are preserved and any bits
2121 above these are required to be the same for all values in the
2122 range. */
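/* E.g., assuming 32-bit unsigned values, [0x10, 0x1f] & 0xfc has
   n == 2 and m == 6; bits 8 and above agree between 0x10 and 0x1f,
   so the result is [0x10 & 0xfc, 0x1f & 0xfc] == [0x10, 0x1c]. */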
2123 if (vr0p && range_int_cst_p (vr0p))
2125 wide_int w = wi::to_wide (vr1p->min);
2126 int m = 0, n = 0;
2127 if (code == BIT_IOR_EXPR)
2128 w = ~w;
2129 if (wi::eq_p (w, 0))
2130 n = TYPE_PRECISION (expr_type);
2131 else
2133 n = wi::ctz (w);
2134 w = ~(w | wi::mask (n, false, w.get_precision ()));
2135 if (wi::eq_p (w, 0))
2136 m = TYPE_PRECISION (expr_type) - n;
2137 else
2138 m = wi::ctz (w) - n;
2140 wide_int mask = wi::mask (m + n, true, w.get_precision ());
2141 if ((mask & wi::to_wide (vr0p->min))
2142 == (mask & wi::to_wide (vr0p->max)))
2144 min = int_const_binop (code, vr0p->min, vr1p->min);
2145 max = int_const_binop (code, vr0p->max, vr1p->min);
2150 type = VR_RANGE;
2151 if (min && max)
2152 /* Optimized above already. */;
2153 else if (code == BIT_AND_EXPR)
2155 min = wide_int_to_tree (expr_type,
2156 must_be_nonzero0 & must_be_nonzero1);
2157 wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
2158 /* If both input ranges contain only negative values we can
2159 truncate the result range maximum to the minimum of the
2160 input range maxima. */
2161 if (int_cst_range0 && int_cst_range1
2162 && tree_int_cst_sgn (vr0.max) < 0
2163 && tree_int_cst_sgn (vr1.max) < 0)
2165 wmax = wi::min (wmax, wi::to_wide (vr0.max),
2166 TYPE_SIGN (expr_type));
2167 wmax = wi::min (wmax, wi::to_wide (vr1.max),
2168 TYPE_SIGN (expr_type));
2170 /* If either input range contains only non-negative values
2171 we can truncate the result range maximum to the respective
2172 maximum of the input range. */
2173 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
2174 wmax = wi::min (wmax, wi::to_wide (vr0.max),
2175 TYPE_SIGN (expr_type));
2176 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
2177 wmax = wi::min (wmax, wi::to_wide (vr1.max),
2178 TYPE_SIGN (expr_type));
2179 max = wide_int_to_tree (expr_type, wmax);
2180 cmp = compare_values (min, max);
2181 /* PR68217: In case of signed & sign-bit-CST should
2182 result in [-INF, 0] instead of [-INF, INF]. */
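/* E.g., for 32-bit int, x & 0x80000000 evaluates to either INT_MIN
   or 0, hence the range [INT_MIN, 0]. */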
2183 if (cmp == -2 || cmp == 1)
2185 wide_int sign_bit
2186 = wi::set_bit_in_zero (TYPE_PRECISION (expr_type) - 1,
2187 TYPE_PRECISION (expr_type));
2188 if (!TYPE_UNSIGNED (expr_type)
2189 && ((int_cst_range0
2190 && value_range_constant_singleton (&vr0)
2191 && !wi::cmps (wi::to_wide (vr0.min), sign_bit))
2192 || (int_cst_range1
2193 && value_range_constant_singleton (&vr1)
2194 && !wi::cmps (wi::to_wide (vr1.min), sign_bit))))
2196 min = TYPE_MIN_VALUE (expr_type);
2197 max = build_int_cst (expr_type, 0);
2201 else if (code == BIT_IOR_EXPR)
2203 max = wide_int_to_tree (expr_type,
2204 may_be_nonzero0 | may_be_nonzero1);
2205 wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
2206 /* If the input ranges contain only positive values we can
2207 truncate the minimum of the result range to the maximum
2208 of the input range minima. */
2209 if (int_cst_range0 && int_cst_range1
2210 && tree_int_cst_sgn (vr0.min) >= 0
2211 && tree_int_cst_sgn (vr1.min) >= 0)
2213 wmin = wi::max (wmin, wi::to_wide (vr0.min),
2214 TYPE_SIGN (expr_type));
2215 wmin = wi::max (wmin, wi::to_wide (vr1.min),
2216 TYPE_SIGN (expr_type));
2218 /* If either input range contains only negative values
2219 we can truncate the minimum of the result range to the
2220 respective minimum range. */
2221 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
2222 wmin = wi::max (wmin, wi::to_wide (vr0.min),
2223 TYPE_SIGN (expr_type));
2224 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
2225 wmin = wi::max (wmin, wi::to_wide (vr1.min),
2226 TYPE_SIGN (expr_type));
2227 min = wide_int_to_tree (expr_type, wmin);
2229 else if (code == BIT_XOR_EXPR)
2231 wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
2232 | ~(may_be_nonzero0 | may_be_nonzero1));
2233 wide_int result_one_bits
2234 = (wi::bit_and_not (must_be_nonzero0, may_be_nonzero1)
2235 | wi::bit_and_not (must_be_nonzero1, may_be_nonzero0));
2236 max = wide_int_to_tree (expr_type, ~result_zero_bits);
2237 min = wide_int_to_tree (expr_type, result_one_bits);
2238 /* If the range has all positive or all negative values the
2239 result is better than VARYING. */
2240 if (tree_int_cst_sgn (min) < 0
2241 || tree_int_cst_sgn (max) >= 0)
2243 else
2244 max = min = NULL_TREE;
2247 else
2248 gcc_unreachable ();
2250 /* If either MIN or MAX overflowed, then set the resulting range to
2251 VARYING. */
2252 if (min == NULL_TREE
2253 || TREE_OVERFLOW_P (min)
2254 || max == NULL_TREE
2255 || TREE_OVERFLOW_P (max))
2257 set_value_range_to_varying (vr);
2258 return;
2261 /* We punt for [-INF, +INF].
2262 We learn nothing when we have INF on both sides.
2263 Note that we do accept [-INF, -INF] and [+INF, +INF]. */
2264 if (vrp_val_is_min (min) && vrp_val_is_max (max))
2266 set_value_range_to_varying (vr);
2267 return;
2270 cmp = compare_values (min, max);
2271 if (cmp == -2 || cmp == 1)
2273 /* If the new range has its limits swapped around (MIN > MAX),
2274 then the operation caused one of them to wrap around, mark
2275 the new range VARYING. */
2276 set_value_range_to_varying (vr);
2278 else
2279 set_value_range (vr, type, min, max, NULL);
2282 /* Extract range information from a unary operation CODE based on
2283 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
2284 The resulting range is stored in *VR. */
2286 void
2287 extract_range_from_unary_expr (value_range *vr,
2288 enum tree_code code, tree type,
2289 value_range *vr0_, tree op0_type)
2291 value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
2293 /* VRP only operates on integral and pointer types. */
2294 if (!(INTEGRAL_TYPE_P (op0_type)
2295 || POINTER_TYPE_P (op0_type))
2296 || !(INTEGRAL_TYPE_P (type)
2297 || POINTER_TYPE_P (type)))
2299 set_value_range_to_varying (vr);
2300 return;
2303 /* If VR0 is UNDEFINED, so is the result. */
2304 if (vr0.type == VR_UNDEFINED)
2306 set_value_range_to_undefined (vr);
2307 return;
2310 /* Handle operations that we express in terms of others. */
2311 if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
2313 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */
2314 copy_value_range (vr, &vr0);
2315 return;
2317 else if (code == NEGATE_EXPR)
2319 /* -X is simply 0 - X, so re-use existing code that also handles
2320 anti-ranges fine. */
2321 value_range zero = VR_INITIALIZER;
2322 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
2323 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
2324 return;
2326 else if (code == BIT_NOT_EXPR)
2328 /* ~X is simply -1 - X, so re-use existing code that also handles
2329 anti-ranges fine. */
2330 value_range minusone = VR_INITIALIZER;
2331 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
2332 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
2333 type, &minusone, &vr0);
2334 return;
2337 /* Now canonicalize anti-ranges to ranges when they are not symbolic
2338 and express op ~[] as (op []') U (op []''). */
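/* E.g., ~[5, 10] is split into [-INF, 4] and [11, +INF]; the
   operation is applied to each piece and the two results are
   joined with vrp_meet below. */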
2339 if (vr0.type == VR_ANTI_RANGE
2340 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2342 extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
2343 if (vrtem1.type != VR_UNDEFINED)
2345 value_range vrres = VR_INITIALIZER;
2346 extract_range_from_unary_expr (&vrres, code, type,
2347 &vrtem1, op0_type);
2348 vrp_meet (vr, &vrres);
2350 return;
2353 if (CONVERT_EXPR_CODE_P (code))
2355 tree inner_type = op0_type;
2356 tree outer_type = type;
2358 /* If the expression evaluates to a pointer, we are only interested in
2359 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2360 if (POINTER_TYPE_P (type))
2362 if (range_is_nonnull (&vr0))
2363 set_value_range_to_nonnull (vr, type);
2364 else if (range_is_null (&vr0))
2365 set_value_range_to_null (vr, type);
2366 else
2367 set_value_range_to_varying (vr);
2368 return;
2371 /* If VR0 is varying and we increase the type precision, assume
2372 a full range for the following transformation. */
2373 if (vr0.type == VR_VARYING
2374 && INTEGRAL_TYPE_P (inner_type)
2375 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2377 vr0.type = VR_RANGE;
2378 vr0.min = TYPE_MIN_VALUE (inner_type);
2379 vr0.max = TYPE_MAX_VALUE (inner_type);
2382 /* If VR0 is a constant range or anti-range and the conversion is
2383 not truncating we can convert the min and max values and
2384 canonicalize the resulting range. Otherwise we can do the
2385 conversion if the size of the range is less than what the
2386 precision of the target type can represent and the range is
2387 not an anti-range. */
2388 if ((vr0.type == VR_RANGE
2389 || vr0.type == VR_ANTI_RANGE)
2390 && TREE_CODE (vr0.min) == INTEGER_CST
2391 && TREE_CODE (vr0.max) == INTEGER_CST
2392 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2393 || (vr0.type == VR_RANGE
2394 && integer_zerop (int_const_binop (RSHIFT_EXPR,
2395 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
2396 size_int (TYPE_PRECISION (outer_type)))))))
2398 tree new_min, new_max;
2399 new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
2400 0, false);
2401 new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
2402 0, false);
2403 set_and_canonicalize_value_range (vr, vr0.type,
2404 new_min, new_max, NULL);
2405 return;
2408 set_value_range_to_varying (vr);
2409 return;
2411 else if (code == ABS_EXPR)
2413 tree min, max;
2414 int cmp;
2416 /* Pass through vr0 in the easy cases. */
2417 if (TYPE_UNSIGNED (type)
2418 || value_range_nonnegative_p (&vr0))
2420 copy_value_range (vr, &vr0);
2421 return;
2424 /* For the remaining varying or symbolic ranges we can't do anything
2425 useful. */
2426 if (vr0.type == VR_VARYING
2427 || symbolic_range_p (&vr0))
2429 set_value_range_to_varying (vr);
2430 return;
2433 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
2434 useful range. */
2435 if (!TYPE_OVERFLOW_UNDEFINED (type)
2436 && ((vr0.type == VR_RANGE
2437 && vrp_val_is_min (vr0.min))
2438 || (vr0.type == VR_ANTI_RANGE
2439 && !vrp_val_is_min (vr0.min))))
2441 set_value_range_to_varying (vr);
2442 return;
2445 /* ABS_EXPR may flip the range around if the original range
2446 included negative values. */
2447 if (!vrp_val_is_min (vr0.min))
2448 min = fold_unary_to_constant (code, type, vr0.min);
2449 else
2450 min = TYPE_MAX_VALUE (type);
2452 if (!vrp_val_is_min (vr0.max))
2453 max = fold_unary_to_constant (code, type, vr0.max);
2454 else
2455 max = TYPE_MAX_VALUE (type);
2457 cmp = compare_values (min, max);
2459 /* If a VR_ANTI_RANGE contains zero, then we have
2460 ~[-INF, min(MIN, MAX)]. */
2461 if (vr0.type == VR_ANTI_RANGE)
2463 if (range_includes_zero_p (vr0.min, vr0.max) == 1)
2465 /* Take the lower of the two values. */
2466 if (cmp != 1)
2467 max = min;
2469 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
2470 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
2471 flag_wrapv is set and the original anti-range doesn't include
2472 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
2473 if (TYPE_OVERFLOW_WRAPS (type))
2475 tree type_min_value = TYPE_MIN_VALUE (type);
2477 min = (vr0.min != type_min_value
2478 ? int_const_binop (PLUS_EXPR, type_min_value,
2479 build_int_cst (TREE_TYPE (type_min_value), 1))
2480 : type_min_value);
2482 else
2483 min = TYPE_MIN_VALUE (type);
2485 else
2487 /* All else has failed, so create the range [0, INF], even for
2488 flag_wrapv since TYPE_MIN_VALUE is in the original
2489 anti-range. */
2490 vr0.type = VR_RANGE;
2491 min = build_int_cst (type, 0);
2492 max = TYPE_MAX_VALUE (type);
2496 /* If the range contains zero then we know that the minimum value in the
2497 range will be zero. */
2498 else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
2500 if (cmp == 1)
2501 max = min;
2502 min = build_int_cst (type, 0);
2504 else
2506 /* If the range was reversed, swap MIN and MAX. */
2507 if (cmp == 1)
2508 std::swap (min, max);
2511 cmp = compare_values (min, max);
2512 if (cmp == -2 || cmp == 1)
2514 /* If the new range has its limits swapped around (MIN > MAX),
2515 then the operation caused one of them to wrap around, mark
2516 the new range VARYING. */
2517 set_value_range_to_varying (vr);
2519 else
2520 set_value_range (vr, vr0.type, min, max, NULL);
2521 return;
2524 /* For unhandled operations fall back to varying. */
2525 set_value_range_to_varying (vr);
2526 return;
2529 /* Debugging dumps. */
2531 void dump_value_range (FILE *, const value_range *);
2532 void debug_value_range (value_range *);
2533 void dump_all_value_ranges (FILE *);
2534 void dump_vr_equiv (FILE *, bitmap);
2535 void debug_vr_equiv (bitmap);
2538 /* Dump value range VR to FILE. */
2540 void
2541 dump_value_range (FILE *file, const value_range *vr)
2543 if (vr == NULL)
2544 fprintf (file, "[]");
2545 else if (vr->type == VR_UNDEFINED)
2546 fprintf (file, "UNDEFINED");
2547 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
2549 tree type = TREE_TYPE (vr->min);
2551 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
2553 if (INTEGRAL_TYPE_P (type)
2554 && !TYPE_UNSIGNED (type)
2555 && vrp_val_is_min (vr->min))
2556 fprintf (file, "-INF");
2557 else
2558 print_generic_expr (file, vr->min);
2560 fprintf (file, ", ");
2562 if (INTEGRAL_TYPE_P (type)
2563 && vrp_val_is_max (vr->max))
2564 fprintf (file, "+INF");
2565 else
2566 print_generic_expr (file, vr->max);
2568 fprintf (file, "]");
2570 if (vr->equiv)
2572 bitmap_iterator bi;
2573 unsigned i, c = 0;
2575 fprintf (file, " EQUIVALENCES: { ");
2577 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
2579 print_generic_expr (file, ssa_name (i));
2580 fprintf (file, " ");
2581 c++;
2584 fprintf (file, "} (%u elements)", c);
2587 else if (vr->type == VR_VARYING)
2588 fprintf (file, "VARYING");
2589 else
2590 fprintf (file, "INVALID RANGE");
2594 /* Dump value range VR to stderr. */
2596 DEBUG_FUNCTION void
2597 debug_value_range (value_range *vr)
2599 dump_value_range (stderr, vr);
2600 fprintf (stderr, "\n");
2604 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2605 create a new SSA name N and return the assertion assignment
2606 'N = ASSERT_EXPR <V, V OP W>'. */
2608 static gimple *
2609 build_assert_expr_for (tree cond, tree v)
2611 tree a;
2612 gassign *assertion;
2614 gcc_assert (TREE_CODE (v) == SSA_NAME
2615 && COMPARISON_CLASS_P (cond));
2617 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2618 assertion = gimple_build_assign (NULL_TREE, a);
2620 /* The new ASSERT_EXPR creates a new SSA name that replaces the
2621 operand of the ASSERT_EXPR. Create it so the new name and the old one
2622 are registered in the replacement table so that we can fix the SSA web
2623 after adding all the ASSERT_EXPRs. */
2624 tree new_def = create_new_def_for (v, assertion, NULL);
2625 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2626 given we have to be able to fully propagate those out to re-create
2627 valid SSA when removing the asserts. */
2628 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2629 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2631 return assertion;
2635 /* Return true if STMT is a conditional whose predicate involves
2636 floating point values. */
2638 static inline bool
2639 fp_predicate (gimple *stmt)
2641 GIMPLE_CHECK (stmt, GIMPLE_COND);
2643 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2646 /* If the range of values taken by OP can be inferred after STMT executes,
2647 return the comparison code (COMP_CODE_P) and value (VAL_P) that
2648 describes the inferred range. Return true if a range could be
2649 inferred. */
2651 bool
2652 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2654 *val_p = NULL_TREE;
2655 *comp_code_p = ERROR_MARK;
2657 /* Do not attempt to infer anything in names that flow through
2658 abnormal edges. */
2659 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2660 return false;
2662 /* If STMT is the last statement of a basic block with no normal
2663 successors, there is no point inferring anything about any of its
2664 operands. We would not be able to find a proper insertion point
2665 for the assertion, anyway. */
2666 if (stmt_ends_bb_p (stmt))
2668 edge_iterator ei;
2669 edge e;
2671 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2672 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2673 break;
2674 if (e == NULL)
2675 return false;
2678 if (infer_nonnull_range (stmt, op))
2680 *val_p = build_int_cst (TREE_TYPE (op), 0);
2681 *comp_code_p = NE_EXPR;
2682 return true;
2685 return false;
2689 void dump_asserts_for (FILE *, tree);
2690 void debug_asserts_for (tree);
2691 void dump_all_asserts (FILE *);
2692 void debug_all_asserts (void);
2694 /* Dump all the registered assertions for NAME to FILE. */
2696 void
2697 dump_asserts_for (FILE *file, tree name)
2699 assert_locus *loc;
2701 fprintf (file, "Assertions to be inserted for ");
2702 print_generic_expr (file, name);
2703 fprintf (file, "\n");
2705 loc = asserts_for[SSA_NAME_VERSION (name)];
2706 while (loc)
2708 fprintf (file, "\t");
2709 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2710 fprintf (file, "\n\tBB #%d", loc->bb->index);
2711 if (loc->e)
2713 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2714 loc->e->dest->index);
2715 dump_edge_info (file, loc->e, dump_flags, 0);
2717 fprintf (file, "\n\tPREDICATE: ");
2718 print_generic_expr (file, loc->expr);
2719 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2720 print_generic_expr (file, loc->val);
2721 fprintf (file, "\n\n");
2722 loc = loc->next;
2725 fprintf (file, "\n");
2729 /* Dump all the registered assertions for NAME to stderr. */
2731 DEBUG_FUNCTION void
2732 debug_asserts_for (tree name)
2734 dump_asserts_for (stderr, name);
2738 /* Dump all the registered assertions for all the names to FILE. */
2740 void
2741 dump_all_asserts (FILE *file)
2743 unsigned i;
2744 bitmap_iterator bi;
2746 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2747 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2748 dump_asserts_for (file, ssa_name (i));
2749 fprintf (file, "\n");
2753 /* Dump all the registered assertions for all the names to stderr. */
2755 DEBUG_FUNCTION void
2756 debug_all_asserts (void)
2758 dump_all_asserts (stderr);
2761 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
2763 static void
2764 add_assert_info (vec<assert_info> &asserts,
2765 tree name, tree expr, enum tree_code comp_code, tree val)
2767 assert_info info;
2768 info.comp_code = comp_code;
2769 info.name = name;
2770 info.val = val;
2771 info.expr = expr;
2772 asserts.safe_push (info);
2775 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2776 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2777 E->DEST, then register this location as a possible insertion point
2778 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2780 BB, E and SI provide the exact insertion point for the new
2781 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2782 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2783 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2784 must not be NULL. */
2786 static void
2787 register_new_assert_for (tree name, tree expr,
2788 enum tree_code comp_code,
2789 tree val,
2790 basic_block bb,
2791 edge e,
2792 gimple_stmt_iterator si)
2794 assert_locus *n, *loc, *last_loc;
2795 basic_block dest_bb;
2797 gcc_checking_assert (bb == NULL || e == NULL);
2799 if (e == NULL)
2800 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2801 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2803 /* Never build an assert comparing against an integer constant with
2804 TREE_OVERFLOW set. This confuses our undefined overflow warning
2805 machinery. */
2806 if (TREE_OVERFLOW_P (val))
2807 val = drop_tree_overflow (val);
2809 /* The new assertion A will be inserted at BB or E. We need to
2810 determine if the new location is dominated by a previously
2811 registered location for A. If we are doing an edge insertion,
2812 assume that A will be inserted at E->DEST. Note that this is not
2813 necessarily true.
2815 If E is a critical edge, it will be split. But even if E is
2816 split, the new block will dominate the same set of blocks that
2817 E->DEST dominates.
2819 The reverse, however, is not true: blocks dominated by E->DEST
2820 will not be dominated by the new block created to split E. So,
2821 if the insertion location is on a critical edge, we will not use
2822 the new location to move another assertion previously registered
2823 at a block dominated by E->DEST. */
2824 dest_bb = (bb) ? bb : e->dest;
2826 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2827 VAL at a block dominating DEST_BB, then we don't need to insert a new
2828 one. Similarly, if the same assertion already exists at a block
2829 dominated by DEST_BB and the new location is not on a critical
2830 edge, then update the existing location for the assertion (i.e.,
2831 move the assertion up in the dominance tree).
2833 Note, this is implemented as a simple linked list because there
2834 should not be more than a handful of assertions registered per
2835 name. If this becomes a performance problem, a table hashed by
2836 COMP_CODE and VAL could be implemented. */
2837 loc = asserts_for[SSA_NAME_VERSION (name)];
2838 last_loc = loc;
2839 while (loc)
2841 if (loc->comp_code == comp_code
2842 && (loc->val == val
2843 || operand_equal_p (loc->val, val, 0))
2844 && (loc->expr == expr
2845 || operand_equal_p (loc->expr, expr, 0)))
2847 /* If E is not a critical edge and DEST_BB
2848 dominates the existing location for the assertion, move
2849 the assertion up in the dominance tree by updating its
2850 location information. */
2851 if ((e == NULL || !EDGE_CRITICAL_P (e))
2852 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2854 loc->bb = dest_bb;
2855 loc->e = e;
2856 loc->si = si;
2857 return;
2861 /* Update the last node of the list and move to the next one. */
2862 last_loc = loc;
2863 loc = loc->next;
2866 /* If we didn't find an assertion already registered for
2867 NAME COMP_CODE VAL, add a new one at the end of the list of
2868 assertions associated with NAME. */
2869 n = XNEW (struct assert_locus);
2870 n->bb = dest_bb;
2871 n->e = e;
2872 n->si = si;
2873 n->comp_code = comp_code;
2874 n->val = val;
2875 n->expr = expr;
2876 n->next = NULL;
2878 if (last_loc)
2879 last_loc->next = n;
2880 else
2881 asserts_for[SSA_NAME_VERSION (name)] = n;
2883 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2886 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2887 Extract a suitable test code and value and store them into *CODE_P and
2888 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2890 If no extraction was possible, return FALSE, otherwise return TRUE.
2892 If INVERT is true, then we invert the result stored into *CODE_P. */
2894 static bool
2895 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2896 tree cond_op0, tree cond_op1,
2897 bool invert, enum tree_code *code_p,
2898 tree *val_p)
2900 enum tree_code comp_code;
2901 tree val;
2903 /* We have a comparison of the form NAME COMP VAL
2904 or VAL COMP NAME. */
2905 if (name == cond_op1)
2907 /* If the predicate is of the form VAL COMP NAME, flip
2908 COMP around because we need to register NAME as the
2909 first operand in the predicate. */
2910 comp_code = swap_tree_comparison (cond_code);
2911 val = cond_op0;
2913 else if (name == cond_op0)
2915 /* The comparison is of the form NAME COMP VAL, so the
2916 comparison code remains unchanged. */
2917 comp_code = cond_code;
2918 val = cond_op1;
2920 else
2921 gcc_unreachable ();
2923 /* Invert the comparison code as necessary. */
2924 if (invert)
2925 comp_code = invert_tree_comparison (comp_code, 0);
2927 /* VRP only handles integral and pointer types. */
2928 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
2929 && ! POINTER_TYPE_P (TREE_TYPE (val)))
2930 return false;
2932 /* Do not register always-false predicates.
2933 FIXME: this works around a limitation in fold() when dealing with
2934 enumerations. Given 'enum { N1, N2 } x;', fold will not
2935 fold 'if (x > N2)' to 'if (0)'. */
2936 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
2937 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
2939 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
2940 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
2942 if (comp_code == GT_EXPR
2943 && (!max
2944 || compare_values (val, max) == 0))
2945 return false;
2947 if (comp_code == LT_EXPR
2948 && (!min
2949 || compare_values (val, min) == 0))
2950 return false;
2952 *code_p = comp_code;
2953 *val_p = val;
2954 return true;
2957 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
2958 (otherwise return VAL). VAL and MASK must be zero-extended for
2959 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
2960 (to transform signed values into unsigned) and at the end xor
2961 SGNBIT back. */
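/* E.g., with PREC == 8, VAL == 5 (0b00000101) and MASK == 0b00001100,
   the values RES with (RES & MASK) == RES are 0, 4, 8 and 12; the
   smallest one above 5 is 8, so 8 is returned. */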
2963 static wide_int
2964 masked_increment (const wide_int &val_in, const wide_int &mask,
2965 const wide_int &sgnbit, unsigned int prec)
2967 wide_int bit = wi::one (prec), res;
2968 unsigned int i;
2970 wide_int val = val_in ^ sgnbit;
2971 for (i = 0; i < prec; i++, bit += bit)
2973 res = mask;
2974 if ((res & bit) == 0)
2975 continue;
2976 res = bit - 1;
2977 res = wi::bit_and_not (val + bit, res);
2978 res &= mask;
2979 if (wi::gtu_p (res, val))
2980 return res ^ sgnbit;
2982 return val ^ sgnbit;
2985 /* Helper for overflow_comparison_p
2987 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2988 OP1's defining statement to see if it ultimately has the form
2989 OP0 CODE (OP0 PLUS INTEGER_CST)
2991 If so, return TRUE indicating this is an overflow test and store into
2992 *NEW_CST an updated constant that can be used in a narrowed range test.
2994 REVERSED indicates if the comparison was originally:
2996 OP1 CODE' OP0.
2998 This affects how we build the updated constant. */
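/* E.g., for unsigned 32-bit x, the test x < x + 10 is false exactly
   when x + 10 wraps, i.e. when x > 0xffffffff - 10; the narrowed
   constant stored in *NEW_CST is then 0xfffffff5. */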
3000 static bool
3001 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
3002 bool follow_assert_exprs, bool reversed, tree *new_cst)
3004 /* See if this is a relational operation between two SSA_NAMES with
3005 unsigned, overflow wrapping values. If so, check it more deeply. */
3006 if ((code == LT_EXPR || code == LE_EXPR
3007 || code == GE_EXPR || code == GT_EXPR)
3008 && TREE_CODE (op0) == SSA_NAME
3009 && TREE_CODE (op1) == SSA_NAME
3010 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
3011 && TYPE_UNSIGNED (TREE_TYPE (op0))
3012 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
3014 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
3016 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
3017 if (follow_assert_exprs)
3019 while (gimple_assign_single_p (op1_def)
3020 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
3022 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
3023 if (TREE_CODE (op1) != SSA_NAME)
3024 break;
3025 op1_def = SSA_NAME_DEF_STMT (op1);
3029 /* Now look at the defining statement of OP1 to see if it adds
3030 or subtracts a nonzero constant from another operand. */
3031 if (op1_def
3032 && is_gimple_assign (op1_def)
3033 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
3034 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
3035 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
3037 tree target = gimple_assign_rhs1 (op1_def);
3039 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
3040 for one where TARGET appears on the RHS. */
3041 if (follow_assert_exprs)
3043 /* Now see if that "other operand" is op0, following the chain
3044 of ASSERT_EXPRs if necessary. */
3045 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
3046 while (op0 != target
3047 && gimple_assign_single_p (op0_def)
3048 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
3050 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
3051 if (TREE_CODE (op0) != SSA_NAME)
3052 break;
3053 op0_def = SSA_NAME_DEF_STMT (op0);
3057 /* If we did not find our target SSA_NAME, then this is not
3058 an overflow test. */
3059 if (op0 != target)
3060 return false;
3062 tree type = TREE_TYPE (op0);
3063 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
3064 tree inc = gimple_assign_rhs2 (op1_def);
3065 if (reversed)
3066 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
3067 else
3068 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
3069 return true;
3072 return false;
3075 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
3076 OP1's defining statement to see if it ultimately has the form
3077 OP0 CODE (OP0 PLUS INTEGER_CST)
3079 If so, return TRUE indicating this is an overflow test and store into
3080 *NEW_CST an updated constant that can be used in a narrowed range test.
3082 These statements are left as-is in the IL to facilitate discovery of
3083 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
3084 the alternate range representation is often useful within VRP. */
3086 bool
3087 overflow_comparison_p (tree_code code, tree name, tree val,
3088 bool use_equiv_p, tree *new_cst)
3090 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
3091 return true;
3092 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
3093 use_equiv_p, true, new_cst);
3097 /* Try to register an edge assertion for SSA name NAME on edge E for
3098 the condition described by COND_CODE, COND_OP0 and COND_OP1.
3099 Invert the condition if INVERT is true. */
3101 static void
3102 register_edge_assert_for_2 (tree name, edge e,
3103 enum tree_code cond_code,
3104 tree cond_op0, tree cond_op1, bool invert,
3105 vec<assert_info> &asserts)
3107 tree val;
3108 enum tree_code comp_code;
3110 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3111 cond_op0,
3112 cond_op1,
3113 invert, &comp_code, &val))
3114 return;
3116 /* Queue the assert. */
3117 tree x;
3118 if (overflow_comparison_p (comp_code, name, val, false, &x))
3120 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
3121 ? GT_EXPR : LE_EXPR);
3122 add_assert_info (asserts, name, name, new_code, x);
3124 add_assert_info (asserts, name, name, comp_code, val);
3126 /* In the case of NAME <= CST and NAME being defined as
3127 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
3128 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
3129 This catches range and anti-range tests. */
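/* E.g., the classic range-check idiom if ((unsigned) x + 1 <= 1),
   which tests x in [-1, 0], has CST == 1 and CST2 == 1, so we
   assert x >= -1 and x <= 0. */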
3130 if ((comp_code == LE_EXPR
3131 || comp_code == GT_EXPR)
3132 && TREE_CODE (val) == INTEGER_CST
3133 && TYPE_UNSIGNED (TREE_TYPE (val)))
3135 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3136 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
3138 /* Extract CST2 from the (optional) addition. */
3139 if (is_gimple_assign (def_stmt)
3140 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
3142 name2 = gimple_assign_rhs1 (def_stmt);
3143 cst2 = gimple_assign_rhs2 (def_stmt);
3144 if (TREE_CODE (name2) == SSA_NAME
3145 && TREE_CODE (cst2) == INTEGER_CST)
3146 def_stmt = SSA_NAME_DEF_STMT (name2);
3149 /* Extract NAME2 from the (optional) sign-changing cast. */
3150 if (gimple_assign_cast_p (def_stmt))
3152 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
3153 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
3154 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
3155 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
3156 name3 = gimple_assign_rhs1 (def_stmt);
3159 /* If name3 is used later, create an ASSERT_EXPR for it. */
3160 if (name3 != NULL_TREE
3161 && TREE_CODE (name3) == SSA_NAME
3162 && (cst2 == NULL_TREE
3163 || TREE_CODE (cst2) == INTEGER_CST)
3164 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
3166 tree tmp;
3168 /* Build an expression for the range test. */
3169 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
3170 if (cst2 != NULL_TREE)
3171 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
3173 if (dump_file)
3175 fprintf (dump_file, "Adding assert for ");
3176 print_generic_expr (dump_file, name3);
3177 fprintf (dump_file, " from ");
3178 print_generic_expr (dump_file, tmp);
3179 fprintf (dump_file, "\n");
3182 add_assert_info (asserts, name3, tmp, comp_code, val);
3185 /* If name2 is used later, create an ASSERT_EXPR for it. */
3186 if (name2 != NULL_TREE
3187 && TREE_CODE (name2) == SSA_NAME
3188 && TREE_CODE (cst2) == INTEGER_CST
3189 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
3191 tree tmp;
3193 /* Build an expression for the range test. */
3194 tmp = name2;
3195 if (TREE_TYPE (name) != TREE_TYPE (name2))
3196 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
3197 if (cst2 != NULL_TREE)
3198 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
3200 if (dump_file)
3202 fprintf (dump_file, "Adding assert for ");
3203 print_generic_expr (dump_file, name2);
3204 fprintf (dump_file, " from ");
3205 print_generic_expr (dump_file, tmp);
3206 fprintf (dump_file, "\n");
3209 add_assert_info (asserts, name2, tmp, comp_code, val);
3213 /* In the case of post-in/decrement tests like if (i++) ... and uses
3214 of the in/decremented value on the edge, the extra name we want to
3215 assert for is not on the def chain of the name compared. Instead
3216 it is in the set of use stmts.
3217 Similar cases happen for conversions that were simplified through
3218 fold_{sign_changed,widened}_comparison. */
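/* E.g., given i_2 = i_1 + 1 and the test i_1 != 0, uses of i_2 in
   the successor block are covered by the derived assertion
   i_2 != 1. */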
3219 if ((comp_code == NE_EXPR
3220 || comp_code == EQ_EXPR)
3221 && TREE_CODE (val) == INTEGER_CST)
3223 imm_use_iterator ui;
3224 gimple *use_stmt;
3225 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
3227 if (!is_gimple_assign (use_stmt))
3228 continue;
3230 /* Cut off to use-stmts that are dominating the predecessor. */
3231 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
3232 continue;
3234 tree name2 = gimple_assign_lhs (use_stmt);
3235 if (TREE_CODE (name2) != SSA_NAME)
3236 continue;
3238 enum tree_code code = gimple_assign_rhs_code (use_stmt);
3239 tree cst;
3240 if (code == PLUS_EXPR
3241 || code == MINUS_EXPR)
3243 cst = gimple_assign_rhs2 (use_stmt);
3244 if (TREE_CODE (cst) != INTEGER_CST)
3245 continue;
3246 cst = int_const_binop (code, val, cst);
3248 else if (CONVERT_EXPR_CODE_P (code))
3250 /* For truncating conversions we cannot record
3251 an inequality. */
3252 if (comp_code == NE_EXPR
3253 && (TYPE_PRECISION (TREE_TYPE (name2))
3254 < TYPE_PRECISION (TREE_TYPE (name))))
3255 continue;
3256 cst = fold_convert (TREE_TYPE (name2), val);
3258 else
3259 continue;
3261 if (TREE_OVERFLOW_P (cst))
3262 cst = drop_tree_overflow (cst);
3263 add_assert_info (asserts, name2, name2, comp_code, cst);
3267 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
3268 && TREE_CODE (val) == INTEGER_CST)
3270 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3271 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
3272 tree val2 = NULL_TREE;
3273 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
3274 wide_int mask = wi::zero (prec);
3275 unsigned int nprec = prec;
3276 enum tree_code rhs_code = ERROR_MARK;
3278 if (is_gimple_assign (def_stmt))
3279 rhs_code = gimple_assign_rhs_code (def_stmt);
3281 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
3282 assert that A != CST1 -+ CST2. */
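/* E.g., if NAME = a_1 + 5 and the edge tests NAME != 20, we can
   record the assertion a_1 != 15. */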
3283 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3284 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
3286 tree op0 = gimple_assign_rhs1 (def_stmt);
3287 tree op1 = gimple_assign_rhs2 (def_stmt);
3288 if (TREE_CODE (op0) == SSA_NAME
3289 && TREE_CODE (op1) == INTEGER_CST)
3291 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
3292 ? MINUS_EXPR : PLUS_EXPR);
3293 op1 = int_const_binop (reverse_op, val, op1);
3294 if (TREE_OVERFLOW (op1))
3295 op1 = drop_tree_overflow (op1);
3296 add_assert_info (asserts, op0, op0, comp_code, op1);
3300 /* Add asserts for NAME cmp CST and NAME being defined
3301 as NAME = (int) NAME2. */
3302 if (!TYPE_UNSIGNED (TREE_TYPE (val))
3303 && (comp_code == LE_EXPR || comp_code == LT_EXPR
3304 || comp_code == GT_EXPR || comp_code == GE_EXPR)
3305 && gimple_assign_cast_p (def_stmt))
3307 name2 = gimple_assign_rhs1 (def_stmt);
3308 if (CONVERT_EXPR_CODE_P (rhs_code)
3309 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3310 && TYPE_UNSIGNED (TREE_TYPE (name2))
3311 && prec == TYPE_PRECISION (TREE_TYPE (name2))
3312 && (comp_code == LE_EXPR || comp_code == GT_EXPR
3313 || !tree_int_cst_equal (val,
3314 TYPE_MIN_VALUE (TREE_TYPE (val)))))
3316 tree tmp, cst;
3317 enum tree_code new_comp_code = comp_code;
3319 cst = fold_convert (TREE_TYPE (name2),
3320 TYPE_MIN_VALUE (TREE_TYPE (val)));
3321 /* Build an expression for the range test. */
3322 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
3323 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
3324 fold_convert (TREE_TYPE (name2), val));
3325 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
3327 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
3328 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
3329 build_int_cst (TREE_TYPE (name2), 1));
3332 if (dump_file)
3334 fprintf (dump_file, "Adding assert for ");
3335 print_generic_expr (dump_file, name2);
3336 fprintf (dump_file, " from ");
3337 print_generic_expr (dump_file, tmp);
3338 fprintf (dump_file, "\n");
3341 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
3345 /* Add asserts for NAME cmp CST and NAME being defined as
3346 NAME = NAME2 >> CST2.
3348 Extract CST2 from the right shift. */
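/* E.g., assuming unsigned 32-bit values, from y = x >> 4 and the
   test y == 0x12 we can assert x - 0x120 <= 0xf, i.e. x lies in
   [0x120, 0x12f]. */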
3349 if (rhs_code == RSHIFT_EXPR)
3351 name2 = gimple_assign_rhs1 (def_stmt);
3352 cst2 = gimple_assign_rhs2 (def_stmt);
3353 if (TREE_CODE (name2) == SSA_NAME
3354 && tree_fits_uhwi_p (cst2)
3355 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3356 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
3357 && type_has_mode_precision_p (TREE_TYPE (val)))
3359 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
3360 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
3363 if (val2 != NULL_TREE
3364 && TREE_CODE (val2) == INTEGER_CST
3365 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
3366 TREE_TYPE (val),
3367 val2, cst2), val))
3369 enum tree_code new_comp_code = comp_code;
3370 tree tmp, new_val;
3372 tmp = name2;
3373 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
3375 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
3377 tree type = build_nonstandard_integer_type (prec, 1);
3378 tmp = build1 (NOP_EXPR, type, name2);
3379 val2 = fold_convert (type, val2);
3381 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
3382 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
3383 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
3385 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
3387 wide_int minval
3388 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
3389 new_val = val2;
3390 if (minval == wi::to_wide (new_val))
3391 new_val = NULL_TREE;
3393 else
3395 wide_int maxval
3396 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
3397 mask |= wi::to_wide (val2);
3398 if (wi::eq_p (mask, maxval))
3399 new_val = NULL_TREE;
3400 else
3401 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
3404 if (new_val)
3406 if (dump_file)
3408 fprintf (dump_file, "Adding assert for ");
3409 print_generic_expr (dump_file, name2);
3410 fprintf (dump_file, " from ");
3411 print_generic_expr (dump_file, tmp);
3412 fprintf (dump_file, "\n");
3415 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
3419 /* Add asserts for NAME cmp CST and NAME being defined as
3420 NAME = NAME2 & CST2.
3422 Extract CST2 from the and.
3424 Also handle
3425 NAME = (unsigned) NAME2;
3426 casts where NAME's type is unsigned and has smaller precision
3427 than NAME2's type as if it was NAME = NAME2 & MASK. */
3428 names[0] = NULL_TREE;
3429 names[1] = NULL_TREE;
3430 cst2 = NULL_TREE;
3431 if (rhs_code == BIT_AND_EXPR
3432 || (CONVERT_EXPR_CODE_P (rhs_code)
3433 && INTEGRAL_TYPE_P (TREE_TYPE (val))
3434 && TYPE_UNSIGNED (TREE_TYPE (val))
3435 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
3436 > prec))
3438 name2 = gimple_assign_rhs1 (def_stmt);
3439 if (rhs_code == BIT_AND_EXPR)
3440 cst2 = gimple_assign_rhs2 (def_stmt);
3441 else
3443 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
3444 nprec = TYPE_PRECISION (TREE_TYPE (name2));
3446 if (TREE_CODE (name2) == SSA_NAME
3447 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3448 && TREE_CODE (cst2) == INTEGER_CST
3449 && !integer_zerop (cst2)
3450 && (nprec > 1
3451 || TYPE_UNSIGNED (TREE_TYPE (val))))
3453 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
3454 if (gimple_assign_cast_p (def_stmt2))
3456 names[1] = gimple_assign_rhs1 (def_stmt2);
3457 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
3458 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
3459 || (TYPE_PRECISION (TREE_TYPE (name2))
3460 != TYPE_PRECISION (TREE_TYPE (names[1]))))
3461 names[1] = NULL_TREE;
3463 names[0] = name2;
3466 if (names[0] || names[1])
3468 wide_int minv, maxv, valv, cst2v;
3469 wide_int tem, sgnbit;
3470 bool valid_p = false, valn, cst2n;
3471 enum tree_code ccode = comp_code;
3473 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
3474 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
3475 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
3476 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
3477 /* If CST2 doesn't have most significant bit set,
3478 but VAL is negative, we have comparison like
3479 if ((x & 0x123) > -4) (always true). Just give up. */
3480 if (!cst2n && valn)
3481 ccode = ERROR_MARK;
3482 if (cst2n)
3483 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3484 else
3485 sgnbit = wi::zero (nprec);
3486 minv = valv & cst2v;
3487 switch (ccode)
3489 case EQ_EXPR:
3490 /* Minimum unsigned value for equality is VAL & CST2
3491 (should be equal to VAL, otherwise we probably should
3492 have folded the comparison into false) and
3493 maximum unsigned value is VAL | ~CST2. */
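/* E.g., assuming an 8-bit unsigned x, (x & 0xf0) == 0x20
   constrains x to [0x20, 0x2f]: minimum 0x20 & 0xf0, maximum
   0x20 | 0x0f. */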
3494 maxv = valv | ~cst2v;
3495 valid_p = true;
3496 break;
3498 case NE_EXPR:
3499 tem = valv | ~cst2v;
3500 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
3501 if (valv == 0)
3503 cst2n = false;
3504 sgnbit = wi::zero (nprec);
3505 goto gt_expr;
3507 /* If (VAL | ~CST2) is all ones, handle it as
3508 (X & CST2) < VAL. */
3509 if (tem == -1)
3511 cst2n = false;
3512 valn = false;
3513 sgnbit = wi::zero (nprec);
3514 goto lt_expr;
3516 if (!cst2n && wi::neg_p (cst2v))
3517 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3518 if (sgnbit != 0)
3520 if (valv == sgnbit)
3522 cst2n = true;
3523 valn = true;
3524 goto gt_expr;
3526 if (tem == wi::mask (nprec - 1, false, nprec))
3528 cst2n = true;
3529 goto lt_expr;
3531 if (!cst2n)
3532 sgnbit = wi::zero (nprec);
3534 break;
3536 case GE_EXPR:
3537 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
3538 is VAL and maximum unsigned value is ~0. For signed
3539 comparison, if CST2 doesn't have most significant bit
3540 set, handle it similarly. If CST2 has MSB set,
3541 the minimum is the same, and maximum is ~0U/2. */
3542 if (minv != valv)
3544 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
3545 VAL. */
3546 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3547 if (minv == valv)
3548 break;
3550 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3551 valid_p = true;
3552 break;
3554 case GT_EXPR:
3555 gt_expr:
3556 /* Find out smallest MINV where MINV > VAL
3557 && (MINV & CST2) == MINV, if any. If VAL is signed and
3558 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
3559 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3560 if (minv == valv)
3561 break;
3562 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3563 valid_p = true;
3564 break;
3566 case LE_EXPR:
3567 /* Minimum unsigned value for <= is 0 and maximum
3568 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
3569 Otherwise, find smallest VAL2 where VAL2 > VAL
3570 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3571 as maximum.
3572 For signed comparison, if CST2 doesn't have most
3573 significant bit set, handle it similarly. If CST2 has
3574 MSB set, the maximum is the same and minimum is INT_MIN. */
3575 if (minv == valv)
3576 maxv = valv;
3577 else
3579 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3580 if (maxv == valv)
3581 break;
3582 maxv -= 1;
3584 maxv |= ~cst2v;
3585 minv = sgnbit;
3586 valid_p = true;
3587 break;
3589 case LT_EXPR:
3590 lt_expr:
3591 /* Minimum unsigned value for < is 0 and maximum
3592 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
3593 Otherwise, find smallest VAL2 where VAL2 > VAL
3594 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3595 as maximum.
3596 For signed comparison, if CST2 doesn't have most
3597 significant bit set, handle it similarly. If CST2 has
3598 MSB set, the maximum is the same and minimum is INT_MIN. */
3599 if (minv == valv)
3601 if (valv == sgnbit)
3602 break;
3603 maxv = valv;
3605 else
3607 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3608 if (maxv == valv)
3609 break;
3611 maxv -= 1;
3612 maxv |= ~cst2v;
3613 minv = sgnbit;
3614 valid_p = true;
3615 break;
3617 default:
3618 break;
3620 if (valid_p
3621 && (maxv - minv) != -1)
3623 tree tmp, new_val, type;
3624 int i;
3626 for (i = 0; i < 2; i++)
3627 if (names[i])
3629 wide_int maxv2 = maxv;
3630 tmp = names[i];
3631 type = TREE_TYPE (names[i]);
3632 if (!TYPE_UNSIGNED (type))
3634 type = build_nonstandard_integer_type (nprec, 1);
3635 tmp = build1 (NOP_EXPR, type, names[i]);
3637 if (minv != 0)
3639 tmp = build2 (PLUS_EXPR, type, tmp,
3640 wide_int_to_tree (type, -minv));
3641 maxv2 = maxv - minv;
3643 new_val = wide_int_to_tree (type, maxv2);
3645 if (dump_file)
3647 fprintf (dump_file, "Adding assert for ");
3648 print_generic_expr (dump_file, names[i]);
3649 fprintf (dump_file, " from ");
3650 print_generic_expr (dump_file, tmp);
3651 fprintf (dump_file, "\n");
3654 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3661 /* OP is an operand of a truth value expression which is known to have
3662 a particular value. Register any asserts for OP and for any
3663 operands in OP's defining statement.
3665 If CODE is EQ_EXPR, then we want to register OP is zero (false),
3666 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
3668 static void
3669 register_edge_assert_for_1 (tree op, enum tree_code code,
3670 edge e, vec<assert_info> &asserts)
3672 gimple *op_def;
3673 tree val;
3674 enum tree_code rhs_code;
3676 /* We only care about SSA_NAMEs. */
3677 if (TREE_CODE (op) != SSA_NAME)
3678 return;
3680 /* We know that OP will have a zero or nonzero value. */
3681 val = build_int_cst (TREE_TYPE (op), 0);
3682 add_assert_info (asserts, op, op, code, val);
3684 /* Now look at how OP is set. If it's set from a comparison,
3685 a truth operation or some bit operations, then we may be able
3686 to register information about the operands of that assignment. */
3687 op_def = SSA_NAME_DEF_STMT (op);
3688 if (gimple_code (op_def) != GIMPLE_ASSIGN)
3689 return;
3691 rhs_code = gimple_assign_rhs_code (op_def);
3693 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3695 bool invert = (code == EQ_EXPR ? true : false);
3696 tree op0 = gimple_assign_rhs1 (op_def);
3697 tree op1 = gimple_assign_rhs2 (op_def);
3699 if (TREE_CODE (op0) == SSA_NAME)
3700 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3701 if (TREE_CODE (op1) == SSA_NAME)
3702 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3704 else if ((code == NE_EXPR
3705 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3706 || (code == EQ_EXPR
3707 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3709 /* Recurse on each operand. */
3710 tree op0 = gimple_assign_rhs1 (op_def);
3711 tree op1 = gimple_assign_rhs2 (op_def);
3712 if (TREE_CODE (op0) == SSA_NAME
3713 && has_single_use (op0))
3714 register_edge_assert_for_1 (op0, code, e, asserts);
3715 if (TREE_CODE (op1) == SSA_NAME
3716 && has_single_use (op1))
3717 register_edge_assert_for_1 (op1, code, e, asserts);
3719 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3720 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3722 /* Recurse, flipping CODE. */
3723 code = invert_tree_comparison (code, false);
3724 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3726 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3728 /* Recurse through the copy. */
3729 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3731 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3733 /* Recurse through the type conversion, unless it is a narrowing
3734 conversion or conversion from non-integral type. */
3735 tree rhs = gimple_assign_rhs1 (op_def);
3736 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3737 && (TYPE_PRECISION (TREE_TYPE (rhs))
3738 <= TYPE_PRECISION (TREE_TYPE (op))))
3739 register_edge_assert_for_1 (rhs, code, e, asserts);
3743 /* Check if comparison
3744 NAME COND_OP INTEGER_CST
3745 has a form of
3746 (X & 11...100..0) COND_OP XX...X00...0
3747 Such comparison can yield assertions like
3748 X >= XX...X00...0
3749 X <= XX...X11...1
3750 in case of COND_OP being NE_EXPR or
3751 X < XX...X00...0
3752 X > XX...X11...1
3753 in case of EQ_EXPR. */
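/* E.g., assuming 32-bit unsigned x, (x & 0xfffffff0) == 0x120 yields
   the assertion pair x >= 0x120 and x <= 0x12f, since the low four
   bits are unconstrained. */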
3755 static bool
3756 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3757 tree *new_name, tree *low, enum tree_code *low_code,
3758 tree *high, enum tree_code *high_code)
3760 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3762 if (!is_gimple_assign (def_stmt)
3763 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3764 return false;
3766 tree t = gimple_assign_rhs1 (def_stmt);
3767 tree maskt = gimple_assign_rhs2 (def_stmt);
3768 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3769 return false;
3771 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3772 wide_int inv_mask = ~mask;
3773 /* Assume VALT is INTEGER_CST. */
3774 wi::tree_to_wide_ref val = wi::to_wide (valt);
3776 if ((inv_mask & (inv_mask + 1)) != 0
3777 || (val & mask) != val)
3778 return false;
3780 bool is_range = cond_code == EQ_EXPR;
3782 tree type = TREE_TYPE (t);
3783 wide_int min = wi::min_value (type),
3784 max = wi::max_value (type);
3786 if (is_range)
3788 *low_code = val == min ? ERROR_MARK : GE_EXPR;
3789 *high_code = val == max ? ERROR_MARK : LE_EXPR;
3791 else
3793 /* We can still generate assertion if one of alternatives
3794 is known to always be false. */
3795 if (val == min)
3797 *low_code = (enum tree_code) 0;
3798 *high_code = GT_EXPR;
3800 else if ((val | inv_mask) == max)
3802 *low_code = LT_EXPR;
3803 *high_code = (enum tree_code) 0;
3805 else
3806 return false;
3809 *new_name = t;
3810 *low = wide_int_to_tree (type, val);
3811 *high = wide_int_to_tree (type, val | inv_mask);
3813 if (wi::neg_p (val, TYPE_SIGN (type)))
3814 std::swap (*low, *high);
3816 return true;
3819 /* Try to register an edge assertion for SSA name NAME on edge E for
3820 the condition described by COND_CODE, COND_OP0 and COND_OP1, and
3821 queue the resulting assertions in ASSERTS. */
3823 void
3824 register_edge_assert_for (tree name, edge e,
3825 enum tree_code cond_code, tree cond_op0,
3826 tree cond_op1, vec<assert_info> &asserts)
3828 tree val;
3829 enum tree_code comp_code;
3830 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3832 /* Do not attempt to infer anything in names that flow through
3833 abnormal edges. */
3834 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3835 return;
3837 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3838 cond_op0, cond_op1,
3839 is_else_edge,
3840 &comp_code, &val))
3841 return;
3843 /* Register ASSERT_EXPRs for name. */
3844 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3845 cond_op1, is_else_edge, asserts);
3848 /* If COND is effectively an equality test of an SSA_NAME against
3849 the value zero or one, then we may be able to assert values
3850 for SSA_NAMEs which flow into COND. */
3852 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3853 statement of NAME we can assert both operands of the BIT_AND_EXPR
3854 have nonzero value. */
3855 if (((comp_code == EQ_EXPR && integer_onep (val))
3856 || (comp_code == NE_EXPR && integer_zerop (val))))
3858 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3860 if (is_gimple_assign (def_stmt)
3861 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3863 tree op0 = gimple_assign_rhs1 (def_stmt);
3864 tree op1 = gimple_assign_rhs2 (def_stmt);
3865 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3866 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3870 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3871 statement of NAME we can assert both operands of the BIT_IOR_EXPR
3872 have zero value. */
3873 if (((comp_code == EQ_EXPR && integer_zerop (val))
3874 || (comp_code == NE_EXPR && integer_onep (val))))
3876 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3878 /* For BIT_IOR_EXPR only if NAME == 0 both operands have
3879 necessarily zero value, or if type-precision is one. */
3880 if (is_gimple_assign (def_stmt)
3881 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3882 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3883 || comp_code == EQ_EXPR)))
3885 tree op0 = gimple_assign_rhs1 (def_stmt);
3886 tree op1 = gimple_assign_rhs2 (def_stmt);
3887 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3888 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3892 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
3893 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3894 && TREE_CODE (val) == INTEGER_CST)
3896 enum tree_code low_code, high_code;
3897 tree low, high;
3898 if (is_masked_range_test (name, val, comp_code, &name, &low,
3899 &low_code, &high, &high_code))
3901 if (low_code != ERROR_MARK)
3902 register_edge_assert_for_2 (name, e, low_code, name,
3903 low, /*invert*/false, asserts);
3904 if (high_code != ERROR_MARK)
3905 register_edge_assert_for_2 (name, e, high_code, name,
3906 high, /*invert*/false, asserts);
3911 /* Finish found ASSERTS for E and register them at GSI. */
3913 static void
3914 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
3915 vec<assert_info> &asserts)
3917 for (unsigned i = 0; i < asserts.length (); ++i)
3918 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3919 reachable from E. */
3920 if (live_on_edge (e, asserts[i].name))
3921 register_new_assert_for (asserts[i].name, asserts[i].expr,
3922 asserts[i].comp_code, asserts[i].val,
3923 NULL, e, gsi);
3928 /* Determine whether the outgoing edges of BB should receive an
3929 ASSERT_EXPR for each of the operands of BB's LAST statement.
3930 The last statement of BB must be a COND_EXPR.
3932 If any of the sub-graphs rooted at BB have an interesting use of
3933 the predicate operands, an assert location node is added to the
3934 list of assertions for the corresponding operands. */
3936 static void
3937 find_conditional_asserts (basic_block bb, gcond *last)
3939 gimple_stmt_iterator bsi;
3940 tree op;
3941 edge_iterator ei;
3942 edge e;
3943 ssa_op_iter iter;
3945 bsi = gsi_for_stmt (last);
3947 /* Look for uses of the operands in each of the sub-graphs
3948 rooted at BB. We need to check each of the outgoing edges
3949 separately, so that we know what kind of ASSERT_EXPR to
3950 insert. */
3951 FOR_EACH_EDGE (e, ei, bb->succs)
3953 if (e->dest == bb)
3954 continue;
3956 /* Register the necessary assertions for each operand in the
3957 conditional predicate. */
3958 auto_vec<assert_info, 8> asserts;
3959 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3960 register_edge_assert_for (op, e,
3961 gimple_cond_code (last),
3962 gimple_cond_lhs (last),
3963 gimple_cond_rhs (last), asserts);
3964 finish_register_edge_assert_for (e, bsi, asserts);
3968 struct case_info
3970 tree expr;
3971 basic_block bb;
3974 /* Compare two case labels sorting first by the destination bb index
3975 and then by the case value. */
3977 static int
3978 compare_case_labels (const void *p1, const void *p2)
3980 const struct case_info *ci1 = (const struct case_info *) p1;
3981 const struct case_info *ci2 = (const struct case_info *) p2;
3982 int idx1 = ci1->bb->index;
3983 int idx2 = ci2->bb->index;
3985 if (idx1 < idx2)
3986 return -1;
3987 else if (idx1 == idx2)
3989 /* Make sure the default label is first in a group. */
3990 if (!CASE_LOW (ci1->expr))
3991 return -1;
3992 else if (!CASE_LOW (ci2->expr))
3993 return 1;
3994 else
3995 return tree_int_cst_compare (CASE_LOW (ci1->expr),
3996 CASE_LOW (ci2->expr));
3998 else
3999 return 1;
4002 /* Determine whether the outgoing edges of BB should receive an
4003 ASSERT_EXPR for each of the operands of BB's LAST statement.
4004 The last statement of BB must be a SWITCH_EXPR.
4006 If any of the sub-graphs rooted at BB have an interesting use of
4007 the predicate operands, an assert location node is added to the
4008 list of assertions for the corresponding operands. */
4010 static void
4011 find_switch_asserts (basic_block bb, gswitch *last)
4013 gimple_stmt_iterator bsi;
4014 tree op;
4015 edge e;
4016 struct case_info *ci;
4017 size_t n = gimple_switch_num_labels (last);
4018 #if GCC_VERSION >= 4000
4019 unsigned int idx;
4020 #else
4021 /* Work around GCC 3.4 bug (PR 37086). */
4022 volatile unsigned int idx;
4023 #endif
4025 bsi = gsi_for_stmt (last);
4026 op = gimple_switch_index (last);
4027 if (TREE_CODE (op) != SSA_NAME)
4028 return;
4030 /* Build a vector of case labels sorted by destination basic block. */
4031 ci = XNEWVEC (struct case_info, n);
4032 for (idx = 0; idx < n; ++idx)
4034 ci[idx].expr = gimple_switch_label (last, idx);
4035 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
4037 edge default_edge = find_edge (bb, ci[0].bb);
4038 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
4040 for (idx = 0; idx < n; ++idx)
4042 tree min, max;
4043 tree cl = ci[idx].expr;
4044 basic_block cbb = ci[idx].bb;
4046 min = CASE_LOW (cl);
4047 max = CASE_HIGH (cl);
4049 /* If there are multiple case labels with the same destination
4050 we need to combine them to a single value range for the edge. */
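/* For instance (illustrative): with
     case 1: case 2: case 3: goto <bb>;
   the three labels are adjacent after sorting, and the edge to <bb>
   receives assertions for the single range [1, 3] instead of three
   separate ones. */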
4051 if (idx + 1 < n && cbb == ci[idx + 1].bb)
4053 /* Skip labels until the last of the group. */
4054 do {
4055 ++idx;
4056 } while (idx < n && cbb == ci[idx].bb);
4057 --idx;
4059 /* Pick up the maximum of the case label range. */
4060 if (CASE_HIGH (ci[idx].expr))
4061 max = CASE_HIGH (ci[idx].expr);
4062 else
4063 max = CASE_LOW (ci[idx].expr);
4066 /* Can't extract a useful assertion out of a range that includes the
4067 default label. */
4068 if (min == NULL_TREE)
4069 continue;
4071 /* Find the edge to register the assert expr on. */
4072 e = find_edge (bb, cbb);
4074 /* Register the necessary assertions for the operand in the
4075 SWITCH_EXPR. */
4076 auto_vec<assert_info, 8> asserts;
4077 register_edge_assert_for (op, e,
4078 max ? GE_EXPR : EQ_EXPR,
4079 op, fold_convert (TREE_TYPE (op), min),
4080 asserts);
4081 if (max)
4082 register_edge_assert_for (op, e, LE_EXPR, op,
4083 fold_convert (TREE_TYPE (op), max),
4084 asserts);
4085 finish_register_edge_assert_for (e, bsi, asserts);
4088 XDELETEVEC (ci);
4090 if (!live_on_edge (default_edge, op))
4091 return;
4093 /* Now register along the default edge the assertions that correspond
4094 to the anti-range of each label. */
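/* For instance (illustrative): for
     switch (i_1) { case 3: ... default: ... }
   the default edge may assume i_1 != 3, and for a case range
   [MIN, MAX] it may assume the anti-range ~[MIN, MAX]. */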
4095 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
4096 if (insertion_limit == 0)
4097 return;
4099 /* We can't do this if the default case shares a label with another case. */
4100 tree default_cl = gimple_switch_default_label (last);
4101 for (idx = 1; idx < n; idx++)
4103 tree min, max;
4104 tree cl = gimple_switch_label (last, idx);
4105 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
4106 continue;
4108 min = CASE_LOW (cl);
4109 max = CASE_HIGH (cl);
4111 /* Combine contiguous case ranges to reduce the number of assertions
4112 to insert. */
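/* For instance (illustrative): case 3 followed by case 4 ... 6
   combines into the single range [3, 6], because the gap between the
   previous MAX and the next MIN is exactly 1. */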
4113 for (idx = idx + 1; idx < n; idx++)
4115 tree next_min, next_max;
4116 tree next_cl = gimple_switch_label (last, idx);
4117 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
4118 break;
4120 next_min = CASE_LOW (next_cl);
4121 next_max = CASE_HIGH (next_cl);
4123 wide_int difference = (wi::to_wide (next_min)
4124 - wi::to_wide (max ? max : min));
4125 if (wi::eq_p (difference, 1))
4126 max = next_max ? next_max : next_min;
4127 else
4128 break;
4130 idx--;
4132 if (max == NULL_TREE)
4134 /* Register the assertion OP != MIN. */
4135 auto_vec<assert_info, 8> asserts;
4136 min = fold_convert (TREE_TYPE (op), min);
4137 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
4138 asserts);
4139 finish_register_edge_assert_for (default_edge, bsi, asserts);
4141 else
4143 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
4144 which will give OP the anti-range ~[MIN,MAX]. */
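/* A sketch of the arithmetic behind this standard unsigned wraparound
   trick: if OP were in [MIN, MAX], then (unsigned)OP - MIN would lie
   in [0, MAX - MIN]; on the default edge the difference exceeds
   MAX - MIN, so OP must be outside [MIN, MAX]. */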
4145 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
4146 min = fold_convert (TREE_TYPE (uop), min);
4147 max = fold_convert (TREE_TYPE (uop), max);
4149 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
4150 tree rhs = int_const_binop (MINUS_EXPR, max, min);
4151 register_new_assert_for (op, lhs, GT_EXPR, rhs,
4152 NULL, default_edge, bsi);
4155 if (--insertion_limit == 0)
4156 break;
4161 /* Traverse all the statements in block BB looking for statements that
4162 may generate useful assertions for the SSA names in their operands.
4163 If a statement produces a useful assertion A for name N_i, then the
4164 list of assertions already generated for N_i is scanned to
4165 determine if A is actually needed.
4167 If N_i already had the assertion A at a location dominating the
4168 current location, then nothing needs to be done. Otherwise, the
4169 new location for A is recorded instead.
4171 1- For every statement S in BB, all the variables used by S are
4172 added to bitmap FOUND_IN_SUBGRAPH.
4174 2- If statement S uses an operand N in a way that exposes a known
4175 value range for N, then if N was not already generated by an
4176 ASSERT_EXPR, create a new assert location for N. For instance,
4177 if N is a pointer and the statement dereferences it, we can
4178 assume that N is not NULL.
4180 3- COND_EXPRs are a special case of #2. We can derive range
4181 information from the predicate but need to insert different
4182 ASSERT_EXPRs for each of the sub-graphs rooted at the
4183 conditional block. If the last statement of BB is a conditional
4184 expression of the form 'X op Y', then
4186 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4188 b) If the conditional is the only entry point to the sub-graph
4189 corresponding to the THEN_CLAUSE, recurse into it. On
4190 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4191 an ASSERT_EXPR is added for the corresponding variable.
4193 c) Repeat step (b) on the ELSE_CLAUSE.
4195 d) Mark X and Y in FOUND_IN_SUBGRAPH.
4197 For instance,
4199 if (a == 9)
4200 b = a;
4201 else
4202 b = c + 1;
4204 In this case, an assertion on the THEN clause is useful to
4205 determine that 'a' is always 9 on that edge. However, an assertion
4206 on the ELSE clause would be unnecessary.
4208 4- If BB does not end in a conditional expression, then we recurse
4209 into BB's dominator children.
4211 At the end of the recursive traversal, every SSA name will have a
4212 list of locations where ASSERT_EXPRs should be added. When a new
4213 location for name N is found, it is registered by calling
4214 register_new_assert_for. That function keeps track of all the
4215 registered assertions to prevent adding unnecessary assertions.
4216 For instance, if a pointer P_4 is dereferenced more than once in a
4217 dominator tree, only the location dominating all the dereferences of
4218 P_4 will receive an ASSERT_EXPR. */
4220 static void
4221 find_assert_locations_1 (basic_block bb, sbitmap live)
4223 gimple *last;
4225 last = last_stmt (bb);
4227 /* If BB's last statement is a conditional statement involving integer
4228 operands, determine if we need to add ASSERT_EXPRs. */
4229 if (last
4230 && gimple_code (last) == GIMPLE_COND
4231 && !fp_predicate (last)
4232 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4233 find_conditional_asserts (bb, as_a <gcond *> (last));
4235 /* If BB's last statement is a switch statement involving integer
4236 operands, determine if we need to add ASSERT_EXPRs. */
4237 if (last
4238 && gimple_code (last) == GIMPLE_SWITCH
4239 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4240 find_switch_asserts (bb, as_a <gswitch *> (last));
4242 /* Traverse all the statements in BB marking used names and looking
4243 for statements that may infer assertions for their used operands. */
4244 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
4245 gsi_prev (&si))
4247 gimple *stmt;
4248 tree op;
4249 ssa_op_iter i;
4251 stmt = gsi_stmt (si);
4253 if (is_gimple_debug (stmt))
4254 continue;
4256 /* See if we can derive an assertion for any of STMT's operands. */
4257 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4259 tree value;
4260 enum tree_code comp_code;
4262 /* If op is not live beyond this stmt, do not bother to insert
4263 asserts for it. */
4264 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
4265 continue;
4267 /* If OP is used in such a way that we can infer a value
4268 range for it, and we don't find a previous assertion for
4269 it, create a new assertion location node for OP. */
4270 if (infer_value_range (stmt, op, &comp_code, &value))
4272 /* If we are able to infer a nonzero value range for OP,
4273 then walk backwards through the use-def chain to see if OP
4274 was set via a typecast.
4276 If so, then we can also infer a nonzero value range
4277 for the operand of the NOP_EXPR. */
4278 if (comp_code == NE_EXPR && integer_zerop (value))
4280 tree t = op;
4281 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
4283 while (is_gimple_assign (def_stmt)
4284 && CONVERT_EXPR_CODE_P
4285 (gimple_assign_rhs_code (def_stmt))
4286 && TREE_CODE
4287 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
4288 && POINTER_TYPE_P
4289 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
4291 t = gimple_assign_rhs1 (def_stmt);
4292 def_stmt = SSA_NAME_DEF_STMT (t);
4294 /* Note we want to register the assert for the
4295 operand of the NOP_EXPR after SI, not after the
4296 conversion. */
4297 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
4298 register_new_assert_for (t, t, comp_code, value,
4299 bb, NULL, si);
4303 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
4307 /* Update live. */
4308 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4309 bitmap_set_bit (live, SSA_NAME_VERSION (op));
4310 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
4311 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
4314 /* Traverse all PHI nodes in BB, updating live. */
4315 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
4316 gsi_next (&si))
4318 use_operand_p arg_p;
4319 ssa_op_iter i;
4320 gphi *phi = si.phi ();
4321 tree res = gimple_phi_result (phi);
4323 if (virtual_operand_p (res))
4324 continue;
4326 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
4328 tree arg = USE_FROM_PTR (arg_p);
4329 if (TREE_CODE (arg) == SSA_NAME)
4330 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
4333 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
4337 /* Do an RPO walk over the function computing SSA name liveness
4338 on-the-fly and deciding on assert expressions to insert. */
4340 static void
4341 find_assert_locations (void)
4343 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
4344 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
4345 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
4346 int rpo_cnt, i;
4348 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
4349 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
4350 for (i = 0; i < rpo_cnt; ++i)
4351 bb_rpo[rpo[i]] = i;
4353 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
4354 the order in which we compute liveness and insert asserts, we would
4355 otherwise fail to insert asserts into the loop latch. */
4356 loop_p loop;
4357 FOR_EACH_LOOP (loop, 0)
4359 i = loop->latch->index;
4360 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
4361 for (gphi_iterator gsi = gsi_start_phis (loop->header);
4362 !gsi_end_p (gsi); gsi_next (&gsi))
4364 gphi *phi = gsi.phi ();
4365 if (virtual_operand_p (gimple_phi_result (phi)))
4366 continue;
4367 tree arg = gimple_phi_arg_def (phi, j);
4368 if (TREE_CODE (arg) == SSA_NAME)
4370 if (live[i] == NULL)
4372 live[i] = sbitmap_alloc (num_ssa_names);
4373 bitmap_clear (live[i]);
4375 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
4380 for (i = rpo_cnt - 1; i >= 0; --i)
4382 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
4383 edge e;
4384 edge_iterator ei;
4386 if (!live[rpo[i]])
4388 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
4389 bitmap_clear (live[rpo[i]]);
4392 /* Process BB and update the live information with uses in
4393 this block. */
4394 find_assert_locations_1 (bb, live[rpo[i]]);
4396 /* Merge liveness into the predecessor blocks and free it. */
4397 if (!bitmap_empty_p (live[rpo[i]]))
4399 int pred_rpo = i;
4400 FOR_EACH_EDGE (e, ei, bb->preds)
4402 int pred = e->src->index;
4403 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
4404 continue;
4406 if (!live[pred])
4408 live[pred] = sbitmap_alloc (num_ssa_names);
4409 bitmap_clear (live[pred]);
4411 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
4413 if (bb_rpo[pred] < pred_rpo)
4414 pred_rpo = bb_rpo[pred];
4417 /* Record the RPO number of the last visited block that needs
4418 live information from this block. */
4419 last_rpo[rpo[i]] = pred_rpo;
4421 else
4423 sbitmap_free (live[rpo[i]]);
4424 live[rpo[i]] = NULL;
4427 /* We can free all successors' live bitmaps if all their
4428 predecessors have been visited already. */
4429 FOR_EACH_EDGE (e, ei, bb->succs)
4430 if (last_rpo[e->dest->index] == i
4431 && live[e->dest->index])
4433 sbitmap_free (live[e->dest->index]);
4434 live[e->dest->index] = NULL;
4438 XDELETEVEC (rpo);
4439 XDELETEVEC (bb_rpo);
4440 XDELETEVEC (last_rpo);
4441 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
4442 if (live[i])
4443 sbitmap_free (live[i]);
4444 XDELETEVEC (live);
4447 /* Create an ASSERT_EXPR for NAME and insert it in the location
4448 indicated by LOC. Return true if we made any edge insertions. */
4450 static bool
4451 process_assert_insertions_for (tree name, assert_locus *loc)
4453 /* Build the comparison expression NAME_i COMP_CODE VAL. */
4454 gimple *stmt;
4455 tree cond;
4456 gimple *assert_stmt;
4457 edge_iterator ei;
4458 edge e;
4460 /* If we have X <=> X do not insert an assert expr for that. */
4461 if (loc->expr == loc->val)
4462 return false;
4464 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
4465 assert_stmt = build_assert_expr_for (cond, name);
4466 if (loc->e)
4468 /* We have been asked to insert the assertion on an edge. This
4469 is used only by COND_EXPR and SWITCH_EXPR assertions. */
4470 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
4471 || (gimple_code (gsi_stmt (loc->si))
4472 == GIMPLE_SWITCH));
4474 gsi_insert_on_edge (loc->e, assert_stmt);
4475 return true;
4478 /* If the stmt iterator points at the end then this is an insertion
4479 at the beginning of a block. */
4480 if (gsi_end_p (loc->si))
4482 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
4483 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
4484 return false;
4487 /* Otherwise, we can insert right after LOC->SI iff that
4488 statement is not the last statement in the block. */
4489 stmt = gsi_stmt (loc->si);
4490 if (!stmt_ends_bb_p (stmt))
4492 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
4493 return false;
4496 /* If STMT must be the last statement in BB, we can only insert new
4497 assertions on the non-abnormal edge out of BB. Note that since
4498 STMT is not control flow, there may only be one non-abnormal/eh edge
4499 out of BB. */
4500 FOR_EACH_EDGE (e, ei, loc->bb->succs)
4501 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
4503 gsi_insert_on_edge (e, assert_stmt);
4504 return true;
4507 gcc_unreachable ();
4510 /* Qsort helper for sorting assert locations. If STABLE is true, don't
4511 use iterative_hash_expr because it can be unstable for -fcompare-debug;
4512 on the other hand, some pointers might then be NULL. */
4514 template <bool stable>
4515 static int
4516 compare_assert_loc (const void *pa, const void *pb)
4518 assert_locus * const a = *(assert_locus * const *)pa;
4519 assert_locus * const b = *(assert_locus * const *)pb;
4521 /* If stable, some asserts might already be optimized away; sort
4522 them last. */
4523 if (stable)
4525 if (a == NULL)
4526 return b != NULL;
4527 else if (b == NULL)
4528 return -1;
4531 if (a->e == NULL && b->e != NULL)
4532 return 1;
4533 else if (a->e != NULL && b->e == NULL)
4534 return -1;
4536 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
4537 no need to test both a->e and b->e. */
4539 /* Sort by destination index. */
4540 if (a->e == NULL)
4542 else if (a->e->dest->index > b->e->dest->index)
4543 return 1;
4544 else if (a->e->dest->index < b->e->dest->index)
4545 return -1;
4547 /* Sort by comp_code. */
4548 if (a->comp_code > b->comp_code)
4549 return 1;
4550 else if (a->comp_code < b->comp_code)
4551 return -1;
4553 hashval_t ha, hb;
4555 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
4556 uses DECL_UID of the VAR_DECL, so sorting might differ between
4557 -g and -g0. When doing the removal of redundant assert exprs
4558 and commonization to successors, this does not matter, but
4559 the final sort needs to be stable. */
4560 if (stable)
4562 ha = 0;
4563 hb = 0;
4565 else
4567 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
4568 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
4571 /* Break the tie using hashing and source/bb index. */
4572 if (ha == hb)
4573 return (a->e != NULL
4574 ? a->e->src->index - b->e->src->index
4575 : a->bb->index - b->bb->index);
4576 return ha > hb ? 1 : -1;
4579 /* Process all the insertions registered for every name N_i registered
4580 in NEED_ASSERT_FOR. The list of assertions to be inserted are
4581 found in ASSERTS_FOR[i]. */
4583 static void
4584 process_assert_insertions (void)
4586 unsigned i;
4587 bitmap_iterator bi;
4588 bool update_edges_p = false;
4589 int num_asserts = 0;
4591 if (dump_file && (dump_flags & TDF_DETAILS))
4592 dump_all_asserts (dump_file);
4594 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4596 assert_locus *loc = asserts_for[i];
4597 gcc_assert (loc);
4599 auto_vec<assert_locus *, 16> asserts;
4600 for (; loc; loc = loc->next)
4601 asserts.safe_push (loc);
4602 asserts.qsort (compare_assert_loc<false>);
4604 /* Push down common asserts to successors and remove redundant ones. */
4605 unsigned ecnt = 0;
4606 assert_locus *common = NULL;
4607 unsigned commonj = 0;
4608 for (unsigned j = 0; j < asserts.length (); ++j)
4610 loc = asserts[j];
4611 if (! loc->e)
4612 common = NULL;
4613 else if (! common
4614 || loc->e->dest != common->e->dest
4615 || loc->comp_code != common->comp_code
4616 || ! operand_equal_p (loc->val, common->val, 0)
4617 || ! operand_equal_p (loc->expr, common->expr, 0))
4619 commonj = j;
4620 common = loc;
4621 ecnt = 1;
4623 else if (loc->e == asserts[j-1]->e)
4625 /* Remove duplicate asserts. */
4626 if (commonj == j - 1)
4628 commonj = j;
4629 common = loc;
4631 free (asserts[j-1]);
4632 asserts[j-1] = NULL;
4634 else
4636 ecnt++;
4637 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
4639 /* We have the same assertion on all incoming edges of a BB.
4640 Insert it at the beginning of that block. */
4641 loc->bb = loc->e->dest;
4642 loc->e = NULL;
4643 loc->si = gsi_none ();
4644 common = NULL;
4645 /* Clear asserts commoned. */
4646 for (; commonj != j; ++commonj)
4647 if (asserts[commonj])
4649 free (asserts[commonj]);
4650 asserts[commonj] = NULL;
4656 /* The asserts vector sorting above might be unstable for
4657 -fcompare-debug; sort again to ensure a stable order. */
4658 asserts.qsort (compare_assert_loc<true>);
4659 for (unsigned j = 0; j < asserts.length (); ++j)
4661 loc = asserts[j];
4662 if (! loc)
4663 break;
4664 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4665 num_asserts++;
4666 free (loc);
4670 if (update_edges_p)
4671 gsi_commit_edge_inserts ();
4673 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4674 num_asserts);
4678 /* Traverse the flowgraph looking for conditional jumps to insert range
4679 expressions. These range expressions are meant to provide information
4680 to optimizations that need to reason in terms of value ranges. They
4681 will not be expanded into RTL. For instance, given:
4683 x = ...
4684 y = ...
4685 if (x < y)
4686 y = x - 2;
4687 else
4688 x = y + 3;
4690 this pass will transform the code into:
4692 x = ...
4693 y = ...
4694 if (x < y)
4696 x = ASSERT_EXPR <x, x < y>
4697 y = x - 2
4699 else
4701 y = ASSERT_EXPR <y, x >= y>
4702 x = y + 3
4705 The idea is that once copy and constant propagation have run, other
4706 optimizations will be able to determine what ranges of values can 'x'
4707 take in different paths of the code, simply by checking the reaching
4708 definition of 'x'. */
4710 static void
4711 insert_range_assertions (void)
4713 need_assert_for = BITMAP_ALLOC (NULL);
4714 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4716 calculate_dominance_info (CDI_DOMINATORS);
4718 find_assert_locations ();
4719 if (!bitmap_empty_p (need_assert_for))
4721 process_assert_insertions ();
4722 update_ssa (TODO_update_ssa_no_phi);
4725 if (dump_file && (dump_flags & TDF_DETAILS))
4727 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4728 dump_function_to_file (current_function_decl, dump_file, dump_flags);
4731 free (asserts_for);
4732 BITMAP_FREE (need_assert_for);
4735 class vrp_prop : public ssa_propagation_engine
4737 public:
4738 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4739 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4741 void vrp_initialize (void);
4742 void vrp_finalize (bool);
4743 void check_all_array_refs (void);
4744 void check_array_ref (location_t, tree, bool);
4745 void search_for_addr_array (tree, location_t);
4747 class vr_values vr_values;
4748 /* Temporary delegator to minimize code churn. */
4749 value_range *get_value_range (const_tree op)
4750 { return vr_values.get_value_range (op); }
4751 void set_defs_to_varying (gimple *stmt)
4752 { return vr_values.set_defs_to_varying (stmt); }
4753 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4754 tree *output_p, value_range *vr)
4755 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4756 bool update_value_range (const_tree op, value_range *vr)
4757 { return vr_values.update_value_range (op, vr); }
4758 void extract_range_basic (value_range *vr, gimple *stmt)
4759 { vr_values.extract_range_basic (vr, stmt); }
4760 void extract_range_from_phi_node (gphi *phi, value_range *vr)
4761 { vr_values.extract_range_from_phi_node (phi, vr); }
4763 /* Check one ARRAY_REF in REF, located at LOCATION. Ignore flexible
4764 arrays and "struct" hacks. If VRP can determine that the
4765 array subscript is a constant, check if it is outside the valid
4766 range. If the array subscript is a RANGE, warn if it does not
4767 overlap the valid range.
4768 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
4770 void
4771 vrp_prop::check_array_ref (location_t location, tree ref,
4772 bool ignore_off_by_one)
4774 value_range *vr = NULL;
4775 tree low_sub, up_sub;
4776 tree low_bound, up_bound, up_bound_p1;
4778 if (TREE_NO_WARNING (ref))
4779 return;
4781 low_sub = up_sub = TREE_OPERAND (ref, 1);
4782 up_bound = array_ref_up_bound (ref);
4784 /* Cannot check flexible arrays. */
4785 if (!up_bound
4786 || TREE_CODE (up_bound) != INTEGER_CST)
4787 return;
4789 /* Accesses to trailing arrays via pointers may access storage
4790 beyond the type's array bounds. */
4791 if (warn_array_bounds < 2
4792 && array_at_struct_end_p (ref))
4793 return;
4795 low_bound = array_ref_low_bound (ref);
4796 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4797 build_int_cst (TREE_TYPE (up_bound), 1));
4799 /* Empty array. */
4800 if (tree_int_cst_equal (low_bound, up_bound_p1))
4802 warning_at (location, OPT_Warray_bounds,
4803 "array subscript is above array bounds");
4804 TREE_NO_WARNING (ref) = 1;
4807 if (TREE_CODE (low_sub) == SSA_NAME)
4809 vr = get_value_range (low_sub);
4810 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4812 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
4813 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
4817 if (vr && vr->type == VR_ANTI_RANGE)
4819 if (TREE_CODE (up_sub) == INTEGER_CST
4820 && (ignore_off_by_one
4821 ? tree_int_cst_lt (up_bound, up_sub)
4822 : tree_int_cst_le (up_bound, up_sub))
4823 && TREE_CODE (low_sub) == INTEGER_CST
4824 && tree_int_cst_le (low_sub, low_bound))
4826 warning_at (location, OPT_Warray_bounds,
4827 "array subscript is outside array bounds");
4828 TREE_NO_WARNING (ref) = 1;
4831 else if (TREE_CODE (up_sub) == INTEGER_CST
4832 && (ignore_off_by_one
4833 ? !tree_int_cst_le (up_sub, up_bound_p1)
4834 : !tree_int_cst_le (up_sub, up_bound)))
4836 if (dump_file && (dump_flags & TDF_DETAILS))
4838 fprintf (dump_file, "Array bound warning for ");
4839 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4840 fprintf (dump_file, "\n");
4842 warning_at (location, OPT_Warray_bounds,
4843 "array subscript is above array bounds");
4844 TREE_NO_WARNING (ref) = 1;
4846 else if (TREE_CODE (low_sub) == INTEGER_CST
4847 && tree_int_cst_lt (low_sub, low_bound))
4849 if (dump_file && (dump_flags & TDF_DETAILS))
4851 fprintf (dump_file, "Array bound warning for ");
4852 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4853 fprintf (dump_file, "\n");
4855 warning_at (location, OPT_Warray_bounds,
4856 "array subscript is below array bounds");
4857 TREE_NO_WARNING (ref) = 1;
4861 /* Search the expression T, located at LOCATION, for ARRAY_REFs in
4862 its reference chain, and call check_array_ref on each one found. */
4864 void
4865 vrp_prop::search_for_addr_array (tree t, location_t location)
4867 /* Check each ARRAY_REF in the reference chain. */
4870 if (TREE_CODE (t) == ARRAY_REF)
4871 check_array_ref (location, t, true /*ignore_off_by_one*/);
4873 t = TREE_OPERAND (t, 0);
4875 while (handled_component_p (t));
4877 if (TREE_CODE (t) == MEM_REF
4878 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
4879 && !TREE_NO_WARNING (t))
4881 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4882 tree low_bound, up_bound, el_sz;
4883 offset_int idx;
4884 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
4885 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
4886 || !TYPE_DOMAIN (TREE_TYPE (tem)))
4887 return;
4889 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4890 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4891 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
4892 if (!low_bound
4893 || TREE_CODE (low_bound) != INTEGER_CST
4894 || !up_bound
4895 || TREE_CODE (up_bound) != INTEGER_CST
4896 || !el_sz
4897 || TREE_CODE (el_sz) != INTEGER_CST)
4898 return;
4900 idx = mem_ref_offset (t);
4901 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4902 if (idx < 0)
4904 if (dump_file && (dump_flags & TDF_DETAILS))
4906 fprintf (dump_file, "Array bound warning for ");
4907 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4908 fprintf (dump_file, "\n");
4910 warning_at (location, OPT_Warray_bounds,
4911 "array subscript is below array bounds");
4912 TREE_NO_WARNING (t) = 1;
4914 else if (idx > (wi::to_offset (up_bound)
4915 - wi::to_offset (low_bound) + 1))
4917 if (dump_file && (dump_flags & TDF_DETAILS))
4919 fprintf (dump_file, "Array bound warning for ");
4920 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4921 fprintf (dump_file, "\n");
4923 warning_at (location, OPT_Warray_bounds,
4924 "array subscript is above array bounds");
4925 TREE_NO_WARNING (t) = 1;
4930 /* walk_tree() callback that checks whether *TP is
4931 an ARRAY_REF inside an ADDR_EXPR (in which case an array
4932 subscript one past the valid range is allowed). Call
4933 check_array_ref for each ARRAY_REF found. The location is
4934 passed in DATA. */
4936 static tree
4937 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4939 tree t = *tp;
4940 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4941 location_t location;
4943 if (EXPR_HAS_LOCATION (t))
4944 location = EXPR_LOCATION (t);
4945 else
4946 location = gimple_location (wi->stmt);
4948 *walk_subtree = TRUE;
4950 vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4951 if (TREE_CODE (t) == ARRAY_REF)
4952 vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
4954 else if (TREE_CODE (t) == ADDR_EXPR)
4956 vrp_prop->search_for_addr_array (t, location);
4957 *walk_subtree = FALSE;
4960 return NULL_TREE;
4963 /* Walk over all statements of all reachable BBs and call check_array_bounds
4964 on them. */
4966 void
4967 vrp_prop::check_all_array_refs ()
4969 basic_block bb;
4970 gimple_stmt_iterator si;
4972 FOR_EACH_BB_FN (bb, cfun)
4974 edge_iterator ei;
4975 edge e;
4976 bool executable = false;
4978 /* Skip blocks that were found to be unreachable. */
4979 FOR_EACH_EDGE (e, ei, bb->preds)
4980 executable |= !!(e->flags & EDGE_EXECUTABLE);
4981 if (!executable)
4982 continue;
4984 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4986 gimple *stmt = gsi_stmt (si);
4987 struct walk_stmt_info wi;
4988 if (!gimple_has_location (stmt)
4989 || is_gimple_debug (stmt))
4990 continue;
4992 memset (&wi, 0, sizeof (wi));
4994 wi.info = this;
4996 walk_gimple_op (gsi_stmt (si),
4997 check_array_bounds,
4998 &wi);
5003 /* Return true if all imm uses of VAR are either in STMT, or
5004 feed (optionally through a chain of single imm uses) GIMPLE_COND
5005 in basic block COND_BB. */
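/* For instance (an illustrative sketch; the SSA names are hypothetical):
   with
     _4 = (int) x_3;
     _5 = _4 & 31;
     if (_5 != 0) ...
   every use of x_3 other than the one in STMT feeds the GIMPLE_COND
   through a chain of single immediate uses, so true is returned. */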
5007 static bool
5008 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
5010 use_operand_p use_p, use2_p;
5011 imm_use_iterator iter;
5013 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
5014 if (USE_STMT (use_p) != stmt)
5016 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
5017 if (is_gimple_debug (use_stmt))
5018 continue;
5019 while (is_gimple_assign (use_stmt)
5020 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
5021 && single_imm_use (gimple_assign_lhs (use_stmt),
5022 &use2_p, &use_stmt2))
5023 use_stmt = use_stmt2;
5024 if (gimple_code (use_stmt) != GIMPLE_COND
5025 || gimple_bb (use_stmt) != cond_bb)
5026 return false;
5028 return true;
5031 /* Handle
5032 _4 = x_3 & 31;
5033 if (_4 != 0)
5034 goto <bb 6>;
5035 else
5036 goto <bb 7>;
5037 <bb 6>:
5038 __builtin_unreachable ();
5039 <bb 7>:
5040 x_5 = ASSERT_EXPR <x_3, ...>;
5041 If x_3 has no other immediate uses (checked by caller) and
5042 VAR is the x_3 from the ASSERT_EXPR, we can clear the low 5 bits
5043 from the non-zero bits mask. */
5045 static void
5046 maybe_set_nonzero_bits (basic_block bb, tree var)
5048 edge e = single_pred_edge (bb);
5049 basic_block cond_bb = e->src;
5050 gimple *stmt = last_stmt (cond_bb);
5051 tree cst;
5053 if (stmt == NULL
5054 || gimple_code (stmt) != GIMPLE_COND
5055 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
5056 ? EQ_EXPR : NE_EXPR)
5057 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
5058 || !integer_zerop (gimple_cond_rhs (stmt)))
5059 return;
5061 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
5062 if (!is_gimple_assign (stmt)
5063 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
5064 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
5065 return;
5066 if (gimple_assign_rhs1 (stmt) != var)
5068 gimple *stmt2;
5070 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5071 return;
5072 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
5073 if (!gimple_assign_cast_p (stmt2)
5074 || gimple_assign_rhs1 (stmt2) != var
5075 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
5076 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
5077 != TYPE_PRECISION (TREE_TYPE (var))))
5078 return;
5080 cst = gimple_assign_rhs2 (stmt);
5081 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
5082 wi::to_wide (cst)));
5085 /* Convert range assertion expressions into the implied copies and
5086 copy propagate away the copies. Doing the trivial copy propagation
5087 here avoids the need to run the full copy propagation pass after
5088 VRP.
5090 FIXME, this will eventually lead to copy propagation removing the
5091 names that had useful range information attached to them. For
5092 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5093 then N_i will have the range [3, +INF].
5095 However, by converting the assertion into the implied copy
5096 operation N_i = N_j, we will then copy-propagate N_j into the uses
5097 of N_i and lose the range information. We may want to hold on to
5098 ASSERT_EXPRs a little while longer as the ranges could be used in
5099 things like jump threading.
5101 The problem with keeping ASSERT_EXPRs around is that passes after
5102 VRP need to handle them appropriately.
5104 Another approach would be to make the range information a first
5105 class property of the SSA_NAME so that it can be queried from
5106 any pass. This is made somewhat more complex by the need for
5107 multiple ranges to be associated with one SSA_NAME. */
5109 static void
5110 remove_range_assertions (void)
5112 basic_block bb;
5113 gimple_stmt_iterator si;
5114 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
5115 a basic block preceded by a GIMPLE_COND branching to it and
5116 __builtin_trap, -1 if not yet checked, 0 otherwise. */
5117 int is_unreachable;
5119 /* Note that the BSI iterator bump happens at the bottom of the
5120 loop and no bump is necessary if we're removing the statement
5121 referenced by the current BSI. */
5122 FOR_EACH_BB_FN (bb, cfun)
5123 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
5125 gimple *stmt = gsi_stmt (si);
5127 if (is_gimple_assign (stmt)
5128 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5130 tree lhs = gimple_assign_lhs (stmt);
5131 tree rhs = gimple_assign_rhs1 (stmt);
5132 tree var;
5134 var = ASSERT_EXPR_VAR (rhs);
5136 if (TREE_CODE (var) == SSA_NAME
5137 && !POINTER_TYPE_P (TREE_TYPE (lhs))
5138 && SSA_NAME_RANGE_INFO (lhs))
5140 if (is_unreachable == -1)
5142 is_unreachable = 0;
5143 if (single_pred_p (bb)
5144 && assert_unreachable_fallthru_edge_p
5145 (single_pred_edge (bb)))
5146 is_unreachable = 1;
5148 /* Handle
5149 if (x_7 >= 10 && x_7 < 20)
5150 __builtin_unreachable ();
5151 x_8 = ASSERT_EXPR <x_7, ...>;
5152 if the only uses of x_7 are in the ASSERT_EXPR and
5153 in the condition. In that case, we can copy the
5154 range info from x_8 computed in this pass also
5155 for x_7. */
5156 if (is_unreachable
5157 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
5158 single_pred (bb)))
5160 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
5161 SSA_NAME_RANGE_INFO (lhs)->get_min (),
5162 SSA_NAME_RANGE_INFO (lhs)->get_max ());
5163 maybe_set_nonzero_bits (bb, var);
5167 /* Propagate the RHS into every use of the LHS. For SSA names
5168 also propagate abnormals, as this merely restores the original
5169 IL in that case (replace_uses_by would assert otherwise). */
5170 if (TREE_CODE (var) == SSA_NAME)
5172 imm_use_iterator iter;
5173 use_operand_p use_p;
5174 gimple *use_stmt;
5175 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
5176 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5177 SET_USE (use_p, var);
5179 else
5180 replace_uses_by (lhs, var);
5182 /* And finally, remove the copy, it is not needed. */
5183 gsi_remove (&si, true);
5184 release_defs (stmt);
5186 else
5188 if (!is_gimple_debug (gsi_stmt (si)))
5189 is_unreachable = 0;
5190 gsi_next (&si);
5196 /* Return true if STMT is interesting for VRP. */
5198 bool
5199 stmt_interesting_for_vrp (gimple *stmt)
5201 if (gimple_code (stmt) == GIMPLE_PHI)
5203 tree res = gimple_phi_result (stmt);
5204 return (!virtual_operand_p (res)
5205 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
5206 || POINTER_TYPE_P (TREE_TYPE (res))));
5208 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5210 tree lhs = gimple_get_lhs (stmt);
5212 /* In general, assignments with virtual operands are not useful
5213 for deriving ranges, with the obvious exception of calls to
5214 builtin functions. */
5215 if (lhs && TREE_CODE (lhs) == SSA_NAME
5216 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5217 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5218 && (is_gimple_call (stmt)
5219 || !gimple_vuse (stmt)))
5220 return true;
5221 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5222 switch (gimple_call_internal_fn (stmt))
5224 case IFN_ADD_OVERFLOW:
5225 case IFN_SUB_OVERFLOW:
5226 case IFN_MUL_OVERFLOW:
5227 case IFN_ATOMIC_COMPARE_EXCHANGE:
5228 /* These internal calls return _Complex integer type,
5229 but are interesting to VRP nevertheless. */
5230 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5231 return true;
5232 break;
5233 default:
5234 break;
5237 else if (gimple_code (stmt) == GIMPLE_COND
5238 || gimple_code (stmt) == GIMPLE_SWITCH)
5239 return true;
5241 return false;
5244 /* Initialization required by ssa_propagate engine. */
5246 void
5247 vrp_prop::vrp_initialize ()
5249 basic_block bb;
5251 FOR_EACH_BB_FN (bb, cfun)
5253 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
5254 gsi_next (&si))
5256 gphi *phi = si.phi ();
5257 if (!stmt_interesting_for_vrp (phi))
5259 tree lhs = PHI_RESULT (phi);
5260 set_value_range_to_varying (get_value_range (lhs));
5261 prop_set_simulate_again (phi, false);
5263 else
5264 prop_set_simulate_again (phi, true);
5267 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
5268 gsi_next (&si))
5270 gimple *stmt = gsi_stmt (si);
5272 /* If the statement is a control insn, we must simulate
5273 it at least once; otherwise the outgoing edges would
5274 never get added. */
5275 if (stmt_ends_bb_p (stmt))
5276 prop_set_simulate_again (stmt, true);
5277 else if (!stmt_interesting_for_vrp (stmt))
5279 set_defs_to_varying (stmt);
5280 prop_set_simulate_again (stmt, false);
5282 else
5283 prop_set_simulate_again (stmt, true);
5288 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
5289 that includes the value VAL. The search is restricted to the range
5290 [START_IDX, n - 1] where n is the size of VEC.
5292 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5293 returned.
5295 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5296 it is placed in IDX and false is returned.
5298 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5299 returned. */
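/* For instance (illustrative): for a switch whose case labels, sorted
   by CASE_LOW, cover 2, 5 and 7, VAL == 5 places the index of label 5
   in IDX and returns true; VAL == 4 places the index of label 5 in
   IDX and returns false; VAL == 9 places n in IDX and returns
   false. */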
5301 bool
5302 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5304 size_t n = gimple_switch_num_labels (stmt);
5305 size_t low, high;
5307 /* Find case label for minimum of the value range or the next one.
5308 At each iteration we are searching in [low, high - 1]. */
5310 for (low = start_idx, high = n; high != low; )
5312 tree t;
5313 int cmp;
5314 /* Note that i != high, so we never ask for n. */
5315 size_t i = (high + low) / 2;
5316 t = gimple_switch_label (stmt, i);
5318 /* Cache the result of comparing CASE_LOW and val. */
5319 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5321 if (cmp == 0)
5323 /* Ranges cannot be empty. */
5324 *idx = i;
5325 return true;
5327 else if (cmp > 0)
5328 high = i;
5329 else
5331 low = i + 1;
5332 if (CASE_HIGH (t) != NULL
5333 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5335 *idx = i;
5336 return true;
5341 *idx = high;
5342 return false;
5345 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5346 for values between MIN and MAX. The first index is placed in MIN_IDX. The
5347 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5348 then MAX_IDX < MIN_IDX.
5349 Returns true if the default label is not needed. */
5351 bool
5352 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5353 size_t *max_idx)
5355 size_t i, j;
5356 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5357 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5359 if (i == j
5360 && min_take_default
5361 && max_take_default)
5363 /* Only the default case label reached.
5364 Return an empty range. */
5365 *min_idx = 1;
5366 *max_idx = 0;
5367 return false;
5369 else
5371 bool take_default = min_take_default || max_take_default;
5372 tree low, high;
5373 size_t k;
5375 if (max_take_default)
5376 j--;
5378 /* If the case label range is contiguous, we do not need
5379 the default case label. Verify that. */
5380 high = CASE_LOW (gimple_switch_label (stmt, i));
5381 if (CASE_HIGH (gimple_switch_label (stmt, i)))
5382 high = CASE_HIGH (gimple_switch_label (stmt, i));
5383 for (k = i + 1; k <= j; ++k)
5385 low = CASE_LOW (gimple_switch_label (stmt, k));
5386 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5388 take_default = true;
5389 break;
5391 high = low;
5392 if (CASE_HIGH (gimple_switch_label (stmt, k)))
5393 high = CASE_HIGH (gimple_switch_label (stmt, k));
5396 *min_idx = i;
5397 *max_idx = j;
5398 return !take_default;
5402 /* Evaluate statement STMT. If the statement produces a useful range,
5403 return SSA_PROP_INTERESTING and record the SSA name with the
5404 interesting range into *OUTPUT_P.
5406 If STMT is a conditional branch and we can determine its truth
5407 value, the taken edge is recorded in *TAKEN_EDGE_P.
5409 If STMT produces a varying value, return SSA_PROP_VARYING. */
5411 enum ssa_prop_result
5412 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5414 value_range vr = VR_INITIALIZER;
5415 tree lhs = gimple_get_lhs (stmt);
5416 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5418 if (*output_p)
5420 if (update_value_range (*output_p, &vr))
5422 if (dump_file && (dump_flags & TDF_DETAILS))
5424 fprintf (dump_file, "Found new range for ");
5425 print_generic_expr (dump_file, *output_p);
5426 fprintf (dump_file, ": ");
5427 dump_value_range (dump_file, &vr);
5428 fprintf (dump_file, "\n");
5431 if (vr.type == VR_VARYING)
5432 return SSA_PROP_VARYING;
5434 return SSA_PROP_INTERESTING;
5436 return SSA_PROP_NOT_INTERESTING;
5439 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5440 switch (gimple_call_internal_fn (stmt))
5442 case IFN_ADD_OVERFLOW:
5443 case IFN_SUB_OVERFLOW:
5444 case IFN_MUL_OVERFLOW:
5445 case IFN_ATOMIC_COMPARE_EXCHANGE:
5446 /* These internal calls return _Complex integer type,
5447 which VRP does not track, but the immediate uses
5448 thereof might be interesting. */
5449 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5451 imm_use_iterator iter;
5452 use_operand_p use_p;
5453 enum ssa_prop_result res = SSA_PROP_VARYING;
5455 set_value_range_to_varying (get_value_range (lhs));
5457 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5459 gimple *use_stmt = USE_STMT (use_p);
5460 if (!is_gimple_assign (use_stmt))
5461 continue;
5462 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5463 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5464 continue;
5465 tree rhs1 = gimple_assign_rhs1 (use_stmt);
5466 tree use_lhs = gimple_assign_lhs (use_stmt);
5467 if (TREE_CODE (rhs1) != rhs_code
5468 || TREE_OPERAND (rhs1, 0) != lhs
5469 || TREE_CODE (use_lhs) != SSA_NAME
5470 || !stmt_interesting_for_vrp (use_stmt)
5471 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5472 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5473 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5474 continue;
5476 /* If there is a change in the value range for any of the
5477 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5478 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
5479 or IMAGPART_EXPR immediate uses, but none of them have
5480 a change in their value ranges, return
5481 SSA_PROP_NOT_INTERESTING. If there are no
5482 {REAL,IMAG}PART_EXPR uses at all,
5483 return SSA_PROP_VARYING. */
5484 value_range new_vr = VR_INITIALIZER;
5485 extract_range_basic (&new_vr, use_stmt);
5486 value_range *old_vr = get_value_range (use_lhs);
5487 if (old_vr->type != new_vr.type
5488 || !vrp_operand_equal_p (old_vr->min, new_vr.min)
5489 || !vrp_operand_equal_p (old_vr->max, new_vr.max)
5490 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
5491 res = SSA_PROP_INTERESTING;
5492 else
5493 res = SSA_PROP_NOT_INTERESTING;
5494 BITMAP_FREE (new_vr.equiv);
5495 if (res == SSA_PROP_INTERESTING)
5497 *output_p = lhs;
5498 return res;
5502 return res;
5504 break;
5505 default:
5506 break;
5509 /* All other statements produce nothing of interest for VRP, so mark
5510 their outputs varying and prevent further simulation. */
5511 set_defs_to_varying (stmt);
5513 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5516 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5517 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5518 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5519 possible such range. The resulting range is not canonicalized. */
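/* For instance (illustrative): the union of [1, 5] and [3, 10] is the
   convex hull [1, 10].  For an unsigned char, the union of [0, 1] and
   [3, 255] covers the whole type except 2 and so becomes the
   anti-range ~[2, 2]; disjoint ranges that cannot be represented that
   way fall back to their convex hull. */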
5521 static void
5522 union_ranges (enum value_range_type *vr0type,
5523 tree *vr0min, tree *vr0max,
5524 enum value_range_type vr1type,
5525 tree vr1min, tree vr1max)
5527 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5528 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5530 /* [] is vr0, () is vr1 in the following classification comments. */
5531 if (mineq && maxeq)
5533 /* [( )] */
5534 if (*vr0type == vr1type)
5535 /* Nothing to do for equal ranges. */
5537 else if ((*vr0type == VR_RANGE
5538 && vr1type == VR_ANTI_RANGE)
5539 || (*vr0type == VR_ANTI_RANGE
5540 && vr1type == VR_RANGE))
5542 /* For anti-range with range union the result is varying. */
5543 goto give_up;
5545 else
5546 gcc_unreachable ();
5548 else if (operand_less_p (*vr0max, vr1min) == 1
5549 || operand_less_p (vr1max, *vr0min) == 1)
5551 /* [ ] ( ) or ( ) [ ]
5552 If the ranges are disjoint, the union of a range with an
5553 anti-range is the anti-range; if both are anti-ranges,
5554 the union covers everything. */
5555 if (*vr0type == VR_ANTI_RANGE
5556 && vr1type == VR_ANTI_RANGE)
5557 goto give_up;
5558 else if (*vr0type == VR_ANTI_RANGE
5559 && vr1type == VR_RANGE)
5561 else if (*vr0type == VR_RANGE
5562 && vr1type == VR_ANTI_RANGE)
5564 *vr0type = vr1type;
5565 *vr0min = vr1min;
5566 *vr0max = vr1max;
5568 else if (*vr0type == VR_RANGE
5569 && vr1type == VR_RANGE)
5571 /* The result is the convex hull of both ranges. */
5572 if (operand_less_p (*vr0max, vr1min) == 1)
5574 /* If the result can be an anti-range, create one. */
5575 if (TREE_CODE (*vr0max) == INTEGER_CST
5576 && TREE_CODE (vr1min) == INTEGER_CST
5577 && vrp_val_is_min (*vr0min)
5578 && vrp_val_is_max (vr1max))
5580 tree min = int_const_binop (PLUS_EXPR,
5581 *vr0max,
5582 build_int_cst (TREE_TYPE (*vr0max), 1));
5583 tree max = int_const_binop (MINUS_EXPR,
5584 vr1min,
5585 build_int_cst (TREE_TYPE (vr1min), 1));
5586 if (!operand_less_p (max, min))
5588 *vr0type = VR_ANTI_RANGE;
5589 *vr0min = min;
5590 *vr0max = max;
5592 else
5593 *vr0max = vr1max;
5595 else
5596 *vr0max = vr1max;
5598 else
5600 /* If the result can be an anti-range, create one. */
5601 if (TREE_CODE (vr1max) == INTEGER_CST
5602 && TREE_CODE (*vr0min) == INTEGER_CST
5603 && vrp_val_is_min (vr1min)
5604 && vrp_val_is_max (*vr0max))
5606 tree min = int_const_binop (PLUS_EXPR,
5607 vr1max,
5608 build_int_cst (TREE_TYPE (vr1max), 1));
5609 tree max = int_const_binop (MINUS_EXPR,
5610 *vr0min,
5611 build_int_cst (TREE_TYPE (*vr0min), 1));
5612 if (!operand_less_p (max, min))
5614 *vr0type = VR_ANTI_RANGE;
5615 *vr0min = min;
5616 *vr0max = max;
5618 else
5619 *vr0min = vr1min;
5621 else
5622 *vr0min = vr1min;
5625 else
5626 gcc_unreachable ();
5628 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5629 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5631 /* [ ( ) ] or [( ) ] or [ ( )] */
5632 if (*vr0type == VR_RANGE
5633 && vr1type == VR_RANGE)
5635 else if (*vr0type == VR_ANTI_RANGE
5636 && vr1type == VR_ANTI_RANGE)
5638 *vr0type = vr1type;
5639 *vr0min = vr1min;
5640 *vr0max = vr1max;
5642 else if (*vr0type == VR_ANTI_RANGE
5643 && vr1type == VR_RANGE)
5645 /* Arbitrarily choose the right or left gap. */
5646 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5647 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5648 build_int_cst (TREE_TYPE (vr1min), 1));
5649 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5650 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5651 build_int_cst (TREE_TYPE (vr1max), 1));
5652 else
5653 goto give_up;
5655 else if (*vr0type == VR_RANGE
5656 && vr1type == VR_ANTI_RANGE)
5657 /* The result covers everything. */
5658 goto give_up;
5659 else
5660 gcc_unreachable ();
5662 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5663 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5665 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5666 if (*vr0type == VR_RANGE
5667 && vr1type == VR_RANGE)
5669 *vr0type = vr1type;
5670 *vr0min = vr1min;
5671 *vr0max = vr1max;
5673 else if (*vr0type == VR_ANTI_RANGE
5674 && vr1type == VR_ANTI_RANGE)
5676 else if (*vr0type == VR_RANGE
5677 && vr1type == VR_ANTI_RANGE)
5679 *vr0type = VR_ANTI_RANGE;
5680 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5682 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5683 build_int_cst (TREE_TYPE (*vr0min), 1));
5684 *vr0min = vr1min;
5686 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5688 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5689 build_int_cst (TREE_TYPE (*vr0max), 1));
5690 *vr0max = vr1max;
5692 else
5693 goto give_up;
5695 else if (*vr0type == VR_ANTI_RANGE
5696 && vr1type == VR_RANGE)
5697 /* The result covers everything. */
5698 goto give_up;
5699 else
5700 gcc_unreachable ();
5702 else if ((operand_less_p (vr1min, *vr0max) == 1
5703 || operand_equal_p (vr1min, *vr0max, 0))
5704 && operand_less_p (*vr0min, vr1min) == 1
5705 && operand_less_p (*vr0max, vr1max) == 1)
5707 /* [ ( ] ) or [ ]( ) */
5708 if (*vr0type == VR_RANGE
5709 && vr1type == VR_RANGE)
5710 *vr0max = vr1max;
5711 else if (*vr0type == VR_ANTI_RANGE
5712 && vr1type == VR_ANTI_RANGE)
5713 *vr0min = vr1min;
5714 else if (*vr0type == VR_ANTI_RANGE
5715 && vr1type == VR_RANGE)
5717 if (TREE_CODE (vr1min) == INTEGER_CST)
5718 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5719 build_int_cst (TREE_TYPE (vr1min), 1));
5720 else
5721 goto give_up;
5723 else if (*vr0type == VR_RANGE
5724 && vr1type == VR_ANTI_RANGE)
5726 if (TREE_CODE (*vr0max) == INTEGER_CST)
5728 *vr0type = vr1type;
5729 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5730 build_int_cst (TREE_TYPE (*vr0max), 1));
5731 *vr0max = vr1max;
5733 else
5734 goto give_up;
5736 else
5737 gcc_unreachable ();
5739 else if ((operand_less_p (*vr0min, vr1max) == 1
5740 || operand_equal_p (*vr0min, vr1max, 0))
5741 && operand_less_p (vr1min, *vr0min) == 1
5742 && operand_less_p (vr1max, *vr0max) == 1)
5744 /* ( [ ) ] or ( )[ ] */
5745 if (*vr0type == VR_RANGE
5746 && vr1type == VR_RANGE)
5747 *vr0min = vr1min;
5748 else if (*vr0type == VR_ANTI_RANGE
5749 && vr1type == VR_ANTI_RANGE)
5750 *vr0max = vr1max;
5751 else if (*vr0type == VR_ANTI_RANGE
5752 && vr1type == VR_RANGE)
5754 if (TREE_CODE (vr1max) == INTEGER_CST)
5755 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5756 build_int_cst (TREE_TYPE (vr1max), 1));
5757 else
5758 goto give_up;
5760 else if (*vr0type == VR_RANGE
5761 && vr1type == VR_ANTI_RANGE)
5763 if (TREE_CODE (*vr0min) == INTEGER_CST)
5765 *vr0type = vr1type;
5766 *vr0min = vr1min;
5767 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5768 build_int_cst (TREE_TYPE (*vr0min), 1));
5770 else
5771 goto give_up;
5773 else
5774 gcc_unreachable ();
5776 else
5777 goto give_up;
5779 return;
5781 give_up:
5782 *vr0type = VR_VARYING;
5783 *vr0min = NULL_TREE;
5784 *vr0max = NULL_TREE;
5787 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5788 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5789 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5790 possible such range. The resulting range is not canonicalized. */
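/* For instance (illustrative): the intersection of [1, 10] and
   [5, 20] is [5, 10].  Intersecting [1, 10] with the anti-range
   ~[1, 6] trims the left end and yields [7, 10]; if the anti-range
   punches a hole in the middle, e.g. ~[4, 6], the result
   conservatively stays [1, 10], since a single range cannot represent
   two disjoint pieces. */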
5792 static void
5793 intersect_ranges (enum value_range_type *vr0type,
5794 tree *vr0min, tree *vr0max,
5795 enum value_range_type vr1type,
5796 tree vr1min, tree vr1max)
5798 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5799 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5801 /* [] is vr0, () is vr1 in the following classification comments. */
5802 if (mineq && maxeq)
5804 /* [( )] */
5805 if (*vr0type == vr1type)
5806 /* Nothing to do for equal ranges. */
5808 else if ((*vr0type == VR_RANGE
5809 && vr1type == VR_ANTI_RANGE)
5810 || (*vr0type == VR_ANTI_RANGE
5811 && vr1type == VR_RANGE))
5813 /* For anti-range with range intersection the result is empty. */
5814 *vr0type = VR_UNDEFINED;
5815 *vr0min = NULL_TREE;
5816 *vr0max = NULL_TREE;
5818 else
5819 gcc_unreachable ();
5821 else if (operand_less_p (*vr0max, vr1min) == 1
5822 || operand_less_p (vr1max, *vr0min) == 1)
5824 /* [ ] ( ) or ( ) [ ]
5825 If the ranges are disjoint, intersecting an anti-range with
5826 a range yields the range, and intersecting two ranges yields
5827 the empty range. */
5828 if (*vr0type == VR_RANGE
5829 && vr1type == VR_ANTI_RANGE)
5831 else if (*vr0type == VR_ANTI_RANGE
5832 && vr1type == VR_RANGE)
5834 *vr0type = vr1type;
5835 *vr0min = vr1min;
5836 *vr0max = vr1max;
5838 else if (*vr0type == VR_RANGE
5839 && vr1type == VR_RANGE)
5841 *vr0type = VR_UNDEFINED;
5842 *vr0min = NULL_TREE;
5843 *vr0max = NULL_TREE;
5845 else if (*vr0type == VR_ANTI_RANGE
5846 && vr1type == VR_ANTI_RANGE)
5848 /* If the anti-ranges are adjacent to each other merge them. */
5849 if (TREE_CODE (*vr0max) == INTEGER_CST
5850 && TREE_CODE (vr1min) == INTEGER_CST
5851 && operand_less_p (*vr0max, vr1min) == 1
5852 && integer_onep (int_const_binop (MINUS_EXPR,
5853 vr1min, *vr0max)))
5854 *vr0max = vr1max;
5855 else if (TREE_CODE (vr1max) == INTEGER_CST
5856 && TREE_CODE (*vr0min) == INTEGER_CST
5857 && operand_less_p (vr1max, *vr0min) == 1
5858 && integer_onep (int_const_binop (MINUS_EXPR,
5859 *vr0min, vr1max)))
5860 *vr0min = vr1min;
5861 /* Else arbitrarily take VR0. */
5864 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5865 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5867 /* [ ( ) ] or [( ) ] or [ ( )] */
5868 if (*vr0type == VR_RANGE
5869 && vr1type == VR_RANGE)
5871 /* If both are ranges the result is the inner one. */
5872 *vr0type = vr1type;
5873 *vr0min = vr1min;
5874 *vr0max = vr1max;
5876 else if (*vr0type == VR_RANGE
5877 && vr1type == VR_ANTI_RANGE)
5879 /* Choose the right gap if the left one is empty. */
5880 if (mineq)
5882 if (TREE_CODE (vr1max) != INTEGER_CST)
5883 *vr0min = vr1max;
5884 else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
5885 && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
5886 *vr0min
5887 = int_const_binop (MINUS_EXPR, vr1max,
5888 build_int_cst (TREE_TYPE (vr1max), -1));
5889 else
5890 *vr0min
5891 = int_const_binop (PLUS_EXPR, vr1max,
5892 build_int_cst (TREE_TYPE (vr1max), 1));
5894 /* Choose the left gap if the right one is empty. */
5895 else if (maxeq)
5897 if (TREE_CODE (vr1min) != INTEGER_CST)
5898 *vr0max = vr1min;
5899 else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
5900 && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
5901 *vr0max
5902 = int_const_binop (PLUS_EXPR, vr1min,
5903 build_int_cst (TREE_TYPE (vr1min), -1));
5904 else
5905 *vr0max
5906 = int_const_binop (MINUS_EXPR, vr1min,
5907 build_int_cst (TREE_TYPE (vr1min), 1));
5909 /* Choose the anti-range if the range is effectively varying. */
5910 else if (vrp_val_is_min (*vr0min)
5911 && vrp_val_is_max (*vr0max))
5913 *vr0type = vr1type;
5914 *vr0min = vr1min;
5915 *vr0max = vr1max;
5917 /* Else choose the range. */
5919 else if (*vr0type == VR_ANTI_RANGE
5920 && vr1type == VR_ANTI_RANGE)
5921 /* If both are anti-ranges the result is the outer one. */
5923 else if (*vr0type == VR_ANTI_RANGE
5924 && vr1type == VR_RANGE)
5926 /* The intersection is empty. */
5927 *vr0type = VR_UNDEFINED;
5928 *vr0min = NULL_TREE;
5929 *vr0max = NULL_TREE;
5931 else
5932 gcc_unreachable ();
5934 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5935 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5937 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5938 if (*vr0type == VR_RANGE
5939 && vr1type == VR_RANGE)
5940 /* Choose the inner range. */
5942 else if (*vr0type == VR_ANTI_RANGE
5943 && vr1type == VR_RANGE)
5945 /* Choose the right gap if the left is empty. */
5946 if (mineq)
5948 *vr0type = VR_RANGE;
5949 if (TREE_CODE (*vr0max) != INTEGER_CST)
5950 *vr0min = *vr0max;
5951 else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
5952 && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
5953 *vr0min
5954 = int_const_binop (MINUS_EXPR, *vr0max,
5955 build_int_cst (TREE_TYPE (*vr0max), -1));
5956 else
5957 *vr0min
5958 = int_const_binop (PLUS_EXPR, *vr0max,
5959 build_int_cst (TREE_TYPE (*vr0max), 1));
5960 *vr0max = vr1max;
5962 /* Choose the left gap if the right is empty. */
5963 else if (maxeq)
5965 *vr0type = VR_RANGE;
5966 if (TREE_CODE (*vr0min) != INTEGER_CST)
5967 *vr0max = *vr0min;
5968 else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
5969 && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
5970 *vr0max
5971 = int_const_binop (PLUS_EXPR, *vr0min,
5972 build_int_cst (TREE_TYPE (*vr0min), -1));
5973 else
5974 *vr0max
5975 = int_const_binop (MINUS_EXPR, *vr0min,
5976 build_int_cst (TREE_TYPE (*vr0min), 1));
5977 *vr0min = vr1min;
5979 /* Choose the anti-range if the range is effectively varying. */
5980 else if (vrp_val_is_min (vr1min)
5981 && vrp_val_is_max (vr1max))
5983 /* Choose the anti-range if it is ~[0,0]; that range is special
5984 enough to special-case when VR1's range is relatively wide. */
5985 else if (*vr0min == *vr0max
5986 && integer_zerop (*vr0min)
5987 && (TYPE_PRECISION (TREE_TYPE (*vr0min))
5988 == TYPE_PRECISION (ptr_type_node))
5989 && TREE_CODE (vr1max) == INTEGER_CST
5990 && TREE_CODE (vr1min) == INTEGER_CST
5991 && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
5992 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
5994 /* Else choose the range. */
5995 else
5997 *vr0type = vr1type;
5998 *vr0min = vr1min;
5999 *vr0max = vr1max;
6002 else if (*vr0type == VR_ANTI_RANGE
6003 && vr1type == VR_ANTI_RANGE)
6005 /* If both are anti-ranges the result is the outer one. */
6006 *vr0type = vr1type;
6007 *vr0min = vr1min;
6008 *vr0max = vr1max;
6010 else if (vr1type == VR_ANTI_RANGE
6011 && *vr0type == VR_RANGE)
6013 /* The intersection is empty. */
6014 *vr0type = VR_UNDEFINED;
6015 *vr0min = NULL_TREE;
6016 *vr0max = NULL_TREE;
6018 else
6019 gcc_unreachable ();
6021 else if ((operand_less_p (vr1min, *vr0max) == 1
6022 || operand_equal_p (vr1min, *vr0max, 0))
6023 && operand_less_p (*vr0min, vr1min) == 1)
6025 /* [ ( ] ) or [ ]( ) */
6026 if (*vr0type == VR_ANTI_RANGE
6027 && vr1type == VR_ANTI_RANGE)
6028 *vr0max = vr1max;
6029 else if (*vr0type == VR_RANGE
6030 && vr1type == VR_RANGE)
6031 *vr0min = vr1min;
6032 else if (*vr0type == VR_RANGE
6033 && vr1type == VR_ANTI_RANGE)
6035 if (TREE_CODE (vr1min) == INTEGER_CST)
6036 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
6037 build_int_cst (TREE_TYPE (vr1min), 1));
6038 else
6039 *vr0max = vr1min;
6041 else if (*vr0type == VR_ANTI_RANGE
6042 && vr1type == VR_RANGE)
6044 *vr0type = VR_RANGE;
6045 if (TREE_CODE (*vr0max) == INTEGER_CST)
6046 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
6047 build_int_cst (TREE_TYPE (*vr0max), 1));
6048 else
6049 *vr0min = *vr0max;
6050 *vr0max = vr1max;
6052 else
6053 gcc_unreachable ();
6055 else if ((operand_less_p (*vr0min, vr1max) == 1
6056 || operand_equal_p (*vr0min, vr1max, 0))
6057 && operand_less_p (vr1min, *vr0min) == 1)
6059 /* ( [ ) ] or ( )[ ] */
6060 if (*vr0type == VR_ANTI_RANGE
6061 && vr1type == VR_ANTI_RANGE)
6062 *vr0min = vr1min;
6063 else if (*vr0type == VR_RANGE
6064 && vr1type == VR_RANGE)
6065 *vr0max = vr1max;
6066 else if (*vr0type == VR_RANGE
6067 && vr1type == VR_ANTI_RANGE)
6069 if (TREE_CODE (vr1max) == INTEGER_CST)
6070 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
6071 build_int_cst (TREE_TYPE (vr1max), 1));
6072 else
6073 *vr0min = vr1max;
6075 else if (*vr0type == VR_ANTI_RANGE
6076 && vr1type == VR_RANGE)
6078 *vr0type = VR_RANGE;
6079 if (TREE_CODE (*vr0min) == INTEGER_CST)
6080 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
6081 build_int_cst (TREE_TYPE (*vr0min), 1));
6082 else
6083 *vr0max = *vr0min;
6084 *vr0min = vr1min;
6086 else
6087 gcc_unreachable ();
6090 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as the
6091 result for the intersection. That's always a conservatively
6092 correct estimate unless VR1 is a constant singleton range, in
6093 which case we choose that. */
6094 if (vr1type == VR_RANGE
6095 && is_gimple_min_invariant (vr1min)
6096 && vrp_operand_equal_p (vr1min, vr1max))
6098 *vr0type = vr1type;
6099 *vr0min = vr1min;
6100 *vr0max = vr1max;
6103 return;
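/* For intuition, the plain-range subset of the logic above reduces to
   ordinary interval intersection.  A minimal standalone sketch, using
   host integers instead of trees (the names below are illustrative and
   not part of this file):

     struct irange { int min, max; bool empty; };

     static struct irange
     irange_intersect (struct irange a, struct irange b)
     {
       struct irange r;
       r.min = a.min > b.min ? a.min : b.min;
       r.max = a.max < b.max ? a.max : b.max;
       r.empty = r.min > r.max;
       return r;
     }

   The real code cannot stop there because anti-ranges, symbolic
   bounds and overflow semantics all have to be handled as well.  */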
6107 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
6108 in *VR0. This may not be the smallest possible such range. */
6110 static void
6111 vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
6113 value_range saved;
6115 /* If either range is VR_VARYING the other one wins. */
6116 if (vr1->type == VR_VARYING)
6117 return;
6118 if (vr0->type == VR_VARYING)
6120 copy_value_range (vr0, vr1);
6121 return;
6124 /* When either range is VR_UNDEFINED the resulting range is
6125 VR_UNDEFINED, too. */
6126 if (vr0->type == VR_UNDEFINED)
6127 return;
6128 if (vr1->type == VR_UNDEFINED)
6130 set_value_range_to_undefined (vr0);
6131 return;
6134 /* Save the original VR0 so we can return it as the conservative
6135 intersection result when our worker turns things to varying. */
6136 saved = *vr0;
6137 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
6138 vr1->type, vr1->min, vr1->max);
6139 /* Make sure to canonicalize the result though as the inversion of a
6140 VR_RANGE can still be a VR_RANGE. */
6141 set_and_canonicalize_value_range (vr0, vr0->type,
6142 vr0->min, vr0->max, vr0->equiv);
6143 /* If that failed, use the saved original VR0. */
6144 if (vr0->type == VR_VARYING)
6146 *vr0 = saved;
6147 return;
6149 /* If the result is VR_UNDEFINED there is no need to mess with
6150 the equivalences. */
6151 if (vr0->type == VR_UNDEFINED)
6152 return;
6154 /* The resulting set of equivalences for range intersection is the union of
6155 the two sets. */
6156 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6157 bitmap_ior_into (vr0->equiv, vr1->equiv);
6158 else if (vr1->equiv && !vr0->equiv)
6160 /* All equivalence bitmaps are allocated from the same obstack. So
6161 we can use the obstack associated with VR1 to allocate vr0->equiv. */
6162 vr0->equiv = BITMAP_ALLOC (vr1->equiv->obstack);
6163 bitmap_copy (vr0->equiv, vr1->equiv);
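/* Note the asymmetry with vrp_meet below: range intersection takes the
   union of the equivalence sets, since the facts about both names keep
   holding on the narrowed range, whereas the meet intersects them,
   since only facts common to all paths survive.  E.g. intersecting a
   range with equivalences { x_1 } and one with { y_2 } yields
   { x_1, y_2 }; meeting them yields the empty set.  */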
6167 void
6168 vrp_intersect_ranges (value_range *vr0, value_range *vr1)
6170 if (dump_file && (dump_flags & TDF_DETAILS))
6172 fprintf (dump_file, "Intersecting\n ");
6173 dump_value_range (dump_file, vr0);
6174 fprintf (dump_file, "\nand\n ");
6175 dump_value_range (dump_file, vr1);
6176 fprintf (dump_file, "\n");
6178 vrp_intersect_ranges_1 (vr0, vr1);
6179 if (dump_file && (dump_flags & TDF_DETAILS))
6181 fprintf (dump_file, "to\n ");
6182 dump_value_range (dump_file, vr0);
6183 fprintf (dump_file, "\n");
6187 /* Meet operation for value ranges. Given two value ranges VR0 and
6188 VR1, store in VR0 a range that contains both VR0 and VR1. This
6189 may not be the smallest possible such range. */
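/* Some illustrative meets (the result need not be the smallest such
   range; union_ranges, defined earlier in this file, generally falls
   back to the convex hull):

     [1, 5]    meet [3, 10]  -> [1, 10]
     [1, 5]    meet [8, 10]  -> [1, 10]    the gap is filled in
     UNDEFINED meet [1, 5]   -> [1, 5]
     [1, 5]    meet VARYING  -> VARYING  */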
6191 static void
6192 vrp_meet_1 (value_range *vr0, const value_range *vr1)
6194 value_range saved;
6196 if (vr0->type == VR_UNDEFINED)
6198 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
6199 return;
6202 if (vr1->type == VR_UNDEFINED)
6204 /* VR0 already has the resulting range. */
6205 return;
6208 if (vr0->type == VR_VARYING)
6210 /* Nothing to do. VR0 already has the resulting range. */
6211 return;
6214 if (vr1->type == VR_VARYING)
6216 set_value_range_to_varying (vr0);
6217 return;
6220 saved = *vr0;
6221 union_ranges (&vr0->type, &vr0->min, &vr0->max,
6222 vr1->type, vr1->min, vr1->max);
6223 if (vr0->type == VR_VARYING)
6225 /* Failed to find an efficient meet. Before giving up and setting
6226 the result to VARYING, see if we can at least derive a useful
6227 anti-range. FIXME, all this nonsense about distinguishing
6228 anti-ranges from ranges is necessary because of the odd
6229 semantics of range_includes_zero_p and friends. */
6230 if (((saved.type == VR_RANGE
6231 && range_includes_zero_p (saved.min, saved.max) == 0)
6232 || (saved.type == VR_ANTI_RANGE
6233 && range_includes_zero_p (saved.min, saved.max) == 1))
6234 && ((vr1->type == VR_RANGE
6235 && range_includes_zero_p (vr1->min, vr1->max) == 0)
6236 || (vr1->type == VR_ANTI_RANGE
6237 && range_includes_zero_p (vr1->min, vr1->max) == 1)))
6239 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
6241 /* Since this meet operation did not result from the meeting of
6242 two equivalent names, VR0 cannot have any equivalences. */
6243 if (vr0->equiv)
6244 bitmap_clear (vr0->equiv);
6245 return;
6248 set_value_range_to_varying (vr0);
6249 return;
6251 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
6252 vr0->equiv);
6253 if (vr0->type == VR_VARYING)
6254 return;
6256 /* The resulting set of equivalences is always the intersection of
6257 the two sets. */
6258 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6259 bitmap_and_into (vr0->equiv, vr1->equiv);
6260 else if (vr0->equiv && !vr1->equiv)
6261 bitmap_clear (vr0->equiv);
6264 void
6265 vrp_meet (value_range *vr0, const value_range *vr1)
6267 if (dump_file && (dump_flags & TDF_DETAILS))
6269 fprintf (dump_file, "Meeting\n ");
6270 dump_value_range (dump_file, vr0);
6271 fprintf (dump_file, "\nand\n ");
6272 dump_value_range (dump_file, vr1);
6273 fprintf (dump_file, "\n");
6275 vrp_meet_1 (vr0, vr1);
6276 if (dump_file && (dump_flags & TDF_DETAILS))
6278 fprintf (dump_file, "to\n ");
6279 dump_value_range (dump_file, vr0);
6280 fprintf (dump_file, "\n");
6285 /* Visit all arguments for PHI node PHI that flow through executable
6286 edges. If a valid value range can be derived from all the incoming
6287 value ranges, set a new range for the LHS of PHI. */
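/* For instance, for

     # x_3 = PHI <x_1(2), x_2(3)>

   with x_1 in [0, 4] on the edge from block 2 and x_2 in [10, 12] on
   the edge from block 3, extract_range_from_phi_node meets the
   incoming ranges and gives x_3 the range [0, 12].  */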
6289 enum ssa_prop_result
6290 vrp_prop::visit_phi (gphi *phi)
6292 tree lhs = PHI_RESULT (phi);
6293 value_range vr_result = VR_INITIALIZER;
6294 extract_range_from_phi_node (phi, &vr_result);
6295 if (update_value_range (lhs, &vr_result))
6297 if (dump_file && (dump_flags & TDF_DETAILS))
6299 fprintf (dump_file, "Found new range for ");
6300 print_generic_expr (dump_file, lhs);
6301 fprintf (dump_file, ": ");
6302 dump_value_range (dump_file, &vr_result);
6303 fprintf (dump_file, "\n");
6306 if (vr_result.type == VR_VARYING)
6307 return SSA_PROP_VARYING;
6309 return SSA_PROP_INTERESTING;
6312 /* Nothing changed, don't add outgoing edges. */
6313 return SSA_PROP_NOT_INTERESTING;
6316 class vrp_folder : public substitute_and_fold_engine
6318 public:
6319 tree get_value (tree) FINAL OVERRIDE;
6320 bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
6321 bool fold_predicate_in (gimple_stmt_iterator *);
6323 class vr_values *vr_values;
6325 /* Delegators. */
6326 tree vrp_evaluate_conditional (tree_code code, tree op0,
6327 tree op1, gimple *stmt)
6328 { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
6329 bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
6330 { return vr_values->simplify_stmt_using_ranges (gsi); }
6331 tree op_with_constant_singleton_value_range (tree op)
6332 { return vr_values->op_with_constant_singleton_value_range (op); }
6335 /* If the statement pointed by SI has a predicate whose value can be
6336 computed using the value range information computed by VRP, compute
6337 its value and return true. Otherwise, return false. */
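/* For example, given

     x_2 = ASSERT_EXPR <x_1, x_1 > 10>;
     ...
     if (x_2 > 5)

   the range [11, +INF] recorded for x_2 makes the condition known to
   be true, so the GIMPLE_COND is rewritten via gimple_cond_make_true;
   a comparison on the RHS of an assignment is folded to its constant
   value instead.  */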
6339 bool
6340 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
6342 bool assignment_p = false;
6343 tree val;
6344 gimple *stmt = gsi_stmt (*si);
6346 if (is_gimple_assign (stmt)
6347 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
6349 assignment_p = true;
6350 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
6351 gimple_assign_rhs1 (stmt),
6352 gimple_assign_rhs2 (stmt),
6353 stmt);
6355 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6356 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6357 gimple_cond_lhs (cond_stmt),
6358 gimple_cond_rhs (cond_stmt),
6359 stmt);
6360 else
6361 return false;
6363 if (val)
6365 if (assignment_p)
6366 val = fold_convert (gimple_expr_type (stmt), val);
6368 if (dump_file)
6370 fprintf (dump_file, "Folding predicate ");
6371 print_gimple_expr (dump_file, stmt, 0);
6372 fprintf (dump_file, " to ");
6373 print_generic_expr (dump_file, val);
6374 fprintf (dump_file, "\n");
6377 if (is_gimple_assign (stmt))
6378 gimple_assign_set_rhs_from_tree (si, val);
6379 else
6381 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
6382 gcond *cond_stmt = as_a <gcond *> (stmt);
6383 if (integer_zerop (val))
6384 gimple_cond_make_false (cond_stmt);
6385 else if (integer_onep (val))
6386 gimple_cond_make_true (cond_stmt);
6387 else
6388 gcc_unreachable ();
6391 return true;
6394 return false;
6397 /* Callback for substitute_and_fold folding the stmt at *SI. */
6399 bool
6400 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
6402 if (fold_predicate_in (si))
6403 return true;
6405 return simplify_stmt_using_ranges (si);
6408 /* If OP has a value range with a single constant value, return that;
6409 otherwise return NULL_TREE. This returns OP itself if OP is a
6410 constant.
6412 Implemented as a pure wrapper right now, but this will change. */
6414 tree
6415 vrp_folder::get_value (tree op)
6417 return op_with_constant_singleton_value_range (op);
6420 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
6421 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
6422 BB. If no such ASSERT_EXPR is found, return OP. */
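/* For example, given

     x_2 = ASSERT_EXPR <x_1, x_1 != 0>;    in a dominating block
     ...
     if (x_1 > 4)                          STMT in BB

   asking for x_1 yields x_2, whose recorded range also reflects the
   x_1 != 0 assertion.  */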
6424 static tree
6425 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
6427 imm_use_iterator imm_iter;
6428 gimple *use_stmt;
6429 use_operand_p use_p;
6431 if (TREE_CODE (op) == SSA_NAME)
6433 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
6435 use_stmt = USE_STMT (use_p);
6436 if (use_stmt != stmt
6437 && gimple_assign_single_p (use_stmt)
6438 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
6439 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
6440 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
6441 return gimple_assign_lhs (use_stmt);
6444 return op;
6447 /* A hack: a file-scope handle passing the current vr_values to the jump-threading callback below. */
6448 static class vr_values *x_vr_values;
6450 /* A trivial wrapper so that we can present the generic jump threading
6451 code with a simple API for simplifying statements. STMT is the
6452 statement we want to simplify, WITHIN_STMT provides the location
6453 for any overflow warnings. */
6455 static tree
6456 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
6457 class avail_exprs_stack *avail_exprs_stack,
6458 basic_block bb)
6460 /* First see if the conditional is in the hash table. */
6461 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
6462 if (cached_lhs && is_gimple_min_invariant (cached_lhs))
6463 return cached_lhs;
6465 vr_values *vr_values = x_vr_values;
6466 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6468 tree op0 = gimple_cond_lhs (cond_stmt);
6469 op0 = lhs_of_dominating_assert (op0, bb, stmt);
6471 tree op1 = gimple_cond_rhs (cond_stmt);
6472 op1 = lhs_of_dominating_assert (op1, bb, stmt);
6474 return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6475 op0, op1, within_stmt);
6478 /* We simplify a switch statement by trying to determine which case label
6479 will be taken. If we are successful then we return the corresponding
6480 CASE_LABEL_EXPR. */
6481 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
6483 tree op = gimple_switch_index (switch_stmt);
6484 if (TREE_CODE (op) != SSA_NAME)
6485 return NULL_TREE;
6487 op = lhs_of_dominating_assert (op, bb, stmt);
6489 value_range *vr = vr_values->get_value_range (op);
6490 if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
6491 || symbolic_range_p (vr))
6492 return NULL_TREE;
6494 if (vr->type == VR_RANGE)
6496 size_t i, j;
6497 /* Get the range of labels that contain a part of the operand's
6498 value range. */
6499 find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);
6501 /* Is there only one such label? */
6502 if (i == j)
6504 tree label = gimple_switch_label (switch_stmt, i);
6506 /* The i'th label will be taken only if the value range of the
6507 operand is entirely within the bounds of this label. */
6508 if (CASE_HIGH (label) != NULL_TREE
6509 ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
6510 && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
6511 : (tree_int_cst_equal (CASE_LOW (label), vr->min)
6512 && tree_int_cst_equal (vr->min, vr->max)))
6513 return label;
6516 /* If there are no such labels then the default label will be
6517 taken. */
6518 if (i > j)
6519 return gimple_switch_label (switch_stmt, 0);
6522 if (vr->type == VR_ANTI_RANGE)
6524 unsigned n = gimple_switch_num_labels (switch_stmt);
6525 tree min_label = gimple_switch_label (switch_stmt, 1);
6526 tree max_label = gimple_switch_label (switch_stmt, n - 1);
6528 /* The default label will be taken only if the anti-range of the
6529 operand is entirely outside the bounds of all the (non-default)
6530 case labels. */
6531 if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
6532 && (CASE_HIGH (max_label) != NULL_TREE
6533 ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
6534 : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
6535 return gimple_switch_label (switch_stmt, 0);
6538 return NULL_TREE;
6541 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
6543 value_range new_vr = VR_INITIALIZER;
6544 tree lhs = gimple_assign_lhs (assign_stmt);
6546 if (TREE_CODE (lhs) == SSA_NAME
6547 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6548 || POINTER_TYPE_P (TREE_TYPE (lhs))))
6550 vr_values->extract_range_from_assignment (&new_vr, assign_stmt);
6551 if (range_int_cst_singleton_p (&new_vr))
6552 return new_vr.min;
6556 return NULL_TREE;
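/* As an example of the switch handling above, given

     switch (x_1)    with x_1 in [3, 4]
       case 0 ... 2:
       case 3 ... 5:
       default:

   find_case_label_range locates the single label 3 ... 5 whose bounds
   contain [3, 4], so that CASE_LABEL_EXPR is returned and the threader
   knows which edge the switch will take.  */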
6559 class vrp_dom_walker : public dom_walker
6561 public:
6562 vrp_dom_walker (cdi_direction direction,
6563 class const_and_copies *const_and_copies,
6564 class avail_exprs_stack *avail_exprs_stack)
6565 : dom_walker (direction, true),
6566 m_const_and_copies (const_and_copies),
6567 m_avail_exprs_stack (avail_exprs_stack),
6568 m_dummy_cond (NULL) {}
6570 virtual edge before_dom_children (basic_block);
6571 virtual void after_dom_children (basic_block);
6573 class vr_values *vr_values;
6575 private:
6576 class const_and_copies *m_const_and_copies;
6577 class avail_exprs_stack *m_avail_exprs_stack;
6579 gcond *m_dummy_cond;
6583 /* Called before processing dominator children of BB. We want to look
6584 at ASSERT_EXPRs and record information from them in the appropriate
6585 tables.
6587 We could look at other statements here, but doing so is unlikely
6588 to significantly increase the jump threads we discover. */
6590 edge
6591 vrp_dom_walker::before_dom_children (basic_block bb)
6593 gimple_stmt_iterator gsi;
6595 m_avail_exprs_stack->push_marker ();
6596 m_const_and_copies->push_marker ();
6597 for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6599 gimple *stmt = gsi_stmt (gsi);
6600 if (gimple_assign_single_p (stmt)
6601 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
6603 tree rhs1 = gimple_assign_rhs1 (stmt);
6604 tree cond = TREE_OPERAND (rhs1, 1);
6605 tree inverted = invert_truthvalue (cond);
6606 vec<cond_equivalence> p;
6607 p.create (3);
6608 record_conditions (&p, cond, inverted);
6609 for (unsigned int i = 0; i < p.length (); i++)
6610 m_avail_exprs_stack->record_cond (&p[i]);
6612 tree lhs = gimple_assign_lhs (stmt);
6613 m_const_and_copies->record_const_or_copy (lhs,
6614 TREE_OPERAND (rhs1, 0));
6615 p.release ();
6616 continue;
6618 break;
6620 return NULL;
6623 /* Called after processing dominator children of BB. This is where we
6624 actually call into the threader. */
6625 void
6626 vrp_dom_walker::after_dom_children (basic_block bb)
6628 if (!m_dummy_cond)
6629 m_dummy_cond = gimple_build_cond (NE_EXPR,
6630 integer_zero_node, integer_zero_node,
6631 NULL, NULL);
6633 x_vr_values = vr_values;
6634 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
6635 m_avail_exprs_stack,
6636 simplify_stmt_for_jump_threading);
6637 x_vr_values = NULL;
6639 m_avail_exprs_stack->pop_to_marker ();
6640 m_const_and_copies->pop_to_marker ();
6643 /* Blocks which have more than one predecessor and more than
6644 one successor present jump threading opportunities, i.e.,
6645 when the block is reached from a specific predecessor, we
6646 may be able to determine which of the outgoing edges will
6647 be traversed. When this optimization applies, we are able
6648 to avoid conditionals at runtime and we may expose secondary
6649 optimization opportunities.
6651 This routine is effectively a driver for the generic jump
6652 threading code. It basically just presents the generic code
6653 with edges that may be suitable for jump threading.
6655 Unlike DOM, we do not iterate VRP if jump threading was successful.
6656 While iterating may expose new opportunities for VRP, it is expected
6657 those opportunities would be very limited and the compile time cost
6658 to expose those opportunities would be significant.
6660 As jump threading opportunities are discovered, they are registered
6661 for later realization. */
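/* A typical opportunity the walker exposes:

     if (x_1 > 10)
       {
         x_2 = ASSERT_EXPR <x_1, x_1 > 10>;
         ...
         if (x_2 > 5)
           ...
       }

   On this path the assertion gives x_2 the range [11, +INF], so the
   inner condition is provably true and the jump can be threaded.  */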
6663 static void
6664 identify_jump_threads (class vr_values *vr_values)
6666 int i;
6667 edge e;
6669 /* Ugh. When substituting values earlier in this pass we can
6670 wipe the dominance information. So rebuild the dominator
6671 information as we need it within the jump threading code. */
6672 calculate_dominance_info (CDI_DOMINATORS);
6674 /* We do not allow VRP information to be used for jump threading
6675 across a back edge in the CFG. Otherwise it becomes too
6676 difficult to avoid eliminating loop exit tests. Of course
6677 EDGE_DFS_BACK is not accurate at this time so we have to
6678 recompute it. */
6679 mark_dfs_back_edges ();
6681 /* Do not thread across edges we are about to remove. Just marking
6682 them as EDGE_IGNORE will do. */
6683 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
6684 e->flags |= EDGE_IGNORE;
6686 /* Allocate our unwinder stack to unwind any temporary equivalences
6687 that might be recorded. */
6688 const_and_copies *equiv_stack = new const_and_copies ();
6690 hash_table<expr_elt_hasher> *avail_exprs
6691 = new hash_table<expr_elt_hasher> (1024);
6692 avail_exprs_stack *avail_exprs_stack
6693 = new class avail_exprs_stack (avail_exprs);
6695 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
6696 walker.vr_values = vr_values;
6697 walker.walk (cfun->cfg->x_entry_block_ptr);
6699 /* Clear EDGE_IGNORE. */
6700 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
6701 e->flags &= ~EDGE_IGNORE;
6703 /* We do not actually update the CFG or SSA graphs at this point as
6704 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
6705 handle ASSERT_EXPRs gracefully. */
6706 delete equiv_stack;
6707 delete avail_exprs;
6708 delete avail_exprs_stack;
6711 /* Traverse all the blocks folding conditionals with known ranges. */
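/* For instance, if propagation computed i_5 in [0, 99] from

     i_5 = x_4 % 100;    with x_4 known to be non-negative

   the loop below exports the range with set_range_info so later passes
   can reuse it, while a pointer p_7 with range ~[0, 0] is marked with
   set_ptr_nonnull.  */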
6713 void
6714 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
6716 size_t i;
6718 vr_values.values_propagated = true;
6720 if (dump_file)
6722 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
6723 vr_values.dump_all_value_ranges (dump_file);
6724 fprintf (dump_file, "\n");
6727 /* Record nonnull flags for pointer SSA_NAMEs and range info for non-pointer SSA_NAMEs. */
6728 for (i = 0; i < num_ssa_names; i++)
6730 tree name = ssa_name (i);
6731 if (!name)
6732 continue;
6734 value_range *vr = get_value_range (name);
6735 if ((vr->type == VR_VARYING)
6737 || (vr->type == VR_UNDEFINED)
6738 || (TREE_CODE (vr->min) != INTEGER_CST)
6739 || (TREE_CODE (vr->max) != INTEGER_CST))
6740 continue;
6742 if (POINTER_TYPE_P (TREE_TYPE (name))
6743 && ((vr->type == VR_RANGE
6744 && range_includes_zero_p (vr->min, vr->max) == 0)
6745 || (vr->type == VR_ANTI_RANGE
6746 && range_includes_zero_p (vr->min, vr->max) == 1)))
6747 set_ptr_nonnull (name);
6748 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
6749 set_range_info (name, vr->type,
6750 wi::to_wide (vr->min),
6751 wi::to_wide (vr->max));
6754 class vrp_folder vrp_folder;
6755 vrp_folder.vr_values = &vr_values;
6756 vrp_folder.substitute_and_fold ();
6758 if (warn_array_bounds && warn_array_bounds_p)
6759 check_all_array_refs ();
6762 /* Main entry point to VRP (Value Range Propagation). This pass is
6763 loosely based on J. R. C. Patterson, ``Accurate Static Branch
6764 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
6765 Programming Language Design and Implementation, pp. 67-78, 1995.
6766 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
6768 This is essentially an SSA-CCP pass modified to deal with ranges
6769 instead of constants.
6771 While propagating ranges, we may find that two or more SSA names
6772 have equivalent, though distinct, ranges. For instance,
6774 1 x_9 = p_3->a;
6775 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
6776 3 if (p_4 == q_2)
6777 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
6778 5 endif
6779 6 if (q_2)
6781 In the code above, pointer p_5 has range [q_2, q_2], but from the
6782 code we can also determine that p_5 cannot be NULL and, if q_2 had
6783 a non-varying range, p_5's range should also be compatible with it.
6785 These equivalences are created by two expressions: ASSERT_EXPR and
6786 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
6787 result of another assertion, we can use the fact that p_5 and
6788 p_4 are equivalent when evaluating p_5's range.
6790 Together with value ranges, we also propagate these equivalences
6791 between names so that we can take advantage of information from
6792 multiple ranges when doing final replacement. Note that this
6793 equivalency relation is transitive but not symmetric.
6795 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
6796 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
6797 in contexts where that assertion does not hold (e.g., in line 6).
6799 TODO, the main difference between this pass and Patterson's is that
6800 we do not propagate edge probabilities. We only compute whether
6801 edges can be taken or not. That is, instead of having a spectrum
6802 of jump probabilities between 0 and 1, we only deal with 0, 1 and
6803 DON'T KNOW. In the future, it may be worthwhile to propagate
6804 probabilities to aid branch prediction. */
6806 static unsigned int
6807 execute_vrp (bool warn_array_bounds_p)
6809 int i;
6810 edge e;
6811 switch_update *su;
6813 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
6814 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
6815 scev_initialize ();
6817 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
6818 Inserting assertions may split edges which will invalidate
6819 EDGE_DFS_BACK. */
6820 insert_range_assertions ();
6822 to_remove_edges.create (10);
6823 to_update_switch_stmts.create (5);
6824 threadedge_initialize_values ();
6826 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
6827 mark_dfs_back_edges ();
6829 class vrp_prop vrp_prop;
6830 vrp_prop.vrp_initialize ();
6831 vrp_prop.ssa_propagate ();
6832 vrp_prop.vrp_finalize (warn_array_bounds_p);
6834 /* We must identify jump threading opportunities before we release
6835 the data structures built by VRP. */
6836 identify_jump_threads (&vrp_prop.vr_values);
6838 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
6839 was set by a type conversion can often be rewritten to use the
6840 RHS of the type conversion.
6842 However, doing so inhibits jump threading through the comparison.
6843 So that transformation is not performed until after jump threading
6844 is complete. */
6845 basic_block bb;
6846 FOR_EACH_BB_FN (bb, cfun)
6848 gimple *last = last_stmt (bb);
6849 if (last && gimple_code (last) == GIMPLE_COND)
6850 vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
6853 free_numbers_of_iterations_estimates (cfun);
6855 /* ASSERT_EXPRs must be removed before finalizing jump threads
6856 as finalizing jump threads calls the CFG cleanup code which
6857 does not properly handle ASSERT_EXPRs. */
6858 remove_range_assertions ();
6860 /* If we exposed any new variables, go ahead and put them into
6861 SSA form now, before we handle jump threading. This simplifies
6862 interactions between rewriting of _DECL nodes into SSA form
6863 and rewriting SSA_NAME nodes into SSA form after block
6864 duplication and CFG manipulation. */
6865 update_ssa (TODO_update_ssa);
6867 /* We identified all the jump threading opportunities earlier, but could
6868 not transform the CFG at that time. This routine transforms the
6869 CFG and arranges for the dominator tree to be rebuilt if necessary.
6871 Note the SSA graph update will occur during the normal TODO
6872 processing by the pass manager. */
6873 thread_through_all_blocks (false);
6875 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
6876 CFG in a broken state and requires a cfg_cleanup run. */
6877 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
6878 remove_edge (e);
6879 /* Update SWITCH_EXPR case label vector. */
6880 FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
6882 size_t j;
6883 size_t n = TREE_VEC_LENGTH (su->vec);
6884 tree label;
6885 gimple_switch_set_num_labels (su->stmt, n);
6886 for (j = 0; j < n; j++)
6887 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
6888 /* As we may have replaced the default label with a regular one,
6889 make sure to make it a real default label again. This ensures
6890 optimal expansion. */
6891 label = gimple_switch_label (su->stmt, 0);
6892 CASE_LOW (label) = NULL_TREE;
6893 CASE_HIGH (label) = NULL_TREE;
6896 if (to_remove_edges.length () > 0)
6898 free_dominance_info (CDI_DOMINATORS);
6899 loops_state_set (LOOPS_NEED_FIXUP);
6902 to_remove_edges.release ();
6903 to_update_switch_stmts.release ();
6904 threadedge_finalize_values ();
6906 scev_finalize ();
6907 loop_optimizer_finalize ();
6908 return 0;
6911 namespace {
6913 const pass_data pass_data_vrp =
6915 GIMPLE_PASS, /* type */
6916 "vrp", /* name */
6917 OPTGROUP_NONE, /* optinfo_flags */
6918 TV_TREE_VRP, /* tv_id */
6919 PROP_ssa, /* properties_required */
6920 0, /* properties_provided */
6921 0, /* properties_destroyed */
6922 0, /* todo_flags_start */
6923 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
6926 class pass_vrp : public gimple_opt_pass
6928 public:
6929 pass_vrp (gcc::context *ctxt)
6930 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
6933 /* opt_pass methods: */
6934 opt_pass * clone () { return new pass_vrp (m_ctxt); }
6935 void set_pass_param (unsigned int n, bool param)
6937 gcc_assert (n == 0);
6938 warn_array_bounds_p = param;
6940 virtual bool gate (function *) { return flag_tree_vrp != 0; }
6941 virtual unsigned int execute (function *)
6942 { return execute_vrp (warn_array_bounds_p); }
6944 private:
6945 bool warn_array_bounds_p;
6946 }; // class pass_vrp
6948 } // anon namespace
6950 gimple_opt_pass *
6951 make_pass_vrp (gcc::context *ctxt)
6953 return new pass_vrp (ctxt);