re PR debug/91929 (missing inline subroutine information in build using sin/cos)
[official-gcc.git] / gcc / tree-vrp.c
blobcffa05083400bb2539fed72eed7c695b5c8e6949
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2019 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimple-walk.h"
44 #include "tree-cfg.h"
45 #include "tree-dfa.h"
46 #include "tree-ssa-loop-manip.h"
47 #include "tree-ssa-loop-niter.h"
48 #include "tree-ssa-loop.h"
49 #include "tree-into-ssa.h"
50 #include "tree-ssa.h"
51 #include "intl.h"
52 #include "cfgloop.h"
53 #include "tree-scalar-evolution.h"
54 #include "tree-ssa-propagate.h"
55 #include "tree-chrec.h"
56 #include "tree-ssa-threadupdate.h"
57 #include "tree-ssa-scopedtables.h"
58 #include "tree-ssa-threadedge.h"
59 #include "omp-general.h"
60 #include "target.h"
61 #include "case-cfn-macros.h"
62 #include "params.h"
63 #include "alloc-pool.h"
64 #include "domwalk.h"
65 #include "tree-cfgcleanup.h"
66 #include "stringpool.h"
67 #include "attribs.h"
68 #include "vr-values.h"
69 #include "builtins.h"
70 #include "range-op.h"
/* Forward declaration: decompose the anti-range AR into the up-to-two
   ordinary ranges VR0 and VR1 it excludes around; defined later in
   this file.  When HANDLE_POINTERS is true, pointer anti-ranges are
   handled as well.  */
static bool
ranges_from_anti_range (const value_range_base *ar,
value_range_base *vr0, value_range_base *vr1,
bool handle_pointers = false);
/* Set of SSA names found live during the RPO traversal of the function
for still active basic-blocks.  */
static sbitmap *live;
81 void
82 value_range::set_equiv (bitmap equiv)
84 if (undefined_p () || varying_p ())
85 equiv = NULL;
86 /* Since updating the equivalence set involves deep copying the
87 bitmaps, only do it if absolutely necessary.
89 All equivalence bitmaps are allocated from the same obstack. So
90 we can use the obstack associated with EQUIV to allocate vr->equiv. */
91 if (m_equiv == NULL
92 && equiv != NULL)
93 m_equiv = BITMAP_ALLOC (equiv->obstack);
95 if (equiv != m_equiv)
97 if (equiv && !bitmap_empty_p (equiv))
98 bitmap_copy (m_equiv, equiv);
99 else
100 bitmap_clear (m_equiv);
104 /* Initialize value_range. */
106 void
107 value_range::set (enum value_range_kind kind, tree min, tree max,
108 bitmap equiv)
110 value_range_base::set (kind, min, max);
111 set_equiv (equiv);
112 if (flag_checking)
113 check ();
116 value_range_base::value_range_base (value_range_kind kind, tree min, tree max)
118 set (kind, min, max);
121 value_range::value_range (value_range_kind kind, tree min, tree max,
122 bitmap equiv)
124 m_equiv = NULL;
125 set (kind, min, max, equiv);
128 value_range::value_range (const value_range_base &other)
130 m_equiv = NULL;
131 set (other.kind (), other.min(), other.max (), NULL);
134 value_range_base::value_range_base (tree type)
136 set_varying (type);
139 value_range_base::value_range_base (enum value_range_kind kind,
140 tree type,
141 const wide_int &wmin,
142 const wide_int &wmax)
144 tree min = wide_int_to_tree (type, wmin);
145 tree max = wide_int_to_tree (type, wmax);
146 gcc_checking_assert (kind == VR_RANGE || kind == VR_ANTI_RANGE);
147 set (kind, min, max);
150 value_range_base::value_range_base (tree type,
151 const wide_int &wmin,
152 const wide_int &wmax)
154 tree min = wide_int_to_tree (type, wmin);
155 tree max = wide_int_to_tree (type, wmax);
156 set (VR_RANGE, min, max);
159 value_range_base::value_range_base (tree min, tree max)
161 set (VR_RANGE, min, max);
164 /* Like set, but keep the equivalences in place. */
166 void
167 value_range::update (value_range_kind kind, tree min, tree max)
169 set (kind, min, max,
170 (kind != VR_UNDEFINED && kind != VR_VARYING) ? m_equiv : NULL);
173 /* Copy value_range in FROM into THIS while avoiding bitmap sharing.
175 Note: The code that avoids the bitmap sharing looks at the existing
176 this->m_equiv, so this function cannot be used to initalize an
177 object. Use the constructors for initialization. */
179 void
180 value_range::deep_copy (const value_range *from)
182 set (from->m_kind, from->min (), from->max (), from->m_equiv);
185 void
186 value_range::move (value_range *from)
188 set (from->m_kind, from->min (), from->max ());
189 m_equiv = from->m_equiv;
190 from->m_equiv = NULL;
193 /* Check the validity of the range. */
195 void
196 value_range_base::check ()
198 switch (m_kind)
200 case VR_RANGE:
201 case VR_ANTI_RANGE:
203 int cmp;
205 gcc_assert (m_min && m_max);
207 gcc_assert (!TREE_OVERFLOW_P (m_min) && !TREE_OVERFLOW_P (m_max));
209 /* Creating ~[-MIN, +MAX] is stupid because that would be
210 the empty set. */
211 if (INTEGRAL_TYPE_P (TREE_TYPE (m_min)) && m_kind == VR_ANTI_RANGE)
212 gcc_assert (!vrp_val_is_min (m_min) || !vrp_val_is_max (m_max));
214 cmp = compare_values (m_min, m_max);
215 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
216 break;
218 case VR_UNDEFINED:
219 gcc_assert (!min () && !max ());
220 break;
221 case VR_VARYING:
222 gcc_assert (m_min && m_max);
223 break;
224 default:
225 gcc_unreachable ();
229 void
230 value_range::check ()
232 value_range_base::check ();
233 switch (m_kind)
235 case VR_UNDEFINED:
236 case VR_VARYING:
237 gcc_assert (!m_equiv || bitmap_empty_p (m_equiv));
238 default:;
242 /* Equality operator. We purposely do not overload ==, to avoid
243 confusion with the equality bitmap in the derived value_range
244 class. */
246 bool
247 value_range_base::equal_p (const value_range_base &other) const
249 /* Ignore types for undefined. All undefines are equal. */
250 if (undefined_p ())
251 return m_kind == other.m_kind;
253 return (m_kind == other.m_kind
254 && vrp_operand_equal_p (m_min, other.m_min)
255 && vrp_operand_equal_p (m_max, other.m_max));
258 /* Returns TRUE if THIS == OTHER. Ignores the equivalence bitmap if
259 IGNORE_EQUIVS is TRUE. */
261 bool
262 value_range::equal_p (const value_range &other, bool ignore_equivs) const
264 return (value_range_base::equal_p (other)
265 && (ignore_equivs
266 || vrp_bitmap_equal_p (m_equiv, other.m_equiv)));
269 /* Return TRUE if this is a symbolic range. */
271 bool
272 value_range_base::symbolic_p () const
274 return (!varying_p ()
275 && !undefined_p ()
276 && (!is_gimple_min_invariant (m_min)
277 || !is_gimple_min_invariant (m_max)));
280 /* NOTE: This is not the inverse of symbolic_p because the range
281 could also be varying or undefined. Ideally they should be inverse
282 of each other, with varying only applying to symbolics. Varying of
283 constants would be represented as [-MIN, +MAX]. */
285 bool
286 value_range_base::constant_p () const
288 return (!varying_p ()
289 && !undefined_p ()
290 && TREE_CODE (m_min) == INTEGER_CST
291 && TREE_CODE (m_max) == INTEGER_CST);
294 void
295 value_range_base::set_undefined ()
297 m_kind = VR_UNDEFINED;
298 m_min = m_max = NULL;
301 void
302 value_range::set_undefined ()
304 set (VR_UNDEFINED, NULL, NULL, NULL);
307 void
308 value_range_base::set_varying (tree type)
310 m_kind = VR_VARYING;
311 if (supports_type_p (type))
313 m_min = vrp_val_min (type, true);
314 m_max = vrp_val_max (type, true);
316 else
317 /* We can't do anything range-wise with these types. */
318 m_min = m_max = error_mark_node;
321 void
322 value_range::set_varying (tree type)
324 value_range_base::set_varying (type);
325 equiv_clear ();
328 /* Return TRUE if it is possible that range contains VAL. */
330 bool
331 value_range_base::may_contain_p (tree val) const
333 return value_inside_range (val) != 0;
336 void
337 value_range::equiv_clear ()
339 if (m_equiv)
340 bitmap_clear (m_equiv);
343 /* Add VAR and VAR's equivalence set (VAR_VR) to the equivalence
344 bitmap. If no equivalence table has been created, OBSTACK is the
345 obstack to use (NULL for the default obstack).
347 This is the central point where equivalence processing can be
348 turned on/off. */
350 void
351 value_range::equiv_add (const_tree var,
352 const value_range *var_vr,
353 bitmap_obstack *obstack)
355 if (!m_equiv)
356 m_equiv = BITMAP_ALLOC (obstack);
357 unsigned ver = SSA_NAME_VERSION (var);
358 bitmap_set_bit (m_equiv, ver);
359 if (var_vr && var_vr->m_equiv)
360 bitmap_ior_into (m_equiv, var_vr->m_equiv);
363 /* If range is a singleton, place it in RESULT and return TRUE.
364 Note: A singleton can be any gimple invariant, not just constants.
365 So, [&x, &x] counts as a singleton. */
367 bool
368 value_range_base::singleton_p (tree *result) const
370 if (m_kind == VR_ANTI_RANGE)
372 if (nonzero_p ())
374 if (TYPE_PRECISION (type ()) == 1)
376 if (result)
377 *result = m_max;
378 return true;
380 return false;
382 if (num_pairs () == 1)
384 value_range_base vr0, vr1;
385 ranges_from_anti_range (this, &vr0, &vr1, true);
386 return vr0.singleton_p (result);
389 if (m_kind == VR_RANGE
390 && vrp_operand_equal_p (min (), max ())
391 && is_gimple_min_invariant (min ()))
393 if (result)
394 *result = min ();
395 return true;
397 return false;
400 tree
401 value_range_base::type () const
403 gcc_checking_assert (m_min);
404 return TREE_TYPE (min ());
407 void
408 value_range_base::dump (FILE *file) const
410 if (undefined_p ())
411 fprintf (file, "UNDEFINED");
412 else if (m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
414 tree ttype = type ();
416 print_generic_expr (file, ttype);
417 fprintf (file, " ");
419 fprintf (file, "%s[", (m_kind == VR_ANTI_RANGE) ? "~" : "");
421 if (INTEGRAL_TYPE_P (ttype)
422 && !TYPE_UNSIGNED (ttype)
423 && vrp_val_is_min (min ())
424 && TYPE_PRECISION (ttype) != 1)
425 fprintf (file, "-INF");
426 else
427 print_generic_expr (file, min ());
429 fprintf (file, ", ");
431 if (INTEGRAL_TYPE_P (ttype)
432 && vrp_val_is_max (max ())
433 && TYPE_PRECISION (ttype) != 1)
434 fprintf (file, "+INF");
435 else
436 print_generic_expr (file, max ());
438 fprintf (file, "]");
440 else if (varying_p ())
442 print_generic_expr (file, type ());
443 fprintf (file, " VARYING");
445 else
446 gcc_unreachable ();
449 void
450 value_range_base::dump () const
452 dump (stderr);
455 void
456 value_range::dump (FILE *file) const
458 value_range_base::dump (file);
459 if ((m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
460 && m_equiv)
462 bitmap_iterator bi;
463 unsigned i, c = 0;
465 fprintf (file, " EQUIVALENCES: { ");
467 EXECUTE_IF_SET_IN_BITMAP (m_equiv, 0, i, bi)
469 print_generic_expr (file, ssa_name (i));
470 fprintf (file, " ");
471 c++;
474 fprintf (file, "} (%u elements)", c);
478 void
479 value_range::dump () const
481 dump (stderr);
484 void
485 dump_value_range (FILE *file, const value_range *vr)
487 if (!vr)
488 fprintf (file, "[]");
489 else
490 vr->dump (file);
493 void
494 dump_value_range (FILE *file, const value_range_base *vr)
496 if (!vr)
497 fprintf (file, "[]");
498 else
499 vr->dump (file);
502 DEBUG_FUNCTION void
503 debug (const value_range_base *vr)
505 dump_value_range (stderr, vr);
508 DEBUG_FUNCTION void
509 debug (const value_range_base &vr)
511 dump_value_range (stderr, &vr);
514 DEBUG_FUNCTION void
515 debug (const value_range *vr)
517 dump_value_range (stderr, vr);
520 DEBUG_FUNCTION void
521 debug (const value_range &vr)
523 dump_value_range (stderr, &vr);
526 /* Return true if the SSA name NAME is live on the edge E. */
528 static bool
529 live_on_edge (edge e, tree name)
531 return (live[e->dest->index]
532 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
535 /* Location information for ASSERT_EXPRs. Each instance of this
536 structure describes an ASSERT_EXPR for an SSA name. Since a single
537 SSA name may have more than one assertion associated with it, these
538 locations are kept in a linked list attached to the corresponding
539 SSA name. */
540 struct assert_locus
542 /* Basic block where the assertion would be inserted. */
543 basic_block bb;
545 /* Some assertions need to be inserted on an edge (e.g., assertions
546 generated by COND_EXPRs). In those cases, BB will be NULL. */
547 edge e;
549 /* Pointer to the statement that generated this assertion. */
550 gimple_stmt_iterator si;
552 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
553 enum tree_code comp_code;
555 /* Value being compared against. */
556 tree val;
558 /* Expression to compare. */
559 tree expr;
561 /* Next node in the linked list. */
562 assert_locus *next;
565 /* If bit I is present, it means that SSA name N_i has a list of
566 assertions that should be inserted in the IL. */
567 static bitmap need_assert_for;
569 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
570 holds a list of ASSERT_LOCUS_T nodes that describe where
571 ASSERT_EXPRs for SSA name N_I should be inserted. */
572 static assert_locus **asserts_for;
574 /* Return the maximum value for TYPE. */
576 tree
577 vrp_val_max (const_tree type, bool handle_pointers)
579 if (INTEGRAL_TYPE_P (type))
580 return TYPE_MAX_VALUE (type);
581 if (POINTER_TYPE_P (type) && handle_pointers)
583 wide_int max = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
584 return wide_int_to_tree (const_cast<tree> (type), max);
586 return NULL_TREE;
589 /* Return the minimum value for TYPE. */
591 tree
592 vrp_val_min (const_tree type, bool handle_pointers)
594 if (INTEGRAL_TYPE_P (type))
595 return TYPE_MIN_VALUE (type);
596 if (POINTER_TYPE_P (type) && handle_pointers)
597 return build_zero_cst (const_cast<tree> (type));
598 return NULL_TREE;
601 /* Return whether VAL is equal to the maximum value of its type.
602 We can't do a simple equality comparison with TYPE_MAX_VALUE because
603 C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
604 is not == to the integer constant with the same value in the type. */
606 bool
607 vrp_val_is_max (const_tree val, bool handle_pointers)
609 tree type_max = vrp_val_max (TREE_TYPE (val), handle_pointers);
610 return (val == type_max
611 || (type_max != NULL_TREE
612 && operand_equal_p (val, type_max, 0)));
615 /* Return whether VAL is equal to the minimum value of its type. */
617 bool
618 vrp_val_is_min (const_tree val, bool handle_pointers)
620 tree type_min = vrp_val_min (TREE_TYPE (val), handle_pointers);
621 return (val == type_min
622 || (type_min != NULL_TREE
623 && operand_equal_p (val, type_min, 0)));
/* VR_TYPE describes a range with mininum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */

enum value_range_kind
intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
				   wide_int *min, wide_int *max,
				   const wide_int &nonzero_bits,
				   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
	 A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
	 to create an inclusive upper bound for A and an inclusive lower
	 bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
	 and A_MAX is the highest value that satisfies NONZERO_BITS.
	 Likewise if the calculation of B_MIN wrapped, B is effectively
	 empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
	return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
	 other one.  Note the wrapped bound doubles as the extreme
	 valid value, per the comment above.  */
      if (a_empty || b_empty)
	{
	  *min = b_min;
	  *max = a_max;
	  gcc_checking_assert (wi::le_p (*min, *max, sgn));
	  return VR_RANGE;
	}

      /* Update the VR_ANTI_RANGE bounds to exclude only values that
	 cannot satisfy NONZERO_BITS.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
	 satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
	{
	  unsigned int precision = min->get_precision ();
	  *min = wi::min_value (precision, sgn);
	  *max = wi::max_value (precision, sgn);
	  vr_type = VR_RANGE;
	}
    }
  if (vr_type == VR_RANGE)
    {
      /* Tighten both bounds to the nearest values satisfying
	 NONZERO_BITS.  */
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
	return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}
/* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}.
   This means adjusting VRTYPE, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

void
value_range_base::set (enum value_range_kind kind, tree min, tree max)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (kind == VR_UNDEFINED)
    {
      set_undefined ();
      return;
    }
  else if (kind == VR_VARYING)
    {
      gcc_assert (TREE_TYPE (min) == TREE_TYPE (max));
      tree typ = TREE_TYPE (min);
      if (supports_type_p (typ))
	{
	  gcc_assert (vrp_val_min (typ, true));
	  gcc_assert (vrp_val_max (typ, true));
	}
      set_varying (typ);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      m_kind = kind;
      m_min = min;
      m_max = max;
      return;
    }

  /* Wrong order for min and max, to swap them and the VR type we need
     to adjust them.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
	 range covers all values, so for VR_RANGE it is varying and
	 for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
	{
	  set_varying (TREE_TYPE (min));
	  return;
	}

      /* Swap to [MAX+1, MIN-1] and flip between range/anti-range.  */
      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_varying (TREE_TYPE (min));
	  return;
	}

      kind = kind == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  tree type = TREE_TYPE (min);

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (kind == VR_ANTI_RANGE)
    {
      /* For -fstrict-enums we may receive out-of-range ranges so consider
	 values < -INF and values > INF as -INF/INF as well.  */
      bool is_min = (INTEGRAL_TYPE_P (type)
		     && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
      bool is_max = (INTEGRAL_TYPE_P (type)
		     && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ??? This could be VR_UNDEFINED instead.  */
	  set_varying (type);
	  return;
	}
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
	       && (is_min || is_max))
	{
	  /* Non-empty boolean ranges can always be represented
	     as a singleton range.  */
	  if (is_min)
	    min = max = vrp_val_max (TREE_TYPE (min));
	  else
	    min = max = vrp_val_min (TREE_TYPE (min));
	  kind = VR_RANGE;
	}
      else if (is_min
	       /* Allow non-zero pointers to be normalized to [1,MAX].  */
	       || (POINTER_TYPE_P (TREE_TYPE (min))
		   && integer_zerop (min)))
	{
	  /* ~[-INF, MAX] == [MAX+1, +INF].  */
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max), true);
	  kind = VR_RANGE;
	}
      else if (is_max)
	{
	  /* ~[MIN, +INF] == [-INF, MIN-1].  */
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  kind = VR_RANGE;
	}
    }

  /* Normalize [MIN, MAX] into VARYING and ~[MIN, MAX] into UNDEFINED.

     Avoid using TYPE_{MIN,MAX}_VALUE because -fstrict-enums can
     restrict those to a subset of what actually fits in the type.
     Instead use the extremes of the type precision which will allow
     compare_range_with_value() to check if a value is inside a range,
     whereas if we used TYPE_*_VAL, said function would just punt
     upon seeing a VARYING.  */
  unsigned prec = TYPE_PRECISION (type);
  signop sign = TYPE_SIGN (type);
  if (wi::eq_p (wi::to_wide (min), wi::min_value (prec, sign))
      && wi::eq_p (wi::to_wide (max), wi::max_value (prec, sign)))
    {
      if (kind == VR_RANGE)
	set_varying (type);
      else if (kind == VR_ANTI_RANGE)
	set_undefined ();
      else
	gcc_unreachable ();
      return;
    }

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */

  m_kind = kind;
  m_min = min;
  m_max = max;
  if (flag_checking)
    check ();
}
854 void
855 value_range_base::set (tree val)
857 gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
858 if (TREE_OVERFLOW_P (val))
859 val = drop_tree_overflow (val);
860 set (VR_RANGE, val, val);
863 void
864 value_range::set (tree val)
866 gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
867 if (TREE_OVERFLOW_P (val))
868 val = drop_tree_overflow (val);
869 set (VR_RANGE, val, val, NULL);
872 /* Set value range VR to a nonzero range of type TYPE. */
874 void
875 value_range_base::set_nonzero (tree type)
877 tree zero = build_int_cst (type, 0);
878 set (VR_ANTI_RANGE, zero, zero);
881 /* Set value range VR to a ZERO range of type TYPE. */
883 void
884 value_range_base::set_zero (tree type)
886 set (build_int_cst (type, 0));
889 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
891 bool
892 vrp_operand_equal_p (const_tree val1, const_tree val2)
894 if (val1 == val2)
895 return true;
896 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
897 return false;
898 return true;
901 /* Return true, if the bitmaps B1 and B2 are equal. */
903 bool
904 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
906 return (b1 == b2
907 || ((!b1 || bitmap_empty_p (b1))
908 && (!b2 || bitmap_empty_p (b2)))
909 || (b1 && b2
910 && bitmap_equal_p (b1, b2)));
913 static bool
914 range_has_numeric_bounds_p (const value_range_base *vr)
916 return (vr->min ()
917 && TREE_CODE (vr->min ()) == INTEGER_CST
918 && TREE_CODE (vr->max ()) == INTEGER_CST);
921 /* Return true if max and min of VR are INTEGER_CST. It's not necessary
922 a singleton. */
924 bool
925 range_int_cst_p (const value_range_base *vr)
927 return (vr->kind () == VR_RANGE && range_has_numeric_bounds_p (vr));
930 /* Return true if VR is a INTEGER_CST singleton. */
932 bool
933 range_int_cst_singleton_p (const value_range_base *vr)
935 return (range_int_cst_p (vr)
936 && tree_int_cst_equal (vr->min (), vr->max ()));
939 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
940 otherwise. We only handle additive operations and set NEG to true if the
941 symbol is negated and INV to the invariant part, if any. */
943 tree
944 get_single_symbol (tree t, bool *neg, tree *inv)
946 bool neg_;
947 tree inv_;
949 *inv = NULL_TREE;
950 *neg = false;
952 if (TREE_CODE (t) == PLUS_EXPR
953 || TREE_CODE (t) == POINTER_PLUS_EXPR
954 || TREE_CODE (t) == MINUS_EXPR)
956 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
958 neg_ = (TREE_CODE (t) == MINUS_EXPR);
959 inv_ = TREE_OPERAND (t, 0);
960 t = TREE_OPERAND (t, 1);
962 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
964 neg_ = false;
965 inv_ = TREE_OPERAND (t, 1);
966 t = TREE_OPERAND (t, 0);
968 else
969 return NULL_TREE;
971 else
973 neg_ = false;
974 inv_ = NULL_TREE;
977 if (TREE_CODE (t) == NEGATE_EXPR)
979 t = TREE_OPERAND (t, 0);
980 neg_ = !neg_;
983 if (TREE_CODE (t) != SSA_NAME)
984 return NULL_TREE;
986 if (inv_ && TREE_OVERFLOW_P (inv_))
987 inv_ = drop_tree_overflow (inv_);
989 *neg = neg_;
990 *inv = inv_;
991 return t;
994 /* The reverse operation: build a symbolic expression with TYPE
995 from symbol SYM, negated according to NEG, and invariant INV. */
997 static tree
998 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
1000 const bool pointer_p = POINTER_TYPE_P (type);
1001 tree t = sym;
1003 if (neg)
1004 t = build1 (NEGATE_EXPR, type, t);
1006 if (integer_zerop (inv))
1007 return t;
1009 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
1012 /* Return
1013 1 if VAL < VAL2
1014 0 if !(VAL < VAL2)
1015 -2 if those are incomparable. */
1017 operand_less_p (tree val, tree val2)
1019 /* LT is folded faster than GE and others. Inline the common case. */
1020 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1021 return tree_int_cst_lt (val, val2);
1022 else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
1023 return val == val2 ? 0 : -2;
1024 else
1026 int cmp = compare_values (val, val2);
1027 if (cmp == -1)
1028 return 1;
1029 else if (cmp == 0 || cmp == 1)
1030 return 0;
1031 else
1032 return -2;
1035 return 0;
/* Compare two values VAL1 and VAL2.  Return

	-2 if VAL1 and VAL2 cannot be compared at compile-time,
	-1 if VAL1 < VAL2,
	 0 if VAL1 == VAL2,
	+1 if VAL1 > VAL2, and
	+2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
    val2 = fold_convert (TREE_TYPE (val1), val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
	return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!inv1 || !TREE_NO_WARNING (val1))
	  && (!inv2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      /* A missing invariant part means an implicit + 0.  */
      if (!inv1)
	inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
	inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
		      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!sym1 || !TREE_NO_WARNING (val1))
	  && (!sym2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
	 underflows, this means that we can trivially compare the NAME with
	 it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
	  != wi::cmp (diff, wi::to_wide (cst), sgn))
	{
	  const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
	  /* Negate the result when the constant was on the left.  */
	  return cst1 ? res : -res;
	}

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	return -2;

      if (TREE_CODE (val1) == INTEGER_CST
	  && TREE_CODE (val2) == INTEGER_CST)
	return tree_int_cst_compare (val1, val2);

      /* POLY_INT_CSTs may still compare when one is known less than,
	 equal to, or greater than the other for all values of the
	 indeterminates.  */
      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
	{
	  if (known_eq (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 0;
	  if (known_lt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return -1;
	  if (known_gt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 1;
	}

      return -2;
    }
  else
    {
      if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
	{
	  /* We cannot compare overflowed values.  */
	  if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	    return -2;

	  return tree_int_cst_compare (val1, val2);
	}

      /* First see if VAL1 and VAL2 are not the same.  */
      if (operand_equal_p (val1, val2, 0))
	return 0;

      /* Folding below may emit overflow warnings; defer and discard
	 them, the answers here are used for optimization only.  */
      fold_defer_overflow_warnings ();

      /* If VAL1 is a lower address than VAL2, return -1.  */
      tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
      if (t && integer_onep (t))
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  return -1;
	}

      /* If VAL1 is a higher address than VAL2, return +1.  */
      t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
      if (t && integer_onep (t))
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  return 1;
	}

      /* If VAL1 is different than VAL2, return +2.  */
      t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
      fold_undefer_and_ignore_overflow_warnings ();
      if (t && integer_onep (t))
	return 2;

      return -2;
    }
}
1216 /* Compare values like compare_values_warnv. */
1219 compare_values (tree val1, tree val2)
1221 bool sop;
1222 return compare_values_warnv (val1, val2, &sop);
1226 /* Return 1 if VAL is inside value range.
1227 0 if VAL is not inside value range.
1228 -2 if we cannot tell either way.
1230 Benchmark compile/20001226-1.c compilation time after changing this
1231 function.  */
1234 value_range_base::value_inside_range (tree val) const
1236 int cmp1, cmp2;
     /* A varying range contains everything; an undefined range contains
        nothing.  */
1238 if (varying_p ())
1239 return 1;
1241 if (undefined_p ())
1242 return 0;
1244 cmp1 = operand_less_p (val, m_min);
1245 if (cmp1 == -2)
1246 return -2;
     /* VAL < m_min: outside a plain range, but inside an anti-range.  */
1247 if (cmp1 == 1)
1248 return m_kind != VR_RANGE;
1250 cmp2 = operand_less_p (m_max, val);
1251 if (cmp2 == -2)
1252 return -2;
     /* m_min <= VAL: for a plain range VAL is inside iff VAL <= m_max;
        for an anti-range the membership test is inverted.  */
1254 if (m_kind == VR_RANGE)
1255 return !cmp2;
1256 else
1257 return !!cmp2;
1260 /* For range [LB, UB] compute two wide_int bit masks.
1262 In the MAY_BE_NONZERO bit mask, if some bit is unset, it means that
1263 for all numbers in the range the bit is 0, otherwise it might be 0
1264 or 1.
1266 In the MUST_BE_NONZERO bit mask, if some bit is set, it means that
1267 for all numbers in the range the bit is 1, otherwise it might be 0
1268 or 1.  */
1270 static inline void
1271 wide_int_range_set_zero_nonzero_bits (signop sign,
1272 const wide_int &lb, const wide_int &ub,
1273 wide_int &may_be_nonzero,
1274 wide_int &must_be_nonzero)
     /* Start from the most conservative answer: every bit may be nonzero,
        no bit must be nonzero.  */
1276 may_be_nonzero = wi::minus_one (lb.get_precision ());
1277 must_be_nonzero = wi::zero (lb.get_precision ());
     /* Singleton range: both masks are exactly the value itself.  */
1279 if (wi::eq_p (lb, ub))
1281 may_be_nonzero = lb;
1282 must_be_nonzero = may_be_nonzero;
     /* If LB and UB have the same sign, the bits above the highest bit
        that differs between them are common to every value in the range;
        below that point any bit may vary.  If the range straddles zero,
        the conservative defaults above are kept.  */
1284 else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
1286 wide_int xor_mask = lb ^ ub;
1287 may_be_nonzero = lb | ub;
1288 must_be_nonzero = lb & ub;
1289 if (xor_mask != 0)
1291 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
1292 may_be_nonzero.get_precision ());
1293 may_be_nonzero = may_be_nonzero | mask;
1294 must_be_nonzero = wi::bit_and_not (must_be_nonzero, mask);
1299 /* value_range wrapper for wide_int_range_set_zero_nonzero_bits above.
1301 Return TRUE if VR was a constant range and we were able to compute
1302 the bit masks.  */
1304 bool
1305 vrp_set_zero_nonzero_bits (const tree expr_type,
1306 const value_range_base *vr,
1307 wide_int *may_be_nonzero,
1308 wide_int *must_be_nonzero)
     /* Non-constant ranges get the fully conservative masks (all bits may
        be nonzero, none must be) and a FALSE return.  */
1310 if (!range_int_cst_p (vr))
1312 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
1313 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
1314 return false;
1316 wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
1317 wi::to_wide (vr->min ()),
1318 wi::to_wide (vr->max ()),
1319 *may_be_nonzero, *must_be_nonzero);
1320 return true;
1323 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
1324 so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
1325 false otherwise.  If *AR can be represented with a single range
1326 *VR1 will be VR_UNDEFINED.
     HANDLE_POINTERS is passed through to vrp_val_min/vrp_val_max when
     computing the extreme values of AR's type.  */
1328 static bool
1329 ranges_from_anti_range (const value_range_base *ar,
1330 value_range_base *vr0, value_range_base *vr1,
1331 bool handle_pointers)
1333 tree type = ar->type ();
1335 vr0->set_undefined ();
1336 vr1->set_undefined ();
1338 /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
1339 [A+1, +INF].  Not sure if this helps in practice, though.  */
     /* Only constant anti-ranges of types with known extremes can be
        decomposed.  */
1341 if (ar->kind () != VR_ANTI_RANGE
1342 || TREE_CODE (ar->min ()) != INTEGER_CST
1343 || TREE_CODE (ar->max ()) != INTEGER_CST
1344 || !vrp_val_min (type, handle_pointers)
1345 || !vrp_val_max (type, handle_pointers)
1346 return false;
     /* ~[MIN, MAX] = [TYPE_MIN, MIN-1] U [MAX+1, TYPE_MAX], dropping
        either piece when it would be empty.  */
1348 if (tree_int_cst_lt (vrp_val_min (type, handle_pointers), ar->min ()))
1349 vr0->set (VR_RANGE,
1350 vrp_val_min (type, handle_pointers),
1351 wide_int_to_tree (type, wi::to_wide (ar->min ()) - 1));
1352 if (tree_int_cst_lt (ar->max (), vrp_val_max (type, handle_pointers)))
1353 vr1->set (VR_RANGE,
1354 wide_int_to_tree (type, wi::to_wide (ar->max ()) + 1),
1355 vrp_val_max (type, handle_pointers));
     /* Normalize so that a single resulting range always lands in *VR0.  */
1356 if (vr0->undefined_p ())
1358 *vr0 = *vr1;
1359 vr1->set_undefined ();
1362 return !vr0->undefined_p ();
1365 /* If BOUND will include a symbolic bound, adjust it accordingly,
1366 otherwise leave it as is.
1368 CODE is the original operation that combined the bounds (PLUS_EXPR
1369 or MINUS_EXPR).
1371 TYPE is the type of the original operation.
1373 SYM_OPn is the symbolic for OPn if it has a symbolic.
1375 NEG_OPn is TRUE if the OPn was negated.
     BOUND is set to NULL_TREE if the symbolic bound cannot be built
     without risking undefined overflow.  */
1377 static void
1378 adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
1379 tree sym_op0, tree sym_op1,
1380 bool neg_op0, bool neg_op1)
1382 bool minus_p = (code == MINUS_EXPR);
1383 /* If the result bound is constant, we're done; otherwise, build the
1384 symbolic lower bound.  */
     /* SYM_OP0 == SYM_OP1 covers both "no symbols at all" and "the same
        symbol on both sides" (which the caller only allows when the
        symbols cancel), so BOUND stays constant.  */
1385 if (sym_op0 == sym_op1)
1387 else if (sym_op0)
1388 bound = build_symbolic_expr (type, sym_op0,
1389 neg_op0, bound);
1390 else if (sym_op1)
1392 /* We may not negate if that might introduce
1393 undefined overflow.  */
1394 if (!minus_p
1395 || neg_op1
1396 || TYPE_OVERFLOW_WRAPS (type))
1397 bound = build_symbolic_expr (type, sym_op1,
1398 neg_op1 ^ minus_p, bound);
1399 else
1400 bound = NULL_TREE;
1404 /* Combine OP0 and OP1, which are two parts of a bound, into one wide
1405 int bound according to CODE.  CODE is the operation combining the
1406 bound (either a PLUS_EXPR or a MINUS_EXPR).
1408 TYPE is the type of the combine operation.
1410 WI is the wide int to store the result.
1412 OVF is set to the kind of overflow that occurred while combining
1413 (wi::OVF_UNDERFLOW, wi::OVF_OVERFLOW or wi::OVF_NONE); it is only
     written on the paths that can overflow, so callers must initialize
     it to wi::OVF_NONE beforehand.  Either OPn may be NULL, in which
     case it is treated as absent.  */
1415 static void
1416 combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
1417 tree type, tree op0, tree op1)
1419 bool minus_p = (code == MINUS_EXPR);
1420 const signop sgn = TYPE_SIGN (type);
1421 const unsigned int prec = TYPE_PRECISION (type);
1423 /* Combine the bounds, if any.  */
1424 if (op0 && op1)
1426 if (minus_p)
1427 wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
1428 else
1429 wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
1431 else if (op0)
1432 wi = wi::to_wide (op0);
1433 else if (op1)
     /* Only OP1 present: for MINUS the bound is -OP1.  */
1435 if (minus_p)
1436 wi = wi::neg (wi::to_wide (op1), &ovf);
1437 else
1438 wi = wi::to_wide (op1);
     /* Neither part present: the combined bound is zero.  */
1440 else
1441 wi = wi::shwi (0, prec);
1444 /* Given a range in [WMIN, WMAX], adjust it for possible overflow and
1445 store the result in KIND, MIN and MAX.
1447 TYPE is the type of the range.
1449 MIN_OVF and MAX_OVF indicate what type of overflow, if any,
1450 occurred while originally calculating WMIN or WMAX.  wi::OVF_UNDERFLOW
1451 indicates underflow, wi::OVF_OVERFLOW overflow, wi::OVF_NONE neither.  */
1453 static void
1454 set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
1455 tree type,
1456 const wide_int &wmin, const wide_int &wmax,
1457 wi::overflow_type min_ovf,
1458 wi::overflow_type max_ovf)
1460 const signop sgn = TYPE_SIGN (type);
1461 const unsigned int prec = TYPE_PRECISION (type);
1463 /* For one bit precision if max < min, then the swapped
1464 range covers all values.  */
1465 if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
1467 kind = VR_VARYING;
1468 return;
1471 if (TYPE_OVERFLOW_WRAPS (type))
1473 /* If overflow wraps, truncate the values and adjust the
1474 range kind and bounds appropriately.  */
1475 wide_int tmin = wide_int::from (wmin, prec, sgn);
1476 wide_int tmax = wide_int::from (wmax, prec, sgn);
     /* Both bounds overflowed, or neither did.  */
1477 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
1479 /* If the limits are swapped, we wrapped around and cover
1480 the entire range.  */
1481 if (wi::gt_p (tmin, tmax, sgn))
1482 kind = VR_VARYING;
1483 else
1485 kind = VR_RANGE;
1486 /* No overflow or both overflow or underflow.  The
1487 range kind stays VR_RANGE.  */
1488 min = wide_int_to_tree (type, tmin);
1489 max = wide_int_to_tree (type, tmax);
1491 return;
1493 else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
1494 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
1496 /* Min underflow or max overflow.  The range kind
1497 changes to VR_ANTI_RANGE.  */
1498 bool covers = false;
1499 wide_int tem = tmin;
     /* The excluded interval is (old max, old min); COVERS tracks the
        wrap-around cases where that interval would be empty or span
        everything.  */
1500 tmin = tmax + 1;
1501 if (wi::cmp (tmin, tmax, sgn) < 0)
1502 covers = true;
1503 tmax = tem - 1;
1504 if (wi::cmp (tmax, tem, sgn) > 0)
1505 covers = true;
1506 /* If the anti-range would cover nothing, drop to varying.
1507 Likewise if the anti-range bounds are outside of the
1508 types values.  */
1509 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
1511 kind = VR_VARYING;
1512 return;
1514 kind = VR_ANTI_RANGE;
1515 min = wide_int_to_tree (type, tmin);
1516 max = wide_int_to_tree (type, tmax);
1517 return;
1519 else
1521 /* Other underflow and/or overflow, drop to VR_VARYING.  */
1522 kind = VR_VARYING;
1523 return;
1526 else
1528 /* If overflow does not wrap, saturate to the types min/max
1529 value.  */
1530 wide_int type_min = wi::min_value (prec, sgn);
1531 wide_int type_max = wi::max_value (prec, sgn);
1532 kind = VR_RANGE;
1533 if (min_ovf == wi::OVF_UNDERFLOW)
1534 min = wide_int_to_tree (type, type_min);
1535 else if (min_ovf == wi::OVF_OVERFLOW)
1536 min = wide_int_to_tree (type, type_max);
1537 else
1538 min = wide_int_to_tree (type, wmin);
1540 if (max_ovf == wi::OVF_UNDERFLOW)
1541 max = wide_int_to_tree (type, type_min);
1542 else if (max_ovf == wi::OVF_OVERFLOW)
1543 max = wide_int_to_tree (type, type_max);
1544 else
1545 max = wide_int_to_tree (type, wmax);
1549 /* Fold two value range's of a POINTER_PLUS_EXPR into VR.
     The only interesting results are "known non-NULL", "known NULL"
     (both operands zero) and varying.  */
1551 static void
1552 extract_range_from_pointer_plus_expr (value_range_base *vr,
1553 enum tree_code code,
1554 tree expr_type,
1555 const value_range_base *vr0,
1556 const value_range_base *vr1)
1558 gcc_checking_assert (POINTER_TYPE_P (expr_type)
1559 && code == POINTER_PLUS_EXPR);
1560 /* For pointer types, we are really only interested in asserting
1561 whether the expression evaluates to non-NULL.
1562 With -fno-delete-null-pointer-checks we need to be more
1563 conservative.  As some object might reside at address 0,
1564 then some offset could be added to it and the same offset
1565 subtracted again and the result would be NULL.
1566 E.g.
1567 static int a[12]; where &a[0] is NULL and
1568 ptr = &a[6];
1569 ptr -= 6;
1570 ptr will be NULL here, even when there is POINTER_PLUS_EXPR
1571 where the first range doesn't include zero and the second one
1572 doesn't either.  As the second operand is sizetype (unsigned),
1573 consider all ranges where the MSB could be set as possible
1574 subtractions where the result might be NULL.  */
1575 if ((!range_includes_zero_p (vr0)
1576 || !range_includes_zero_p (vr1))
1577 && !TYPE_OVERFLOW_WRAPS (expr_type)
1578 && (flag_delete_null_pointer_checks
1579 || (range_int_cst_p (vr1)
1580 && !tree_int_cst_sign_bit (vr1->max ()))))
1581 vr->set_nonzero (expr_type);
1582 else if (vr0->zero_p () && vr1->zero_p ())
1583 vr->set_zero (expr_type);
1584 else
1585 vr->set_varying (expr_type);
1588 /* Extract range information from a PLUS/MINUS_EXPR and store the
1589 result in *VR.
     Anti-range operands are first decomposed into up to two plain
     ranges (recursing and unioning the partial results); the precise
     computation below only handles VR_RANGE operands that are constant
     or contain a single symbol.  */
1591 static void
1592 extract_range_from_plus_minus_expr (value_range_base *vr,
1593 enum tree_code code,
1594 tree expr_type,
1595 const value_range_base *vr0_,
1596 const value_range_base *vr1_)
1598 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
1600 value_range_base vr0 = *vr0_, vr1 = *vr1_;
1601 value_range_base vrtem0, vrtem1;
1603 /* Now canonicalize anti-ranges to ranges when they are not symbolic
1604 and express ~[] op X as ([]' op X) U ([]'' op X).  */
1605 if (vr0.kind () == VR_ANTI_RANGE
1606 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
1608 extract_range_from_plus_minus_expr (vr, code, expr_type, &vrtem0, vr1_);
1609 if (!vrtem1.undefined_p ())
1611 value_range_base vrres;
1612 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
1613 &vrtem1, vr1_);
1614 vr->union_ (&vrres);
1616 return;
1618 /* Likewise for X op ~[].  */
1619 if (vr1.kind () == VR_ANTI_RANGE
1620 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
1622 extract_range_from_plus_minus_expr (vr, code, expr_type, vr0_, &vrtem0);
1623 if (!vrtem1.undefined_p ())
1625 value_range_base vrres;
1626 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
1627 vr0_, &vrtem1);
1628 vr->union_ (&vrres);
1630 return;
1633 value_range_kind kind;
1634 value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
1635 tree vr0_min = vr0.min (), vr0_max = vr0.max ();
1636 tree vr1_min = vr1.min (), vr1_max = vr1.max ();
1637 tree min = NULL, max = NULL;
1639 /* This will normalize things such that calculating
1640 [0,0] - VR_VARYING is not dropped to varying, but is
1641 calculated as [MIN+1, MAX].  */
1642 if (vr0.varying_p ())
1644 vr0_kind = VR_RANGE;
1645 vr0_min = vrp_val_min (expr_type);
1646 vr0_max = vrp_val_max (expr_type);
1648 if (vr1.varying_p ())
1650 vr1_kind = VR_RANGE;
1651 vr1_min = vrp_val_min (expr_type);
1652 vr1_max = vrp_val_max (expr_type);
1655 const bool minus_p = (code == MINUS_EXPR);
     /* For MINUS the roles of VR1's bounds swap: the result's minimum
        subtracts VR1's maximum and vice versa.  */
1656 tree min_op0 = vr0_min;
1657 tree min_op1 = minus_p ? vr1_max : vr1_min;
1658 tree max_op0 = vr0_max;
1659 tree max_op1 = minus_p ? vr1_min : vr1_max;
1660 tree sym_min_op0 = NULL_TREE;
1661 tree sym_min_op1 = NULL_TREE;
1662 tree sym_max_op0 = NULL_TREE;
1663 tree sym_max_op1 = NULL_TREE;
1664 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
1666 neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
1668 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
1669 single-symbolic ranges, try to compute the precise resulting range,
1670 but only if we know that this resulting range will also be constant
1671 or single-symbolic.  */
1672 if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
1673 && (TREE_CODE (min_op0) == INTEGER_CST
1674 || (sym_min_op0
1675 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
1676 && (TREE_CODE (min_op1) == INTEGER_CST
1677 || (sym_min_op1
1678 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
1679 && (!(sym_min_op0 && sym_min_op1)
1680 || (sym_min_op0 == sym_min_op1
1681 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
1682 && (TREE_CODE (max_op0) == INTEGER_CST
1683 || (sym_max_op0
1684 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
1685 && (TREE_CODE (max_op1) == INTEGER_CST
1686 || (sym_max_op1
1687 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
1688 && (!(sym_max_op0 && sym_max_op1)
1689 || (sym_max_op0 == sym_max_op1
1690 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
1692 wide_int wmin, wmax;
1693 wi::overflow_type min_ovf = wi::OVF_NONE;
1694 wi::overflow_type max_ovf = wi::OVF_NONE;
1696 /* Build the bounds.  */
1697 combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
1698 combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
1700 /* If we have overflow for the constant part and the resulting
1701 range will be symbolic, drop to VR_VARYING.  */
1702 if (((bool)min_ovf && sym_min_op0 != sym_min_op1)
1703 || ((bool)max_ovf && sym_max_op0 != sym_max_op1))
1705 vr->set_varying (expr_type);
1706 return;
1709 /* Adjust the range for possible overflow.  */
1710 min = NULL_TREE;
1711 max = NULL_TREE;
1712 set_value_range_with_overflow (kind, min, max, expr_type,
1713 wmin, wmax, min_ovf, max_ovf);
1714 if (kind == VR_VARYING)
1716 vr->set_varying (expr_type);
1717 return;
1720 /* Build the symbolic bounds if needed.  */
1721 adjust_symbolic_bound (min, code, expr_type,
1722 sym_min_op0, sym_min_op1,
1723 neg_min_op0, neg_min_op1);
1724 adjust_symbolic_bound (max, code, expr_type,
1725 sym_max_op0, sym_max_op1,
1726 neg_max_op0, neg_max_op1);
1728 else
1730 /* For other cases, for example if we have a PLUS_EXPR with two
1731 VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
1732 to compute a precise range for such a case.
1733 ??? General even mixed range kind operations can be expressed
1734 by for example transforming ~[3, 5] + [1, 2] to range-only
1735 operations and a union primitive:
1736 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
1737 [-INF+1, 4] U [6, +INF(OVF)]
1738 though usually the union is not exactly representable with
1739 a single range or anti-range as the above is
1740 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
1741 but one could use a scheme similar to equivalences for this.  */
1742 vr->set_varying (expr_type);
1743 return;
1746 /* If either MIN or MAX overflowed, then set the resulting range to
1747 VARYING.  */
1748 if (min == NULL_TREE
1749 || TREE_OVERFLOW_P (min)
1750 || max == NULL_TREE
1751 || TREE_OVERFLOW_P (max))
1753 vr->set_varying (expr_type);
1754 return;
1757 int cmp = compare_values (min, max);
1758 if (cmp == -2 || cmp == 1)
1760 /* If the new range has its limits swapped around (MIN > MAX),
1761 then the operation caused one of them to wrap around, mark
1762 the new range VARYING.  */
1763 vr->set_varying (expr_type);
1765 else
1766 vr->set (kind, min, max);
1769 /* Return the range-ops handler for CODE and EXPR_TYPE.  If no
1770 suitable operator is found, return NULL and set VR to VARYING.  */
1772 static const range_operator *
1773 get_range_op_handler (value_range_base *vr,
1774 enum tree_code code,
1775 tree expr_type)
1777 const range_operator *op = range_op_handler (code, expr_type);
     /* Setting VR here lets callers simply bail out on a NULL return.  */
1778 if (!op)
1779 vr->set_varying (expr_type);
1780 return op;
1783 /* If the types passed are supported, return TRUE, otherwise set VR to
1784 VARYING and return FALSE.  TYPE1 is optional and only checked when
     non-NULL.  */
1786 static bool
1787 supported_types_p (value_range_base *vr,
1788 tree type0,
1789 tree type1 = NULL)
1791 if (!value_range_base::supports_type_p (type0)
1792 || (type1 && !value_range_base::supports_type_p (type1)))
1794 vr->set_varying (type0);
1795 return false;
1797 return true;
1800 /* If any of the ranges passed are defined, return TRUE, otherwise set
1801 VR to UNDEFINED and return FALSE.  VR1 is optional; when NULL only
     VR0 is considered.  */
1803 static bool
1804 defined_ranges_p (value_range_base *vr,
1805 const value_range_base *vr0,
1806 const value_range_base *vr1 = NULL)
1808 if (vr0->undefined_p () && (!vr1 || vr1->undefined_p ()))
1810 vr->set_undefined ();
1811 return false;
1813 return true;
     /* Return a copy of *VR, except that an undefined range is replaced
        by a VARYING range of EXPR_TYPE.  */
1816 static value_range_base
1817 drop_undefines_to_varying (const value_range_base *vr, tree expr_type)
1819 if (vr->undefined_p ())
1820 return value_range_base (expr_type);
1821 else
1822 return *vr;
1825 /* If any operand is symbolic, perform a binary operation on them and
1826 return TRUE, otherwise return FALSE.
     NOTE(review): the unchecked dereference of the handler below relies
     on the caller (range_fold_binary_expr) having already verified that
     a range_operator exists for CODE.  */
1828 static bool
1829 range_fold_binary_symbolics_p (value_range_base *vr,
1830 tree_code code,
1831 tree expr_type,
1832 const value_range_base *vr0,
1833 const value_range_base *vr1)
1835 if (vr0->symbolic_p () || vr1->symbolic_p ())
     /* PLUS/MINUS and POINTER_PLUS have dedicated symbolic-aware
        handlers; everything else normalizes the symbolics away first.  */
1837 if ((code == PLUS_EXPR || code == MINUS_EXPR))
1839 extract_range_from_plus_minus_expr (vr, code, expr_type, vr0, vr1);
1840 return true;
1842 if (POINTER_TYPE_P (expr_type) && code == POINTER_PLUS_EXPR)
1844 extract_range_from_pointer_plus_expr (vr, code, expr_type, vr0, vr1);
1845 return true;
1847 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1848 *vr = op->fold_range (expr_type,
1849 vr0->normalize_symbolics (),
1850 vr1->normalize_symbolics ());
1851 return true;
1853 return false;
1856 /* If operand is symbolic, perform a unary operation on it and return
1857 TRUE, otherwise return FALSE.
     NOTE(review): as with the binary variant, the handler returned by
     get_range_op_handler is dereferenced unchecked; the caller
     (range_fold_unary_expr) has already verified it exists.  */
1859 static bool
1860 range_fold_unary_symbolics_p (value_range_base *vr,
1861 tree_code code,
1862 tree expr_type,
1863 const value_range_base *vr0)
1865 if (vr0->symbolic_p ())
1867 if (code == NEGATE_EXPR)
1869 /* -X is simply 0 - X.  */
1870 value_range_base zero;
1871 zero.set_zero (vr0->type ());
1872 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &zero, vr0);
1873 return true;
1875 if (code == BIT_NOT_EXPR)
1877 /* ~X is simply -1 - X.  */
1878 value_range_base minusone;
1879 minusone.set (build_int_cst (vr0->type (), -1));
1880 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &minusone, vr0);
1881 return true;
1883 const range_operator *op = get_range_op_handler (vr, code, expr_type);
     /* Unary operations are folded as binary ones with a varying second
        operand.  */
1884 *vr = op->fold_range (expr_type,
1885 vr0->normalize_symbolics (),
1886 value_range_base (expr_type));
1887 return true;
1889 return false;
1892 /* Perform a binary operation on a pair of ranges, storing the result
     in *VR.  Unsupported types yield VARYING; two undefined operands
     yield UNDEFINED; symbolic operands are delegated to
     range_fold_binary_symbolics_p.  */
1894 void
1895 range_fold_binary_expr (value_range_base *vr,
1896 enum tree_code code,
1897 tree expr_type,
1898 const value_range_base *vr0_,
1899 const value_range_base *vr1_)
1901 if (!supported_types_p (vr, expr_type)
1902 || !defined_ranges_p (vr, vr0_, vr1_))
1903 return;
1904 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1905 if (!op)
1906 return;
     /* A single undefined operand is treated as varying from here on.  */
1908 value_range_base vr0 = drop_undefines_to_varying (vr0_, expr_type);
1909 value_range_base vr1 = drop_undefines_to_varying (vr1_, expr_type);
1910 if (range_fold_binary_symbolics_p (vr, code, expr_type, &vr0, &vr1))
1911 return;
1913 *vr = op->fold_range (expr_type,
1914 vr0.normalize_addresses (),
1915 vr1.normalize_addresses ());
1918 /* Perform a unary operation on a range, storing the result in *VR.
     Mirrors range_fold_binary_expr: unsupported types yield VARYING, an
     undefined operand yields UNDEFINED, symbolic operands are handled
     by range_fold_unary_symbolics_p.  */
1920 void
1921 range_fold_unary_expr (value_range_base *vr,
1922 enum tree_code code, tree expr_type,
1923 const value_range_base *vr0,
1924 tree vr0_type)
1926 if (!supported_types_p (vr, expr_type, vr0_type)
1927 || !defined_ranges_p (vr, vr0))
1928 return;
1929 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1930 if (!op)
1931 return;
1933 if (range_fold_unary_symbolics_p (vr, code, expr_type, vr0))
1934 return;
     /* Fold as a binary operation with a varying second operand.  */
1936 *vr = op->fold_range (expr_type,
1937 vr0->normalize_addresses (),
1938 value_range_base (expr_type));
1941 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
1942 create a new SSA name N and return the assertion assignment
1943 'N = ASSERT_EXPR <V, V OP W>'.  */
1945 static gimple *
1946 build_assert_expr_for (tree cond, tree v)
1948 tree a;
1949 gassign *assertion;
1951 gcc_assert (TREE_CODE (v) == SSA_NAME
1952 && COMPARISON_CLASS_P (cond));
1954 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
     /* LHS is NULL here; create_new_def_for below supplies the new SSA
        name as the definition of this statement.  */
1955 assertion = gimple_build_assign (NULL_TREE, a);
1957 /* The new ASSERT_EXPR, creates a new SSA name that replaces the
1958 operand of the ASSERT_EXPR.  Create it so the new name and the old one
1959 are registered in the replacement table so that we can fix the SSA web
1960 after adding all the ASSERT_EXPRs.  */
1961 tree new_def = create_new_def_for (v, assertion, NULL);
1962 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
1963 given we have to be able to fully propagate those out to re-create
1964 valid SSA when removing the asserts.  */
1965 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
1966 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
1968 return assertion;
1972 /* Return true if STMT is a GIMPLE_COND whose comparison involves
1973 floating point values, i.e. the type of its LHS operand is a
     float type.  */
1975 static inline bool
1976 fp_predicate (gimple *stmt)
1978 GIMPLE_CHECK (stmt, GIMPLE_COND);
1980 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
1983 /* If the range of values taken by OP can be inferred after STMT executes,
1984 return the comparison code (COMP_CODE_P) and value (VAL_P) that
1985 describes the inferred range.  Return true if a range could be
1986 inferred.  */
1988 bool
1989 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
1991 *val_p = NULL_TREE;
1992 *comp_code_p = ERROR_MARK;
1994 /* Do not attempt to infer anything in names that flow through
1995 abnormal edges.  */
1996 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
1997 return false;
1999 /* If STMT is the last statement of a basic block with no normal
2000 successors, there is no point inferring anything about any of its
2001 operands.  We would not be able to find a proper insertion point
2002 for the assertion, anyway.  */
2003 if (stmt_ends_bb_p (stmt))
2005 edge_iterator ei;
2006 edge e;
     /* Look for at least one successor edge that is neither abnormal
        nor an EH edge.  */
2008 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2009 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2010 break;
2011 if (e == NULL)
2012 return false;
     /* The only inference currently made: if STMT dereferences OP (or
        otherwise requires it non-NULL), then OP != 0 afterwards.  */
2015 if (infer_nonnull_range (stmt, op))
2017 *val_p = build_int_cst (TREE_TYPE (op), 0);
2018 *comp_code_p = NE_EXPR;
2019 return true;
2022 return false;
/* Forward declarations of the assertion dump/debug routines defined
   below; kept extern (non-static) so the DEBUG_FUNCTION variants can be
   called from a debugger.  */
2026 void dump_asserts_for (FILE *, tree);
2027 void debug_asserts_for (tree);
2028 void dump_all_asserts (FILE *);
2029 void debug_all_asserts (void);
2031 /* Dump all the registered assertions for NAME to FILE.  */
2033 void
2034 dump_asserts_for (FILE *file, tree name)
2036 assert_locus *loc;
2038 fprintf (file, "Assertions to be inserted for ");
2039 print_generic_expr (file, name);
2040 fprintf (file, "\n");
     /* Walk the singly-linked list of assert locations recorded for
        NAME's SSA version.  */
2042 loc = asserts_for[SSA_NAME_VERSION (name)];
2043 while (loc)
2045 fprintf (file, "\t");
2046 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2047 fprintf (file, "\n\tBB #%d", loc->bb->index);
     /* Edge insertions additionally report the source/destination.  */
2048 if (loc->e)
2050 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2051 loc->e->dest->index);
2052 dump_edge_info (file, loc->e, dump_flags, 0);
2054 fprintf (file, "\n\tPREDICATE: ");
2055 print_generic_expr (file, loc->expr);
2056 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2057 print_generic_expr (file, loc->val);
2058 fprintf (file, "\n\n");
2059 loc = loc->next;
2062 fprintf (file, "\n");
2066 /* Dump all the registered assertions for NAME to stderr.  Intended
     for interactive use from a debugger.  */
2068 DEBUG_FUNCTION void
2069 debug_asserts_for (tree name)
2071 dump_asserts_for (stderr, name);
2075 /* Dump all the registered assertions for all the names to FILE.  */
2077 void
2078 dump_all_asserts (FILE *file)
2080 unsigned i;
2081 bitmap_iterator bi;
2083 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
     /* need_assert_for has a bit set for every SSA version with at least
        one registered assertion.  */
2084 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2085 dump_asserts_for (file, ssa_name (i));
2086 fprintf (file, "\n");
2090 /* Dump all the registered assertions for all the names to stderr.
     Intended for interactive use from a debugger.  */
2092 DEBUG_FUNCTION void
2093 debug_all_asserts (void)
2095 dump_all_asserts (stderr);
2098 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS.  */
2100 static void
2101 add_assert_info (vec<assert_info> &asserts,
2102 tree name, tree expr, enum tree_code comp_code, tree val)
2104 assert_info info;
2105 info.comp_code = comp_code;
2106 info.name = name;
     /* Strip TREE_OVERFLOW so the stored constant does not trip the
        undefined-overflow warning machinery later on.  */
2107 if (TREE_OVERFLOW_P (val))
2108 val = drop_tree_overflow (val);
2109 info.val = val;
2110 info.expr = expr;
2111 asserts.safe_push (info);
2112 if (dump_enabled_p ())
2113 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
2114 "Adding assert for %T from %T %s %T\n",
2115 name, expr, op_symbol_code (comp_code), val);
2118 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2119 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2120 E->DEST, then register this location as a possible insertion point
2121 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2123 BB, E and SI provide the exact insertion point for the new
2124 ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
2125 on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2126 BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2127 must not be NULL.  */
2129 static void
2130 register_new_assert_for (tree name, tree expr,
2131 enum tree_code comp_code,
2132 tree val,
2133 basic_block bb,
2134 edge e,
2135 gimple_stmt_iterator si)
2137 assert_locus *n, *loc, *last_loc;
2138 basic_block dest_bb;
     /* Exactly one of BB and E identifies the insertion point.  */
2140 gcc_checking_assert (bb == NULL || e == NULL);
2142 if (e == NULL)
2143 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2144 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2146 /* Never build an assert comparing against an integer constant with
2147 TREE_OVERFLOW set.  This confuses our undefined overflow warning
2148 machinery.  */
2149 if (TREE_OVERFLOW_P (val))
2150 val = drop_tree_overflow (val);
2152 /* The new assertion A will be inserted at BB or E.  We need to
2153 determine if the new location is dominated by a previously
2154 registered location for A.  If we are doing an edge insertion,
2155 assume that A will be inserted at E->DEST.  Note that this is not
2156 necessarily true.
2158 If E is a critical edge, it will be split.  But even if E is
2159 split, the new block will dominate the same set of blocks that
2160 E->DEST dominates.
2162 The reverse, however, is not true, blocks dominated by E->DEST
2163 will not be dominated by the new block created to split E.  So,
2164 if the insertion location is on a critical edge, we will not use
2165 the new location to move another assertion previously registered
2166 at a block dominated by E->DEST.  */
2167 dest_bb = (bb) ? bb : e->dest;
2169 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2170 VAL at a block dominating DEST_BB, then we don't need to insert a new
2171 one.  Similarly, if the same assertion already exists at a block
2172 dominated by DEST_BB and the new location is not on a critical
2173 edge, then update the existing location for the assertion (i.e.,
2174 move the assertion up in the dominance tree).
2176 Note, this is implemented as a simple linked list because there
2177 should not be more than a handful of assertions registered per
2178 name.  If this becomes a performance problem, a table hashed by
2179 COMP_CODE and VAL could be implemented.  */
2180 loc = asserts_for[SSA_NAME_VERSION (name)];
2181 last_loc = loc;
2182 while (loc)
     /* An existing entry matches if the code, value and expression are
        (structurally) identical.  */
2184 if (loc->comp_code == comp_code
2185 && (loc->val == val
2186 || operand_equal_p (loc->val, val, 0))
2187 && (loc->expr == expr
2188 || operand_equal_p (loc->expr, expr, 0)))
2190 /* If E is not a critical edge and DEST_BB
2191 dominates the existing location for the assertion, move
2192 the assertion up in the dominance tree by updating its
2193 location information.  */
2194 if ((e == NULL || !EDGE_CRITICAL_P (e))
2195 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2197 loc->bb = dest_bb;
2198 loc->e = e;
2199 loc->si = si;
2200 return;
2204 /* Update the last node of the list and move to the next one.  */
2205 last_loc = loc;
2206 loc = loc->next;
2209 /* If we didn't find an assertion already registered for
2210 NAME COMP_CODE VAL, add a new one at the end of the list of
2211 assertions associated with NAME.  */
2212 n = XNEW (struct assert_locus);
2213 n->bb = dest_bb;
2214 n->e = e;
2215 n->si = si;
2216 n->comp_code = comp_code;
2217 n->val = val;
2218 n->expr = expr;
2219 n->next = NULL;
2221 if (last_loc)
2222 last_loc->next = n;
2223 else
2224 asserts_for[SSA_NAME_VERSION (name)] = n;
     /* Record that NAME has at least one pending assertion.  */
2226 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2229 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2230 Extract a suitable test code and value and store them into *CODE_P and
2231 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2233 If no extraction was possible, return FALSE, otherwise return TRUE.
2235 If INVERT is true, then we invert the result stored into *CODE_P.
     NAME must be one of COND_OP0/COND_OP1 (enforced below).  */
2237 static bool
2238 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2239 tree cond_op0, tree cond_op1,
2240 bool invert, enum tree_code *code_p,
2241 tree *val_p)
2243 enum tree_code comp_code;
2244 tree val;
2246 /* Otherwise, we have a comparison of the form NAME COMP VAL
2247 or VAL COMP NAME.  */
2248 if (name == cond_op1)
2250 /* If the predicate is of the form VAL COMP NAME, flip
2251 COMP around because we need to register NAME as the
2252 first operand in the predicate.  */
2253 comp_code = swap_tree_comparison (cond_code);
2254 val = cond_op0;
2256 else if (name == cond_op0)
2258 /* The comparison is of the form NAME COMP VAL, so the
2259 comparison code remains unchanged.  */
2260 comp_code = cond_code;
2261 val = cond_op1;
2263 else
2264 gcc_unreachable ();
2266 /* Invert the comparison code as necessary.  */
2267 if (invert)
2268 comp_code = invert_tree_comparison (comp_code, 0);
2270 /* VRP only handles integral and pointer types.  */
2271 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
2272 && ! POINTER_TYPE_P (TREE_TYPE (val)))
2273 return false;
2275 /* Do not register always-false predicates.
2276 FIXME: this works around a limitation in fold() when dealing with
2277 enumerations.  Given 'enum { N1, N2 } x;', fold will not
2278 fold 'if (x > N2)' to 'if (0)'.  */
2279 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
2280 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
2282 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
2283 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
     /* NAME > TYPE_MAX and NAME < TYPE_MIN can never hold.  */
2285 if (comp_code == GT_EXPR
2286 && (!max
2287 || compare_values (val, max) == 0))
2288 return false;
2290 if (comp_code == LT_EXPR
2291 && (!min
2292 || compare_values (val, min) == 0))
2293 return false;
2295 *code_p = comp_code;
2296 *val_p = val;
2297 return true;
2300 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
2301 (otherwise return VAL). VAL and MASK must be zero-extended for
2302 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
2303 (to transform signed values into unsigned) and at the end xor
2304 SGNBIT back. */
2306 static wide_int
2307 masked_increment (const wide_int &val_in, const wide_int &mask,
2308 const wide_int &sgnbit, unsigned int prec)
2310 wide_int bit = wi::one (prec), res;
2311 unsigned int i;
2313 wide_int val = val_in ^ sgnbit;
2314 for (i = 0; i < prec; i++, bit += bit)
2316 res = mask;
2317 if ((res & bit) == 0)
2318 continue;
2319 res = bit - 1;
2320 res = wi::bit_and_not (val + bit, res);
2321 res &= mask;
2322 if (wi::gtu_p (res, val))
2323 return res ^ sgnbit;
2325 return val ^ sgnbit;
/* Helper for overflow_comparison_p

   OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
   OP1's defining statement to see if it ultimately has the form
   OP0 CODE (OP0 PLUS INTEGER_CST)

   If so, return TRUE indicating this is an overflow test and store into
   *NEW_CST an updated constant that can be used in a narrowed range test.

   REVERSED indicates if the comparison was originally:

   OP1 CODE' OP0.

   This affects how we build the updated constant.  */

static bool
overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
			 bool follow_assert_exprs, bool reversed, tree *new_cst)
{
  /* See if this is a relational operation between two SSA_NAMES with
     unsigned, overflow wrapping values.  If so, check it more deeply.  */
  if ((code == LT_EXPR || code == LE_EXPR
       || code == GE_EXPR || code == GT_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
    {
      gimple *op1_def = SSA_NAME_DEF_STMT (op1);

      /* If requested, follow any ASSERT_EXPRs backwards for OP1.  */
      if (follow_assert_exprs)
	{
	  while (gimple_assign_single_p (op1_def)
		 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
	    {
	      op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
	      if (TREE_CODE (op1) != SSA_NAME)
		break;
	      op1_def = SSA_NAME_DEF_STMT (op1);
	    }
	}

      /* Now look at the defining statement of OP1 to see if it adds
	 or subtracts a nonzero constant from another operand.  */
      if (op1_def
	  && is_gimple_assign (op1_def)
	  && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
	  && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
	  && !integer_zerop (gimple_assign_rhs2 (op1_def)))
	{
	  tree target = gimple_assign_rhs1 (op1_def);

	  /* If requested, follow ASSERT_EXPRs backwards for op0 looking
	     for one where TARGET appears on the RHS.  */
	  if (follow_assert_exprs)
	    {
	      /* Now see if that "other operand" is op0, following the chain
		 of ASSERT_EXPRs if necessary.  */
	      gimple *op0_def = SSA_NAME_DEF_STMT (op0);
	      while (op0 != target
		     && gimple_assign_single_p (op0_def)
		     && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
		{
		  op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
		  if (TREE_CODE (op0) != SSA_NAME)
		    break;
		  op0_def = SSA_NAME_DEF_STMT (op0);
		}
	    }

	  /* If we did not find our target SSA_NAME, then this is not
	     an overflow test.  */
	  if (op0 != target)
	    return false;

	  /* The overflow test x CODE (x + CST) constrains x against
	     TYPE_MAX -/+ CST (depending on the original operand order).  */
	  tree type = TREE_TYPE (op0);
	  wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
	  tree inc = gimple_assign_rhs2 (op1_def);
	  if (reversed)
	    *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
	  else
	    *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
	  return true;
	}
    }
  return false;
}
2418 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2419 OP1's defining statement to see if it ultimately has the form
2420 OP0 CODE (OP0 PLUS INTEGER_CST)
2422 If so, return TRUE indicating this is an overflow test and store into
2423 *NEW_CST an updated constant that can be used in a narrowed range test.
2425 These statements are left as-is in the IL to facilitate discovery of
2426 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
2427 the alternate range representation is often useful within VRP. */
2429 bool
2430 overflow_comparison_p (tree_code code, tree name, tree val,
2431 bool use_equiv_p, tree *new_cst)
2433 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
2434 return true;
2435 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
2436 use_equiv_p, true, new_cst);
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by BSI.
   Invert the condition COND if INVERT is true.

   Queued asserts are accumulated into ASSERTS; besides the direct
   NAME COMP VAL assert, several derived asserts are registered by
   pattern-matching NAME's defining statement and its uses.  */

static void
register_edge_assert_for_2 (tree name, edge e,
			    enum tree_code cond_code,
			    tree cond_op0, tree cond_op1, bool invert,
			    vec<assert_info> &asserts)
{
  tree val;
  enum tree_code comp_code;

  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
						cond_op0,
						cond_op1,
						invert, &comp_code, &val))
    return;

  /* Queue the assert.  */
  tree x;
  if (overflow_comparison_p (comp_code, name, val, false, &x))
    {
      enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
				 ? GT_EXPR : LE_EXPR);
      add_assert_info (asserts, name, name, new_code, x);
    }
  add_assert_info (asserts, name, name, comp_code, val);

  /* In the case of NAME <= CST and NAME being defined as
     NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
     and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
     This catches range and anti-range tests.  */
  if ((comp_code == LE_EXPR
       || comp_code == GT_EXPR)
      && TREE_CODE (val) == INTEGER_CST
      && TYPE_UNSIGNED (TREE_TYPE (val)))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);
      tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;

      /* Extract CST2 from the (optional) addition.  */
      if (is_gimple_assign (def_stmt)
	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  cst2 = gimple_assign_rhs2 (def_stmt);
	  if (TREE_CODE (name2) == SSA_NAME
	      && TREE_CODE (cst2) == INTEGER_CST)
	    def_stmt = SSA_NAME_DEF_STMT (name2);
	}

      /* Extract NAME2 from the (optional) sign-changing cast.  */
      if (gimple_assign_cast_p (def_stmt))
	{
	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
	    name3 = gimple_assign_rhs1 (def_stmt);
	}

      /* If name3 is used later, create an ASSERT_EXPR for it.  */
      if (name3 != NULL_TREE
	  && TREE_CODE (name3) == SSA_NAME
	  && (cst2 == NULL_TREE
	      || TREE_CODE (cst2) == INTEGER_CST)
	  && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
	{
	  tree tmp;

	  /* Build an expression for the range test.  */
	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
	  if (cst2 != NULL_TREE)
	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
	  add_assert_info (asserts, name3, tmp, comp_code, val);
	}

      /* If name2 is used later, create an ASSERT_EXPR for it.  */
      if (name2 != NULL_TREE
	  && TREE_CODE (name2) == SSA_NAME
	  && TREE_CODE (cst2) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
	{
	  tree tmp;

	  /* Build an expression for the range test.  */
	  tmp = name2;
	  if (TREE_TYPE (name) != TREE_TYPE (name2))
	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
	  if (cst2 != NULL_TREE)
	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
	  add_assert_info (asserts, name2, tmp, comp_code, val);
	}
    }

  /* In the case of post-in/decrement tests like if (i++) ... and uses
     of the in/decremented value on the edge the extra name we want to
     assert for is not on the def chain of the name compared.  Instead
     it is in the set of use stmts.
     Similar cases happen for conversions that were simplified through
     fold_{sign_changed,widened}_comparison.  */
  if ((comp_code == NE_EXPR
       || comp_code == EQ_EXPR)
      && TREE_CODE (val) == INTEGER_CST)
    {
      imm_use_iterator ui;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
	{
	  if (!is_gimple_assign (use_stmt))
	    continue;

	  /* Cut off to use-stmts that are dominating the predecessor.  */
	  if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
	    continue;

	  tree name2 = gimple_assign_lhs (use_stmt);
	  if (TREE_CODE (name2) != SSA_NAME)
	    continue;

	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
	  tree cst;
	  if (code == PLUS_EXPR
	      || code == MINUS_EXPR)
	    {
	      /* name2 = name +- cst, so name2 comp_code val +- cst.  */
	      cst = gimple_assign_rhs2 (use_stmt);
	      if (TREE_CODE (cst) != INTEGER_CST)
		continue;
	      cst = int_const_binop (code, val, cst);
	    }
	  else if (CONVERT_EXPR_CODE_P (code))
	    {
	      /* For truncating conversions we cannot record
		 an inequality.  */
	      if (comp_code == NE_EXPR
		  && (TYPE_PRECISION (TREE_TYPE (name2))
		      < TYPE_PRECISION (TREE_TYPE (name))))
		continue;
	      cst = fold_convert (TREE_TYPE (name2), val);
	    }
	  else
	    continue;

	  if (TREE_OVERFLOW_P (cst))
	    cst = drop_tree_overflow (cst);
	  add_assert_info (asserts, name2, name2, comp_code, cst);
	}
    }

  if (TREE_CODE_CLASS (comp_code) == tcc_comparison
      && TREE_CODE (val) == INTEGER_CST)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);
      tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
      tree val2 = NULL_TREE;
      unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
      wide_int mask = wi::zero (prec);
      unsigned int nprec = prec;
      enum tree_code rhs_code = ERROR_MARK;

      if (is_gimple_assign (def_stmt))
	rhs_code = gimple_assign_rhs_code (def_stmt);

      /* In the case of NAME != CST1 where NAME = A +- CST2 we can
         assert that A != CST1 -+ CST2.  */
      if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
	  && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  if (TREE_CODE (op0) == SSA_NAME
	      && TREE_CODE (op1) == INTEGER_CST)
	    {
	      enum tree_code reverse_op = (rhs_code == PLUS_EXPR
					   ? MINUS_EXPR : PLUS_EXPR);
	      op1 = int_const_binop (reverse_op, val, op1);
	      if (TREE_OVERFLOW (op1))
		op1 = drop_tree_overflow (op1);
	      add_assert_info (asserts, op0, op0, comp_code, op1);
	    }
	}

      /* Add asserts for NAME cmp CST and NAME being defined
	 as NAME = (int) NAME2.  */
      if (!TYPE_UNSIGNED (TREE_TYPE (val))
	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
	  && gimple_assign_cast_p (def_stmt))
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  if (CONVERT_EXPR_CODE_P (rhs_code)
	      && TREE_CODE (name2) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && TYPE_UNSIGNED (TREE_TYPE (name2))
	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
		  || !tree_int_cst_equal (val,
					  TYPE_MIN_VALUE (TREE_TYPE (val)))))
	    {
	      tree tmp, cst;
	      enum tree_code new_comp_code = comp_code;

	      cst = fold_convert (TREE_TYPE (name2),
				  TYPE_MIN_VALUE (TREE_TYPE (val)));
	      /* Build an expression for the range test.  */
	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
				 fold_convert (TREE_TYPE (name2), val));
	      /* Turn a strict comparison into the adjacent non-strict
		 one so the biased constant stays representable.  */
	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
		{
		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
				     build_int_cst (TREE_TYPE (name2), 1));
		}
	      add_assert_info (asserts, name2, tmp, new_comp_code, cst);
	    }
	}

      /* Add asserts for NAME cmp CST and NAME being defined as
	 NAME = NAME2 >> CST2.

	 Extract CST2 from the right shift.  */
      if (rhs_code == RSHIFT_EXPR)
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  cst2 = gimple_assign_rhs2 (def_stmt);
	  if (TREE_CODE (name2) == SSA_NAME
	      && tree_fits_uhwi_p (cst2)
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
	      && type_has_mode_precision_p (TREE_TYPE (val)))
	    {
	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
	    }
	}
      /* Only usable when shifting VAL back right reproduces VAL,
	 i.e. the shift lost no information.  */
      if (val2 != NULL_TREE
	  && TREE_CODE (val2) == INTEGER_CST
	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
					    TREE_TYPE (val),
					    val2, cst2), val))
	{
	  enum tree_code new_comp_code = comp_code;
	  tree tmp, new_val;

	  tmp = name2;
	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
	    {
	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
		{
		  tree type = build_nonstandard_integer_type (prec, 1);
		  tmp = build1 (NOP_EXPR, type, name2);
		  val2 = fold_convert (type, val2);
		}
	      /* NAME2 == VAL2 iff (NAME2 - VAL2) is within MASK.  */
	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
	    }
	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
	    {
	      wide_int minval
		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
	      new_val = val2;
	      if (minval == wi::to_wide (new_val))
		new_val = NULL_TREE;
	    }
	  else
	    {
	      wide_int maxval
		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
	      mask |= wi::to_wide (val2);
	      if (wi::eq_p (mask, maxval))
		new_val = NULL_TREE;
	      else
		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
	    }

	  if (new_val)
	    add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
	}

      /* If we have a conversion that doesn't change the value of the source
         simply register the same assert for it.  */
      if (CONVERT_EXPR_CODE_P (rhs_code))
	{
	  wide_int rmin, rmax;
	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	      && TREE_CODE (rhs1) == SSA_NAME
	      /* Make sure the relation preserves the upper/lower boundary of
	         the range conservatively.  */
	      && (comp_code == NE_EXPR
		  || comp_code == EQ_EXPR
		  || (TYPE_SIGN (TREE_TYPE (name))
		      == TYPE_SIGN (TREE_TYPE (rhs1)))
		  || ((comp_code == LE_EXPR
		       || comp_code == LT_EXPR)
		      && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
		  || ((comp_code == GE_EXPR
		       || comp_code == GT_EXPR)
		      && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
	      /* And the conversion does not alter the value we compare
	         against and all values in rhs1 can be represented in
	         the converted to type.  */
	      && int_fits_type_p (val, TREE_TYPE (rhs1))
	      && ((TYPE_PRECISION (TREE_TYPE (name))
		   > TYPE_PRECISION (TREE_TYPE (rhs1)))
		  || (get_range_info (rhs1, &rmin, &rmax) == VR_RANGE
		      && wi::fits_to_tree_p (rmin, TREE_TYPE (name))
		      && wi::fits_to_tree_p (rmax, TREE_TYPE (name)))))
	    add_assert_info (asserts, rhs1, rhs1,
			     comp_code, fold_convert (TREE_TYPE (rhs1), val));
	}

      /* Add asserts for NAME cmp CST and NAME being defined as
	 NAME = NAME2 & CST2.

	 Extract CST2 from the and.

	 Also handle
	 NAME = (unsigned) NAME2;
	 casts where NAME's type is unsigned and has smaller precision
	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
      names[0] = NULL_TREE;
      names[1] = NULL_TREE;
      cst2 = NULL_TREE;
      if (rhs_code == BIT_AND_EXPR
	  || (CONVERT_EXPR_CODE_P (rhs_code)
	      && INTEGRAL_TYPE_P (TREE_TYPE (val))
	      && TYPE_UNSIGNED (TREE_TYPE (val))
	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
		 > prec))
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  if (rhs_code == BIT_AND_EXPR)
	    cst2 = gimple_assign_rhs2 (def_stmt);
	  else
	    {
	      /* Treat the narrowing cast as masking with the narrow
		 type's maximum value, at NAME2's precision.  */
	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
	    }
	  if (TREE_CODE (name2) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && TREE_CODE (cst2) == INTEGER_CST
	      && !integer_zerop (cst2)
	      && (nprec > 1
		  || TYPE_UNSIGNED (TREE_TYPE (val))))
	    {
	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
	      /* Look through a same-precision conversion feeding the
		 BIT_AND so we can assert on the pre-cast name too.  */
	      if (gimple_assign_cast_p (def_stmt2))
		{
		  names[1] = gimple_assign_rhs1 (def_stmt2);
		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
		      || TREE_CODE (names[1]) != SSA_NAME
		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
		      || (TYPE_PRECISION (TREE_TYPE (name2))
			  != TYPE_PRECISION (TREE_TYPE (names[1]))))
		    names[1] = NULL_TREE;
		}
	      names[0] = name2;
	    }
	}
      if (names[0] || names[1])
	{
	  wide_int minv, maxv, valv, cst2v;
	  wide_int tem, sgnbit;
	  bool valid_p = false, valn, cst2n;
	  enum tree_code ccode = comp_code;

	  valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
	  cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
	  /* If CST2 doesn't have most significant bit set,
	     but VAL is negative, we have comparison like
	     if ((x & 0x123) > -4) (always true).  Just give up.  */
	  if (!cst2n && valn)
	    ccode = ERROR_MARK;
	  if (cst2n)
	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
	  else
	    sgnbit = wi::zero (nprec);
	  minv = valv & cst2v;
	  switch (ccode)
	    {
	    case EQ_EXPR:
	      /* Minimum unsigned value for equality is VAL & CST2
		 (should be equal to VAL, otherwise we probably should
		 have folded the comparison into false) and
		 maximum unsigned value is VAL | ~CST2.  */
	      maxv = valv | ~cst2v;
	      valid_p = true;
	      break;

	    case NE_EXPR:
	      tem = valv | ~cst2v;
	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
	      if (valv == 0)
		{
		  cst2n = false;
		  sgnbit = wi::zero (nprec);
		  goto gt_expr;
		}
	      /* If (VAL | ~CST2) is all ones, handle it as
		 (X & CST2) < VAL.  */
	      if (tem == -1)
		{
		  cst2n = false;
		  valn = false;
		  sgnbit = wi::zero (nprec);
		  goto lt_expr;
		}
	      if (!cst2n && wi::neg_p (cst2v))
		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
	      if (sgnbit != 0)
		{
		  if (valv == sgnbit)
		    {
		      cst2n = true;
		      valn = true;
		      goto gt_expr;
		    }
		  if (tem == wi::mask (nprec - 1, false, nprec))
		    {
		      cst2n = true;
		      goto lt_expr;
		    }
		  if (!cst2n)
		    sgnbit = wi::zero (nprec);
		}
	      break;

	    case GE_EXPR:
	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
		 is VAL and maximum unsigned value is ~0.  For signed
		 comparison, if CST2 doesn't have most significant bit
		 set, handle it similarly.  If CST2 has MSB set,
		 the minimum is the same, and maximum is ~0U/2.  */
	      if (minv != valv)
		{
		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
		     VAL.  */
		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (minv == valv)
		    break;
		}
	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
	      valid_p = true;
	      break;

	    case GT_EXPR:
	    gt_expr:
	      /* Find out smallest MINV where MINV > VAL
		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
	      if (minv == valv)
		break;
	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
	      valid_p = true;
	      break;

	    case LE_EXPR:
	      /* Minimum unsigned value for <= is 0 and maximum
		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
		 Otherwise, find smallest VAL2 where VAL2 > VAL
		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
		 as maximum.
		 For signed comparison, if CST2 doesn't have most
		 significant bit set, handle it similarly.  If CST2 has
		 MSB set, the maximum is the same and minimum is INT_MIN.  */
	      if (minv == valv)
		maxv = valv;
	      else
		{
		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (maxv == valv)
		    break;
		  maxv -= 1;
		}
	      maxv |= ~cst2v;
	      minv = sgnbit;
	      valid_p = true;
	      break;

	    case LT_EXPR:
	    lt_expr:
	      /* Minimum unsigned value for < is 0 and maximum
		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
		 Otherwise, find smallest VAL2 where VAL2 > VAL
		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
		 as maximum.
		 For signed comparison, if CST2 doesn't have most
		 significant bit set, handle it similarly.  If CST2 has
		 MSB set, the maximum is the same and minimum is INT_MIN.  */
	      if (minv == valv)
		{
		  if (valv == sgnbit)
		    break;
		  maxv = valv;
		}
	      else
		{
		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (maxv == valv)
		    break;
		}
	      maxv -= 1;
	      maxv |= ~cst2v;
	      minv = sgnbit;
	      valid_p = true;
	      break;

	    default:
	      break;
	    }
	  /* Register the range [MINV, MAXV] as an unsigned LE_EXPR
	     assert on NAME - MINV, unless the range covers everything.  */
	  if (valid_p
	      && (maxv - minv) != -1)
	    {
	      tree tmp, new_val, type;
	      int i;

	      for (i = 0; i < 2; i++)
		if (names[i])
		  {
		    wide_int maxv2 = maxv;
		    tmp = names[i];
		    type = TREE_TYPE (names[i]);
		    if (!TYPE_UNSIGNED (type))
		      {
			type = build_nonstandard_integer_type (nprec, 1);
			tmp = build1 (NOP_EXPR, type, names[i]);
		      }
		    if (minv != 0)
		      {
			tmp = build2 (PLUS_EXPR, type, tmp,
				      wide_int_to_tree (type, -minv));
			maxv2 = maxv - minv;
		      }
		    new_val = wide_int_to_tree (type, maxv2);
		    add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
		  }
	    }
	}
    }
}
/* OP is an operand of a truth value expression which is known to have
   a particular value.  Register any asserts for OP and for any
   operands in OP's defining statement.

   If CODE is EQ_EXPR, then we want to register OP is zero (false),
   if CODE is NE_EXPR, then we want to register OP is nonzero (true).  */

static void
register_edge_assert_for_1 (tree op, enum tree_code code,
			    edge e, vec<assert_info> &asserts)
{
  gimple *op_def;
  tree val;
  enum tree_code rhs_code;

  /* We only care about SSA_NAMEs.  */
  if (TREE_CODE (op) != SSA_NAME)
    return;

  /* We know that OP will have a zero or nonzero value.  */
  val = build_int_cst (TREE_TYPE (op), 0);
  add_assert_info (asserts, op, op, code, val);

  /* Now look at how OP is set.  If it's set from a comparison,
     a truth operation or some bit operations, then we may be able
     to register information about the operands of that assignment.  */
  op_def = SSA_NAME_DEF_STMT (op);
  if (gimple_code (op_def) != GIMPLE_ASSIGN)
    return;

  rhs_code = gimple_assign_rhs_code (op_def);

  if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
    {
      /* OP = (a COMP b): registering OP == 0 means the comparison is
	 false, so invert it for the operand asserts.  */
      bool invert = (code == EQ_EXPR ? true : false);
      tree op0 = gimple_assign_rhs1 (op_def);
      tree op1 = gimple_assign_rhs2 (op_def);

      if (TREE_CODE (op0) == SSA_NAME)
	register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
      if (TREE_CODE (op1) == SSA_NAME)
	register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
    }
  else if ((code == NE_EXPR
	    && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
	   || (code == EQ_EXPR
	       && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
    {
      /* Recurse on each operand.  */
      tree op0 = gimple_assign_rhs1 (op_def);
      tree op1 = gimple_assign_rhs2 (op_def);
      if (TREE_CODE (op0) == SSA_NAME
	  && has_single_use (op0))
	register_edge_assert_for_1 (op0, code, e, asserts);
      if (TREE_CODE (op1) == SSA_NAME
	  && has_single_use (op1))
	register_edge_assert_for_1 (op1, code, e, asserts);
    }
  else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
	   && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
    {
      /* Recurse, flipping CODE.  */
      code = invert_tree_comparison (code, false);
      register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
    }
  else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
    {
      /* Recurse through the copy.  */
      register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
    }
  else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
    {
      /* Recurse through the type conversion, unless it is a narrowing
	 conversion or conversion from non-integral type.  */
      tree rhs = gimple_assign_rhs1 (op_def);
      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
	  && (TYPE_PRECISION (TREE_TYPE (rhs))
	      <= TYPE_PRECISION (TREE_TYPE (op))))
	register_edge_assert_for_1 (rhs, code, e, asserts);
    }
}
/* Check if comparison
     NAME COND_OP INTEGER_CST
   has a form of
     (X & 11...100..0) COND_OP XX...X00...0
   Such comparison can yield assertions like
     X >= XX...X00...0
     X <= XX...X11...1
   in case of COND_OP being EQ_EXPR or
     X < XX...X00...0
     X > XX...X11...1
   in case of NE_EXPR.

   On success store X into *NEW_NAME, the range bounds into *LOW/*HIGH
   and the comparison codes to use with them into *LOW_CODE/*HIGH_CODE
   (ERROR_MARK/0 meaning that side needs no test).  */

static bool
is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
		      tree *new_name, tree *low, enum tree_code *low_code,
		      tree *high, enum tree_code *high_code)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (name);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
    return false;

  tree t = gimple_assign_rhs1 (def_stmt);
  tree maskt = gimple_assign_rhs2 (def_stmt);
  if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
    return false;

  wi::tree_to_wide_ref mask = wi::to_wide (maskt);
  wide_int inv_mask = ~mask;
  /* Must have been removed by now so don't bother optimizing.  */
  if (mask == 0 || inv_mask == 0)
    return false;

  /* Assume VALT is INTEGER_CST.  */
  wi::tree_to_wide_ref val = wi::to_wide (valt);

  /* The mask must be 11...100...0 (inv_mask + 1 a power of two) and
     VAL must have no bits outside the mask.  */
  if ((inv_mask & (inv_mask + 1)) != 0
      || (val & mask) != val)
    return false;

  bool is_range = cond_code == EQ_EXPR;

  tree type = TREE_TYPE (t);
  wide_int min = wi::min_value (type),
    max = wi::max_value (type);

  if (is_range)
    {
      /* Drop a bound test that is trivially true at the type boundary.  */
      *low_code = val == min ? ERROR_MARK : GE_EXPR;
      *high_code = val == max ? ERROR_MARK : LE_EXPR;
    }
  else
    {
      /* We can still generate assertion if one of alternatives
	 is known to always be false.  */
      if (val == min)
	{
	  *low_code = (enum tree_code) 0;
	  *high_code = GT_EXPR;
	}
      else if ((val | inv_mask) == max)
	{
	  *low_code = LT_EXPR;
	  *high_code = (enum tree_code) 0;
	}
      else
	return false;
    }

  *new_name = t;
  *low = wide_int_to_tree (type, val);
  *high = wide_int_to_tree (type, val | inv_mask);

  return true;
}
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by
   SI.  Collected asserts are appended to ASSERTS.  */

void
register_edge_assert_for (tree name, edge e,
			  enum tree_code cond_code, tree cond_op0,
			  tree cond_op1, vec<assert_info> &asserts)
{
  tree val;
  enum tree_code comp_code;
  /* On the false edge of the branch the condition must be inverted.  */
  bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
    return;

  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
						cond_op0, cond_op1,
						is_else_edge,
						&comp_code, &val))
    return;

  /* Register ASSERT_EXPRs for name.  */
  register_edge_assert_for_2 (name, e, cond_code, cond_op0,
			      cond_op1, is_else_edge, asserts);


  /* If COND is effectively an equality test of an SSA_NAME against
     the value zero or one, then we may be able to assert values
     for SSA_NAMEs which flow into COND.  */

  /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
     statement of NAME we can assert both operands of the BIT_AND_EXPR
     have nonzero value.  */
  if (((comp_code == EQ_EXPR && integer_onep (val))
       || (comp_code == NE_EXPR && integer_zerop (val))))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);

      if (is_gimple_assign (def_stmt)
	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
	  register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
	}
    }

  /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
     statement of NAME we can assert both operands of the BIT_IOR_EXPR
     have zero value.  */
  if (((comp_code == EQ_EXPR && integer_zerop (val))
       || (comp_code == NE_EXPR && integer_onep (val))))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);

      /* For BIT_IOR_EXPR only if NAME == 0 both operands have
	 necessarily zero value, or if type-precision is one.  */
      if (is_gimple_assign (def_stmt)
	  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
	      && (TYPE_PRECISION (TREE_TYPE (name)) == 1
		  || comp_code == EQ_EXPR)))
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
	  register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
	}
    }

  /* Sometimes we can infer ranges from (NAME & MASK) == VALUE.  */
  if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
      && TREE_CODE (val) == INTEGER_CST)
    {
      enum tree_code low_code, high_code;
      tree low, high;
      if (is_masked_range_test (name, val, comp_code, &name, &low,
				&low_code, &high, &high_code))
	{
	  if (low_code != ERROR_MARK)
	    register_edge_assert_for_2 (name, e, low_code, name,
					low, /*invert*/false, asserts);
	  if (high_code != ERROR_MARK)
	    register_edge_assert_for_2 (name, e, high_code, name,
					high, /*invert*/false, asserts);
	}
    }
}
3239 /* Finish found ASSERTS for E and register them at GSI. */
3241 static void
3242 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
3243 vec<assert_info> &asserts)
3245 for (unsigned i = 0; i < asserts.length (); ++i)
3246 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3247 reachable from E. */
3248 if (live_on_edge (e, asserts[i].name))
3249 register_new_assert_for (asserts[i].name, asserts[i].expr,
3250 asserts[i].comp_code, asserts[i].val,
3251 NULL, e, gsi);
/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a COND_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.  */

static void
find_conditional_asserts (basic_block bb, gcond *last)
{
  gimple_stmt_iterator bsi;
  tree op;
  edge_iterator ei;
  edge e;
  ssa_op_iter iter;

  bsi = gsi_for_stmt (last);

  /* Look for uses of the operands in each of the sub-graphs
     rooted at BB.  We need to check each of the outgoing edges
     separately, so that we know what kind of ASSERT_EXPR to
     insert.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      /* Self-loop edges cannot carry useful asserts.  */
      if (e->dest == bb)
	continue;

      /* Register the necessary assertions for each operand in the
	 conditional predicate.  */
      auto_vec<assert_info, 8> asserts;
      FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
	register_edge_assert_for (op, e,
				  gimple_cond_code (last),
				  gimple_cond_lhs (last),
				  gimple_cond_rhs (last), asserts);
      finish_register_edge_assert_for (e, bsi, asserts);
    }
}
/* Pairing of a switch case label with the basic block it jumps to,
   used for sorting the labels of a GIMPLE_SWITCH.  */

struct case_info
{
  tree expr;		/* The CASE_LABEL_EXPR.  */
  basic_block bb;	/* Destination block of this case.  */
};
3302 /* Compare two case labels sorting first by the destination bb index
3303 and then by the case value. */
3305 static int
3306 compare_case_labels (const void *p1, const void *p2)
3308 const struct case_info *ci1 = (const struct case_info *) p1;
3309 const struct case_info *ci2 = (const struct case_info *) p2;
3310 int idx1 = ci1->bb->index;
3311 int idx2 = ci2->bb->index;
3313 if (idx1 < idx2)
3314 return -1;
3315 else if (idx1 == idx2)
3317 /* Make sure the default label is first in a group. */
3318 if (!CASE_LOW (ci1->expr))
3319 return -1;
3320 else if (!CASE_LOW (ci2->expr))
3321 return 1;
3322 else
3323 return tree_int_cst_compare (CASE_LOW (ci1->expr),
3324 CASE_LOW (ci2->expr));
3326 else
3327 return 1;
3330 /* Determine whether the outgoing edges of BB should receive an
3331 ASSERT_EXPR for each of the operands of BB's LAST statement.
3332 The last statement of BB must be a SWITCH_EXPR.
3334 If any of the sub-graphs rooted at BB have an interesting use of
3335 the predicate operands, an assert location node is added to the
3336 list of assertions for the corresponding operands. */
3338 static void
3339 find_switch_asserts (basic_block bb, gswitch *last)
3341 gimple_stmt_iterator bsi;
3342 tree op;
3343 edge e;
3344 struct case_info *ci;
3345 size_t n = gimple_switch_num_labels (last);
3346 #if GCC_VERSION >= 4000
3347 unsigned int idx;
3348 #else
3349 /* Work around GCC 3.4 bug (PR 37086). */
3350 volatile unsigned int idx;
3351 #endif
3353 bsi = gsi_for_stmt (last);
3354 op = gimple_switch_index (last);
/* Only an SSA_NAME index can be constrained by assertions.  */
3355 if (TREE_CODE (op) != SSA_NAME)
3356 return;
3358 /* Build a vector of case labels sorted by destination label. */
3359 ci = XNEWVEC (struct case_info, n);
3360 for (idx = 0; idx < n; ++idx)
3362 ci[idx].expr = gimple_switch_label (last, idx);
3363 ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
/* Label 0 of a GIMPLE switch is the default label; grab its edge
   before qsort reorders the vector.  */
3365 edge default_edge = find_edge (bb, ci[0].bb);
3366 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3368 for (idx = 0; idx < n; ++idx)
3370 tree min, max;
3371 tree cl = ci[idx].expr;
3372 basic_block cbb = ci[idx].bb;
3374 min = CASE_LOW (cl);
3375 max = CASE_HIGH (cl);
3377 /* If there are multiple case labels with the same destination
3378 we need to combine them to a single value range for the edge. */
3379 if (idx + 1 < n && cbb == ci[idx + 1].bb)
3381 /* Skip labels until the last of the group. */
3382 do {
3383 ++idx;
3384 } while (idx < n && cbb == ci[idx].bb);
3385 --idx;
3387 /* Pick up the maximum of the case label range. */
3388 if (CASE_HIGH (ci[idx].expr))
3389 max = CASE_HIGH (ci[idx].expr);
3390 else
3391 max = CASE_LOW (ci[idx].expr);
3394 /* Can't extract a useful assertion out of a range that includes the
3395 default label. */
3396 if (min == NULL_TREE)
3397 continue;
3399 /* Find the edge to register the assert expr on. */
3400 e = find_edge (bb, cbb);
3402 /* Register the necessary assertions for the operand in the
3403 SWITCH_EXPR. */
/* A ranged case [MIN, MAX] yields OP >= MIN plus OP <= MAX below;
   a single-value case yields OP == MIN.  */
3404 auto_vec<assert_info, 8> asserts;
3405 register_edge_assert_for (op, e,
3406 max ? GE_EXPR : EQ_EXPR,
3407 op, fold_convert (TREE_TYPE (op), min),
3408 asserts);
3409 if (max)
3410 register_edge_assert_for (op, e, LE_EXPR, op,
3411 fold_convert (TREE_TYPE (op), max),
3412 asserts);
3413 finish_register_edge_assert_for (e, bsi, asserts);
3416 XDELETEVEC (ci);
3418 if (!live_on_edge (default_edge, op))
3419 return;
3421 /* Now register along the default label assertions that correspond to the
3422 anti-range of each label. */
/* The number of default-edge assertions is capped by a param to keep
   compile time bounded on huge switches.  */
3423 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
3424 if (insertion_limit == 0)
3425 return;
3427 /* We can't do this if the default case shares a label with another case. */
3428 tree default_cl = gimple_switch_default_label (last);
3429 for (idx = 1; idx < n; idx++)
3431 tree min, max;
3432 tree cl = gimple_switch_label (last, idx);
3433 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3434 continue;
3436 min = CASE_LOW (cl);
3437 max = CASE_HIGH (cl);
3439 /* Combine contiguous case ranges to reduce the number of assertions
3440 to insert. */
3441 for (idx = idx + 1; idx < n; idx++)
3443 tree next_min, next_max;
3444 tree next_cl = gimple_switch_label (last, idx);
3445 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3446 break;
3448 next_min = CASE_LOW (next_cl);
3449 next_max = CASE_HIGH (next_cl);
/* Adjacent ranges (next MIN is exactly one past the current MAX)
   merge into a single wider range.  */
3451 wide_int difference = (wi::to_wide (next_min)
3452 - wi::to_wide (max ? max : min));
3453 if (wi::eq_p (difference, 1))
3454 max = next_max ? next_max : next_min;
3455 else
3456 break;
3458 idx--;
3460 if (max == NULL_TREE)
3462 /* Register the assertion OP != MIN. */
3463 auto_vec<assert_info, 8> asserts;
3464 min = fold_convert (TREE_TYPE (op), min);
3465 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3466 asserts);
3467 finish_register_edge_assert_for (default_edge, bsi, asserts);
3469 else
3471 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3472 which will give OP the anti-range ~[MIN,MAX]. */
3473 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3474 min = fold_convert (TREE_TYPE (uop), min);
3475 max = fold_convert (TREE_TYPE (uop), max);
3477 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3478 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3479 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3480 NULL, default_edge, bsi);
3483 if (--insertion_limit == 0)
3484 break;
3489 /* Traverse all the statements in block BB looking for statements that
3490 may generate useful assertions for the SSA names in their operand.
3491 If a statement produces a useful assertion A for name N_i, then the
3492 list of assertions already generated for N_i is scanned to
3493 determine if A is actually needed.
3495 If N_i already had the assertion A at a location dominating the
3496 current location, then nothing needs to be done. Otherwise, the
3497 new location for A is recorded instead.
3499 1- For every statement S in BB, all the variables used by S are
3500 added to bitmap FOUND_IN_SUBGRAPH.
3502 2- If statement S uses an operand N in a way that exposes a known
3503 value range for N, then if N was not already generated by an
3504 ASSERT_EXPR, create a new assert location for N. For instance,
3505 if N is a pointer and the statement dereferences it, we can
3506 assume that N is not NULL.
3508 3- COND_EXPRs are a special case of #2. We can derive range
3509 information from the predicate but need to insert different
3510 ASSERT_EXPRs for each of the sub-graphs rooted at the
3511 conditional block. If the last statement of BB is a conditional
3512 expression of the form 'X op Y', then
3514 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3516 b) If the conditional is the only entry point to the sub-graph
3517 corresponding to the THEN_CLAUSE, recurse into it. On
3518 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3519 an ASSERT_EXPR is added for the corresponding variable.
3521 c) Repeat step (b) on the ELSE_CLAUSE.
3523 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3525 For instance,
3527 if (a == 9)
3528 b = a;
3529 else
3530 b = c + 1;
3532 In this case, an assertion on the THEN clause is useful to
3533 determine that 'a' is always 9 on that edge. However, an assertion
3534 on the ELSE clause would be unnecessary.
3536 4- If BB does not end in a conditional expression, then we recurse
3537 into BB's dominator children.
3539 At the end of the recursive traversal, every SSA name will have a
3540 list of locations where ASSERT_EXPRs should be added. When a new
3541 location for name N is found, it is registered by calling
3542 register_new_assert_for. That function keeps track of all the
3543 registered assertions to prevent adding unnecessary assertions.
3544 For instance, if a pointer P_4 is dereferenced more than once in a
3545 dominator tree, only the location dominating all the dereference of
3546 P_4 will receive an ASSERT_EXPR. */
3548 static void
3549 find_assert_locations_1 (basic_block bb, sbitmap live)
3551 gimple *last;
3553 last = last_stmt (bb);
3555 /* If BB's last statement is a conditional statement involving integer
3556 operands, determine if we need to add ASSERT_EXPRs. */
3557 if (last
3558 && gimple_code (last) == GIMPLE_COND
3559 && !fp_predicate (last)
3560 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3561 find_conditional_asserts (bb, as_a <gcond *> (last));
3563 /* If BB's last statement is a switch statement involving integer
3564 operands, determine if we need to add ASSERT_EXPRs. */
3565 if (last
3566 && gimple_code (last) == GIMPLE_SWITCH
3567 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3568 find_switch_asserts (bb, as_a <gswitch *> (last));
3570 /* Traverse all the statements in BB marking used names and looking
3571 for statements that may infer assertions for their used operands. */
/* The walk is backwards (gsi_last_bb / gsi_prev) so that LIVE always
   reflects names live *after* the statement being processed.  */
3572 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3573 gsi_prev (&si))
3575 gimple *stmt;
3576 tree op;
3577 ssa_op_iter i;
3579 stmt = gsi_stmt (si);
/* Debug stmts must not influence where asserts are placed.  */
3581 if (is_gimple_debug (stmt))
3582 continue;
3584 /* See if we can derive an assertion for any of STMT's operands. */
3585 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3587 tree value;
3588 enum tree_code comp_code;
3590 /* If op is not live beyond this stmt, do not bother to insert
3591 asserts for it. */
3592 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
3593 continue;
3595 /* If OP is used in such a way that we can infer a value
3596 range for it, and we don't find a previous assertion for
3597 it, create a new assertion location node for OP. */
3598 if (infer_value_range (stmt, op, &comp_code, &value))
3600 /* If we are able to infer a nonzero value range for OP,
3601 then walk backwards through the use-def chain to see if OP
3602 was set via a typecast.
3604 If so, then we can also infer a nonzero value range
3605 for the operand of the NOP_EXPR. */
3606 if (comp_code == NE_EXPR && integer_zerop (value))
3608 tree t = op;
3609 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
/* Strip pointer-to-pointer conversions: a nonzero result
   implies the converted source pointer is nonzero too.  */
3611 while (is_gimple_assign (def_stmt)
3612 && CONVERT_EXPR_CODE_P
3613 (gimple_assign_rhs_code (def_stmt))
3614 && TREE_CODE
3615 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3616 && POINTER_TYPE_P
3617 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3619 t = gimple_assign_rhs1 (def_stmt);
3620 def_stmt = SSA_NAME_DEF_STMT (t);
3622 /* Note we want to register the assert for the
3623 operand of the NOP_EXPR after SI, not after the
3624 conversion. */
3625 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3626 register_new_assert_for (t, t, comp_code, value,
3627 bb, NULL, si);
3631 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3635 /* Update live. */
/* Uses become live before this stmt; definitions die here.  */
3636 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3637 bitmap_set_bit (live, SSA_NAME_VERSION (op));
3638 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3639 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3642 /* Traverse all PHI nodes in BB, updating live. */
3643 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3644 gsi_next (&si))
3646 use_operand_p arg_p;
3647 ssa_op_iter i;
3648 gphi *phi = si.phi ();
3649 tree res = gimple_phi_result (phi);
3651 if (virtual_operand_p (res))
3652 continue;
3654 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3656 tree arg = USE_FROM_PTR (arg_p);
3657 if (TREE_CODE (arg) == SSA_NAME)
3658 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3661 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3665 /* Do an RPO walk over the function computing SSA name liveness
3666 on-the-fly and deciding on assert expressions to insert. */
3668 static void
3669 find_assert_locations (void)
/* RPO holds the reverse post-order; BB_RPO maps block index -> RPO
   position; LAST_RPO records, per block, the RPO position of the last
   block still needing its liveness (used to free bitmaps early).  */
3671 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3672 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3673 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3674 int rpo_cnt, i;
3676 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3677 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3678 for (i = 0; i < rpo_cnt; ++i)
3679 bb_rpo[rpo[i]] = i;
3681 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3682 the order we compute liveness and insert asserts we otherwise
3683 fail to insert asserts into the loop latch. */
3684 loop_p loop;
3685 FOR_EACH_LOOP (loop, 0)
3687 i = loop->latch->index;
3688 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3689 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3690 !gsi_end_p (gsi); gsi_next (&gsi))
3692 gphi *phi = gsi.phi ();
3693 if (virtual_operand_p (gimple_phi_result (phi)))
3694 continue;
3695 tree arg = gimple_phi_arg_def (phi, j);
3696 if (TREE_CODE (arg) == SSA_NAME)
3698 if (live[i] == NULL)
3700 live[i] = sbitmap_alloc (num_ssa_names);
3701 bitmap_clear (live[i]);
3703 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
/* Walk blocks in reverse RPO so each block is processed before its
   (non-back-edge) predecessors receive its liveness.  */
3708 for (i = rpo_cnt - 1; i >= 0; --i)
3710 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3711 edge e;
3712 edge_iterator ei;
3714 if (!live[rpo[i]])
3716 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
3717 bitmap_clear (live[rpo[i]]);
3720 /* Process BB and update the live information with uses in
3721 this block. */
3722 find_assert_locations_1 (bb, live[rpo[i]]);
3724 /* Merge liveness into the predecessor blocks and free it. */
3725 if (!bitmap_empty_p (live[rpo[i]]))
3727 int pred_rpo = i;
3728 FOR_EACH_EDGE (e, ei, bb->preds)
3730 int pred = e->src->index;
/* Back edges are not propagated here; the loop-latch pre-seeding
   above compensates for that.  */
3731 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3732 continue;
3734 if (!live[pred])
3736 live[pred] = sbitmap_alloc (num_ssa_names);
3737 bitmap_clear (live[pred]);
3739 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
3741 if (bb_rpo[pred] < pred_rpo)
3742 pred_rpo = bb_rpo[pred];
3745 /* Record the RPO number of the last visited block that needs
3746 live information from this block. */
3747 last_rpo[rpo[i]] = pred_rpo;
3749 else
3751 sbitmap_free (live[rpo[i]]);
3752 live[rpo[i]] = NULL;
3755 /* We can free all successors live bitmaps if all their
3756 predecessors have been visited already. */
3757 FOR_EACH_EDGE (e, ei, bb->succs)
3758 if (last_rpo[e->dest->index] == i
3759 && live[e->dest->index])
3761 sbitmap_free (live[e->dest->index]);
3762 live[e->dest->index] = NULL;
/* Release the order arrays and any remaining liveness bitmaps.  */
3766 XDELETEVEC (rpo);
3767 XDELETEVEC (bb_rpo);
3768 XDELETEVEC (last_rpo);
3769 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
3770 if (live[i])
3771 sbitmap_free (live[i]);
3772 XDELETEVEC (live);
3775 /* Create an ASSERT_EXPR for NAME and insert it in the location
3776 indicated by LOC. Return true if we made any edge insertions. */
/* Edge insertions are deferred; the caller must later run
   gsi_commit_edge_inserts (see process_assert_insertions).  */
3778 static bool
3779 process_assert_insertions_for (tree name, assert_locus *loc)
3781 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3782 gimple *stmt;
3783 tree cond;
3784 gimple *assert_stmt;
3785 edge_iterator ei;
3786 edge e;
3788 /* If we have X <=> X do not insert an assert expr for that. */
3789 if (loc->expr == loc->val)
3790 return false;
3792 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3793 assert_stmt = build_assert_expr_for (cond, name);
3794 if (loc->e)
3796 /* We have been asked to insert the assertion on an edge. This
3797 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3798 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3799 || (gimple_code (gsi_stmt (loc->si))
3800 == GIMPLE_SWITCH));
3802 gsi_insert_on_edge (loc->e, assert_stmt);
3803 return true;
3806 /* If the stmt iterator points at the end then this is an insertion
3807 at the beginning of a block. */
3808 if (gsi_end_p (loc->si))
3810 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3811 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3812 return false;
3815 /* Otherwise, we can insert right after LOC->SI iff the
3816 statement must not be the last statement in the block. */
3817 stmt = gsi_stmt (loc->si);
3818 if (!stmt_ends_bb_p (stmt))
3820 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3821 return false;
3824 /* If STMT must be the last statement in BB, we can only insert new
3825 assertions on the non-abnormal edge out of BB. Note that since
3826 STMT is not control flow, there may only be one non-abnormal/eh edge
3827 out of BB. */
3828 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3829 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3831 gsi_insert_on_edge (e, assert_stmt);
3832 return true;
/* Reached only if no normal outgoing edge exists, which the comment
   above argues cannot happen.  */
3835 gcc_unreachable ();
3838 /* Qsort helper for sorting assert locations. If stable is true, don't
3839 use iterative_hash_expr because it can be unstable for -fcompare-debug,
3840 on the other side some pointers might be NULL. */
/* Sort keys, in order: NULL entries last (stable mode only), edge
   asserts before block asserts, destination block index, comparison
   code, then an expression hash (non-stable mode) with a final
   source/bb index tie-break.  */
3842 template <bool stable>
3843 static int
3844 compare_assert_loc (const void *pa, const void *pb)
3846 assert_locus * const a = *(assert_locus * const *)pa;
3847 assert_locus * const b = *(assert_locus * const *)pb;
3849 /* If stable, some asserts might be optimized away already, sort
3850 them last. */
3851 if (stable)
3853 if (a == NULL)
3854 return b != NULL;
3855 else if (b == NULL)
3856 return -1;
3859 if (a->e == NULL && b->e != NULL)
3860 return 1;
3861 else if (a->e != NULL && b->e == NULL)
3862 return -1;
3864 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3865 no need to test both a->e and b->e. */
3867 /* Sort after destination index. */
3868 if (a->e == NULL)
3870 else if (a->e->dest->index > b->e->dest->index)
3871 return 1;
3872 else if (a->e->dest->index < b->e->dest->index)
3873 return -1;
3875 /* Sort after comp_code. */
3876 if (a->comp_code > b->comp_code)
3877 return 1;
3878 else if (a->comp_code < b->comp_code)
3879 return -1;
3881 hashval_t ha, hb;
3883 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3884 uses DECL_UID of the VAR_DECL, so sorting might differ between
3885 -g and -g0. When doing the removal of redundant assert exprs
3886 and commonization to successors, this does not matter, but for
3887 the final sort needs to be stable. */
3888 if (stable)
3890 ha = 0;
3891 hb = 0;
3893 else
3895 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3896 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3899 /* Break the tie using hashing and source/bb index. */
3900 if (ha == hb)
3901 return (a->e != NULL
3902 ? a->e->src->index - b->e->src->index
3903 : a->bb->index - b->bb->index);
3904 return ha > hb ? 1 : -1;
3907 /* Process all the insertions registered for every name N_i registered
3908 in NEED_ASSERT_FOR. The list of assertions to be inserted are
3909 found in ASSERTS_FOR[i]. */
3911 static void
3912 process_assert_insertions (void)
3914 unsigned i;
3915 bitmap_iterator bi;
3916 bool update_edges_p = false;
3917 int num_asserts = 0;
3919 if (dump_file && (dump_flags & TDF_DETAILS))
3920 dump_all_asserts (dump_file);
3922 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3924 assert_locus *loc = asserts_for[i];
3925 gcc_assert (loc);
/* Flatten the linked list into a vector so it can be sorted and have
   redundant entries nulled in place.  */
3927 auto_vec<assert_locus *, 16> asserts;
3928 for (; loc; loc = loc->next)
3929 asserts.safe_push (loc);
3930 asserts.qsort (compare_assert_loc<false>);
3932 /* Push down common asserts to successors and remove redundant ones. */
/* COMMON tracks the current run of equal edge-asserts starting at
   index COMMONJ; ECNT counts distinct edges in the run.  */
3933 unsigned ecnt = 0;
3934 assert_locus *common = NULL;
3935 unsigned commonj = 0;
3936 for (unsigned j = 0; j < asserts.length (); ++j)
3938 loc = asserts[j];
3939 if (! loc->e)
3940 common = NULL;
3941 else if (! common
3942 || loc->e->dest != common->e->dest
3943 || loc->comp_code != common->comp_code
3944 || ! operand_equal_p (loc->val, common->val, 0)
3945 || ! operand_equal_p (loc->expr, common->expr, 0))
3947 commonj = j;
3948 common = loc;
3949 ecnt = 1;
3951 else if (loc->e == asserts[j-1]->e)
3953 /* Remove duplicate asserts. */
3954 if (commonj == j - 1)
3956 commonj = j;
3957 common = loc;
3959 free (asserts[j-1]);
3960 asserts[j-1] = NULL;
3962 else
3964 ecnt++;
3965 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
3967 /* We have the same assertion on all incoming edges of a BB.
3968 Insert it at the beginning of that block. */
3969 loc->bb = loc->e->dest;
3970 loc->e = NULL;
3971 loc->si = gsi_none ();
3972 common = NULL;
3973 /* Clear asserts commoned. */
3974 for (; commonj != j; ++commonj)
3975 if (asserts[commonj])
3977 free (asserts[commonj]);
3978 asserts[commonj] = NULL;
3984 /* The asserts vector sorting above might be unstable for
3985 -fcompare-debug, sort again to ensure a stable sort. */
3986 asserts.qsort (compare_assert_loc<true>);
3987 for (unsigned j = 0; j < asserts.length (); ++j)
3989 loc = asserts[j];
/* The stable sort moved all NULL (removed) entries to the end, so
   the first NULL terminates the walk.  */
3990 if (! loc)
3991 break;
3992 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
3993 num_asserts++;
3994 free (loc);
3998 if (update_edges_p)
3999 gsi_commit_edge_inserts ();
4001 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4002 num_asserts);
4006 /* Traverse the flowgraph looking for conditional jumps to insert range
4007 expressions. These range expressions are meant to provide information
4008 to optimizations that need to reason in terms of value ranges. They
4009 will not be expanded into RTL. For instance, given:
4011 x = ...
4012 y = ...
4013 if (x < y)
4014 y = x - 2;
4015 else
4016 x = y + 3;
4018 this pass will transform the code into:
4020 x = ...
4021 y = ...
4022 if (x < y)
4024 x = ASSERT_EXPR <x, x < y>
4025 y = x - 2
4027 else
4029 y = ASSERT_EXPR <y, x >= y>
4030 x = y + 3
4033 The idea is that once copy and constant propagation have run, other
4034 optimizations will be able to determine what ranges of values can 'x'
4035 take in different paths of the code, simply by checking the reaching
4036 definition of 'x'. */
4038 static void
4039 insert_range_assertions (void)
/* Set up the global registries consumed by find_assert_locations and
   process_assert_insertions, run both phases, then tear them down.  */
4041 need_assert_for = BITMAP_ALLOC (NULL);
4042 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4044 calculate_dominance_info (CDI_DOMINATORS);
4046 find_assert_locations ();
4047 if (!bitmap_empty_p (need_assert_for))
4049 process_assert_insertions ();
/* New ASSERT_EXPR definitions require incremental SSA updating.  */
4050 update_ssa (TODO_update_ssa_no_phi);
4053 if (dump_file && (dump_flags & TDF_DETAILS))
4055 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4056 dump_function_to_file (current_function_decl, dump_file, dump_flags);
4059 free (asserts_for);
4060 BITMAP_FREE (need_assert_for);
/* SSA propagation engine specialization driving VRP: visit_stmt /
   visit_phi compute ranges, with warning helpers for out-of-bounds
   array and memory references.  Most range bookkeeping is delegated
   to the embedded vr_values instance.  */
4063 class vrp_prop : public ssa_propagation_engine
4065 public:
4066 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4067 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4069 void vrp_initialize (void);
4070 void vrp_finalize (bool);
4071 void check_all_array_refs (void);
4072 bool check_array_ref (location_t, tree, bool);
4073 bool check_mem_ref (location_t, tree, bool);
4074 void search_for_addr_array (tree, location_t);
/* The underlying lattice of value ranges per SSA name.  */
4076 class vr_values vr_values;
4077 /* Temporary delegator to minimize code churn. */
4078 const value_range *get_value_range (const_tree op)
4079 { return vr_values.get_value_range (op); }
4080 void set_def_to_varying (const_tree def)
4081 { vr_values.set_def_to_varying (def); }
4082 void set_defs_to_varying (gimple *stmt)
4083 { vr_values.set_defs_to_varying (stmt); }
4084 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4085 tree *output_p, value_range *vr)
4086 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4087 bool update_value_range (const_tree op, value_range *vr)
4088 { return vr_values.update_value_range (op, vr); }
4089 void extract_range_basic (value_range *vr, gimple *stmt)
4090 { vr_values.extract_range_basic (vr, stmt); }
4091 void extract_range_from_phi_node (gphi *phi, value_range *vr)
4092 { vr_values.extract_range_from_phi_node (phi, vr); }
4094 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
4095 and "struct" hacks. If VRP can determine that the
4096 array subscript is a constant, check if it is outside valid
4097 range. If the array subscript is a RANGE, warn if it is
4098 non-overlapping with valid range.
4099 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR.
4100 Returns true if a warning has been issued. */
4102 bool
4103 vrp_prop::check_array_ref (location_t location, tree ref,
4104 bool ignore_off_by_one)
4106 const value_range *vr = NULL;
4107 tree low_sub, up_sub;
4108 tree low_bound, up_bound, up_bound_p1;
/* Respect a previously set "already warned" marker.  */
4110 if (TREE_NO_WARNING (ref))
4111 return false;
4113 low_sub = up_sub = TREE_OPERAND (ref, 1);
4114 up_bound = array_ref_up_bound (ref);
4116 if (!up_bound
4117 || TREE_CODE (up_bound) != INTEGER_CST
4118 || (warn_array_bounds < 2
4119 && array_at_struct_end_p (ref)))
4121 /* Accesses to trailing arrays via pointers may access storage
4122 beyond the type's array bounds. For such arrays, or for flexible
4123 array members, as well as for other arrays of an unknown size,
4124 replace the upper bound with a more permissive one that assumes
4125 the size of the largest object is PTRDIFF_MAX. */
4126 tree eltsize = array_ref_element_size (ref);
4128 if (TREE_CODE (eltsize) != INTEGER_CST
4129 || integer_zerop (eltsize))
/* Variable or zero element size: no meaningful bound exists.  */
4131 up_bound = NULL_TREE;
4132 up_bound_p1 = NULL_TREE;
4134 else
4136 tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4137 tree arg = TREE_OPERAND (ref, 0);
4138 poly_int64 off;
/* Tighten PTRDIFF_MAX by the base object's known offset.  */
4140 if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4141 maxbound = wide_int_to_tree (sizetype,
4142 wi::sub (wi::to_wide (maxbound),
4143 off));
4144 else
4145 maxbound = fold_convert (sizetype, maxbound);
4147 up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4149 up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4150 build_int_cst (ptrdiff_type_node, 1));
4153 else
4154 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4155 build_int_cst (TREE_TYPE (up_bound), 1));
4157 low_bound = array_ref_low_bound (ref);
4159 tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4161 bool warned = false;
4163 /* Empty array. */
4164 if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4165 warned = warning_at (location, OPT_Warray_bounds,
4166 "array subscript %E is above array bounds of %qT",
4167 low_bound, artype);
4169 if (TREE_CODE (low_sub) == SSA_NAME)
4171 vr = get_value_range (low_sub);
4172 if (!vr->undefined_p () && !vr->varying_p ())
/* NOTE(review): for VR_RANGE, LOW_SUB is assigned the range max and
   UP_SUB the range min (apparently deliberate, so the whole range
   must be out of bounds before warning) — confirm against history.  */
4174 low_sub = vr->kind () == VR_RANGE ? vr->max () : vr->min ();
4175 up_sub = vr->kind () == VR_RANGE ? vr->min () : vr->max ();
4179 if (vr && vr->kind () == VR_ANTI_RANGE)
/* An anti-range warns only when it lies entirely outside the valid
   index range on both sides.  */
4181 if (up_bound
4182 && TREE_CODE (up_sub) == INTEGER_CST
4183 && (ignore_off_by_one
4184 ? tree_int_cst_lt (up_bound, up_sub)
4185 : tree_int_cst_le (up_bound, up_sub))
4186 && TREE_CODE (low_sub) == INTEGER_CST
4187 && tree_int_cst_le (low_sub, low_bound))
4188 warned = warning_at (location, OPT_Warray_bounds,
4189 "array subscript [%E, %E] is outside "
4190 "array bounds of %qT",
4191 low_sub, up_sub, artype);
4193 else if (up_bound
4194 && TREE_CODE (up_sub) == INTEGER_CST
4195 && (ignore_off_by_one
4196 ? !tree_int_cst_le (up_sub, up_bound_p1)
4197 : !tree_int_cst_le (up_sub, up_bound)))
4199 if (dump_file && (dump_flags & TDF_DETAILS))
4201 fprintf (dump_file, "Array bound warning for ");
4202 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4203 fprintf (dump_file, "\n");
4205 warned = warning_at (location, OPT_Warray_bounds,
4206 "array subscript %E is above array bounds of %qT",
4207 up_sub, artype);
4209 else if (TREE_CODE (low_sub) == INTEGER_CST
4210 && tree_int_cst_lt (low_sub, low_bound))
4212 if (dump_file && (dump_flags & TDF_DETAILS))
4214 fprintf (dump_file, "Array bound warning for ");
4215 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4216 fprintf (dump_file, "\n");
4218 warned = warning_at (location, OPT_Warray_bounds,
4219 "array subscript %E is below array bounds of %qT",
4220 low_sub, artype);
4223 if (warned)
/* Point at the referenced decl and suppress repeat warnings.  */
4225 ref = TREE_OPERAND (ref, 0);
4226 if (TREE_CODE (ref) == COMPONENT_REF)
4227 ref = TREE_OPERAND (ref, 1);
4229 if (DECL_P (ref))
4230 inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
4232 TREE_NO_WARNING (ref) = 1;
4235 return warned;
4238 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
4239 references to string constants. If VRP can determine that the array
4240 subscript is a constant, check if it is outside valid range.
4241 If the array subscript is a RANGE, warn if it is non-overlapping
4242 with valid range.
4243 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
4244 (used to allow one-past-the-end indices for code that takes
4245 the address of the just-past-the-end element of an array).
4246 Returns true if a warning has been issued. */
4248 bool
4249 vrp_prop::check_mem_ref (location_t location, tree ref,
4250 bool ignore_off_by_one)
4252 if (TREE_NO_WARNING (ref))
4253 return false;
4255 tree arg = TREE_OPERAND (ref, 0);
4256 /* The constant and variable offset of the reference. */
4257 tree cstoff = TREE_OPERAND (ref, 1);
4258 tree varoff = NULL_TREE;
4260 const offset_int maxobjsize = tree_to_shwi (max_object_size ());
4262 /* The array or string constant bounds in bytes. Initially set
4263 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
4264 determined. */
4265 offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
4267 /* The minimum and maximum intermediate offset. For a reference
4268 to be valid, not only does the final offset/subscript must be
4269 in bounds but all intermediate offsets should be as well.
4270 GCC may be able to deal gracefully with such out-of-bounds
4271 offsets so the checking is only enabled at -Warray-bounds=2
4272 where it may help detect bugs in uses of the intermediate
4273 offsets that could otherwise not be detectable. */
4274 offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
4275 offset_int extrema[2] = { 0, wi::abs (ioff) };
4277 /* The range of the byte offset into the reference. */
4278 offset_int offrange[2] = { 0, 0 };
4280 const value_range *vr = NULL;
4282 /* Determine the offsets and increment OFFRANGE for the bounds of each.
4283 The loop computes the range of the final offset for expressions such
4284 as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs in
4285 some range. */
4286 const unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
4287 for (unsigned n = 0; TREE_CODE (arg) == SSA_NAME && n < limit; ++n)
4289 gimple *def = SSA_NAME_DEF_STMT (arg);
4290 if (!is_gimple_assign (def))
4291 break;
4293 tree_code code = gimple_assign_rhs_code (def);
4294 if (code == POINTER_PLUS_EXPR)
4296 arg = gimple_assign_rhs1 (def);
4297 varoff = gimple_assign_rhs2 (def);
4299 else if (code == ASSERT_EXPR)
4301 arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
4302 continue;
4304 else
4305 return false;
4307 /* VAROFF should always be a SSA_NAME here (and not even
4308 INTEGER_CST) but there's no point in taking chances. */
4309 if (TREE_CODE (varoff) != SSA_NAME)
4310 break;
4312 vr = get_value_range (varoff);
4313 if (!vr || vr->undefined_p () || vr->varying_p ())
4314 break;
4316 if (!vr->constant_p ())
4317 break;
4319 if (vr->kind () == VR_RANGE)
4321 offset_int min
4322 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min ()));
4323 offset_int max
4324 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max ()));
4325 if (min < max)
4327 offrange[0] += min;
4328 offrange[1] += max;
4330 else
4332 /* When MIN >= MAX, the offset is effectively in a union
4333 of two ranges: [-MAXOBJSIZE -1, MAX] and [MIN, MAXOBJSIZE].
4334 Since there is no way to represent such a range across
4335 additions, conservatively add [-MAXOBJSIZE -1, MAXOBJSIZE]
4336 to OFFRANGE. */
4337 offrange[0] += arrbounds[0];
4338 offrange[1] += arrbounds[1];
4341 else
4343 /* For an anti-range, analogously to the above, conservatively
4344 add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE. */
4345 offrange[0] += arrbounds[0];
4346 offrange[1] += arrbounds[1];
4349 /* Keep track of the minimum and maximum offset. */
4350 if (offrange[1] < 0 && offrange[1] < extrema[0])
4351 extrema[0] = offrange[1];
4352 if (offrange[0] > 0 && offrange[0] > extrema[1])
4353 extrema[1] = offrange[0];
4355 if (offrange[0] < arrbounds[0])
4356 offrange[0] = arrbounds[0];
4358 if (offrange[1] > arrbounds[1])
4359 offrange[1] = arrbounds[1];
4362 if (TREE_CODE (arg) == ADDR_EXPR)
4364 arg = TREE_OPERAND (arg, 0);
4365 if (TREE_CODE (arg) != STRING_CST
4366 && TREE_CODE (arg) != VAR_DECL)
4367 return false;
4369 else
4370 return false;
4372 /* The type of the object being referred to. It can be an array,
4373 string literal, or a non-array type when the MEM_REF represents
4374 a reference/subscript via a pointer to an object that is not
4375 an element of an array. References to members of structs and
4376 unions are excluded because MEM_REF doesn't make it possible
4377 to identify the member where the reference originated.
4378 Incomplete types are excluded as well because their size is
4379 not known. */
4380 tree reftype = TREE_TYPE (arg);
4381 if (POINTER_TYPE_P (reftype)
4382 || !COMPLETE_TYPE_P (reftype)
4383 || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST
4384 || RECORD_OR_UNION_TYPE_P (reftype))
4385 return false;
4387 arrbounds[0] = 0;
4389 offset_int eltsize;
4390 if (TREE_CODE (reftype) == ARRAY_TYPE)
4392 eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
4393 if (tree dom = TYPE_DOMAIN (reftype))
4395 tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
4396 if (array_at_struct_end_p (arg) || !bnds[0] || !bnds[1])
4397 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4398 else
4399 arrbounds[1] = (wi::to_offset (bnds[1]) - wi::to_offset (bnds[0])
4400 + 1) * eltsize;
4402 else
4403 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4405 if (TREE_CODE (ref) == MEM_REF)
4407 /* For MEM_REF determine a tighter bound of the non-array
4408 element type. */
4409 tree eltype = TREE_TYPE (reftype);
4410 while (TREE_CODE (eltype) == ARRAY_TYPE)
4411 eltype = TREE_TYPE (eltype);
4412 eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
4415 else
4417 eltsize = 1;
4418 arrbounds[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype));
4421 offrange[0] += ioff;
4422 offrange[1] += ioff;
4424 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
4425 is set (when taking the address of the one-past-last element
4426 of an array) but always use the stricter bound in diagnostics. */
4427 offset_int ubound = arrbounds[1];
4428 if (ignore_off_by_one)
4429 ubound += 1;
4431 if (offrange[0] >= ubound || offrange[1] < arrbounds[0])
4433 /* Treat a reference to a non-array object as one to an array
4434 of a single element. */
4435 if (TREE_CODE (reftype) != ARRAY_TYPE)
4436 reftype = build_array_type_nelts (reftype, 1);
4438 if (TREE_CODE (ref) == MEM_REF)
4440 /* Extract the element type out of MEM_REF and use its size
4441 to compute the index to print in the diagnostic; arrays
4442 in MEM_REF don't mean anything. A type with no size like
4443 void is as good as having a size of 1. */
4444 tree type = TREE_TYPE (ref);
4445 while (TREE_CODE (type) == ARRAY_TYPE)
4446 type = TREE_TYPE (type);
4447 if (tree size = TYPE_SIZE_UNIT (type))
4449 offrange[0] = offrange[0] / wi::to_offset (size);
4450 offrange[1] = offrange[1] / wi::to_offset (size);
4453 else
4455 /* For anything other than MEM_REF, compute the index to
4456 print in the diagnostic as the offset over element size. */
4457 offrange[0] = offrange[0] / eltsize;
4458 offrange[1] = offrange[1] / eltsize;
4461 bool warned;
4462 if (offrange[0] == offrange[1])
4463 warned = warning_at (location, OPT_Warray_bounds,
4464 "array subscript %wi is outside array bounds "
4465 "of %qT",
4466 offrange[0].to_shwi (), reftype);
4467 else
4468 warned = warning_at (location, OPT_Warray_bounds,
4469 "array subscript [%wi, %wi] is outside "
4470 "array bounds of %qT",
4471 offrange[0].to_shwi (),
4472 offrange[1].to_shwi (), reftype);
4473 if (warned && DECL_P (arg))
4474 inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
4476 if (warned)
4477 TREE_NO_WARNING (ref) = 1;
4478 return warned;
4481 if (warn_array_bounds < 2)
4482 return false;
4484 /* At level 2 check also intermediate offsets. */
4485 int i = 0;
4486 if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
4488 HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
4490 if (warning_at (location, OPT_Warray_bounds,
4491 "intermediate array offset %wi is outside array bounds "
4492 "of %qT", tmpidx, reftype))
4494 TREE_NO_WARNING (ref) = 1;
4495 return true;
4499 return false;
4502 /* Searches if the expr T, located at LOCATION computes
4503 address of an ARRAY_REF, and call check_array_ref on it. */
4505 void
4506 vrp_prop::search_for_addr_array (tree t, location_t location)
4508 /* Check each ARRAY_REF and MEM_REF in the reference chain. */
     /* Walk down the reference chain, diagnosing each component.
        IGNORE_OFF_BY_ONE is passed as true because taking the address
        of the one-past-the-end element is valid. */
4511 bool warned = false;
4512 if (TREE_CODE (t) == ARRAY_REF)
4513 warned = check_array_ref (location, t, true /*ignore_off_by_one*/);
4514 else if (TREE_CODE (t) == MEM_REF)
4515 warned = check_mem_ref (location, t, true /*ignore_off_by_one*/);
4517 if (warned)
4518 TREE_NO_WARNING (t) = true;
4520 t = TREE_OPERAND (t, 0);
4522 while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
     /* What remains must be a MEM_REF of an ADDR_EXPR of a one-dimensional
        array for the manual bound check below; bail out otherwise, or if
        a warning was already issued for it. */
4524 if (TREE_CODE (t) != MEM_REF
4525 || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
4526 || TREE_NO_WARNING (t))
4527 return;
4529 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4530 tree low_bound, up_bound, el_sz;
4531 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
4532 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
4533 || !TYPE_DOMAIN (TREE_TYPE (tem)))
4534 return;
     /* All three of the array bounds and the element size must be
        compile-time constants to compute the index. */
4536 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4537 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4538 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
4539 if (!low_bound
4540 || TREE_CODE (low_bound) != INTEGER_CST
4541 || !up_bound
4542 || TREE_CODE (up_bound) != INTEGER_CST
4543 || !el_sz
4544 || TREE_CODE (el_sz) != INTEGER_CST)
4545 return;
4547 offset_int idx;
4548 if (!mem_ref_offset (t).is_constant (&idx))
4549 return;
4551 bool warned = false;
     /* Convert the byte offset into an element index.  */
4552 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4553 if (idx < 0)
4555 if (dump_file && (dump_flags & TDF_DETAILS))
4557 fprintf (dump_file, "Array bound warning for ");
4558 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4559 fprintf (dump_file, "\n");
4561 warned = warning_at (location, OPT_Warray_bounds,
4562 "array subscript %wi is below "
4563 "array bounds of %qT",
4564 idx.to_shwi (), TREE_TYPE (tem));
     /* IDX may be equal to bound + 1 without a warning: that is the
        valid one-past-the-end address.  */
4566 else if (idx > (wi::to_offset (up_bound)
4567 - wi::to_offset (low_bound) + 1))
4569 if (dump_file && (dump_flags & TDF_DETAILS))
4571 fprintf (dump_file, "Array bound warning for ");
4572 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4573 fprintf (dump_file, "\n");
4575 warned = warning_at (location, OPT_Warray_bounds,
4576 "array subscript %wu is above "
4577 "array bounds of %qT",
4578 idx.to_uhwi (), TREE_TYPE (tem));
4581 if (warned)
4583 if (DECL_P (t))
4584 inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
     /* Suppress duplicate diagnostics on this reference.  */
4586 TREE_NO_WARNING (t) = 1;
4590 /* walk_tree() callback that checks if *TP is
4591 an ARRAY_REF inside an ADDR_EXPR (in which an array
4592 subscript one outside the valid range is allowed). Call
4593 check_array_ref for each ARRAY_REF found. The location is
4594 passed in DATA. */
4596 static tree
4597 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4599 tree t = *tp;
4600 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4601 location_t location;
     /* Prefer the expression's own location; fall back to the
        statement being walked.  */
4603 if (EXPR_HAS_LOCATION (t))
4604 location = EXPR_LOCATION (t);
4605 else
4606 location = gimple_location (wi->stmt);
4608 *walk_subtree = TRUE;
4610 bool warned = false;
     /* The vrp_prop instance is smuggled in through wi->info by the
        dom walker that set up this walk.  */
4611 vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4612 if (TREE_CODE (t) == ARRAY_REF)
4613 warned = vrp_prop->check_array_ref (location, t, false/*ignore_off_by_one*/);
4614 else if (TREE_CODE (t) == MEM_REF)
4615 warned = vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4616 else if (TREE_CODE (t) == ADDR_EXPR)
4618 vrp_prop->search_for_addr_array (t, location);
     /* search_for_addr_array handles the whole subtree itself, so
        do not recurse into it again.  */
4619 *walk_subtree = FALSE;
4621 /* Propagate the no-warning bit to the outer expression. */
4622 if (warned)
4623 TREE_NO_WARNING (t) = true;
4625 return NULL_TREE;
4628 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4629 to walk over all statements of all reachable BBs and call
4630 check_array_bounds on them. */
4632 class check_array_bounds_dom_walker : public dom_walker
4634 public:
     /* PROP supplies the value ranges used by the array-bounds checks;
        it is stashed in m_prop and passed to each statement walk.  */
4635 check_array_bounds_dom_walker (vrp_prop *prop)
4636 : dom_walker (CDI_DOMINATORS,
4637 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4638 flags, so that we can merge in information on
4639 non-executable edges from vrp_folder . */
4640 REACHABLE_BLOCKS_PRESERVING_FLAGS),
4641 m_prop (prop) {}
4642 ~check_array_bounds_dom_walker () {}
4644 edge before_dom_children (basic_block) FINAL OVERRIDE;
4646 private:
     /* Non-owning pointer to the propagation state; not freed here.  */
4647 vrp_prop *m_prop;
4650 /* Implementation of dom_walker::before_dom_children.
4652 Walk over all statements of BB and call check_array_bounds on them,
4653 and determine if there's a unique successor edge. */
4655 edge
4656 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4658 gimple_stmt_iterator si;
4659 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4661 gimple *stmt = gsi_stmt (si);
4662 struct walk_stmt_info wi;
     /* Skip statements without a location (nothing to anchor a
        diagnostic to) and debug statements.  */
4663 if (!gimple_has_location (stmt)
4664 || is_gimple_debug (stmt))
4665 continue;
4667 memset (&wi, 0, sizeof (wi));
     /* Hand the vrp_prop instance to the per-operand callback.  */
4669 wi.info = m_prop;
4671 walk_gimple_op (stmt, check_array_bounds, &wi);
4674 /* Determine if there's a unique successor edge, and if so, return
4675 that back to dom_walker, ensuring that we don't visit blocks that
4676 became unreachable during the VRP propagation
4677 (PR tree-optimization/83312). */
4678 return find_taken_edge (bb, NULL_TREE);
4681 /* Walk over all statements of all reachable BBs and call check_array_bounds
4682 on them. */
4684 void
4685 vrp_prop::check_all_array_refs ()
     /* Dominator walk from the entry block; the walker skips blocks
        proven unreachable by propagation.  */
4687 check_array_bounds_dom_walker w (this);
4688 w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4691 /* Return true if all imm uses of VAR are either in STMT, or
4692 feed (optionally through a chain of single imm uses) GIMPLE_COND
4693 in basic block COND_BB. */
4695 static bool
4696 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
4698 use_operand_p use_p, use2_p;
4699 imm_use_iterator iter;
4701 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
4702 if (USE_STMT (use_p) != stmt)
4704 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
     /* Debug uses never count against the property.  */
4705 if (is_gimple_debug (use_stmt))
4706 continue;
     /* Follow a chain of single-use assignments; the chain must
        terminate in the GIMPLE_COND of COND_BB.  */
4707 while (is_gimple_assign (use_stmt)
4708 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
4709 && single_imm_use (gimple_assign_lhs (use_stmt),
4710 &use2_p, &use_stmt2))
4711 use_stmt = use_stmt2;
4712 if (gimple_code (use_stmt) != GIMPLE_COND
4713 || gimple_bb (use_stmt) != cond_bb)
4714 return false;
4716 return true;
4719 /* Handle
4720 _4 = x_3 & 31;
4721 if (_4 != 0)
4722 goto <bb 6>;
4723 else
4724 goto <bb 7>;
4725 <bb 6>:
4726 __builtin_unreachable ();
4727 <bb 7>:
4728 x_5 = ASSERT_EXPR <x_3, ...>;
4729 If x_3 has no other immediate uses (checked by caller),
4730 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
4731 from the non-zero bitmask. */
4733 void
4734 maybe_set_nonzero_bits (edge e, tree var)
4736 basic_block cond_bb = e->src;
4737 gimple *stmt = last_stmt (cond_bb);
4738 tree cst;
     /* The controlling condition must be (x & C) ==/!= 0 with the
        comparison code matching the edge taken (EQ on the true edge
        means the mask bits are zero along E).  */
4740 if (stmt == NULL
4741 || gimple_code (stmt) != GIMPLE_COND
4742 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
4743 ? EQ_EXPR : NE_EXPR)
4744 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
4745 || !integer_zerop (gimple_cond_rhs (stmt)))
4746 return;
4748 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
4749 if (!is_gimple_assign (stmt)
4750 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
4751 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
4752 return;
     /* Also accept the masked operand being a same-precision cast of
        VAR rather than VAR itself.  */
4753 if (gimple_assign_rhs1 (stmt) != var)
4755 gimple *stmt2;
4757 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
4758 return;
4759 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
4760 if (!gimple_assign_cast_p (stmt2)
4761 || gimple_assign_rhs1 (stmt2) != var
4762 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
4763 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
4764 != TYPE_PRECISION (TREE_TYPE (var))))
4765 return;
     /* Along E the bits in CST are known zero in VAR; clear them from
        VAR's recorded non-zero bit mask.  */
4767 cst = gimple_assign_rhs2 (stmt);
4768 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
4769 wi::to_wide (cst)));
4772 /* Convert range assertion expressions into the implied copies and
4773 copy propagate away the copies. Doing the trivial copy propagation
4774 here avoids the need to run the full copy propagation pass after
4775 VRP.
4777 FIXME, this will eventually lead to copy propagation removing the
4778 names that had useful range information attached to them. For
4779 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
4780 then N_i will have the range [3, +INF].
4782 However, by converting the assertion into the implied copy
4783 operation N_i = N_j, we will then copy-propagate N_j into the uses
4784 of N_i and lose the range information. We may want to hold on to
4785 ASSERT_EXPRs a little while longer as the ranges could be used in
4786 things like jump threading.
4788 The problem with keeping ASSERT_EXPRs around is that passes after
4789 VRP need to handle them appropriately.
4791 Another approach would be to make the range information a first
4792 class property of the SSA_NAME so that it can be queried from
4793 any pass. This is made somewhat more complex by the need for
4794 multiple ranges to be associated with one SSA_NAME. */
4796 static void
4797 remove_range_assertions (void)
4799 basic_block bb;
4800 gimple_stmt_iterator si;
4801 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
4802 a basic block preceded by GIMPLE_COND branching to it and
4803 __builtin_trap, -1 if not yet checked, 0 otherwise. */
4804 int is_unreachable;
4806 /* Note that the BSI iterator bump happens at the bottom of the
4807 loop and no bump is necessary if we're removing the statement
4808 referenced by the current BSI. */
4809 FOR_EACH_BB_FN (bb, cfun)
4810 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
4812 gimple *stmt = gsi_stmt (si);
4814 if (is_gimple_assign (stmt)
4815 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
4817 tree lhs = gimple_assign_lhs (stmt);
4818 tree rhs = gimple_assign_rhs1 (stmt);
4819 tree var;
4821 var = ASSERT_EXPR_VAR (rhs);
     /* Only non-pointer SSA names with recorded range info can
        have that info transferred back to the asserted name.  */
4823 if (TREE_CODE (var) == SSA_NAME
4824 && !POINTER_TYPE_P (TREE_TYPE (lhs))
4825 && SSA_NAME_RANGE_INFO (lhs))
     /* Lazily compute IS_UNREACHABLE for this block.  */
4827 if (is_unreachable == -1)
4829 is_unreachable = 0;
4830 if (single_pred_p (bb)
4831 && assert_unreachable_fallthru_edge_p
4832 (single_pred_edge (bb)))
4833 is_unreachable = 1;
4835 /* Handle
4836 if (x_7 >= 10 && x_7 < 20)
4837 __builtin_unreachable ();
4838 x_8 = ASSERT_EXPR <x_7, ...>;
4839 if the only uses of x_7 are in the ASSERT_EXPR and
4840 in the condition. In that case, we can copy the
4841 range info from x_8 computed in this pass also
4842 for x_7. */
4843 if (is_unreachable
4844 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
4845 single_pred (bb)))
4847 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
4848 SSA_NAME_RANGE_INFO (lhs)->get_min (),
4849 SSA_NAME_RANGE_INFO (lhs)->get_max ());
4850 maybe_set_nonzero_bits (single_pred_edge (bb), var);
4854 /* Propagate the RHS into every use of the LHS. For SSA names
4855 also propagate abnormals as it merely restores the original
4856 IL in this case (a replace_uses_by would assert). */
4857 if (TREE_CODE (var) == SSA_NAME)
4859 imm_use_iterator iter;
4860 use_operand_p use_p;
4861 gimple *use_stmt;
4862 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4863 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4864 SET_USE (use_p, var);
4866 else
4867 replace_uses_by (lhs, var);
4869 /* And finally, remove the copy, it is not needed. */
4870 gsi_remove (&si, true);
4871 release_defs (stmt);
4873 else
     /* Any real (non-debug) statement between the block start and
        an ASSERT_EXPR invalidates the unreachable-fallthru
        shortcut for later asserts in this block.  */
4875 if (!is_gimple_debug (gsi_stmt (si)))
4876 is_unreachable = 0;
4877 gsi_next (&si);
4882 /* Return true if STMT is interesting for VRP. */
4884 bool
4885 stmt_interesting_for_vrp (gimple *stmt)
     /* PHIs are interesting when they produce a real (non-virtual)
        integral or pointer result.  */
4887 if (gimple_code (stmt) == GIMPLE_PHI)
4889 tree res = gimple_phi_result (stmt);
4890 return (!virtual_operand_p (res)
4891 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
4892 || POINTER_TYPE_P (TREE_TYPE (res))));
4894 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
4896 tree lhs = gimple_get_lhs (stmt);
4898 /* In general, assignments with virtual operands are not useful
4899 for deriving ranges, with the obvious exception of calls to
4900 builtin functions. */
4901 if (lhs && TREE_CODE (lhs) == SSA_NAME
4902 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4903 || POINTER_TYPE_P (TREE_TYPE (lhs)))
4904 && (is_gimple_call (stmt)
4905 || !gimple_vuse (stmt)))
4906 return true;
4907 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
4908 switch (gimple_call_internal_fn (stmt))
4910 case IFN_ADD_OVERFLOW:
4911 case IFN_SUB_OVERFLOW:
4912 case IFN_MUL_OVERFLOW:
4913 case IFN_ATOMIC_COMPARE_EXCHANGE:
4914 /* These internal calls return _Complex integer type,
4915 but are interesting to VRP nevertheless. */
4916 if (lhs && TREE_CODE (lhs) == SSA_NAME)
4917 return true;
4918 break;
4919 default:
4920 break;
     /* Control statements are always interesting: their outgoing
        edges carry range information.  */
4923 else if (gimple_code (stmt) == GIMPLE_COND
4924 || gimple_code (stmt) == GIMPLE_SWITCH)
4925 return true;
4927 return false;
4930 /* Initialization required by ssa_propagate engine. */
4932 void
4933 vrp_prop::vrp_initialize ()
4935 basic_block bb;
4937 FOR_EACH_BB_FN (bb, cfun)
     /* Mark PHIs: uninteresting ones get VARYING up front and are
        never simulated again.  */
4939 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
4940 gsi_next (&si))
4942 gphi *phi = si.phi ();
4943 if (!stmt_interesting_for_vrp (phi))
4945 tree lhs = PHI_RESULT (phi);
4946 set_def_to_varying (lhs);
4947 prop_set_simulate_again (phi, false);
4949 else
4950 prop_set_simulate_again (phi, true);
4953 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
4954 gsi_next (&si))
4956 gimple *stmt = gsi_stmt (si);
4958 /* If the statement is a control insn, then we do not
4959 want to avoid simulating the statement once. Failure
4960 to do so means that those edges will never get added. */
4961 if (stmt_ends_bb_p (stmt))
4962 prop_set_simulate_again (stmt, true);
4963 else if (!stmt_interesting_for_vrp (stmt))
4965 set_defs_to_varying (stmt);
4966 prop_set_simulate_again (stmt, false);
4968 else
4969 prop_set_simulate_again (stmt, true);
4974 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
4975 that includes the value VAL. The search is restricted to the range
4976 [START_IDX, n - 1] where n is the size of VEC.
4978 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
4979 returned.
4981 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
4982 it is placed in IDX and false is returned.
4984 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
4985 returned. */
4987 bool
4988 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
4990 size_t n = gimple_switch_num_labels (stmt);
4991 size_t low, high;
4993 /* Find case label for minimum of the value range or the next one.
4994 At each iteration we are searching in [low, high - 1].
     Binary search; relies on the case labels being sorted by
     CASE_LOW.  */
4996 for (low = start_idx, high = n; high != low; )
4998 tree t;
4999 int cmp;
5000 /* Note that i != high, so we never ask for n. */
5001 size_t i = (high + low) / 2;
5002 t = gimple_switch_label (stmt, i);
5004 /* Cache the result of comparing CASE_LOW and val. */
5005 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5007 if (cmp == 0)
5009 /* Ranges cannot be empty. */
5010 *idx = i;
5011 return true;
5013 else if (cmp > 0)
5014 high = i;
5015 else
5017 low = i + 1;
     /* VAL may fall inside a case range (CASE_LOW .. CASE_HIGH)
        even though it does not equal CASE_LOW.  */
5018 if (CASE_HIGH (t) != NULL
5019 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5021 *idx = i;
5022 return true;
     /* Not found: HIGH is the index of the first label above VAL,
        or N when VAL is above every label.  */
5027 *idx = high;
5028 return false;
5031 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5032 for values between MIN and MAX. The first index is placed in MIN_IDX. The
5033 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5034 then MAX_IDX < MIN_IDX.
5035 Returns true if the default label is not needed. */
5037 bool
5038 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5039 size_t *max_idx)
5041 size_t i, j;
     /* Index 0 is the default label, so the search starts at 1.  */
5042 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5043 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5045 if (i == j
5046 && min_take_default
5047 && max_take_default)
5049 /* Only the default case label reached.
5050 Return an empty range. */
5051 *min_idx = 1;
5052 *max_idx = 0;
5053 return false;
5055 else
5057 bool take_default = min_take_default || max_take_default;
5058 tree low, high;
5059 size_t k;
     /* J points one past the last label covered by MAX when MAX
        itself hit no label; step back to the last covered one.  */
5061 if (max_take_default)
5062 j--;
5064 /* If the case label range is continuous, we do not need
5065 the default case label. Verify that. */
5066 high = CASE_LOW (gimple_switch_label (stmt, i));
5067 if (CASE_HIGH (gimple_switch_label (stmt, i)))
5068 high = CASE_HIGH (gimple_switch_label (stmt, i));
5069 for (k = i + 1; k <= j; ++k)
5071 low = CASE_LOW (gimple_switch_label (stmt, k));
     /* A gap between consecutive labels means values in between
        fall through to the default label.  */
5072 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5074 take_default = true;
5075 break;
5077 high = low;
5078 if (CASE_HIGH (gimple_switch_label (stmt, k)))
5079 high = CASE_HIGH (gimple_switch_label (stmt, k));
5082 *min_idx = i;
5083 *max_idx = j;
5084 return !take_default;
5088 /* Evaluate statement STMT. If the statement produces a useful range,
5089 return SSA_PROP_INTERESTING and record the SSA name with the
5090 interesting range into *OUTPUT_P.
5092 If STMT is a conditional branch and we can determine its truth
5093 value, the taken edge is recorded in *TAKEN_EDGE_P.
5095 If STMT produces a varying value, return SSA_PROP_VARYING. */
5097 enum ssa_prop_result
5098 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5100 tree lhs = gimple_get_lhs (stmt);
5101 value_range vr;
5102 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
     /* If the statement defines a single interesting SSA name, merge
        the new range into the lattice and report whether it changed.  */
5104 if (*output_p)
5106 if (update_value_range (*output_p, &vr))
5108 if (dump_file && (dump_flags & TDF_DETAILS))
5110 fprintf (dump_file, "Found new range for ");
5111 print_generic_expr (dump_file, *output_p);
5112 fprintf (dump_file, ": ");
5113 dump_value_range (dump_file, &vr);
5114 fprintf (dump_file, "\n");
5117 if (vr.varying_p ())
5118 return SSA_PROP_VARYING;
5120 return SSA_PROP_INTERESTING;
     /* Range did not change: nothing new to propagate.  */
5122 return SSA_PROP_NOT_INTERESTING;
5125 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5126 switch (gimple_call_internal_fn (stmt))
5128 case IFN_ADD_OVERFLOW:
5129 case IFN_SUB_OVERFLOW:
5130 case IFN_MUL_OVERFLOW:
5131 case IFN_ATOMIC_COMPARE_EXCHANGE:
5132 /* These internal calls return _Complex integer type,
5133 which VRP does not track, but the immediate uses
5134 thereof might be interesting. */
5135 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5137 imm_use_iterator iter;
5138 use_operand_p use_p;
5139 enum ssa_prop_result res = SSA_PROP_VARYING;
5141 set_def_to_varying (lhs);
5143 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5145 gimple *use_stmt = USE_STMT (use_p);
5146 if (!is_gimple_assign (use_stmt))
5147 continue;
5148 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5149 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5150 continue;
5151 tree rhs1 = gimple_assign_rhs1 (use_stmt);
5152 tree use_lhs = gimple_assign_lhs (use_stmt);
     /* Only consider REALPART/IMAGPART extractions of LHS itself,
        landing in an SSA name of a bounded integral type.  */
5153 if (TREE_CODE (rhs1) != rhs_code
5154 || TREE_OPERAND (rhs1, 0) != lhs
5155 || TREE_CODE (use_lhs) != SSA_NAME
5156 || !stmt_interesting_for_vrp (use_stmt)
5157 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5158 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5159 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5160 continue;
5162 /* If there is a change in the value range for any of the
5163 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5164 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
5165 or IMAGPART_EXPR immediate uses, but none of them have
5166 a change in their value ranges, return
5167 SSA_PROP_NOT_INTERESTING. If there are no
5168 {REAL,IMAG}PART_EXPR uses at all,
5169 return SSA_PROP_VARYING. */
5170 value_range new_vr;
5171 extract_range_basic (&new_vr, use_stmt);
5172 const value_range *old_vr = get_value_range (use_lhs);
5173 if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
5174 res = SSA_PROP_INTERESTING;
5175 else
5176 res = SSA_PROP_NOT_INTERESTING;
5177 new_vr.equiv_clear ();
5178 if (res == SSA_PROP_INTERESTING)
5180 *output_p = lhs;
5181 return res;
5185 return res;
5187 break;
5188 default:
5189 break;
5192 /* All other statements produce nothing of interest for VRP, so mark
5193 their outputs varying and prevent further simulation. */
5194 set_defs_to_varying (stmt);
     /* A known taken edge (set by extract_range_from_stmt) is still
        worth propagating even without an output range.  */
5196 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5199 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5200 { VR1TYPE, VR0MIN, VR0MAX } and store the result
5201 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5202 possible such range. The resulting range is not canonicalized. */
5204 static void
5205 union_ranges (enum value_range_kind *vr0type,
5206 tree *vr0min, tree *vr0max,
5207 enum value_range_kind vr1type,
5208 tree vr1min, tree vr1max)
5210 int cmpmin = compare_values (*vr0min, vr1min);
5211 int cmpmax = compare_values (*vr0max, vr1max);
5212 bool mineq = cmpmin == 0;
5213 bool maxeq = cmpmax == 0;
5215 /* [] is vr0, () is vr1 in the following classification comments. */
5216 if (mineq && maxeq)
5218 /* [( )] */
5219 if (*vr0type == vr1type)
5220 /* Nothing to do for equal ranges. */
5222 else if ((*vr0type == VR_RANGE
5223 && vr1type == VR_ANTI_RANGE)
5224 || (*vr0type == VR_ANTI_RANGE
5225 && vr1type == VR_RANGE))
5227 /* For anti-range with range union the result is varying. */
5228 goto give_up;
5230 else
5231 gcc_unreachable ();
5233 else if (operand_less_p (*vr0max, vr1min) == 1
5234 || operand_less_p (vr1max, *vr0min) == 1)
5236 /* [ ] ( ) or ( ) [ ]
5237 If the ranges have an empty intersection, result of the union
5238 operation is the anti-range or if both are anti-ranges
5239 it covers all. */
5240 if (*vr0type == VR_ANTI_RANGE
5241 && vr1type == VR_ANTI_RANGE)
5242 goto give_up;
5243 else if (*vr0type == VR_ANTI_RANGE
5244 && vr1type == VR_RANGE)
     /* The anti-range already excludes only values outside vr1;
        keep vr0 unchanged.  */
5246 else if (*vr0type == VR_RANGE
5247 && vr1type == VR_ANTI_RANGE)
5249 *vr0type = vr1type;
5250 *vr0min = vr1min;
5251 *vr0max = vr1max;
5253 else if (*vr0type == VR_RANGE
5254 && vr1type == VR_RANGE)
5256 /* The result is the convex hull of both ranges. */
5257 if (operand_less_p (*vr0max, vr1min) == 1)
5259 /* If the result can be an anti-range, create one. */
5260 if (TREE_CODE (*vr0max) == INTEGER_CST
5261 && TREE_CODE (vr1min) == INTEGER_CST
5262 && vrp_val_is_min (*vr0min)
5263 && vrp_val_is_max (vr1max))
5265 tree min = int_const_binop (PLUS_EXPR,
5266 *vr0max,
5267 build_int_cst (TREE_TYPE (*vr0max), 1));
5268 tree max = int_const_binop (MINUS_EXPR,
5269 vr1min,
5270 build_int_cst (TREE_TYPE (vr1min), 1));
5271 if (!operand_less_p (max, min))
5273 *vr0type = VR_ANTI_RANGE;
5274 *vr0min = min;
5275 *vr0max = max;
5277 else
5278 *vr0max = vr1max;
5280 else
5281 *vr0max = vr1max;
5283 else
5285 /* If the result can be an anti-range, create one. */
5286 if (TREE_CODE (vr1max) == INTEGER_CST
5287 && TREE_CODE (*vr0min) == INTEGER_CST
5288 && vrp_val_is_min (vr1min)
5289 && vrp_val_is_max (*vr0max))
5291 tree min = int_const_binop (PLUS_EXPR,
5292 vr1max,
5293 build_int_cst (TREE_TYPE (vr1max), 1));
5294 tree max = int_const_binop (MINUS_EXPR,
5295 *vr0min,
5296 build_int_cst (TREE_TYPE (*vr0min), 1));
5297 if (!operand_less_p (max, min))
5299 *vr0type = VR_ANTI_RANGE;
5300 *vr0min = min;
5301 *vr0max = max;
5303 else
5304 *vr0min = vr1min;
5306 else
5307 *vr0min = vr1min;
5310 else
5311 gcc_unreachable ();
     /* vr1 is contained within vr0 (allowing shared endpoints).  */
5313 else if ((maxeq || cmpmax == 1)
5314 && (mineq || cmpmin == -1))
5316 /* [ ( ) ] or [( ) ] or [ ( )] */
5317 if (*vr0type == VR_RANGE
5318 && vr1type == VR_RANGE)
     /* The outer range vr0 already covers the union.  */
5320 else if (*vr0type == VR_ANTI_RANGE
5321 && vr1type == VR_ANTI_RANGE)
5323 *vr0type = vr1type;
5324 *vr0min = vr1min;
5325 *vr0max = vr1max;
5327 else if (*vr0type == VR_ANTI_RANGE
5328 && vr1type == VR_RANGE)
5330 /* Arbitrarily choose the right or left gap. */
5331 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5332 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5333 build_int_cst (TREE_TYPE (vr1min), 1));
5334 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5335 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5336 build_int_cst (TREE_TYPE (vr1max), 1));
5337 else
5338 goto give_up;
5340 else if (*vr0type == VR_RANGE
5341 && vr1type == VR_ANTI_RANGE)
5342 /* The result covers everything. */
5343 goto give_up;
5344 else
5345 gcc_unreachable ();
     /* vr0 is contained within vr1 (allowing shared endpoints).  */
5347 else if ((maxeq || cmpmax == -1)
5348 && (mineq || cmpmin == 1))
5350 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5351 if (*vr0type == VR_RANGE
5352 && vr1type == VR_RANGE)
5354 *vr0type = vr1type;
5355 *vr0min = vr1min;
5356 *vr0max = vr1max;
5358 else if (*vr0type == VR_ANTI_RANGE
5359 && vr1type == VR_ANTI_RANGE)
     /* The smaller exclusion vr0 already covers the union.  */
5361 else if (*vr0type == VR_RANGE
5362 && vr1type == VR_ANTI_RANGE)
5364 *vr0type = VR_ANTI_RANGE;
5365 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5367 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5368 build_int_cst (TREE_TYPE (*vr0min), 1));
5369 *vr0min = vr1min;
5371 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5373 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5374 build_int_cst (TREE_TYPE (*vr0max), 1));
5375 *vr0max = vr1max;
5377 else
5378 goto give_up;
5380 else if (*vr0type == VR_ANTI_RANGE
5381 && vr1type == VR_RANGE)
5382 /* The result covers everything. */
5383 goto give_up;
5384 else
5385 gcc_unreachable ();
     /* Partial overlap with vr0 on the left, or vr1 adjacent on
        the right.  */
5387 else if (cmpmin == -1
5388 && cmpmax == -1
5389 && (operand_less_p (vr1min, *vr0max) == 1
5390 || operand_equal_p (vr1min, *vr0max, 0)))
5392 /* [ ( ] ) or [ ]( ) */
5393 if (*vr0type == VR_RANGE
5394 && vr1type == VR_RANGE)
5395 *vr0max = vr1max;
5396 else if (*vr0type == VR_ANTI_RANGE
5397 && vr1type == VR_ANTI_RANGE)
5398 *vr0min = vr1min;
5399 else if (*vr0type == VR_ANTI_RANGE
5400 && vr1type == VR_RANGE)
5402 if (TREE_CODE (vr1min) == INTEGER_CST)
5403 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5404 build_int_cst (TREE_TYPE (vr1min), 1));
5405 else
5406 goto give_up;
5408 else if (*vr0type == VR_RANGE
5409 && vr1type == VR_ANTI_RANGE)
5411 if (TREE_CODE (*vr0max) == INTEGER_CST)
5413 *vr0type = vr1type;
5414 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5415 build_int_cst (TREE_TYPE (*vr0max), 1));
5416 *vr0max = vr1max;
5418 else
5419 goto give_up;
5421 else
5422 gcc_unreachable ();
     /* Mirror image: partial overlap with vr1 on the left, or vr1
        adjacent on the left.  */
5424 else if (cmpmin == 1
5425 && cmpmax == 1
5426 && (operand_less_p (*vr0min, vr1max) == 1
5427 || operand_equal_p (*vr0min, vr1max, 0)))
5429 /* ( [ ) ] or ( )[ ] */
5430 if (*vr0type == VR_RANGE
5431 && vr1type == VR_RANGE)
5432 *vr0min = vr1min;
5433 else if (*vr0type == VR_ANTI_RANGE
5434 && vr1type == VR_ANTI_RANGE)
5435 *vr0max = vr1max;
5436 else if (*vr0type == VR_ANTI_RANGE
5437 && vr1type == VR_RANGE)
5439 if (TREE_CODE (vr1max) == INTEGER_CST)
5440 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5441 build_int_cst (TREE_TYPE (vr1max), 1));
5442 else
5443 goto give_up;
5445 else if (*vr0type == VR_RANGE
5446 && vr1type == VR_ANTI_RANGE)
5448 if (TREE_CODE (*vr0min) == INTEGER_CST)
5450 *vr0type = vr1type;
5451 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5452 build_int_cst (TREE_TYPE (*vr0min), 1));
5453 *vr0min = vr1min;
5455 else
5456 goto give_up;
5458 else
5459 gcc_unreachable ();
5461 else
5462 goto give_up;
5464 return;
5466 give_up:
     /* Conservative fallback: no representable union, so VARYING.  */
5467 *vr0type = VR_VARYING;
5468 *vr0min = NULL_TREE;
5469 *vr0max = NULL_TREE;
/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
   { VR1TYPE, VR0MIN, VR0MAX } and store the result
   in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
   possible such range.  The resulting range is not canonicalized.

   The body is a case analysis on how the two ranges overlap;
   operand_less_p is tri-state (1 means definitely less), so every
   comparison is checked against == 1 explicitly.  */

static void
intersect_ranges (enum value_range_kind *vr0type,
		  tree *vr0min, tree *vr0max,
		  enum value_range_kind vr1type,
		  tree vr1min, tree vr1max)
{
  bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
  bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);

  /* [] is vr0, () is vr1 in the following classification comments.  */
  if (mineq && maxeq)
    {
      /* [(  )] */
      if (*vr0type == vr1type)
	/* Nothing to do for equal ranges.  */
	;
      else if ((*vr0type == VR_RANGE
		&& vr1type == VR_ANTI_RANGE)
	       || (*vr0type == VR_ANTI_RANGE
		   && vr1type == VR_RANGE))
	{
	  /* For anti-range with range intersection the result is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if (operand_less_p (*vr0max, vr1min) == 1
	   || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
	 If the ranges have an empty intersection, the result of the
	 intersect operation is the range for intersecting an
	 anti-range with a range or empty when intersecting two ranges.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_ANTI_RANGE)
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If the anti-ranges are adjacent to each other merge them.  */
	  if (TREE_CODE (*vr0max) == INTEGER_CST
	      && TREE_CODE (vr1min) == INTEGER_CST
	      && operand_less_p (*vr0max, vr1min) == 1
	      && integer_onep (int_const_binop (MINUS_EXPR,
						vr1min, *vr0max)))
	    *vr0max = vr1max;
	  else if (TREE_CODE (vr1max) == INTEGER_CST
		   && TREE_CODE (*vr0min) == INTEGER_CST
		   && operand_less_p (vr1max, *vr0min) == 1
		   && integer_onep (int_const_binop (MINUS_EXPR,
						     *vr0min, vr1max)))
	    *vr0min = vr1min;
	  /* Else arbitrarily take VR0.  */
	}
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ (  ) ] or [(  ) ] or [ (  )] */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	{
	  /* If both are ranges the result is the inner one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* Choose the right gap if the left one is empty.  */
	  if (mineq)
	    {
	      /* The 1-bit signed special cases below exist because for
		 such types X+1 would overflow where X-(-1) does not.  */
	      if (TREE_CODE (vr1max) != INTEGER_CST)
		*vr0min = vr1max;
	      else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
		       && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
		*vr0min
		  = int_const_binop (MINUS_EXPR, vr1max,
				     build_int_cst (TREE_TYPE (vr1max), -1));
	      else
		*vr0min
		  = int_const_binop (PLUS_EXPR, vr1max,
				     build_int_cst (TREE_TYPE (vr1max), 1));
	    }
	  /* Choose the left gap if the right one is empty.  */
	  else if (maxeq)
	    {
	      if (TREE_CODE (vr1min) != INTEGER_CST)
		*vr0max = vr1min;
	      else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
		       && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
		*vr0max
		  = int_const_binop (PLUS_EXPR, vr1min,
				     build_int_cst (TREE_TYPE (vr1min), -1));
	      else
		*vr0max
		  = int_const_binop (MINUS_EXPR, vr1min,
				     build_int_cst (TREE_TYPE (vr1min), 1));
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (*vr0min)
		   && vrp_val_is_max (*vr0max))
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	  /* Else choose the range.  */
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	/* If both are anti-ranges the result is the outer one.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [  ] ) or ([  ] ) or ( [  ]) */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	/* Choose the inner range.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Choose the right gap if the left is empty.  */
	  if (mineq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0max) != INTEGER_CST)
		*vr0min = *vr0max;
	      else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
		*vr0min
		  = int_const_binop (MINUS_EXPR, *vr0max,
				     build_int_cst (TREE_TYPE (*vr0max), -1));
	      else
		*vr0min
		  = int_const_binop (PLUS_EXPR, *vr0max,
				     build_int_cst (TREE_TYPE (*vr0max), 1));
	      *vr0max = vr1max;
	    }
	  /* Choose the left gap if the right is empty.  */
	  else if (maxeq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0min) != INTEGER_CST)
		*vr0max = *vr0min;
	      else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
		*vr0max
		  = int_const_binop (PLUS_EXPR, *vr0min,
				     build_int_cst (TREE_TYPE (*vr0min), -1));
	      else
		*vr0max
		  = int_const_binop (MINUS_EXPR, *vr0min,
				     build_int_cst (TREE_TYPE (*vr0min), 1));
	      *vr0min = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (vr1min)
		   && vrp_val_is_max (vr1max))
	    ;
	  /* Choose the anti-range if it is ~[0,0], that range is special
	     enough to special case when vr1's range is relatively wide.
	     At least for types bigger than int - this covers pointers
	     and arguments to functions like ctz.  */
	  else if (*vr0min == *vr0max
		   && integer_zerop (*vr0min)
		   && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
			>= TYPE_PRECISION (integer_type_node))
		       || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
		   && TREE_CODE (vr1max) == INTEGER_CST
		   && TREE_CODE (vr1min) == INTEGER_CST
		   && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
		       < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
	    ;
	  /* Else choose the range.  */
	  else
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If both are anti-ranges the result is the outer one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (vr1type == VR_ANTI_RANGE
	       && *vr0type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
	    || operand_equal_p (vr1min, *vr0max, 0))
	   && operand_less_p (*vr0min, vr1min) == 1)
    {
      /* [  (  ]  ) or [  ](  ) */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else
	    *vr0max = vr1min;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
				       build_int_cst (TREE_TYPE (*vr0max), 1));
	  else
	    *vr0min = *vr0max;
	  *vr0max = vr1max;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
	    || operand_equal_p (*vr0min, vr1max, 0))
	   && operand_less_p (vr1min, *vr0min) == 1)
    {
      /* (  [  )  ] or (  )[  ] */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    *vr0min = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
				       build_int_cst (TREE_TYPE (*vr0min), 1));
	  else
	    *vr0max = *vr0min;
	  *vr0min = vr1min;
	}
      else
	gcc_unreachable ();
    }

  /* If we know the intersection is empty, there's no need to
     conservatively add anything else to the set.  */
  if (*vr0type == VR_UNDEFINED)
    return;

  /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as
     result for the intersection.  That's always a conservative
     correct estimate unless VR1 is a constant singleton range
     in which case we choose that.  */
  if (vr1type == VR_RANGE
      && is_gimple_min_invariant (vr1min)
      && vrp_operand_equal_p (vr1min, vr1max))
    {
      *vr0type = vr1type;
      *vr0min = vr1min;
      *vr0max = vr1max;
    }
}
5798 /* Helper for the intersection operation for value ranges. Given two
5799 value ranges VR0 and VR1, return the intersection of the two
5800 ranges. This may not be the smallest possible such range. */
5802 value_range_base
5803 value_range_base::intersect_helper (const value_range_base *vr0,
5804 const value_range_base *vr1)
5806 /* If either range is VR_VARYING the other one wins. */
5807 if (vr1->varying_p ())
5808 return *vr0;
5809 if (vr0->varying_p ())
5810 return *vr1;
5812 /* When either range is VR_UNDEFINED the resulting range is
5813 VR_UNDEFINED, too. */
5814 if (vr0->undefined_p ())
5815 return *vr0;
5816 if (vr1->undefined_p ())
5817 return *vr1;
5819 value_range_kind vr0type = vr0->kind ();
5820 tree vr0min = vr0->min ();
5821 tree vr0max = vr0->max ();
5822 intersect_ranges (&vr0type, &vr0min, &vr0max,
5823 vr1->kind (), vr1->min (), vr1->max ());
5824 /* Make sure to canonicalize the result though as the inversion of a
5825 VR_RANGE can still be a VR_RANGE. Work on a temporary so we can
5826 fall back to vr0 when this turns things to varying. */
5827 value_range_base tem;
5828 if (vr0type == VR_UNDEFINED)
5829 tem.set_undefined ();
5830 else if (vr0type == VR_VARYING)
5831 tem.set_varying (vr0->type ());
5832 else
5833 tem.set (vr0type, vr0min, vr0max);
5834 /* If that failed, use the saved original VR0. */
5835 if (tem.varying_p ())
5836 return *vr0;
5838 return tem;
5841 void
5842 value_range_base::intersect (const value_range_base *other)
5844 if (dump_file && (dump_flags & TDF_DETAILS))
5846 fprintf (dump_file, "Intersecting\n ");
5847 dump_value_range (dump_file, this);
5848 fprintf (dump_file, "\nand\n ");
5849 dump_value_range (dump_file, other);
5850 fprintf (dump_file, "\n");
5853 *this = intersect_helper (this, other);
5855 if (dump_file && (dump_flags & TDF_DETAILS))
5857 fprintf (dump_file, "to\n ");
5858 dump_value_range (dump_file, this);
5859 fprintf (dump_file, "\n");
/* Intersect this range (with equivalences) with OTHER, merging the
   equivalence sets as well as the numeric ranges.  */

void
value_range::intersect (const value_range *other)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Intersecting\n  ");
      dump_value_range (dump_file, this);
      fprintf (dump_file, "\nand\n  ");
      dump_value_range (dump_file, other);
      fprintf (dump_file, "\n");
    }

  /* If THIS is varying we want to pick up equivalences from OTHER.
     Just special-case this here rather than trying to fixup after the
     fact.  */
  if (this->varying_p ())
    this->deep_copy (other);
  else
    {
      value_range_base tem = intersect_helper (this, other);
      this->update (tem.kind (), tem.min (), tem.max ());

      /* If the result is VR_UNDEFINED there is no need to mess with
	 equivalencies.  */
      if (!undefined_p ())
	{
	  /* The resulting set of equivalences for range intersection
	     is the union of the two sets.  */
	  if (m_equiv && other->m_equiv && m_equiv != other->m_equiv)
	    bitmap_ior_into (m_equiv, other->m_equiv);
	  else if (other->m_equiv && !m_equiv)
	    {
	      /* All equivalence bitmaps are allocated from the same
		 obstack.  So we can use the obstack associated with
		 VR to allocate this->m_equiv.  */
	      m_equiv = BITMAP_ALLOC (other->m_equiv->obstack);
	      bitmap_copy (m_equiv, other->m_equiv);
	    }
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "to\n  ");
      dump_value_range (dump_file, this);
      fprintf (dump_file, "\n");
    }
}
5912 /* Helper for meet operation for value ranges. Given two value ranges VR0 and
5913 VR1, return a range that contains both VR0 and VR1. This may not be the
5914 smallest possible such range. */
5916 value_range_base
5917 value_range_base::union_helper (const value_range_base *vr0,
5918 const value_range_base *vr1)
5920 /* VR0 has the resulting range if VR1 is undefined or VR0 is varying. */
5921 if (vr1->undefined_p ()
5922 || vr0->varying_p ())
5923 return *vr0;
5925 /* VR1 has the resulting range if VR0 is undefined or VR1 is varying. */
5926 if (vr0->undefined_p ()
5927 || vr1->varying_p ())
5928 return *vr1;
5930 value_range_kind vr0type = vr0->kind ();
5931 tree vr0min = vr0->min ();
5932 tree vr0max = vr0->max ();
5933 union_ranges (&vr0type, &vr0min, &vr0max,
5934 vr1->kind (), vr1->min (), vr1->max ());
5936 /* Work on a temporary so we can still use vr0 when union returns varying. */
5937 value_range_base tem;
5938 if (vr0type == VR_UNDEFINED)
5939 tem.set_undefined ();
5940 else if (vr0type == VR_VARYING)
5941 tem.set_varying (vr0->type ());
5942 else
5943 tem.set (vr0type, vr0min, vr0max);
5945 /* Failed to find an efficient meet. Before giving up and setting
5946 the result to VARYING, see if we can at least derive a useful
5947 anti-range. */
5948 if (tem.varying_p ()
5949 && range_includes_zero_p (vr0) == 0
5950 && range_includes_zero_p (vr1) == 0)
5952 tem.set_nonzero (vr0->type ());
5953 return tem;
5956 return tem;
5960 /* Meet operation for value ranges. Given two value ranges VR0 and
5961 VR1, store in VR0 a range that contains both VR0 and VR1. This
5962 may not be the smallest possible such range. */
5964 void
5965 value_range_base::union_ (const value_range_base *other)
5967 if (dump_file && (dump_flags & TDF_DETAILS))
5969 fprintf (dump_file, "Meeting\n ");
5970 dump_value_range (dump_file, this);
5971 fprintf (dump_file, "\nand\n ");
5972 dump_value_range (dump_file, other);
5973 fprintf (dump_file, "\n");
5976 *this = union_helper (this, other);
5978 if (dump_file && (dump_flags & TDF_DETAILS))
5980 fprintf (dump_file, "to\n ");
5981 dump_value_range (dump_file, this);
5982 fprintf (dump_file, "\n");
/* Meet operation for value ranges with equivalences: widen this range
   to contain OTHER and intersect the equivalence sets.  */

void
value_range::union_ (const value_range *other)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Meeting\n  ");
      dump_value_range (dump_file, this);
      fprintf (dump_file, "\nand\n  ");
      dump_value_range (dump_file, other);
      fprintf (dump_file, "\n");
    }

  /* If THIS is undefined we want to pick up equivalences from OTHER.
     Just special-case this here rather than trying to fixup after the fact.  */
  if (this->undefined_p ())
    this->deep_copy (other);
  else
    {
      value_range_base tem = union_helper (this, other);
      this->update (tem.kind (), tem.min (), tem.max ());

      /* The resulting set of equivalences is always the intersection of
	 the two sets.  */
      if (this->m_equiv && other->m_equiv && this->m_equiv != other->m_equiv)
	bitmap_and_into (this->m_equiv, other->m_equiv);
      else if (this->m_equiv && !other->m_equiv)
	bitmap_clear (this->m_equiv);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "to\n  ");
      dump_value_range (dump_file, this);
      fprintf (dump_file, "\n");
    }
}
6023 /* Normalize addresses into constants. */
6025 value_range_base
6026 value_range_base::normalize_addresses () const
6028 if (!POINTER_TYPE_P (type ()) || range_has_numeric_bounds_p (this))
6029 return *this;
6031 if (!range_includes_zero_p (this))
6033 gcc_checking_assert (TREE_CODE (m_min) == ADDR_EXPR
6034 || TREE_CODE (m_max) == ADDR_EXPR);
6035 return range_nonzero (type ());
6037 return value_range_base (type ());
/* Normalize symbolics and addresses into constants.

   A bound is "symbolic" when it is not a gimple minimal invariant;
   each combination of symbolic min/max is widened to the nearest
   representable constant range, falling back to VARYING.  */

value_range_base
value_range_base::normalize_symbolics () const
{
  if (varying_p () || undefined_p ())
    return *this;
  tree ttype = type ();
  bool min_symbolic = !is_gimple_min_invariant (min ());
  bool max_symbolic = !is_gimple_min_invariant (max ());
  if (!min_symbolic && !max_symbolic)
    return normalize_addresses ();

  // [SYM, SYM] -> VARYING
  if (min_symbolic && max_symbolic)
    {
      value_range_base var;
      var.set_varying (ttype);
      return var;
    }
  if (kind () == VR_RANGE)
    {
      // [SYM, NUM] -> [-MIN, NUM]
      if (min_symbolic)
	return value_range_base (VR_RANGE, vrp_val_min (ttype, true), max ());
      // [NUM, SYM] -> [NUM, +MAX]
      return value_range_base (VR_RANGE, min (), vrp_val_max (ttype, true));
    }
  gcc_checking_assert (kind () == VR_ANTI_RANGE);
  // ~[SYM, NUM] -> [NUM + 1, +MAX]
  if (min_symbolic)
    {
      if (!vrp_val_is_max (max ()))
	{
	  tree n = wide_int_to_tree (ttype, wi::to_wide (max ()) + 1);
	  return value_range_base (VR_RANGE, n, vrp_val_max (ttype, true));
	}
      // NUM was already the type maximum: nothing is excluded.
      value_range_base var;
      var.set_varying (ttype);
      return var;
    }
  // ~[NUM, SYM] -> [-MIN, NUM - 1]
  if (!vrp_val_is_min (min ()))
    {
      tree n = wide_int_to_tree (ttype, wi::to_wide (min ()) - 1);
      return value_range_base (VR_RANGE, vrp_val_min (ttype, true), n);
    }
  // NUM was already the type minimum: nothing is excluded.
  value_range_base var;
  var.set_varying (ttype);
  return var;
}
6092 /* Return the number of sub-ranges in a range. */
6094 unsigned
6095 value_range_base::num_pairs () const
6097 if (undefined_p ())
6098 return 0;
6099 if (varying_p ())
6100 return 1;
6101 if (symbolic_p ())
6102 return normalize_symbolics ().num_pairs ();
6103 if (m_kind == VR_ANTI_RANGE)
6105 // ~[MIN, X] has one sub-range of [X+1, MAX], and
6106 // ~[X, MAX] has one sub-range of [MIN, X-1].
6107 if (vrp_val_is_min (m_min, true) || vrp_val_is_max (m_max, true))
6108 return 1;
6109 return 2;
6111 return 1;
6114 /* Return the lower bound for a sub-range. PAIR is the sub-range in
6115 question. */
6117 wide_int
6118 value_range_base::lower_bound (unsigned pair) const
6120 if (symbolic_p ())
6121 return normalize_symbolics ().lower_bound (pair);
6123 gcc_checking_assert (!undefined_p ());
6124 gcc_checking_assert (pair + 1 <= num_pairs ());
6125 tree t = NULL;
6126 if (m_kind == VR_ANTI_RANGE)
6128 tree typ = type ();
6129 if (pair == 1 || vrp_val_is_min (m_min, true))
6130 t = wide_int_to_tree (typ, wi::to_wide (m_max) + 1);
6131 else
6132 t = vrp_val_min (typ, true);
6134 else
6135 t = m_min;
6136 return wi::to_wide (t);
6139 /* Return the upper bound for a sub-range. PAIR is the sub-range in
6140 question. */
6142 wide_int
6143 value_range_base::upper_bound (unsigned pair) const
6145 if (symbolic_p ())
6146 return normalize_symbolics ().upper_bound (pair);
6148 gcc_checking_assert (!undefined_p ());
6149 gcc_checking_assert (pair + 1 <= num_pairs ());
6150 tree t = NULL;
6151 if (m_kind == VR_ANTI_RANGE)
6153 tree typ = type ();
6154 if (pair == 1 || vrp_val_is_min (m_min, true))
6155 t = vrp_val_max (typ, true);
6156 else
6157 t = wide_int_to_tree (typ, wi::to_wide (m_min) - 1);
6159 else
6160 t = m_max;
6161 return wi::to_wide (t);
6164 /* Return the highest bound in a range. */
6166 wide_int
6167 value_range_base::upper_bound () const
6169 unsigned pairs = num_pairs ();
6170 gcc_checking_assert (pairs > 0);
6171 return upper_bound (pairs - 1);
6174 /* Return TRUE if range contains INTEGER_CST. */
6176 bool
6177 value_range_base::contains_p (tree cst) const
6179 gcc_checking_assert (TREE_CODE (cst) == INTEGER_CST);
6180 if (symbolic_p ())
6181 return normalize_symbolics ().contains_p (cst);
6182 return value_inside_range (cst) == 1;
6185 /* Return the inverse of a range. */
6187 void
6188 value_range_base::invert ()
6190 if (m_kind == VR_RANGE)
6191 m_kind = VR_ANTI_RANGE;
6192 else if (m_kind == VR_ANTI_RANGE)
6193 m_kind = VR_RANGE;
6194 else
6195 gcc_unreachable ();
6198 /* Range union, but for references. */
6200 void
6201 value_range_base::union_ (const value_range_base &r)
6203 /* Disable details for now, because it makes the ranger dump
6204 unnecessarily verbose. */
6205 bool details = dump_flags & TDF_DETAILS;
6206 if (details)
6207 dump_flags &= ~TDF_DETAILS;
6208 union_ (&r);
6209 if (details)
6210 dump_flags |= TDF_DETAILS;
6213 /* Range intersect, but for references. */
6215 void
6216 value_range_base::intersect (const value_range_base &r)
6218 /* Disable details for now, because it makes the ranger dump
6219 unnecessarily verbose. */
6220 bool details = dump_flags & TDF_DETAILS;
6221 if (details)
6222 dump_flags &= ~TDF_DETAILS;
6223 intersect (&r);
6224 if (details)
6225 dump_flags |= TDF_DETAILS;
6228 /* Return TRUE if two types are compatible for range operations. */
6230 static bool
6231 range_compatible_p (tree t1, tree t2)
6233 if (POINTER_TYPE_P (t1) && POINTER_TYPE_P (t2))
6234 return true;
6236 return types_compatible_p (t1, t2);
6239 bool
6240 value_range_base::operator== (const value_range_base &r) const
6242 if (undefined_p ())
6243 return r.undefined_p ();
6245 if (num_pairs () != r.num_pairs ()
6246 || !range_compatible_p (type (), r.type ()))
6247 return false;
6249 for (unsigned p = 0; p < num_pairs (); p++)
6250 if (wi::ne_p (lower_bound (p), r.lower_bound (p))
6251 || wi::ne_p (upper_bound (p), r.upper_bound (p)))
6252 return false;
6254 return true;
/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.  */

enum ssa_prop_result
vrp_prop::visit_phi (gphi *phi)
{
  tree lhs = PHI_RESULT (phi);
  value_range vr_result;
  extract_range_from_phi_node (phi, &vr_result);
  if (update_value_range (lhs, &vr_result))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Found new range for ");
	  print_generic_expr (dump_file, lhs);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, &vr_result);
	  fprintf (dump_file, "\n");
	}

      /* A varying range will not be refined further; anything else is
	 worth propagating along outgoing edges.  */
      if (vr_result.varying_p ())
	return SSA_PROP_VARYING;

      return SSA_PROP_INTERESTING;
    }

  /* Nothing changed, don't add outgoing edges.  */
  return SSA_PROP_NOT_INTERESTING;
}
/* Folder for the substitute-and-fold phase of VRP: replaces statements
   whose value is pinned down by the computed ranges.  */

class vrp_folder : public substitute_and_fold_engine
{
 public:
  vrp_folder () : substitute_and_fold_engine (/* Fold all stmts.  */ true) {  }
  tree get_value (tree) FINAL OVERRIDE;
  bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
  bool fold_predicate_in (gimple_stmt_iterator *);

  /* Range information computed by the propagation phase; set by the
     caller before folding starts.  */
  class vr_values *vr_values;

  /* Delegators.  */
  tree vrp_evaluate_conditional (tree_code code, tree op0,
				 tree op1, gimple *stmt)
    { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
  bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
    { return vr_values->simplify_stmt_using_ranges (gsi); }
  tree op_with_constant_singleton_value_range (tree op)
    { return vr_values->op_with_constant_singleton_value_range (op); }
};
/* If the statement pointed by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  */

bool
vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple *stmt = gsi_stmt (*si);

  /* Only comparison assignments and GIMPLE_COND statements carry a
     predicate we know how to evaluate.  */
  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
				      gimple_assign_rhs1 (stmt),
				      gimple_assign_rhs2 (stmt),
				      stmt);
    }
  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
				    gimple_cond_lhs (cond_stmt),
				    gimple_cond_rhs (cond_stmt),
				    stmt);
  else
    return false;

  if (val)
    {
      /* The comparison folded to a constant; for an assignment, coerce
	 it to the LHS type before substituting.  */
      if (assignment_p)
	val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
	{
	  fprintf (dump_file, "Folding predicate ");
	  print_gimple_expr (dump_file, stmt, 0);
	  fprintf (dump_file, " to ");
	  print_generic_expr (dump_file, val);
	  fprintf (dump_file, "\n");
	}

      if (is_gimple_assign (stmt))
	gimple_assign_set_rhs_from_tree (si, val);
      else
	{
	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
	  gcond *cond_stmt = as_a <gcond *> (stmt);
	  if (integer_zerop (val))
	    gimple_cond_make_false (cond_stmt);
	  else if (integer_onep (val))
	    gimple_cond_make_true (cond_stmt);
	  else
	    gcc_unreachable ();
	}

      return true;
    }

  return false;
}
6370 /* Callback for substitute_and_fold folding the stmt at *SI. */
6372 bool
6373 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
6375 if (fold_predicate_in (si))
6376 return true;
6378 return simplify_stmt_using_ranges (si);
/* If OP has a value range with a single constant value return that,
   otherwise return NULL_TREE.  This returns OP itself if OP is a
   constant.

   Implemented as a pure wrapper right now, but this will change.  */

tree
vrp_folder::get_value (tree op)
{
  return op_with_constant_singleton_value_range (op);
}
/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
{
  imm_use_iterator imm_iter;
  gimple *use_stmt;
  use_operand_p use_p;

  /* Only SSA names can appear in ASSERT_EXPRs; constants are returned
     unchanged.  */
  if (TREE_CODE (op) == SSA_NAME)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
	{
	  use_stmt = USE_STMT (use_p);
	  /* Skip STMT itself; accept only single assignments of the
	     form lhs = ASSERT_EXPR <op, cond> whose block dominates BB.  */
	  if (use_stmt != stmt
	      && gimple_assign_single_p (use_stmt)
	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
	      && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
	      && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
	    return gimple_assign_lhs (use_stmt);
	}
    }
  return op;
}
/* A hack.  Passes the current pass's vr_values instance to
   simplify_stmt_for_jump_threading, whose callback signature carries
   no user-data argument; it is set and cleared around the call to
   thread_outgoing_edges in vrp_dom_walker::after_dom_children.  */
static class vr_values *x_vr_values;
6423 /* A trivial wrapper so that we can present the generic jump threading
6424 code with a simple API for simplifying statements. STMT is the
6425 statement we want to simplify, WITHIN_STMT provides the location
6426 for any overflow warnings. */
6428 static tree
6429 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
6430 class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED,
6431 basic_block bb)
6433 /* First see if the conditional is in the hash table. */
6434 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
6435 if (cached_lhs && is_gimple_min_invariant (cached_lhs))
6436 return cached_lhs;
6438 vr_values *vr_values = x_vr_values;
6439 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6441 tree op0 = gimple_cond_lhs (cond_stmt);
6442 op0 = lhs_of_dominating_assert (op0, bb, stmt);
6444 tree op1 = gimple_cond_rhs (cond_stmt);
6445 op1 = lhs_of_dominating_assert (op1, bb, stmt);
6447 return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6448 op0, op1, within_stmt);
6451 /* We simplify a switch statement by trying to determine which case label
6452 will be taken. If we are successful then we return the corresponding
6453 CASE_LABEL_EXPR. */
6454 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
6456 tree op = gimple_switch_index (switch_stmt);
6457 if (TREE_CODE (op) != SSA_NAME)
6458 return NULL_TREE;
6460 op = lhs_of_dominating_assert (op, bb, stmt);
6462 const value_range *vr = vr_values->get_value_range (op);
6463 if (vr->undefined_p ()
6464 || vr->varying_p ()
6465 || vr->symbolic_p ())
6466 return NULL_TREE;
6468 if (vr->kind () == VR_RANGE)
6470 size_t i, j;
6471 /* Get the range of labels that contain a part of the operand's
6472 value range. */
6473 find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j);
6475 /* Is there only one such label? */
6476 if (i == j)
6478 tree label = gimple_switch_label (switch_stmt, i);
6480 /* The i'th label will be taken only if the value range of the
6481 operand is entirely within the bounds of this label. */
6482 if (CASE_HIGH (label) != NULL_TREE
6483 ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0
6484 && tree_int_cst_compare (CASE_HIGH (label),
6485 vr->max ()) >= 0)
6486 : (tree_int_cst_equal (CASE_LOW (label), vr->min ())
6487 && tree_int_cst_equal (vr->min (), vr->max ())))
6488 return label;
6491 /* If there are no such labels then the default label will be
6492 taken. */
6493 if (i > j)
6494 return gimple_switch_label (switch_stmt, 0);
6497 if (vr->kind () == VR_ANTI_RANGE)
6499 unsigned n = gimple_switch_num_labels (switch_stmt);
6500 tree min_label = gimple_switch_label (switch_stmt, 1);
6501 tree max_label = gimple_switch_label (switch_stmt, n - 1);
6503 /* The default label will be taken only if the anti-range of the
6504 operand is entirely outside the bounds of all the (non-default)
6505 case labels. */
6506 if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0
6507 && (CASE_HIGH (max_label) != NULL_TREE
6508 ? tree_int_cst_compare (vr->max (),
6509 CASE_HIGH (max_label)) >= 0
6510 : tree_int_cst_compare (vr->max (),
6511 CASE_LOW (max_label)) >= 0))
6512 return gimple_switch_label (switch_stmt, 0);
6515 return NULL_TREE;
6518 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
6520 tree lhs = gimple_assign_lhs (assign_stmt);
6521 if (TREE_CODE (lhs) == SSA_NAME
6522 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6523 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6524 && stmt_interesting_for_vrp (stmt))
6526 edge dummy_e;
6527 tree dummy_tree;
6528 value_range new_vr;
6529 vr_values->extract_range_from_stmt (stmt, &dummy_e,
6530 &dummy_tree, &new_vr);
6531 tree singleton;
6532 if (new_vr.singleton_p (&singleton))
6533 return singleton;
6537 return NULL_TREE;
/* Dominator walk that records ASSERT_EXPR equivalences on the way down
   and invokes the jump threader on the way back up.  */

class vrp_dom_walker : public dom_walker
{
 public:
  vrp_dom_walker (cdi_direction direction,
		  class const_and_copies *const_and_copies,
		  class avail_exprs_stack *avail_exprs_stack)
    : dom_walker (direction, REACHABLE_BLOCKS),
      m_const_and_copies (const_and_copies),
      m_avail_exprs_stack (avail_exprs_stack),
      m_dummy_cond (NULL) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

  /* Range information consulted by the threading callback; installed
     by identify_jump_threads before the walk starts.  */
  class vr_values *vr_values;

 private:
  class const_and_copies *m_const_and_copies;
  class avail_exprs_stack *m_avail_exprs_stack;

  /* Scratch GIMPLE_COND, built lazily in after_dom_children.  */
  gcond *m_dummy_cond;
};
/* Called before processing dominator children of BB.  We want to look
   at ASSERT_EXPRs and record information from them in the appropriate
   tables.

   We could look at other statements here.  It's not seen as likely
   to significantly increase the jump threads we discover.  */

edge
vrp_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Markers let after_dom_children unwind everything recorded here.  */
  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();
  for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	{
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  /* Record the assert's condition and its inversion as known
	     (dis)equalities in the expression table.  */
	  tree cond = TREE_OPERAND (rhs1, 1);
	  tree inverted = invert_truthvalue (cond);
	  vec<cond_equivalence> p;
	  p.create (3);
	  record_conditions (&p, cond, inverted);
	  for (unsigned int i = 0; i < p.length (); i++)
	    m_avail_exprs_stack->record_cond (&p[i]);

	  /* The assert's LHS is a copy of the asserted name.  */
	  tree lhs = gimple_assign_lhs (stmt);
	  m_const_and_copies->record_const_or_copy (lhs,
						    TREE_OPERAND (rhs1, 0));
	  p.release ();
	  continue;
	}
      /* Asserts only appear at the start of a block; stop at the first
	 non-assert statement.  */
      break;
    }
  return NULL;
}
/* Called after processing dominator children of BB.  This is where we
   actually call into the threader.  */
void
vrp_dom_walker::after_dom_children (basic_block bb)
{
  /* Build the reusable scratch condition on first use.  */
  if (!m_dummy_cond)
    m_dummy_cond = gimple_build_cond (NE_EXPR,
				      integer_zero_node, integer_zero_node,
				      NULL, NULL);

  /* Expose vr_values to simplify_stmt_for_jump_threading via the
     file-scope x_vr_values hack for the duration of the call.  */
  x_vr_values = vr_values;
  thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
			 m_avail_exprs_stack, NULL,
			 simplify_stmt_for_jump_threading);
  x_vr_values = NULL;

  /* Unwind the equivalences recorded by before_dom_children.  */
  m_avail_exprs_stack->pop_to_marker ();
  m_const_and_copies->pop_to_marker ();
}
6624 /* Blocks which have more than one predecessor and more than
6625 one successor present jump threading opportunities, i.e.,
6626 when the block is reached from a specific predecessor, we
6627 may be able to determine which of the outgoing edges will
6628 be traversed. When this optimization applies, we are able
6629 to avoid conditionals at runtime and we may expose secondary
6630 optimization opportunities.
6632 This routine is effectively a driver for the generic jump
6633 threading code. It basically just presents the generic code
6634 with edges that may be suitable for jump threading.
6636 Unlike DOM, we do not iterate VRP if jump threading was successful.
6637 While iterating may expose new opportunities for VRP, it is expected
6638 those opportunities would be very limited and the compile time cost
6639 to expose those opportunities would be significant.
6641 As jump threading opportunities are discovered, they are registered
6642 for later realization. */
6644 static void
6645 identify_jump_threads (class vr_values *vr_values)
6647 /* Ugh. When substituting values earlier in this pass we can
6648 wipe the dominance information. So rebuild the dominator
6649 information as we need it within the jump threading code. */
6650 calculate_dominance_info (CDI_DOMINATORS);
6652 /* We do not allow VRP information to be used for jump threading
6653 across a back edge in the CFG. Otherwise it becomes too
6654 difficult to avoid eliminating loop exit tests. Of course
6655 EDGE_DFS_BACK is not accurate at this time so we have to
6656 recompute it. */
6657 mark_dfs_back_edges ();
6659 /* Allocate our unwinder stack to unwind any temporary equivalences
6660 that might be recorded. */
6661 const_and_copies *equiv_stack = new const_and_copies ();
6663 hash_table<expr_elt_hasher> *avail_exprs
6664 = new hash_table<expr_elt_hasher> (1024);
6665 avail_exprs_stack *avail_exprs_stack
6666 = new class avail_exprs_stack (avail_exprs);
6668 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
6669 walker.vr_values = vr_values;
6670 walker.walk (cfun->cfg->x_entry_block_ptr);
6672 /* We do not actually update the CFG or SSA graphs at this point as
6673 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
6674 handle ASSERT_EXPRs gracefully. */
6675 delete equiv_stack;
6676 delete avail_exprs;
6677 delete avail_exprs_stack;
6680 /* Traverse all the blocks folding conditionals with known ranges. */
6682 void
6683 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
6685 size_t i;
6687 /* We have completed propagating through the lattice. */
6688 vr_values.set_lattice_propagation_complete ();
6690 if (dump_file)
6692 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
6693 vr_values.dump_all_value_ranges (dump_file);
6694 fprintf (dump_file, "\n");
6697 /* Set value range to non pointer SSA_NAMEs. */
6698 for (i = 0; i < num_ssa_names; i++)
6700 tree name = ssa_name (i);
6701 if (!name)
6702 continue;
6704 const value_range *vr = get_value_range (name);
6705 if (!name || !vr->constant_p ())
6706 continue;
6708 if (POINTER_TYPE_P (TREE_TYPE (name))
6709 && range_includes_zero_p (vr) == 0)
6710 set_ptr_nonnull (name);
6711 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
6712 set_range_info (name, *vr);
6715 /* If we're checking array refs, we want to merge information on
6716 the executability of each edge between vrp_folder and the
6717 check_array_bounds_dom_walker: each can clear the
6718 EDGE_EXECUTABLE flag on edges, in different ways.
6720 Hence, if we're going to call check_all_array_refs, set
6721 the flag on every edge now, rather than in
6722 check_array_bounds_dom_walker's ctor; vrp_folder may clear
6723 it from some edges. */
6724 if (warn_array_bounds && warn_array_bounds_p)
6725 set_all_edges_as_executable (cfun);
6727 class vrp_folder vrp_folder;
6728 vrp_folder.vr_values = &vr_values;
6729 vrp_folder.substitute_and_fold ();
6731 if (warn_array_bounds && warn_array_bounds_p)
6732 check_all_array_refs ();
6735 /* Main entry point to VRP (Value Range Propagation). This pass is
6736 loosely based on J. R. C. Patterson, ``Accurate Static Branch
6737 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
6738 Programming Language Design and Implementation, pp. 67-78, 1995.
6739 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
6741 This is essentially an SSA-CCP pass modified to deal with ranges
6742 instead of constants.
6744 While propagating ranges, we may find that two or more SSA name
6745 have equivalent, though distinct ranges. For instance,
6747 1 x_9 = p_3->a;
6748 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
6749 3 if (p_4 == q_2)
6750 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
6751 5 endif
6752 6 if (q_2)
6754 In the code above, pointer p_5 has range [q_2, q_2], but from the
6755 code we can also determine that p_5 cannot be NULL and, if q_2 had
6756 a non-varying range, p_5's range should also be compatible with it.
6758 These equivalences are created by two expressions: ASSERT_EXPR and
6759 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
6760 result of another assertion, then we can use the fact that p_5 and
6761 p_4 are equivalent when evaluating p_5's range.
6763 Together with value ranges, we also propagate these equivalences
6764 between names so that we can take advantage of information from
6765 multiple ranges when doing final replacement. Note that this
6766 equivalency relation is transitive but not symmetric.
6768 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
6769 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
6770 in contexts where that assertion does not hold (e.g., in line 6).
6772 TODO, the main difference between this pass and Patterson's is that
6773 we do not propagate edge probabilities. We only compute whether
6774 edges can be taken or not. That is, instead of having a spectrum
6775 of jump probabilities between 0 and 1, we only deal with 0, 1 and
6776 DON'T KNOW. In the future, it may be worthwhile to propagate
6777 probabilities to aid branch prediction. */
6779 static unsigned int
6780 execute_vrp (bool warn_array_bounds_p)
6783 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
6784 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
6785 scev_initialize ();
6787 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
6788 Inserting assertions may split edges which will invalidate
6789 EDGE_DFS_BACK. */
6790 insert_range_assertions ();
6792 threadedge_initialize_values ();
6794 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
6795 mark_dfs_back_edges ();
6797 class vrp_prop vrp_prop;
6798 vrp_prop.vrp_initialize ();
6799 vrp_prop.ssa_propagate ();
6800 vrp_prop.vrp_finalize (warn_array_bounds_p);
6802 /* We must identify jump threading opportunities before we release
6803 the datastructures built by VRP. */
6804 identify_jump_threads (&vrp_prop.vr_values);
6806 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
6807 was set by a type conversion can often be rewritten to use the
6808 RHS of the type conversion.
6810 However, doing so inhibits jump threading through the comparison.
6811 So that transformation is not performed until after jump threading
6812 is complete. */
6813 basic_block bb;
6814 FOR_EACH_BB_FN (bb, cfun)
6816 gimple *last = last_stmt (bb);
6817 if (last && gimple_code (last) == GIMPLE_COND)
6818 vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
6821 free_numbers_of_iterations_estimates (cfun);
6823 /* ASSERT_EXPRs must be removed before finalizing jump threads
6824 as finalizing jump threads calls the CFG cleanup code which
6825 does not properly handle ASSERT_EXPRs. */
6826 remove_range_assertions ();
6828 /* If we exposed any new variables, go ahead and put them into
6829 SSA form now, before we handle jump threading. This simplifies
6830 interactions between rewriting of _DECL nodes into SSA form
6831 and rewriting SSA_NAME nodes into SSA form after block
6832 duplication and CFG manipulation. */
6833 update_ssa (TODO_update_ssa);
6835 /* We identified all the jump threading opportunities earlier, but could
6836 not transform the CFG at that time. This routine transforms the
6837 CFG and arranges for the dominator tree to be rebuilt if necessary.
6839 Note the SSA graph update will occur during the normal TODO
6840 processing by the pass manager. */
6841 thread_through_all_blocks (false);
6843 vrp_prop.vr_values.cleanup_edges_and_switches ();
6844 threadedge_finalize_values ();
6846 scev_finalize ();
6847 loop_optimizer_finalize ();
6848 return 0;
6851 namespace {
6853 const pass_data pass_data_vrp =
6855 GIMPLE_PASS, /* type */
6856 "vrp", /* name */
6857 OPTGROUP_NONE, /* optinfo_flags */
6858 TV_TREE_VRP, /* tv_id */
6859 PROP_ssa, /* properties_required */
6860 0, /* properties_provided */
6861 0, /* properties_destroyed */
6862 0, /* todo_flags_start */
6863 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
6866 class pass_vrp : public gimple_opt_pass
6868 public:
6869 pass_vrp (gcc::context *ctxt)
6870 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
6873 /* opt_pass methods: */
6874 opt_pass * clone () { return new pass_vrp (m_ctxt); }
6875 void set_pass_param (unsigned int n, bool param)
6877 gcc_assert (n == 0);
6878 warn_array_bounds_p = param;
6880 virtual bool gate (function *) { return flag_tree_vrp != 0; }
6881 virtual unsigned int execute (function *)
6882 { return execute_vrp (warn_array_bounds_p); }
6884 private:
6885 bool warn_array_bounds_p;
6886 }; // class pass_vrp
6888 } // anon namespace
6890 gimple_opt_pass *
6891 make_pass_vrp (gcc::context *ctxt)
6893 return new pass_vrp (ctxt);
6897 /* Worker for determine_value_range. */
6899 static void
6900 determine_value_range_1 (value_range_base *vr, tree expr)
6902 if (BINARY_CLASS_P (expr))
6904 value_range_base vr0, vr1;
6905 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6906 determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
6907 range_fold_binary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
6908 &vr0, &vr1);
6910 else if (UNARY_CLASS_P (expr))
6912 value_range_base vr0;
6913 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6914 range_fold_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
6915 &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
6917 else if (TREE_CODE (expr) == INTEGER_CST)
6918 vr->set (expr);
6919 else
6921 value_range_kind kind;
6922 wide_int min, max;
6923 /* For SSA names try to extract range info computed by VRP. Otherwise
6924 fall back to varying. */
6925 if (TREE_CODE (expr) == SSA_NAME
6926 && INTEGRAL_TYPE_P (TREE_TYPE (expr))
6927 && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
6928 vr->set (kind, wide_int_to_tree (TREE_TYPE (expr), min),
6929 wide_int_to_tree (TREE_TYPE (expr), max));
6930 else
6931 vr->set_varying (TREE_TYPE (expr));
6935 /* Compute a value-range for EXPR and set it in *MIN and *MAX. Return
6936 the determined range type. */
6938 value_range_kind
6939 determine_value_range (tree expr, wide_int *min, wide_int *max)
6941 value_range_base vr;
6942 determine_value_range_1 (&vr, expr);
6943 if (vr.constant_p ())
6945 *min = wi::to_wide (vr.min ());
6946 *max = wi::to_wide (vr.max ());
6947 return vr.kind ();
6950 return VR_VARYING;