match_asm_constraints: Use copy_rtx where needed (PR88001)
[official-gcc.git] / gcc / tree-vrp.c
blob864de4195da9e25fbd662b62826ed00a51bf3d83
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2018 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimple-walk.h"
44 #include "tree-cfg.h"
45 #include "tree-dfa.h"
46 #include "tree-ssa-loop-manip.h"
47 #include "tree-ssa-loop-niter.h"
48 #include "tree-ssa-loop.h"
49 #include "tree-into-ssa.h"
50 #include "tree-ssa.h"
51 #include "intl.h"
52 #include "cfgloop.h"
53 #include "tree-scalar-evolution.h"
54 #include "tree-ssa-propagate.h"
55 #include "tree-chrec.h"
56 #include "tree-ssa-threadupdate.h"
57 #include "tree-ssa-scopedtables.h"
58 #include "tree-ssa-threadedge.h"
59 #include "omp-general.h"
60 #include "target.h"
61 #include "case-cfn-macros.h"
62 #include "params.h"
63 #include "alloc-pool.h"
64 #include "domwalk.h"
65 #include "tree-cfgcleanup.h"
66 #include "stringpool.h"
67 #include "attribs.h"
68 #include "vr-values.h"
69 #include "builtins.h"
70 #include "wide-int-range.h"
72 /* Set of SSA names found live during the RPO traversal of the function
73 for still active basic-blocks. */
74 static sbitmap *live;
76 void
77 value_range_base::set (enum value_range_kind kind, tree min, tree max)
79 m_kind = kind;
80 m_min = min;
81 m_max = max;
82 if (flag_checking)
83 check ();
86 void
87 value_range::set_equiv (bitmap equiv)
89 /* Since updating the equivalence set involves deep copying the
90 bitmaps, only do it if absolutely necessary.
92 All equivalence bitmaps are allocated from the same obstack. So
93 we can use the obstack associated with EQUIV to allocate vr->equiv. */
94 if (m_equiv == NULL
95 && equiv != NULL)
96 m_equiv = BITMAP_ALLOC (equiv->obstack);
98 if (equiv != m_equiv)
100 if (equiv && !bitmap_empty_p (equiv))
101 bitmap_copy (m_equiv, equiv);
102 else
103 bitmap_clear (m_equiv);
107 /* Initialize value_range. */
109 void
110 value_range::set (enum value_range_kind kind, tree min, tree max,
111 bitmap equiv)
113 value_range_base::set (kind, min, max);
114 set_equiv (equiv);
115 if (flag_checking)
116 check ();
119 value_range_base::value_range_base (value_range_kind kind, tree min, tree max)
121 set (kind, min, max);
124 value_range::value_range (value_range_kind kind, tree min, tree max,
125 bitmap equiv)
127 m_equiv = NULL;
128 set (kind, min, max, equiv);
131 value_range::value_range (const value_range_base &other)
133 m_equiv = NULL;
134 set (other.kind (), other.min(), other.max (), NULL);
137 /* Like set, but keep the equivalences in place. */
139 void
140 value_range::update (value_range_kind kind, tree min, tree max)
142 set (kind, min, max,
143 (kind != VR_UNDEFINED && kind != VR_VARYING) ? m_equiv : NULL);
146 /* Copy value_range in FROM into THIS while avoiding bitmap sharing.
148 Note: The code that avoids the bitmap sharing looks at the existing
149 this->m_equiv, so this function cannot be used to initalize an
150 object. Use the constructors for initialization. */
152 void
153 value_range::deep_copy (const value_range *from)
155 set (from->m_kind, from->min (), from->max (), from->m_equiv);
158 void
159 value_range::move (value_range *from)
161 set (from->m_kind, from->min (), from->max ());
162 m_equiv = from->m_equiv;
163 from->m_equiv = NULL;
166 /* Check the validity of the range. */
168 void
169 value_range_base::check ()
171 switch (m_kind)
173 case VR_RANGE:
174 case VR_ANTI_RANGE:
176 int cmp;
178 gcc_assert (m_min && m_max);
180 gcc_assert (!TREE_OVERFLOW_P (m_min) && !TREE_OVERFLOW_P (m_max));
182 /* Creating ~[-MIN, +MAX] is stupid because that would be
183 the empty set. */
184 if (INTEGRAL_TYPE_P (TREE_TYPE (m_min)) && m_kind == VR_ANTI_RANGE)
185 gcc_assert (!vrp_val_is_min (m_min) || !vrp_val_is_max (m_max));
187 cmp = compare_values (m_min, m_max);
188 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
189 break;
191 case VR_UNDEFINED:
192 case VR_VARYING:
193 gcc_assert (!min () && !max ());
194 break;
195 default:
196 gcc_unreachable ();
200 void
201 value_range::check ()
203 value_range_base::check ();
204 switch (m_kind)
206 case VR_UNDEFINED:
207 case VR_VARYING:
208 gcc_assert (!m_equiv || bitmap_empty_p (m_equiv));
209 default:;
213 /* Equality operator. We purposely do not overload ==, to avoid
214 confusion with the equality bitmap in the derived value_range
215 class. */
217 bool
218 value_range_base::equal_p (const value_range_base &other) const
220 return (m_kind == other.m_kind
221 && vrp_operand_equal_p (m_min, other.m_min)
222 && vrp_operand_equal_p (m_max, other.m_max));
225 /* Returns TRUE if THIS == OTHER. Ignores the equivalence bitmap if
226 IGNORE_EQUIVS is TRUE. */
228 bool
229 value_range::equal_p (const value_range &other, bool ignore_equivs) const
231 return (value_range_base::equal_p (other)
232 && (ignore_equivs
233 || vrp_bitmap_equal_p (m_equiv, other.m_equiv)));
236 /* Return TRUE if this is a symbolic range. */
238 bool
239 value_range_base::symbolic_p () const
241 return (!varying_p ()
242 && !undefined_p ()
243 && (!is_gimple_min_invariant (m_min)
244 || !is_gimple_min_invariant (m_max)));
247 /* NOTE: This is not the inverse of symbolic_p because the range
248 could also be varying or undefined. Ideally they should be inverse
249 of each other, with varying only applying to symbolics. Varying of
250 constants would be represented as [-MIN, +MAX]. */
252 bool
253 value_range_base::constant_p () const
255 return (!varying_p ()
256 && !undefined_p ()
257 && TREE_CODE (m_min) == INTEGER_CST
258 && TREE_CODE (m_max) == INTEGER_CST);
261 void
262 value_range_base::set_undefined ()
264 set (VR_UNDEFINED, NULL, NULL);
267 void
268 value_range::set_undefined ()
270 set (VR_UNDEFINED, NULL, NULL, NULL);
273 void
274 value_range_base::set_varying ()
276 set (VR_VARYING, NULL, NULL);
279 void
280 value_range::set_varying ()
282 set (VR_VARYING, NULL, NULL, NULL);
285 /* Return TRUE if it is possible that range contains VAL. */
287 bool
288 value_range_base::may_contain_p (tree val) const
290 if (varying_p ())
291 return true;
293 if (undefined_p ())
294 return true;
296 if (m_kind == VR_ANTI_RANGE)
298 int res = value_inside_range (val, min (), max ());
299 return res == 0 || res == -2;
301 return value_inside_range (val, min (), max ()) != 0;
304 void
305 value_range::equiv_clear ()
307 if (m_equiv)
308 bitmap_clear (m_equiv);
311 /* Add VAR and VAR's equivalence set (VAR_VR) to the equivalence
312 bitmap. If no equivalence table has been created, OBSTACK is the
313 obstack to use (NULL for the default obstack).
315 This is the central point where equivalence processing can be
316 turned on/off. */
318 void
319 value_range::equiv_add (const_tree var,
320 const value_range *var_vr,
321 bitmap_obstack *obstack)
323 if (!m_equiv)
324 m_equiv = BITMAP_ALLOC (obstack);
325 unsigned ver = SSA_NAME_VERSION (var);
326 bitmap_set_bit (m_equiv, ver);
327 if (var_vr && var_vr->m_equiv)
328 bitmap_ior_into (m_equiv, var_vr->m_equiv);
331 /* If range is a singleton, place it in RESULT and return TRUE.
332 Note: A singleton can be any gimple invariant, not just constants.
333 So, [&x, &x] counts as a singleton. */
335 bool
336 value_range_base::singleton_p (tree *result) const
338 if (m_kind == VR_RANGE
339 && vrp_operand_equal_p (min (), max ())
340 && is_gimple_min_invariant (min ()))
342 if (result)
343 *result = min ();
344 return true;
346 return false;
349 tree
350 value_range_base::type () const
352 /* Types are only valid for VR_RANGE and VR_ANTI_RANGE, which are
353 known to have non-zero min/max. */
354 gcc_assert (min ());
355 return TREE_TYPE (min ());
358 void
359 value_range_base::dump (FILE *file) const
361 if (undefined_p ())
362 fprintf (file, "UNDEFINED");
363 else if (m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
365 tree ttype = type ();
367 print_generic_expr (file, ttype);
368 fprintf (file, " ");
370 fprintf (file, "%s[", (m_kind == VR_ANTI_RANGE) ? "~" : "");
372 if (INTEGRAL_TYPE_P (ttype)
373 && !TYPE_UNSIGNED (ttype)
374 && vrp_val_is_min (min ())
375 && TYPE_PRECISION (ttype) != 1)
376 fprintf (file, "-INF");
377 else
378 print_generic_expr (file, min ());
380 fprintf (file, ", ");
382 if (INTEGRAL_TYPE_P (ttype)
383 && vrp_val_is_max (max ())
384 && TYPE_PRECISION (ttype) != 1)
385 fprintf (file, "+INF");
386 else
387 print_generic_expr (file, max ());
389 fprintf (file, "]");
391 else if (varying_p ())
392 fprintf (file, "VARYING");
393 else
394 gcc_unreachable ();
397 void
398 value_range::dump (FILE *file) const
400 value_range_base::dump (file);
401 if ((m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
402 && m_equiv)
404 bitmap_iterator bi;
405 unsigned i, c = 0;
407 fprintf (file, " EQUIVALENCES: { ");
409 EXECUTE_IF_SET_IN_BITMAP (m_equiv, 0, i, bi)
411 print_generic_expr (file, ssa_name (i));
412 fprintf (file, " ");
413 c++;
416 fprintf (file, "} (%u elements)", c);
420 void
421 dump_value_range (FILE *file, const value_range *vr)
423 if (!vr)
424 fprintf (file, "[]");
425 else
426 vr->dump (file);
429 void
430 dump_value_range (FILE *file, const value_range_base *vr)
432 if (!vr)
433 fprintf (file, "[]");
434 else
435 vr->dump (file);
438 DEBUG_FUNCTION void
439 debug (const value_range_base *vr)
441 dump_value_range (stderr, vr);
444 DEBUG_FUNCTION void
445 debug (const value_range_base &vr)
447 dump_value_range (stderr, &vr);
450 DEBUG_FUNCTION void
451 debug (const value_range *vr)
453 dump_value_range (stderr, vr);
456 DEBUG_FUNCTION void
457 debug (const value_range &vr)
459 dump_value_range (stderr, &vr);
462 /* Return true if the SSA name NAME is live on the edge E. */
464 static bool
465 live_on_edge (edge e, tree name)
467 return (live[e->dest->index]
468 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
471 /* Location information for ASSERT_EXPRs. Each instance of this
472 structure describes an ASSERT_EXPR for an SSA name. Since a single
473 SSA name may have more than one assertion associated with it, these
474 locations are kept in a linked list attached to the corresponding
475 SSA name. */
476 struct assert_locus
478 /* Basic block where the assertion would be inserted. */
479 basic_block bb;
481 /* Some assertions need to be inserted on an edge (e.g., assertions
482 generated by COND_EXPRs). In those cases, BB will be NULL. */
483 edge e;
485 /* Pointer to the statement that generated this assertion. */
486 gimple_stmt_iterator si;
488 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
489 enum tree_code comp_code;
491 /* Value being compared against. */
492 tree val;
494 /* Expression to compare. */
495 tree expr;
497 /* Next node in the linked list. */
498 assert_locus *next;
501 /* If bit I is present, it means that SSA name N_i has a list of
502 assertions that should be inserted in the IL. */
503 static bitmap need_assert_for;
505 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
506 holds a list of ASSERT_LOCUS_T nodes that describe where
507 ASSERT_EXPRs for SSA name N_I should be inserted. */
508 static assert_locus **asserts_for;
510 /* Return the maximum value for TYPE. */
512 tree
513 vrp_val_max (const_tree type)
515 if (!INTEGRAL_TYPE_P (type))
516 return NULL_TREE;
518 return TYPE_MAX_VALUE (type);
521 /* Return the minimum value for TYPE. */
523 tree
524 vrp_val_min (const_tree type)
526 if (!INTEGRAL_TYPE_P (type))
527 return NULL_TREE;
529 return TYPE_MIN_VALUE (type);
532 /* Return whether VAL is equal to the maximum value of its type.
533 We can't do a simple equality comparison with TYPE_MAX_VALUE because
534 C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
535 is not == to the integer constant with the same value in the type. */
537 bool
538 vrp_val_is_max (const_tree val)
540 tree type_max = vrp_val_max (TREE_TYPE (val));
541 return (val == type_max
542 || (type_max != NULL_TREE
543 && operand_equal_p (val, type_max, 0)));
546 /* Return whether VAL is equal to the minimum value of its type. */
548 bool
549 vrp_val_is_min (const_tree val)
551 tree type_min = vrp_val_min (TREE_TYPE (val));
552 return (val == type_min
553 || (type_min != NULL_TREE
554 && operand_equal_p (val, type_min, 0)));
557 /* VR_TYPE describes a range with mininum value *MIN and maximum
558 value *MAX. Restrict the range to the set of values that have
559 no bits set outside NONZERO_BITS. Update *MIN and *MAX and
560 return the new range type.
562 SGN gives the sign of the values described by the range. */
564 enum value_range_kind
565 intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
566 wide_int *min, wide_int *max,
567 const wide_int &nonzero_bits,
568 signop sgn)
570 if (vr_type == VR_ANTI_RANGE)
572 /* The VR_ANTI_RANGE is equivalent to the union of the ranges
573 A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS
574 to create an inclusive upper bound for A and an inclusive lower
575 bound for B. */
576 wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
577 wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);
579 /* If the calculation of A_MAX wrapped, A is effectively empty
580 and A_MAX is the highest value that satisfies NONZERO_BITS.
581 Likewise if the calculation of B_MIN wrapped, B is effectively
582 empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */
583 bool a_empty = wi::ge_p (a_max, *min, sgn);
584 bool b_empty = wi::le_p (b_min, *max, sgn);
586 /* If both A and B are empty, there are no valid values. */
587 if (a_empty && b_empty)
588 return VR_UNDEFINED;
590 /* If exactly one of A or B is empty, return a VR_RANGE for the
591 other one. */
592 if (a_empty || b_empty)
594 *min = b_min;
595 *max = a_max;
596 gcc_checking_assert (wi::le_p (*min, *max, sgn));
597 return VR_RANGE;
600 /* Update the VR_ANTI_RANGE bounds. */
601 *min = a_max + 1;
602 *max = b_min - 1;
603 gcc_checking_assert (wi::le_p (*min, *max, sgn));
605 /* Now check whether the excluded range includes any values that
606 satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */
607 if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
609 unsigned int precision = min->get_precision ();
610 *min = wi::min_value (precision, sgn);
611 *max = wi::max_value (precision, sgn);
612 vr_type = VR_RANGE;
615 if (vr_type == VR_RANGE)
617 *max = wi::round_down_for_mask (*max, nonzero_bits);
619 /* Check that the range contains at least one valid value. */
620 if (wi::gt_p (*min, *max, sgn))
621 return VR_UNDEFINED;
623 *min = wi::round_up_for_mask (*min, nonzero_bits);
624 gcc_checking_assert (wi::le_p (*min, *max, sgn));
626 return vr_type;
630 /* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}.
631 This means adjusting VRTYPE, MIN and MAX representing the case of a
632 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
633 as anti-rage ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
634 In corner cases where MAX+1 or MIN-1 wraps this will fall back
635 to varying.
636 This routine exists to ease canonicalization in the case where we
637 extract ranges from var + CST op limit. */
639 void
640 value_range_base::set_and_canonicalize (enum value_range_kind kind,
641 tree min, tree max)
643 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
644 if (kind == VR_UNDEFINED)
646 set_undefined ();
647 return;
649 else if (kind == VR_VARYING)
651 set_varying ();
652 return;
655 /* Nothing to canonicalize for symbolic ranges. */
656 if (TREE_CODE (min) != INTEGER_CST
657 || TREE_CODE (max) != INTEGER_CST)
659 set (kind, min, max);
660 return;
663 /* Wrong order for min and max, to swap them and the VR type we need
664 to adjust them. */
665 if (tree_int_cst_lt (max, min))
667 tree one, tmp;
669 /* For one bit precision if max < min, then the swapped
670 range covers all values, so for VR_RANGE it is varying and
671 for VR_ANTI_RANGE empty range, so drop to varying as well. */
672 if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
674 set_varying ();
675 return;
678 one = build_int_cst (TREE_TYPE (min), 1);
679 tmp = int_const_binop (PLUS_EXPR, max, one);
680 max = int_const_binop (MINUS_EXPR, min, one);
681 min = tmp;
683 /* There's one corner case, if we had [C+1, C] before we now have
684 that again. But this represents an empty value range, so drop
685 to varying in this case. */
686 if (tree_int_cst_lt (max, min))
688 set_varying ();
689 return;
692 kind = kind == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
695 /* Anti-ranges that can be represented as ranges should be so. */
696 if (kind == VR_ANTI_RANGE)
698 /* For -fstrict-enums we may receive out-of-range ranges so consider
699 values < -INF and values > INF as -INF/INF as well. */
700 tree type = TREE_TYPE (min);
701 bool is_min = (INTEGRAL_TYPE_P (type)
702 && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
703 bool is_max = (INTEGRAL_TYPE_P (type)
704 && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);
706 if (is_min && is_max)
708 /* We cannot deal with empty ranges, drop to varying.
709 ??? This could be VR_UNDEFINED instead. */
710 set_varying ();
711 return;
713 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
714 && (is_min || is_max))
716 /* Non-empty boolean ranges can always be represented
717 as a singleton range. */
718 if (is_min)
719 min = max = vrp_val_max (TREE_TYPE (min));
720 else
721 min = max = vrp_val_min (TREE_TYPE (min));
722 kind = VR_RANGE;
724 else if (is_min
725 /* As a special exception preserve non-null ranges. */
726 && !(TYPE_UNSIGNED (TREE_TYPE (min))
727 && integer_zerop (max)))
729 tree one = build_int_cst (TREE_TYPE (max), 1);
730 min = int_const_binop (PLUS_EXPR, max, one);
731 max = vrp_val_max (TREE_TYPE (max));
732 kind = VR_RANGE;
734 else if (is_max)
736 tree one = build_int_cst (TREE_TYPE (min), 1);
737 max = int_const_binop (MINUS_EXPR, min, one);
738 min = vrp_val_min (TREE_TYPE (min));
739 kind = VR_RANGE;
743 /* Do not drop [-INF(OVF), +INF(OVF)] to varying. (OVF) has to be sticky
744 to make sure VRP iteration terminates, otherwise we can get into
745 oscillations. */
747 set (kind, min, max);
750 void
751 value_range::set_and_canonicalize (enum value_range_kind kind,
752 tree min, tree max, bitmap equiv)
754 value_range_base::set_and_canonicalize (kind, min, max);
755 if (this->kind () == VR_RANGE || this->kind () == VR_ANTI_RANGE)
756 set_equiv (equiv);
757 else
758 equiv_clear ();
761 void
762 value_range_base::set (tree val)
764 gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
765 if (TREE_OVERFLOW_P (val))
766 val = drop_tree_overflow (val);
767 set (VR_RANGE, val, val);
770 void
771 value_range::set (tree val)
773 gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
774 if (TREE_OVERFLOW_P (val))
775 val = drop_tree_overflow (val);
776 set (VR_RANGE, val, val, NULL);
779 /* Set value range VR to a non-NULL range of type TYPE. */
781 void
782 value_range_base::set_nonnull (tree type)
784 tree zero = build_int_cst (type, 0);
785 set (VR_ANTI_RANGE, zero, zero);
788 void
789 value_range::set_nonnull (tree type)
791 tree zero = build_int_cst (type, 0);
792 set (VR_ANTI_RANGE, zero, zero, NULL);
795 /* Set value range VR to a NULL range of type TYPE. */
797 void
798 value_range_base::set_null (tree type)
800 set (build_int_cst (type, 0));
803 void
804 value_range::set_null (tree type)
806 set (build_int_cst (type, 0));
809 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
811 bool
812 vrp_operand_equal_p (const_tree val1, const_tree val2)
814 if (val1 == val2)
815 return true;
816 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
817 return false;
818 return true;
821 /* Return true, if the bitmaps B1 and B2 are equal. */
823 bool
824 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
826 return (b1 == b2
827 || ((!b1 || bitmap_empty_p (b1))
828 && (!b2 || bitmap_empty_p (b2)))
829 || (b1 && b2
830 && bitmap_equal_p (b1, b2)));
833 /* Return true if VR is [0, 0]. */
835 static inline bool
836 range_is_null (const value_range_base *vr)
838 return vr->zero_p ();
841 static inline bool
842 range_is_nonnull (const value_range_base *vr)
844 return (vr->kind () == VR_ANTI_RANGE
845 && vr->min () == vr->max ()
846 && integer_zerop (vr->min ()));
849 /* Return true if max and min of VR are INTEGER_CST. It's not necessary
850 a singleton. */
852 bool
853 range_int_cst_p (const value_range_base *vr)
855 return (vr->kind () == VR_RANGE
856 && TREE_CODE (vr->min ()) == INTEGER_CST
857 && TREE_CODE (vr->max ()) == INTEGER_CST);
860 /* Return true if VR is a INTEGER_CST singleton. */
862 bool
863 range_int_cst_singleton_p (const value_range_base *vr)
865 return (range_int_cst_p (vr)
866 && tree_int_cst_equal (vr->min (), vr->max ()));
869 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
870 otherwise. We only handle additive operations and set NEG to true if the
871 symbol is negated and INV to the invariant part, if any. */
873 tree
874 get_single_symbol (tree t, bool *neg, tree *inv)
876 bool neg_;
877 tree inv_;
879 *inv = NULL_TREE;
880 *neg = false;
882 if (TREE_CODE (t) == PLUS_EXPR
883 || TREE_CODE (t) == POINTER_PLUS_EXPR
884 || TREE_CODE (t) == MINUS_EXPR)
886 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
888 neg_ = (TREE_CODE (t) == MINUS_EXPR);
889 inv_ = TREE_OPERAND (t, 0);
890 t = TREE_OPERAND (t, 1);
892 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
894 neg_ = false;
895 inv_ = TREE_OPERAND (t, 1);
896 t = TREE_OPERAND (t, 0);
898 else
899 return NULL_TREE;
901 else
903 neg_ = false;
904 inv_ = NULL_TREE;
907 if (TREE_CODE (t) == NEGATE_EXPR)
909 t = TREE_OPERAND (t, 0);
910 neg_ = !neg_;
913 if (TREE_CODE (t) != SSA_NAME)
914 return NULL_TREE;
916 if (inv_ && TREE_OVERFLOW_P (inv_))
917 inv_ = drop_tree_overflow (inv_);
919 *neg = neg_;
920 *inv = inv_;
921 return t;
924 /* The reverse operation: build a symbolic expression with TYPE
925 from symbol SYM, negated according to NEG, and invariant INV. */
927 static tree
928 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
930 const bool pointer_p = POINTER_TYPE_P (type);
931 tree t = sym;
933 if (neg)
934 t = build1 (NEGATE_EXPR, type, t);
936 if (integer_zerop (inv))
937 return t;
939 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
942 /* Return
943 1 if VAL < VAL2
944 0 if !(VAL < VAL2)
945 -2 if those are incomparable. */
947 operand_less_p (tree val, tree val2)
949 /* LT is folded faster than GE and others. Inline the common case. */
950 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
951 return tree_int_cst_lt (val, val2);
952 else
954 tree tcmp;
956 fold_defer_overflow_warnings ();
958 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
960 fold_undefer_and_ignore_overflow_warnings ();
962 if (!tcmp
963 || TREE_CODE (tcmp) != INTEGER_CST)
964 return -2;
966 if (!integer_zerop (tcmp))
967 return 1;
970 return 0;
973 /* Compare two values VAL1 and VAL2. Return
975 -2 if VAL1 and VAL2 cannot be compared at compile-time,
976 -1 if VAL1 < VAL2,
977 0 if VAL1 == VAL2,
978 +1 if VAL1 > VAL2, and
979 +2 if VAL1 != VAL2
981 This is similar to tree_int_cst_compare but supports pointer values
982 and values that cannot be compared at compile time.
984 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
985 true if the return value is only valid if we assume that signed
986 overflow is undefined. */
989 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
991 if (val1 == val2)
992 return 0;
994 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
995 both integers. */
996 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
997 == POINTER_TYPE_P (TREE_TYPE (val2)));
999 /* Convert the two values into the same type. This is needed because
1000 sizetype causes sign extension even for unsigned types. */
1001 val2 = fold_convert (TREE_TYPE (val1), val2);
1002 STRIP_USELESS_TYPE_CONVERSION (val2);
1004 const bool overflow_undefined
1005 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
1006 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
1007 tree inv1, inv2;
1008 bool neg1, neg2;
1009 tree sym1 = get_single_symbol (val1, &neg1, &inv1);
1010 tree sym2 = get_single_symbol (val2, &neg2, &inv2);
1012 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
1013 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
1014 if (sym1 && sym2)
1016 /* Both values must use the same name with the same sign. */
1017 if (sym1 != sym2 || neg1 != neg2)
1018 return -2;
1020 /* [-]NAME + CST == [-]NAME + CST. */
1021 if (inv1 == inv2)
1022 return 0;
1024 /* If overflow is defined we cannot simplify more. */
1025 if (!overflow_undefined)
1026 return -2;
1028 if (strict_overflow_p != NULL
1029 /* Symbolic range building sets TREE_NO_WARNING to declare
1030 that overflow doesn't happen. */
1031 && (!inv1 || !TREE_NO_WARNING (val1))
1032 && (!inv2 || !TREE_NO_WARNING (val2)))
1033 *strict_overflow_p = true;
1035 if (!inv1)
1036 inv1 = build_int_cst (TREE_TYPE (val1), 0);
1037 if (!inv2)
1038 inv2 = build_int_cst (TREE_TYPE (val2), 0);
1040 return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
1041 TYPE_SIGN (TREE_TYPE (val1)));
1044 const bool cst1 = is_gimple_min_invariant (val1);
1045 const bool cst2 = is_gimple_min_invariant (val2);
1047 /* If one is of the form '[-]NAME + CST' and the other is constant, then
1048 it might be possible to say something depending on the constants. */
1049 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
1051 if (!overflow_undefined)
1052 return -2;
1054 if (strict_overflow_p != NULL
1055 /* Symbolic range building sets TREE_NO_WARNING to declare
1056 that overflow doesn't happen. */
1057 && (!sym1 || !TREE_NO_WARNING (val1))
1058 && (!sym2 || !TREE_NO_WARNING (val2)))
1059 *strict_overflow_p = true;
1061 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
1062 tree cst = cst1 ? val1 : val2;
1063 tree inv = cst1 ? inv2 : inv1;
1065 /* Compute the difference between the constants. If it overflows or
1066 underflows, this means that we can trivially compare the NAME with
1067 it and, consequently, the two values with each other. */
1068 wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
1069 if (wi::cmp (0, wi::to_wide (inv), sgn)
1070 != wi::cmp (diff, wi::to_wide (cst), sgn))
1072 const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
1073 return cst1 ? res : -res;
1076 return -2;
1079 /* We cannot say anything more for non-constants. */
1080 if (!cst1 || !cst2)
1081 return -2;
1083 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1085 /* We cannot compare overflowed values. */
1086 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1087 return -2;
1089 if (TREE_CODE (val1) == INTEGER_CST
1090 && TREE_CODE (val2) == INTEGER_CST)
1091 return tree_int_cst_compare (val1, val2);
1093 if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
1095 if (known_eq (wi::to_poly_widest (val1),
1096 wi::to_poly_widest (val2)))
1097 return 0;
1098 if (known_lt (wi::to_poly_widest (val1),
1099 wi::to_poly_widest (val2)))
1100 return -1;
1101 if (known_gt (wi::to_poly_widest (val1),
1102 wi::to_poly_widest (val2)))
1103 return 1;
1106 return -2;
1108 else
1110 tree t;
1112 /* First see if VAL1 and VAL2 are not the same. */
1113 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1114 return 0;
1116 /* If VAL1 is a lower address than VAL2, return -1. */
1117 if (operand_less_p (val1, val2) == 1)
1118 return -1;
1120 /* If VAL1 is a higher address than VAL2, return +1. */
1121 if (operand_less_p (val2, val1) == 1)
1122 return 1;
1124 /* If VAL1 is different than VAL2, return +2.
1125 For integer constants we either have already returned -1 or 1
1126 or they are equivalent. We still might succeed in proving
1127 something about non-trivial operands. */
1128 if (TREE_CODE (val1) != INTEGER_CST
1129 || TREE_CODE (val2) != INTEGER_CST)
1131 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1132 if (t && integer_onep (t))
1133 return 2;
1136 return -2;
1140 /* Compare values like compare_values_warnv. */
1143 compare_values (tree val1, tree val2)
1145 bool sop;
1146 return compare_values_warnv (val1, val2, &sop);
1150 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1151 0 if VAL is not inside [MIN, MAX],
1152 -2 if we cannot tell either way.
1154 Benchmark compile/20001226-1.c compilation time after changing this
1155 function. */
1158 value_inside_range (tree val, tree min, tree max)
1160 int cmp1, cmp2;
1162 cmp1 = operand_less_p (val, min);
1163 if (cmp1 == -2)
1164 return -2;
1165 if (cmp1 == 1)
1166 return 0;
1168 cmp2 = operand_less_p (max, val);
1169 if (cmp2 == -2)
1170 return -2;
1172 return !cmp2;
1176 /* Return TRUE if *VR includes the value zero. */
1178 bool
1179 range_includes_zero_p (const value_range_base *vr)
1181 if (vr->varying_p () || vr->undefined_p ())
1182 return true;
1183 tree zero = build_int_cst (vr->type (), 0);
1184 return vr->may_contain_p (zero);
1187 /* If *VR has a value range that is a single constant value return that,
1188 otherwise return NULL_TREE.
1190 ?? This actually returns TRUE for [&x, &x], so perhaps "constant"
1191 is not the best name. */
1193 tree
1194 value_range_constant_singleton (const value_range_base *vr)
1196 tree result = NULL;
1197 if (vr->singleton_p (&result))
1198 return result;
1199 return NULL;
1202 /* Value range wrapper for wide_int_range_set_zero_nonzero_bits.
1204 Compute MAY_BE_NONZERO and MUST_BE_NONZERO bit masks for range in VR.
1206 Return TRUE if VR was a constant range and we were able to compute
1207 the bit masks. */
1209 bool
1210 vrp_set_zero_nonzero_bits (const tree expr_type,
1211 const value_range_base *vr,
1212 wide_int *may_be_nonzero,
1213 wide_int *must_be_nonzero)
1215 if (!range_int_cst_p (vr))
1217 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
1218 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
1219 return false;
1221 wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
1222 wi::to_wide (vr->min ()),
1223 wi::to_wide (vr->max ()),
1224 *may_be_nonzero, *must_be_nonzero);
1225 return true;
1228 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
1229 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
1230 false otherwise. If *AR can be represented with a single range
1231 *VR1 will be VR_UNDEFINED. */
1233 static bool
1234 ranges_from_anti_range (const value_range_base *ar,
1235 value_range_base *vr0, value_range_base *vr1)
1237 tree type = ar->type ();
1239 vr0->set_undefined ();
1240 vr1->set_undefined ();
1242 /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
1243 [A+1, +INF]. Not sure if this helps in practice, though. */
1245 if (ar->kind () != VR_ANTI_RANGE
1246 || TREE_CODE (ar->min ()) != INTEGER_CST
1247 || TREE_CODE (ar->max ()) != INTEGER_CST
1248 || !vrp_val_min (type)
1249 || !vrp_val_max (type))
1250 return false;
1252 if (tree_int_cst_lt (vrp_val_min (type), ar->min ()))
1253 vr0->set (VR_RANGE,
1254 vrp_val_min (type),
1255 wide_int_to_tree (type, wi::to_wide (ar->min ()) - 1));
1256 if (tree_int_cst_lt (ar->max (), vrp_val_max (type)))
1257 vr1->set (VR_RANGE,
1258 wide_int_to_tree (type, wi::to_wide (ar->max ()) + 1),
1259 vrp_val_max (type));
1260 if (vr0->undefined_p ())
1262 *vr0 = *vr1;
1263 vr1->set_undefined ();
1266 return !vr0->undefined_p ();
1269 /* Extract the components of a value range into a pair of wide ints in
1270 [WMIN, WMAX].
1272 If the value range is anything but a VR_*RANGE of constants, the
1273 resulting wide ints are set to [-MIN, +MAX] for the type. */
1275 static void inline
1276 extract_range_into_wide_ints (const value_range_base *vr,
1277 signop sign, unsigned prec,
1278 wide_int &wmin, wide_int &wmax)
1280 gcc_assert (vr->kind () != VR_ANTI_RANGE || vr->symbolic_p ());
1281 if (range_int_cst_p (vr))
1283 wmin = wi::to_wide (vr->min ());
1284 wmax = wi::to_wide (vr->max ());
1286 else
1288 wmin = wi::min_value (prec, sign);
1289 wmax = wi::max_value (prec, sign);
1293 /* Value range wrapper for wide_int_range_multiplicative_op:
1295 *VR = *VR0 .CODE. *VR1. */
1297 static void
1298 extract_range_from_multiplicative_op (value_range_base *vr,
1299 enum tree_code code,
1300 const value_range_base *vr0,
1301 const value_range_base *vr1)
1303 gcc_assert (code == MULT_EXPR
1304 || code == TRUNC_DIV_EXPR
1305 || code == FLOOR_DIV_EXPR
1306 || code == CEIL_DIV_EXPR
1307 || code == EXACT_DIV_EXPR
1308 || code == ROUND_DIV_EXPR
1309 || code == RSHIFT_EXPR
1310 || code == LSHIFT_EXPR);
1311 gcc_assert (vr0->kind () == VR_RANGE
1312 && vr0->kind () == vr1->kind ());
1314 tree type = vr0->type ();
1315 wide_int res_lb, res_ub;
1316 wide_int vr0_lb = wi::to_wide (vr0->min ());
1317 wide_int vr0_ub = wi::to_wide (vr0->max ());
1318 wide_int vr1_lb = wi::to_wide (vr1->min ());
1319 wide_int vr1_ub = wi::to_wide (vr1->max ());
1320 bool overflow_undefined = TYPE_OVERFLOW_UNDEFINED (type);
1321 unsigned prec = TYPE_PRECISION (type);
1323 if (wide_int_range_multiplicative_op (res_lb, res_ub,
1324 code, TYPE_SIGN (type), prec,
1325 vr0_lb, vr0_ub, vr1_lb, vr1_ub,
1326 overflow_undefined))
1327 vr->set_and_canonicalize (VR_RANGE,
1328 wide_int_to_tree (type, res_lb),
1329 wide_int_to_tree (type, res_ub));
1330 else
1331 vr->set_varying ();
1334 /* If BOUND will include a symbolic bound, adjust it accordingly,
1335 otherwise leave it as is.
1337 CODE is the original operation that combined the bounds (PLUS_EXPR
1338 or MINUS_EXPR).
1340 TYPE is the type of the original operation.
1342 SYM_OPn is the symbolic for OPn if it has a symbolic.
1344 NEG_OPn is TRUE if the OPn was negated. */
1346 static void
1347 adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
1348 tree sym_op0, tree sym_op1,
1349 bool neg_op0, bool neg_op1)
1351 bool minus_p = (code == MINUS_EXPR);
1352 /* If the result bound is constant, we're done; otherwise, build the
1353 symbolic lower bound. */
1354 if (sym_op0 == sym_op1)
1356 else if (sym_op0)
1357 bound = build_symbolic_expr (type, sym_op0,
1358 neg_op0, bound);
1359 else if (sym_op1)
1361 /* We may not negate if that might introduce
1362 undefined overflow. */
1363 if (!minus_p
1364 || neg_op1
1365 || TYPE_OVERFLOW_WRAPS (type))
1366 bound = build_symbolic_expr (type, sym_op1,
1367 neg_op1 ^ minus_p, bound);
1368 else
1369 bound = NULL_TREE;
1373 /* Combine OP1 and OP1, which are two parts of a bound, into one wide
1374 int bound according to CODE. CODE is the operation combining the
1375 bound (either a PLUS_EXPR or a MINUS_EXPR).
1377 TYPE is the type of the combine operation.
1379 WI is the wide int to store the result.
1381 OVF is -1 if an underflow occurred, +1 if an overflow occurred or 0
1382 if over/underflow occurred. */
1384 static void
1385 combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
1386 tree type, tree op0, tree op1)
1388 bool minus_p = (code == MINUS_EXPR);
1389 const signop sgn = TYPE_SIGN (type);
1390 const unsigned int prec = TYPE_PRECISION (type);
1392 /* Combine the bounds, if any. */
1393 if (op0 && op1)
1395 if (minus_p)
1396 wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
1397 else
1398 wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
1400 else if (op0)
1401 wi = wi::to_wide (op0);
1402 else if (op1)
1404 if (minus_p)
1405 wi = wi::neg (wi::to_wide (op1), &ovf);
1406 else
1407 wi = wi::to_wide (op1);
1409 else
1410 wi = wi::shwi (0, prec);
1413 /* Given a range in [WMIN, WMAX], adjust it for possible overflow and
1414 put the result in VR.
1416 TYPE is the type of the range.
1418 MIN_OVF and MAX_OVF indicate what type of overflow, if any,
1419 occurred while originally calculating WMIN or WMAX. -1 indicates
1420 underflow. +1 indicates overflow. 0 indicates neither. */
1422 static void
1423 set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
1424 tree type,
1425 const wide_int &wmin, const wide_int &wmax,
1426 wi::overflow_type min_ovf,
1427 wi::overflow_type max_ovf)
1429 const signop sgn = TYPE_SIGN (type);
1430 const unsigned int prec = TYPE_PRECISION (type);
1432 /* For one bit precision if max < min, then the swapped
1433 range covers all values. */
1434 if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
1436 kind = VR_VARYING;
1437 return;
1440 if (TYPE_OVERFLOW_WRAPS (type))
1442 /* If overflow wraps, truncate the values and adjust the
1443 range kind and bounds appropriately. */
1444 wide_int tmin = wide_int::from (wmin, prec, sgn);
1445 wide_int tmax = wide_int::from (wmax, prec, sgn);
1446 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
1448 /* If the limits are swapped, we wrapped around and cover
1449 the entire range. We have a similar check at the end of
1450 extract_range_from_binary_expr. */
1451 if (wi::gt_p (tmin, tmax, sgn))
1452 kind = VR_VARYING;
1453 else
1455 kind = VR_RANGE;
1456 /* No overflow or both overflow or underflow. The
1457 range kind stays VR_RANGE. */
1458 min = wide_int_to_tree (type, tmin);
1459 max = wide_int_to_tree (type, tmax);
1461 return;
1463 else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
1464 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
1466 /* Min underflow or max overflow. The range kind
1467 changes to VR_ANTI_RANGE. */
1468 bool covers = false;
1469 wide_int tem = tmin;
1470 tmin = tmax + 1;
1471 if (wi::cmp (tmin, tmax, sgn) < 0)
1472 covers = true;
1473 tmax = tem - 1;
1474 if (wi::cmp (tmax, tem, sgn) > 0)
1475 covers = true;
1476 /* If the anti-range would cover nothing, drop to varying.
1477 Likewise if the anti-range bounds are outside of the
1478 types values. */
1479 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
1481 kind = VR_VARYING;
1482 return;
1484 kind = VR_ANTI_RANGE;
1485 min = wide_int_to_tree (type, tmin);
1486 max = wide_int_to_tree (type, tmax);
1487 return;
1489 else
1491 /* Other underflow and/or overflow, drop to VR_VARYING. */
1492 kind = VR_VARYING;
1493 return;
1496 else
1498 /* If overflow does not wrap, saturate to the types min/max
1499 value. */
1500 wide_int type_min = wi::min_value (prec, sgn);
1501 wide_int type_max = wi::max_value (prec, sgn);
1502 kind = VR_RANGE;
1503 if (min_ovf == wi::OVF_UNDERFLOW)
1504 min = wide_int_to_tree (type, type_min);
1505 else if (min_ovf == wi::OVF_OVERFLOW)
1506 min = wide_int_to_tree (type, type_max);
1507 else
1508 min = wide_int_to_tree (type, wmin);
1510 if (max_ovf == wi::OVF_UNDERFLOW)
1511 max = wide_int_to_tree (type, type_min);
1512 else if (max_ovf == wi::OVF_OVERFLOW)
1513 max = wide_int_to_tree (type, type_max);
1514 else
1515 max = wide_int_to_tree (type, wmax);
1519 /* Extract range information from a binary operation CODE based on
1520 the ranges of each of its operands *VR0 and *VR1 with resulting
1521 type EXPR_TYPE. The resulting range is stored in *VR. */
1523 void
1524 extract_range_from_binary_expr (value_range_base *vr,
1525 enum tree_code code, tree expr_type,
1526 const value_range_base *vr0_,
1527 const value_range_base *vr1_)
1529 signop sign = TYPE_SIGN (expr_type);
1530 unsigned int prec = TYPE_PRECISION (expr_type);
1531 value_range_base vr0 = *vr0_, vr1 = *vr1_;
1532 value_range_base vrtem0, vrtem1;
1533 enum value_range_kind type;
1534 tree min = NULL_TREE, max = NULL_TREE;
1535 int cmp;
1537 if (!INTEGRAL_TYPE_P (expr_type)
1538 && !POINTER_TYPE_P (expr_type))
1540 vr->set_varying ();
1541 return;
1544 /* Not all binary expressions can be applied to ranges in a
1545 meaningful way. Handle only arithmetic operations. */
1546 if (code != PLUS_EXPR
1547 && code != MINUS_EXPR
1548 && code != POINTER_PLUS_EXPR
1549 && code != MULT_EXPR
1550 && code != TRUNC_DIV_EXPR
1551 && code != FLOOR_DIV_EXPR
1552 && code != CEIL_DIV_EXPR
1553 && code != EXACT_DIV_EXPR
1554 && code != ROUND_DIV_EXPR
1555 && code != TRUNC_MOD_EXPR
1556 && code != RSHIFT_EXPR
1557 && code != LSHIFT_EXPR
1558 && code != MIN_EXPR
1559 && code != MAX_EXPR
1560 && code != BIT_AND_EXPR
1561 && code != BIT_IOR_EXPR
1562 && code != BIT_XOR_EXPR)
1564 vr->set_varying ();
1565 return;
1568 /* If both ranges are UNDEFINED, so is the result. */
1569 if (vr0.undefined_p () && vr1.undefined_p ())
1571 vr->set_undefined ();
1572 return;
1574 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
1575 code. At some point we may want to special-case operations that
1576 have UNDEFINED result for all or some value-ranges of the not UNDEFINED
1577 operand. */
1578 else if (vr0.undefined_p ())
1579 vr0.set_varying ();
1580 else if (vr1.undefined_p ())
1581 vr1.set_varying ();
1583 /* We get imprecise results from ranges_from_anti_range when
1584 code is EXACT_DIV_EXPR. We could mask out bits in the resulting
1585 range, but then we also need to hack up vrp_union. It's just
1586 easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR. */
1587 if (code == EXACT_DIV_EXPR && range_is_nonnull (&vr0))
1589 vr->set_nonnull (expr_type);
1590 return;
1593 /* Now canonicalize anti-ranges to ranges when they are not symbolic
1594 and express ~[] op X as ([]' op X) U ([]'' op X). */
1595 if (vr0.kind () == VR_ANTI_RANGE
1596 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
1598 extract_range_from_binary_expr (vr, code, expr_type, &vrtem0, vr1_);
1599 if (!vrtem1.undefined_p ())
1601 value_range_base vrres;
1602 extract_range_from_binary_expr (&vrres, code, expr_type,
1603 &vrtem1, vr1_);
1604 vr->union_ (&vrres);
1606 return;
1608 /* Likewise for X op ~[]. */
1609 if (vr1.kind () == VR_ANTI_RANGE
1610 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
1612 extract_range_from_binary_expr (vr, code, expr_type, vr0_, &vrtem0);
1613 if (!vrtem1.undefined_p ())
1615 value_range_base vrres;
1616 extract_range_from_binary_expr (&vrres, code, expr_type,
1617 vr0_, &vrtem1);
1618 vr->union_ (&vrres);
1620 return;
1623 /* The type of the resulting value range defaults to VR0.TYPE. */
1624 type = vr0.kind ();
1626 /* Refuse to operate on VARYING ranges, ranges of different kinds
1627 and symbolic ranges. As an exception, we allow BIT_{AND,IOR}
1628 because we may be able to derive a useful range even if one of
1629 the operands is VR_VARYING or symbolic range. Similarly for
1630 divisions, MIN/MAX and PLUS/MINUS.
1632 TODO, we may be able to derive anti-ranges in some cases. */
1633 if (code != BIT_AND_EXPR
1634 && code != BIT_IOR_EXPR
1635 && code != TRUNC_DIV_EXPR
1636 && code != FLOOR_DIV_EXPR
1637 && code != CEIL_DIV_EXPR
1638 && code != EXACT_DIV_EXPR
1639 && code != ROUND_DIV_EXPR
1640 && code != TRUNC_MOD_EXPR
1641 && code != MIN_EXPR
1642 && code != MAX_EXPR
1643 && code != PLUS_EXPR
1644 && code != MINUS_EXPR
1645 && code != RSHIFT_EXPR
1646 && code != POINTER_PLUS_EXPR
1647 && (vr0.varying_p ()
1648 || vr1.varying_p ()
1649 || vr0.kind () != vr1.kind ()
1650 || vr0.symbolic_p ()
1651 || vr1.symbolic_p ()))
1653 vr->set_varying ();
1654 return;
1657 /* Now evaluate the expression to determine the new range. */
1658 if (POINTER_TYPE_P (expr_type))
1660 if (code == MIN_EXPR || code == MAX_EXPR)
1662 /* For MIN/MAX expressions with pointers, we only care about
1663 nullness, if both are non null, then the result is nonnull.
1664 If both are null, then the result is null. Otherwise they
1665 are varying. */
1666 if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
1667 vr->set_nonnull (expr_type);
1668 else if (range_is_null (&vr0) && range_is_null (&vr1))
1669 vr->set_null (expr_type);
1670 else
1671 vr->set_varying ();
1673 else if (code == POINTER_PLUS_EXPR)
1675 /* For pointer types, we are really only interested in asserting
1676 whether the expression evaluates to non-NULL.
1677 With -fno-delete-null-pointer-checks we need to be more
1678 conservative. As some object might reside at address 0,
1679 then some offset could be added to it and the same offset
1680 subtracted again and the result would be NULL.
1681 E.g.
1682 static int a[12]; where &a[0] is NULL and
1683 ptr = &a[6];
1684 ptr -= 6;
1685 ptr will be NULL here, even when there is POINTER_PLUS_EXPR
1686 where the first range doesn't include zero and the second one
1687 doesn't either. As the second operand is sizetype (unsigned),
1688 consider all ranges where the MSB could be set as possible
1689 subtractions where the result might be NULL. */
1690 if ((!range_includes_zero_p (&vr0)
1691 || !range_includes_zero_p (&vr1))
1692 && !TYPE_OVERFLOW_WRAPS (expr_type)
1693 && (flag_delete_null_pointer_checks
1694 || (range_int_cst_p (&vr1)
1695 && !tree_int_cst_sign_bit (vr1.max ()))))
1696 vr->set_nonnull (expr_type);
1697 else if (range_is_null (&vr0) && range_is_null (&vr1))
1698 vr->set_null (expr_type);
1699 else
1700 vr->set_varying ();
1702 else if (code == BIT_AND_EXPR)
1704 /* For pointer types, we are really only interested in asserting
1705 whether the expression evaluates to non-NULL. */
1706 if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
1707 vr->set_nonnull (expr_type);
1708 else if (range_is_null (&vr0) || range_is_null (&vr1))
1709 vr->set_null (expr_type);
1710 else
1711 vr->set_varying ();
1713 else
1714 vr->set_varying ();
1716 return;
1719 /* For integer ranges, apply the operation to each end of the
1720 range and see what we end up with. */
1721 if (code == PLUS_EXPR || code == MINUS_EXPR)
1723 /* This will normalize things such that calculating
1724 [0,0] - VR_VARYING is not dropped to varying, but is
1725 calculated as [MIN+1, MAX]. */
1726 if (vr0.varying_p ())
1727 vr0.set (VR_RANGE, vrp_val_min (expr_type), vrp_val_max (expr_type));
1728 if (vr1.varying_p ())
1729 vr1.set (VR_RANGE, vrp_val_min (expr_type), vrp_val_max (expr_type));
1731 const bool minus_p = (code == MINUS_EXPR);
1732 tree min_op0 = vr0.min ();
1733 tree min_op1 = minus_p ? vr1.max () : vr1.min ();
1734 tree max_op0 = vr0.max ();
1735 tree max_op1 = minus_p ? vr1.min () : vr1.max ();
1736 tree sym_min_op0 = NULL_TREE;
1737 tree sym_min_op1 = NULL_TREE;
1738 tree sym_max_op0 = NULL_TREE;
1739 tree sym_max_op1 = NULL_TREE;
1740 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
1742 neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
1744 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
1745 single-symbolic ranges, try to compute the precise resulting range,
1746 but only if we know that this resulting range will also be constant
1747 or single-symbolic. */
1748 if (vr0.kind () == VR_RANGE && vr1.kind () == VR_RANGE
1749 && (TREE_CODE (min_op0) == INTEGER_CST
1750 || (sym_min_op0
1751 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
1752 && (TREE_CODE (min_op1) == INTEGER_CST
1753 || (sym_min_op1
1754 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
1755 && (!(sym_min_op0 && sym_min_op1)
1756 || (sym_min_op0 == sym_min_op1
1757 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
1758 && (TREE_CODE (max_op0) == INTEGER_CST
1759 || (sym_max_op0
1760 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
1761 && (TREE_CODE (max_op1) == INTEGER_CST
1762 || (sym_max_op1
1763 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
1764 && (!(sym_max_op0 && sym_max_op1)
1765 || (sym_max_op0 == sym_max_op1
1766 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
1768 wide_int wmin, wmax;
1769 wi::overflow_type min_ovf = wi::OVF_NONE;
1770 wi::overflow_type max_ovf = wi::OVF_NONE;
1772 /* Build the bounds. */
1773 combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
1774 combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
1776 /* If we have overflow for the constant part and the resulting
1777 range will be symbolic, drop to VR_VARYING. */
1778 if (((bool)min_ovf && sym_min_op0 != sym_min_op1)
1779 || ((bool)max_ovf && sym_max_op0 != sym_max_op1))
1781 vr->set_varying ();
1782 return;
1785 /* Adjust the range for possible overflow. */
1786 min = NULL_TREE;
1787 max = NULL_TREE;
1788 set_value_range_with_overflow (type, min, max, expr_type,
1789 wmin, wmax, min_ovf, max_ovf);
1790 if (type == VR_VARYING)
1792 vr->set_varying ();
1793 return;
1796 /* Build the symbolic bounds if needed. */
1797 adjust_symbolic_bound (min, code, expr_type,
1798 sym_min_op0, sym_min_op1,
1799 neg_min_op0, neg_min_op1);
1800 adjust_symbolic_bound (max, code, expr_type,
1801 sym_max_op0, sym_max_op1,
1802 neg_max_op0, neg_max_op1);
1804 else
1806 /* For other cases, for example if we have a PLUS_EXPR with two
1807 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
1808 to compute a precise range for such a case.
1809 ??? General even mixed range kind operations can be expressed
1810 by for example transforming ~[3, 5] + [1, 2] to range-only
1811 operations and a union primitive:
1812 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
1813 [-INF+1, 4] U [6, +INF(OVF)]
1814 though usually the union is not exactly representable with
1815 a single range or anti-range as the above is
1816 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
1817 but one could use a scheme similar to equivalences for this. */
1818 vr->set_varying ();
1819 return;
1822 else if (code == MIN_EXPR
1823 || code == MAX_EXPR)
1825 wide_int wmin, wmax;
1826 wide_int vr0_min, vr0_max;
1827 wide_int vr1_min, vr1_max;
1828 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1829 extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
1830 if (wide_int_range_min_max (wmin, wmax, code, sign, prec,
1831 vr0_min, vr0_max, vr1_min, vr1_max))
1832 vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin),
1833 wide_int_to_tree (expr_type, wmax));
1834 else
1835 vr->set_varying ();
1836 return;
1838 else if (code == MULT_EXPR)
1840 if (!range_int_cst_p (&vr0)
1841 || !range_int_cst_p (&vr1))
1843 vr->set_varying ();
1844 return;
1846 extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
1847 return;
1849 else if (code == RSHIFT_EXPR
1850 || code == LSHIFT_EXPR)
1852 if (range_int_cst_p (&vr1)
1853 && !wide_int_range_shift_undefined_p
1854 (TYPE_SIGN (TREE_TYPE (vr1.min ())),
1855 prec,
1856 wi::to_wide (vr1.min ()),
1857 wi::to_wide (vr1.max ())))
1859 if (code == RSHIFT_EXPR)
1861 /* Even if vr0 is VARYING or otherwise not usable, we can derive
1862 useful ranges just from the shift count. E.g.
1863 x >> 63 for signed 64-bit x is always [-1, 0]. */
1864 if (vr0.kind () != VR_RANGE || vr0.symbolic_p ())
1865 vr0.set (VR_RANGE, vrp_val_min (expr_type),
1866 vrp_val_max (expr_type));
1867 extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
1868 return;
1870 else if (code == LSHIFT_EXPR
1871 && range_int_cst_p (&vr0))
1873 wide_int res_lb, res_ub;
1874 if (wide_int_range_lshift (res_lb, res_ub, sign, prec,
1875 wi::to_wide (vr0.min ()),
1876 wi::to_wide (vr0.max ()),
1877 wi::to_wide (vr1.min ()),
1878 wi::to_wide (vr1.max ()),
1879 TYPE_OVERFLOW_UNDEFINED (expr_type)))
1881 min = wide_int_to_tree (expr_type, res_lb);
1882 max = wide_int_to_tree (expr_type, res_ub);
1883 vr->set_and_canonicalize (VR_RANGE, min, max);
1884 return;
1888 vr->set_varying ();
1889 return;
1891 else if (code == TRUNC_DIV_EXPR
1892 || code == FLOOR_DIV_EXPR
1893 || code == CEIL_DIV_EXPR
1894 || code == EXACT_DIV_EXPR
1895 || code == ROUND_DIV_EXPR)
1897 wide_int dividend_min, dividend_max, divisor_min, divisor_max;
1898 wide_int wmin, wmax, extra_min, extra_max;
1899 bool extra_range_p;
1901 /* Special case explicit division by zero as undefined. */
1902 if (range_is_null (&vr1))
1904 vr->set_undefined ();
1905 return;
1908 /* First, normalize ranges into constants we can handle. Note
1909 that VR_ANTI_RANGE's of constants were already normalized
1910 before arriving here.
1912 NOTE: As a future improvement, we may be able to do better
1913 with mixed symbolic (anti-)ranges like [0, A]. See note in
1914 ranges_from_anti_range. */
1915 extract_range_into_wide_ints (&vr0, sign, prec,
1916 dividend_min, dividend_max);
1917 extract_range_into_wide_ints (&vr1, sign, prec,
1918 divisor_min, divisor_max);
1919 if (!wide_int_range_div (wmin, wmax, code, sign, prec,
1920 dividend_min, dividend_max,
1921 divisor_min, divisor_max,
1922 TYPE_OVERFLOW_UNDEFINED (expr_type),
1923 extra_range_p, extra_min, extra_max))
1925 vr->set_varying ();
1926 return;
1928 vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin),
1929 wide_int_to_tree (expr_type, wmax));
1930 if (extra_range_p)
1932 value_range_base
1933 extra_range (VR_RANGE, wide_int_to_tree (expr_type, extra_min),
1934 wide_int_to_tree (expr_type, extra_max));
1935 vr->union_ (&extra_range);
1937 return;
1939 else if (code == TRUNC_MOD_EXPR)
1941 if (range_is_null (&vr1))
1943 vr->set_undefined ();
1944 return;
1946 wide_int wmin, wmax, tmp;
1947 wide_int vr0_min, vr0_max, vr1_min, vr1_max;
1948 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1949 extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
1950 wide_int_range_trunc_mod (wmin, wmax, sign, prec,
1951 vr0_min, vr0_max, vr1_min, vr1_max);
1952 min = wide_int_to_tree (expr_type, wmin);
1953 max = wide_int_to_tree (expr_type, wmax);
1954 vr->set (VR_RANGE, min, max);
1955 return;
1957 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
1959 wide_int may_be_nonzero0, may_be_nonzero1;
1960 wide_int must_be_nonzero0, must_be_nonzero1;
1961 wide_int wmin, wmax;
1962 wide_int vr0_min, vr0_max, vr1_min, vr1_max;
1963 vrp_set_zero_nonzero_bits (expr_type, &vr0,
1964 &may_be_nonzero0, &must_be_nonzero0);
1965 vrp_set_zero_nonzero_bits (expr_type, &vr1,
1966 &may_be_nonzero1, &must_be_nonzero1);
1967 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1968 extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
1969 if (code == BIT_AND_EXPR)
1971 if (wide_int_range_bit_and (wmin, wmax, sign, prec,
1972 vr0_min, vr0_max,
1973 vr1_min, vr1_max,
1974 must_be_nonzero0,
1975 may_be_nonzero0,
1976 must_be_nonzero1,
1977 may_be_nonzero1))
1979 min = wide_int_to_tree (expr_type, wmin);
1980 max = wide_int_to_tree (expr_type, wmax);
1981 vr->set (VR_RANGE, min, max);
1983 else
1984 vr->set_varying ();
1985 return;
1987 else if (code == BIT_IOR_EXPR)
1989 if (wide_int_range_bit_ior (wmin, wmax, sign,
1990 vr0_min, vr0_max,
1991 vr1_min, vr1_max,
1992 must_be_nonzero0,
1993 may_be_nonzero0,
1994 must_be_nonzero1,
1995 may_be_nonzero1))
1997 min = wide_int_to_tree (expr_type, wmin);
1998 max = wide_int_to_tree (expr_type, wmax);
1999 vr->set (VR_RANGE, min, max);
2001 else
2002 vr->set_varying ();
2003 return;
2005 else if (code == BIT_XOR_EXPR)
2007 if (wide_int_range_bit_xor (wmin, wmax, sign, prec,
2008 must_be_nonzero0,
2009 may_be_nonzero0,
2010 must_be_nonzero1,
2011 may_be_nonzero1))
2013 min = wide_int_to_tree (expr_type, wmin);
2014 max = wide_int_to_tree (expr_type, wmax);
2015 vr->set (VR_RANGE, min, max);
2017 else
2018 vr->set_varying ();
2019 return;
2022 else
2023 gcc_unreachable ();
2025 /* If either MIN or MAX overflowed, then set the resulting range to
2026 VARYING. */
2027 if (min == NULL_TREE
2028 || TREE_OVERFLOW_P (min)
2029 || max == NULL_TREE
2030 || TREE_OVERFLOW_P (max))
2032 vr->set_varying ();
2033 return;
2036 /* We punt for [-INF, +INF].
2037 We learn nothing when we have INF on both sides.
2038 Note that we do accept [-INF, -INF] and [+INF, +INF]. */
2039 if (vrp_val_is_min (min) && vrp_val_is_max (max))
2041 vr->set_varying ();
2042 return;
2045 cmp = compare_values (min, max);
2046 if (cmp == -2 || cmp == 1)
2048 /* If the new range has its limits swapped around (MIN > MAX),
2049 then the operation caused one of them to wrap around, mark
2050 the new range VARYING. */
2051 vr->set_varying ();
2053 else
2054 vr->set (type, min, max);
2057 /* Extract range information from a unary operation CODE based on
2058 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
2059 The resulting range is stored in *VR. */
2061 void
2062 extract_range_from_unary_expr (value_range_base *vr,
2063 enum tree_code code, tree type,
2064 const value_range_base *vr0_, tree op0_type)
2066 signop sign = TYPE_SIGN (type);
2067 unsigned int prec = TYPE_PRECISION (type);
2068 value_range_base vr0 = *vr0_;
2069 value_range_base vrtem0, vrtem1;
2071 /* VRP only operates on integral and pointer types. */
2072 if (!(INTEGRAL_TYPE_P (op0_type)
2073 || POINTER_TYPE_P (op0_type))
2074 || !(INTEGRAL_TYPE_P (type)
2075 || POINTER_TYPE_P (type)))
2077 vr->set_varying ();
2078 return;
2081 /* If VR0 is UNDEFINED, so is the result. */
2082 if (vr0.undefined_p ())
2084 vr->set_undefined ();
2085 return;
2088 /* Handle operations that we express in terms of others. */
2089 if (code == PAREN_EXPR)
2091 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */
2092 *vr = vr0;
2093 return;
2095 else if (code == NEGATE_EXPR)
2097 /* -X is simply 0 - X, so re-use existing code that also handles
2098 anti-ranges fine. */
2099 value_range_base zero;
2100 zero.set (build_int_cst (type, 0));
2101 extract_range_from_binary_expr (vr, MINUS_EXPR, type, &zero, &vr0);
2102 return;
2104 else if (code == BIT_NOT_EXPR)
2106 /* ~X is simply -1 - X, so re-use existing code that also handles
2107 anti-ranges fine. */
2108 value_range_base minusone;
2109 minusone.set (build_int_cst (type, -1));
2110 extract_range_from_binary_expr (vr, MINUS_EXPR, type, &minusone, &vr0);
2111 return;
2114 /* Now canonicalize anti-ranges to ranges when they are not symbolic
2115 and express op ~[] as (op []') U (op []''). */
2116 if (vr0.kind () == VR_ANTI_RANGE
2117 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2119 extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
2120 if (!vrtem1.undefined_p ())
2122 value_range_base vrres;
2123 extract_range_from_unary_expr (&vrres, code, type,
2124 &vrtem1, op0_type);
2125 vr->union_ (&vrres);
2127 return;
2130 if (CONVERT_EXPR_CODE_P (code))
2132 tree inner_type = op0_type;
2133 tree outer_type = type;
2135 /* If the expression involves a pointer, we are only interested in
2136 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).
2138 This may lose precision when converting (char *)~[0,2] to
2139 int, because we'll forget that the pointer can also not be 1
2140 or 2. In practice we don't care, as this is some idiot
2141 storing a magic constant to a pointer. */
2142 if (POINTER_TYPE_P (type) || POINTER_TYPE_P (op0_type))
2144 if (!range_includes_zero_p (&vr0))
2145 vr->set_nonnull (type);
2146 else if (range_is_null (&vr0))
2147 vr->set_null (type);
2148 else
2149 vr->set_varying ();
2150 return;
2153 /* The POINTER_TYPE_P code above will have dealt with all
2154 pointer anti-ranges. Any remaining anti-ranges at this point
2155 will be integer conversions from SSA names that will be
2156 normalized into VARYING. For instance: ~[x_55, x_55]. */
2157 gcc_assert (vr0.kind () != VR_ANTI_RANGE
2158 || TREE_CODE (vr0.min ()) != INTEGER_CST);
2160 /* NOTES: Previously we were returning VARYING for all symbolics, but
2161 we can do better by treating them as [-MIN, +MAX]. For
2162 example, converting [SYM, SYM] from INT to LONG UNSIGNED,
2163 we can return: ~[0x8000000, 0xffffffff7fffffff].
2165 We were also failing to convert ~[0,0] from char* to unsigned,
2166 instead choosing to return VR_VARYING. Now we return ~[0,0]. */
2167 wide_int vr0_min, vr0_max, wmin, wmax;
2168 signop inner_sign = TYPE_SIGN (inner_type);
2169 signop outer_sign = TYPE_SIGN (outer_type);
2170 unsigned inner_prec = TYPE_PRECISION (inner_type);
2171 unsigned outer_prec = TYPE_PRECISION (outer_type);
2172 extract_range_into_wide_ints (&vr0, inner_sign, inner_prec,
2173 vr0_min, vr0_max);
2174 if (wide_int_range_convert (wmin, wmax,
2175 inner_sign, inner_prec,
2176 outer_sign, outer_prec,
2177 vr0_min, vr0_max))
2179 tree min = wide_int_to_tree (outer_type, wmin);
2180 tree max = wide_int_to_tree (outer_type, wmax);
2181 vr->set_and_canonicalize (VR_RANGE, min, max);
2183 else
2184 vr->set_varying ();
2185 return;
2187 else if (code == ABS_EXPR)
2189 wide_int wmin, wmax;
2190 wide_int vr0_min, vr0_max;
2191 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
2192 if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max,
2193 TYPE_OVERFLOW_UNDEFINED (type)))
2194 vr->set (VR_RANGE, wide_int_to_tree (type, wmin),
2195 wide_int_to_tree (type, wmax));
2196 else
2197 vr->set_varying ();
2198 return;
2201 /* For unhandled operations fall back to varying. */
2202 vr->set_varying ();
2203 return;
2206 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2207 create a new SSA name N and return the assertion assignment
2208 'N = ASSERT_EXPR <V, V OP W>'. */
2210 static gimple *
2211 build_assert_expr_for (tree cond, tree v)
2213 tree a;
2214 gassign *assertion;
2216 gcc_assert (TREE_CODE (v) == SSA_NAME
2217 && COMPARISON_CLASS_P (cond));
2219 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2220 assertion = gimple_build_assign (NULL_TREE, a);
2222 /* The new ASSERT_EXPR, creates a new SSA name that replaces the
2223 operand of the ASSERT_EXPR. Create it so the new name and the old one
2224 are registered in the replacement table so that we can fix the SSA web
2225 after adding all the ASSERT_EXPRs. */
2226 tree new_def = create_new_def_for (v, assertion, NULL);
2227 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2228 given we have to be able to fully propagate those out to re-create
2229 valid SSA when removing the asserts. */
2230 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2231 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2233 return assertion;
2237 /* Return false if EXPR is a predicate expression involving floating
2238 point values. */
2240 static inline bool
2241 fp_predicate (gimple *stmt)
2243 GIMPLE_CHECK (stmt, GIMPLE_COND);
2245 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2248 /* If the range of values taken by OP can be inferred after STMT executes,
2249 return the comparison code (COMP_CODE_P) and value (VAL_P) that
2250 describes the inferred range. Return true if a range could be
2251 inferred. */
2253 bool
2254 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2256 *val_p = NULL_TREE;
2257 *comp_code_p = ERROR_MARK;
2259 /* Do not attempt to infer anything in names that flow through
2260 abnormal edges. */
2261 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2262 return false;
2264 /* If STMT is the last statement of a basic block with no normal
2265 successors, there is no point inferring anything about any of its
2266 operands. We would not be able to find a proper insertion point
2267 for the assertion, anyway. */
2268 if (stmt_ends_bb_p (stmt))
2270 edge_iterator ei;
2271 edge e;
2273 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2274 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2275 break;
2276 if (e == NULL)
2277 return false;
2280 if (infer_nonnull_range (stmt, op))
2282 *val_p = build_int_cst (TREE_TYPE (op), 0);
2283 *comp_code_p = NE_EXPR;
2284 return true;
2287 return false;
2291 void dump_asserts_for (FILE *, tree);
2292 void debug_asserts_for (tree);
2293 void dump_all_asserts (FILE *);
2294 void debug_all_asserts (void);
2296 /* Dump all the registered assertions for NAME to FILE. */
2298 void
2299 dump_asserts_for (FILE *file, tree name)
2301 assert_locus *loc;
2303 fprintf (file, "Assertions to be inserted for ");
2304 print_generic_expr (file, name);
2305 fprintf (file, "\n");
2307 loc = asserts_for[SSA_NAME_VERSION (name)];
2308 while (loc)
2310 fprintf (file, "\t");
2311 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2312 fprintf (file, "\n\tBB #%d", loc->bb->index);
2313 if (loc->e)
2315 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2316 loc->e->dest->index);
2317 dump_edge_info (file, loc->e, dump_flags, 0);
2319 fprintf (file, "\n\tPREDICATE: ");
2320 print_generic_expr (file, loc->expr);
2321 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2322 print_generic_expr (file, loc->val);
2323 fprintf (file, "\n\n");
2324 loc = loc->next;
2327 fprintf (file, "\n");
2331 /* Dump all the registered assertions for NAME to stderr. */
2333 DEBUG_FUNCTION void
2334 debug_asserts_for (tree name)
2336 dump_asserts_for (stderr, name);
2340 /* Dump all the registered assertions for all the names to FILE. */
2342 void
2343 dump_all_asserts (FILE *file)
2345 unsigned i;
2346 bitmap_iterator bi;
2348 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2349 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2350 dump_asserts_for (file, ssa_name (i));
2351 fprintf (file, "\n");
2355 /* Dump all the registered assertions for all the names to stderr. */
2357 DEBUG_FUNCTION void
2358 debug_all_asserts (void)
2360 dump_all_asserts (stderr);
2363 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
2365 static void
2366 add_assert_info (vec<assert_info> &asserts,
2367 tree name, tree expr, enum tree_code comp_code, tree val)
2369 assert_info info;
2370 info.comp_code = comp_code;
2371 info.name = name;
2372 if (TREE_OVERFLOW_P (val))
2373 val = drop_tree_overflow (val);
2374 info.val = val;
2375 info.expr = expr;
2376 asserts.safe_push (info);
2377 if (dump_enabled_p ())
2378 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
2379 "Adding assert for %T from %T %s %T\n",
2380 name, expr, op_symbol_code (comp_code), val);
2383 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2384 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2385 E->DEST, then register this location as a possible insertion point
2386 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2388 BB, E and SI provide the exact insertion point for the new
2389 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2390 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2391 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2392 must not be NULL. */
2394 static void
2395 register_new_assert_for (tree name, tree expr,
2396 enum tree_code comp_code,
2397 tree val,
2398 basic_block bb,
2399 edge e,
2400 gimple_stmt_iterator si)
2402 assert_locus *n, *loc, *last_loc;
2403 basic_block dest_bb;
2405 gcc_checking_assert (bb == NULL || e == NULL);
2407 if (e == NULL)
2408 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2409 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2411 /* Never build an assert comparing against an integer constant with
2412 TREE_OVERFLOW set. This confuses our undefined overflow warning
2413 machinery. */
2414 if (TREE_OVERFLOW_P (val))
2415 val = drop_tree_overflow (val);
2417 /* The new assertion A will be inserted at BB or E. We need to
2418 determine if the new location is dominated by a previously
2419 registered location for A. If we are doing an edge insertion,
2420 assume that A will be inserted at E->DEST. Note that this is not
2421 necessarily true.
2423 If E is a critical edge, it will be split. But even if E is
2424 split, the new block will dominate the same set of blocks that
2425 E->DEST dominates.
2427 The reverse, however, is not true, blocks dominated by E->DEST
2428 will not be dominated by the new block created to split E. So,
2429 if the insertion location is on a critical edge, we will not use
2430 the new location to move another assertion previously registered
2431 at a block dominated by E->DEST. */
2432 dest_bb = (bb) ? bb : e->dest;
2434 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2435 VAL at a block dominating DEST_BB, then we don't need to insert a new
2436 one. Similarly, if the same assertion already exists at a block
2437 dominated by DEST_BB and the new location is not on a critical
2438 edge, then update the existing location for the assertion (i.e.,
2439 move the assertion up in the dominance tree).
2441 Note, this is implemented as a simple linked list because there
2442 should not be more than a handful of assertions registered per
2443 name. If this becomes a performance problem, a table hashed by
2444 COMP_CODE and VAL could be implemented. */
2445 loc = asserts_for[SSA_NAME_VERSION (name)];
2446 last_loc = loc;
2447 while (loc)
2449 if (loc->comp_code == comp_code
2450 && (loc->val == val
2451 || operand_equal_p (loc->val, val, 0))
2452 && (loc->expr == expr
2453 || operand_equal_p (loc->expr, expr, 0)))
2455 /* If E is not a critical edge and DEST_BB
2456 dominates the existing location for the assertion, move
2457 the assertion up in the dominance tree by updating its
2458 location information. */
2459 if ((e == NULL || !EDGE_CRITICAL_P (e))
2460 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2462 loc->bb = dest_bb;
2463 loc->e = e;
2464 loc->si = si;
2465 return;
2469 /* Update the last node of the list and move to the next one. */
2470 last_loc = loc;
2471 loc = loc->next;
2474 /* If we didn't find an assertion already registered for
2475 NAME COMP_CODE VAL, add a new one at the end of the list of
2476 assertions associated with NAME. */
2477 n = XNEW (struct assert_locus);
2478 n->bb = dest_bb;
2479 n->e = e;
2480 n->si = si;
2481 n->comp_code = comp_code;
2482 n->val = val;
2483 n->expr = expr;
2484 n->next = NULL;
2486 if (last_loc)
2487 last_loc->next = n;
2488 else
2489 asserts_for[SSA_NAME_VERSION (name)] = n;
2491 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2494 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2495 Extract a suitable test code and value and store them into *CODE_P and
2496 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2498 If no extraction was possible, return FALSE, otherwise return TRUE.
2500 If INVERT is true, then we invert the result stored into *CODE_P. */
2502 static bool
2503 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2504 tree cond_op0, tree cond_op1,
2505 bool invert, enum tree_code *code_p,
2506 tree *val_p)
2508 enum tree_code comp_code;
2509 tree val;
2511 /* Otherwise, we have a comparison of the form NAME COMP VAL
2512 or VAL COMP NAME. */
2513 if (name == cond_op1)
2515 /* If the predicate is of the form VAL COMP NAME, flip
2516 COMP around because we need to register NAME as the
2517 first operand in the predicate. */
2518 comp_code = swap_tree_comparison (cond_code);
2519 val = cond_op0;
2521 else if (name == cond_op0)
2523 /* The comparison is of the form NAME COMP VAL, so the
2524 comparison code remains unchanged. */
2525 comp_code = cond_code;
2526 val = cond_op1;
2528 else
2529 gcc_unreachable ();
2531 /* Invert the comparison code as necessary. */
2532 if (invert)
2533 comp_code = invert_tree_comparison (comp_code, 0);
2535 /* VRP only handles integral and pointer types. */
2536 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
2537 && ! POINTER_TYPE_P (TREE_TYPE (val)))
2538 return false;
2540 /* Do not register always-false predicates.
2541 FIXME: this works around a limitation in fold() when dealing with
2542 enumerations. Given 'enum { N1, N2 } x;', fold will not
2543 fold 'if (x > N2)' to 'if (0)'. */
2544 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
2545 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
2547 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
2548 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
2550 if (comp_code == GT_EXPR
2551 && (!max
2552 || compare_values (val, max) == 0))
2553 return false;
2555 if (comp_code == LT_EXPR
2556 && (!min
2557 || compare_values (val, min) == 0))
2558 return false;
2560 *code_p = comp_code;
2561 *val_p = val;
2562 return true;
2565 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
2566 (otherwise return VAL). VAL and MASK must be zero-extended for
2567 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
2568 (to transform signed values into unsigned) and at the end xor
2569 SGNBIT back. */
2571 static wide_int
2572 masked_increment (const wide_int &val_in, const wide_int &mask,
2573 const wide_int &sgnbit, unsigned int prec)
2575 wide_int bit = wi::one (prec), res;
2576 unsigned int i;
2578 wide_int val = val_in ^ sgnbit;
2579 for (i = 0; i < prec; i++, bit += bit)
2581 res = mask;
2582 if ((res & bit) == 0)
2583 continue;
2584 res = bit - 1;
2585 res = wi::bit_and_not (val + bit, res);
2586 res &= mask;
2587 if (wi::gtu_p (res, val))
2588 return res ^ sgnbit;
2590 return val ^ sgnbit;
2593 /* Helper for overflow_comparison_p
2595 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2596 OP1's defining statement to see if it ultimately has the form
2597 OP0 CODE (OP0 PLUS INTEGER_CST)
2599 If so, return TRUE indicating this is an overflow test and store into
2600 *NEW_CST an updated constant that can be used in a narrowed range test.
2602 REVERSED indicates if the comparison was originally:
2604 OP1 CODE' OP0.
2606 This affects how we build the updated constant. */
2608 static bool
2609 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
2610 bool follow_assert_exprs, bool reversed, tree *new_cst)
2612 /* See if this is a relational operation between two SSA_NAMES with
2613 unsigned, overflow wrapping values. If so, check it more deeply. */
2614 if ((code == LT_EXPR || code == LE_EXPR
2615 || code == GE_EXPR || code == GT_EXPR)
2616 && TREE_CODE (op0) == SSA_NAME
2617 && TREE_CODE (op1) == SSA_NAME
2618 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
2619 && TYPE_UNSIGNED (TREE_TYPE (op0))
2620 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
2622 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
2624 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
2625 if (follow_assert_exprs)
2627 while (gimple_assign_single_p (op1_def)
2628 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
2630 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
2631 if (TREE_CODE (op1) != SSA_NAME)
2632 break;
2633 op1_def = SSA_NAME_DEF_STMT (op1);
2637 /* Now look at the defining statement of OP1 to see if it adds
2638 or subtracts a nonzero constant from another operand. */
2639 if (op1_def
2640 && is_gimple_assign (op1_def)
2641 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
2642 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
2643 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
2645 tree target = gimple_assign_rhs1 (op1_def);
2647 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
2648 for one where TARGET appears on the RHS. */
2649 if (follow_assert_exprs)
2651 /* Now see if that "other operand" is op0, following the chain
2652 of ASSERT_EXPRs if necessary. */
2653 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
2654 while (op0 != target
2655 && gimple_assign_single_p (op0_def)
2656 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
2658 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
2659 if (TREE_CODE (op0) != SSA_NAME)
2660 break;
2661 op0_def = SSA_NAME_DEF_STMT (op0);
2665 /* If we did not find our target SSA_NAME, then this is not
2666 an overflow test. */
2667 if (op0 != target)
2668 return false;
2670 tree type = TREE_TYPE (op0);
2671 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
2672 tree inc = gimple_assign_rhs2 (op1_def);
2673 if (reversed)
2674 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
2675 else
2676 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
2677 return true;
2680 return false;
2683 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2684 OP1's defining statement to see if it ultimately has the form
2685 OP0 CODE (OP0 PLUS INTEGER_CST)
2687 If so, return TRUE indicating this is an overflow test and store into
2688 *NEW_CST an updated constant that can be used in a narrowed range test.
2690 These statements are left as-is in the IL to facilitate discovery of
2691 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
2692 the alternate range representation is often useful within VRP. */
2694 bool
2695 overflow_comparison_p (tree_code code, tree name, tree val,
2696 bool use_equiv_p, tree *new_cst)
2698 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
2699 return true;
2700 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
2701 use_equiv_p, true, new_cst);
2705 /* Try to register an edge assertion for SSA name NAME on edge E for
2706 the condition COND contributing to the conditional jump pointed to by BSI.
2707 Invert the condition COND if INVERT is true. */
2709 static void
2710 register_edge_assert_for_2 (tree name, edge e,
2711 enum tree_code cond_code,
2712 tree cond_op0, tree cond_op1, bool invert,
2713 vec<assert_info> &asserts)
2715 tree val;
2716 enum tree_code comp_code;
2718 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2719 cond_op0,
2720 cond_op1,
2721 invert, &comp_code, &val))
2722 return;
2724 /* Queue the assert. */
2725 tree x;
2726 if (overflow_comparison_p (comp_code, name, val, false, &x))
2728 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
2729 ? GT_EXPR : LE_EXPR);
2730 add_assert_info (asserts, name, name, new_code, x);
2732 add_assert_info (asserts, name, name, comp_code, val);
2734 /* In the case of NAME <= CST and NAME being defined as
2735 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
2736 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
2737 This catches range and anti-range tests. */
2738 if ((comp_code == LE_EXPR
2739 || comp_code == GT_EXPR)
2740 && TREE_CODE (val) == INTEGER_CST
2741 && TYPE_UNSIGNED (TREE_TYPE (val)))
2743 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2744 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
2746 /* Extract CST2 from the (optional) addition. */
2747 if (is_gimple_assign (def_stmt)
2748 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
2750 name2 = gimple_assign_rhs1 (def_stmt);
2751 cst2 = gimple_assign_rhs2 (def_stmt);
2752 if (TREE_CODE (name2) == SSA_NAME
2753 && TREE_CODE (cst2) == INTEGER_CST)
2754 def_stmt = SSA_NAME_DEF_STMT (name2);
2757 /* Extract NAME2 from the (optional) sign-changing cast. */
2758 if (gimple_assign_cast_p (def_stmt))
2760 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
2761 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2762 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
2763 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
2764 name3 = gimple_assign_rhs1 (def_stmt);
2767 /* If name3 is used later, create an ASSERT_EXPR for it. */
2768 if (name3 != NULL_TREE
2769 && TREE_CODE (name3) == SSA_NAME
2770 && (cst2 == NULL_TREE
2771 || TREE_CODE (cst2) == INTEGER_CST)
2772 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
2774 tree tmp;
2776 /* Build an expression for the range test. */
2777 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
2778 if (cst2 != NULL_TREE)
2779 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2780 add_assert_info (asserts, name3, tmp, comp_code, val);
2783 /* If name2 is used later, create an ASSERT_EXPR for it. */
2784 if (name2 != NULL_TREE
2785 && TREE_CODE (name2) == SSA_NAME
2786 && TREE_CODE (cst2) == INTEGER_CST
2787 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
2789 tree tmp;
2791 /* Build an expression for the range test. */
2792 tmp = name2;
2793 if (TREE_TYPE (name) != TREE_TYPE (name2))
2794 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
2795 if (cst2 != NULL_TREE)
2796 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2797 add_assert_info (asserts, name2, tmp, comp_code, val);
2801 /* In the case of post-in/decrement tests like if (i++) ... and uses
2802 of the in/decremented value on the edge the extra name we want to
2803 assert for is not on the def chain of the name compared. Instead
2804 it is in the set of use stmts.
2805 Similar cases happen for conversions that were simplified through
2806 fold_{sign_changed,widened}_comparison. */
2807 if ((comp_code == NE_EXPR
2808 || comp_code == EQ_EXPR)
2809 && TREE_CODE (val) == INTEGER_CST)
2811 imm_use_iterator ui;
2812 gimple *use_stmt;
2813 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
2815 if (!is_gimple_assign (use_stmt))
2816 continue;
2818 /* Cut off to use-stmts that are dominating the predecessor. */
2819 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
2820 continue;
2822 tree name2 = gimple_assign_lhs (use_stmt);
2823 if (TREE_CODE (name2) != SSA_NAME)
2824 continue;
2826 enum tree_code code = gimple_assign_rhs_code (use_stmt);
2827 tree cst;
2828 if (code == PLUS_EXPR
2829 || code == MINUS_EXPR)
2831 cst = gimple_assign_rhs2 (use_stmt);
2832 if (TREE_CODE (cst) != INTEGER_CST)
2833 continue;
2834 cst = int_const_binop (code, val, cst);
2836 else if (CONVERT_EXPR_CODE_P (code))
2838 /* For truncating conversions we cannot record
2839 an inequality. */
2840 if (comp_code == NE_EXPR
2841 && (TYPE_PRECISION (TREE_TYPE (name2))
2842 < TYPE_PRECISION (TREE_TYPE (name))))
2843 continue;
2844 cst = fold_convert (TREE_TYPE (name2), val);
2846 else
2847 continue;
2849 if (TREE_OVERFLOW_P (cst))
2850 cst = drop_tree_overflow (cst);
2851 add_assert_info (asserts, name2, name2, comp_code, cst);
2855 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
2856 && TREE_CODE (val) == INTEGER_CST)
2858 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2859 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
2860 tree val2 = NULL_TREE;
2861 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
2862 wide_int mask = wi::zero (prec);
2863 unsigned int nprec = prec;
2864 enum tree_code rhs_code = ERROR_MARK;
2866 if (is_gimple_assign (def_stmt))
2867 rhs_code = gimple_assign_rhs_code (def_stmt);
2869 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
2870 assert that A != CST1 -+ CST2. */
2871 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2872 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
2874 tree op0 = gimple_assign_rhs1 (def_stmt);
2875 tree op1 = gimple_assign_rhs2 (def_stmt);
2876 if (TREE_CODE (op0) == SSA_NAME
2877 && TREE_CODE (op1) == INTEGER_CST)
2879 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
2880 ? MINUS_EXPR : PLUS_EXPR);
2881 op1 = int_const_binop (reverse_op, val, op1);
2882 if (TREE_OVERFLOW (op1))
2883 op1 = drop_tree_overflow (op1);
2884 add_assert_info (asserts, op0, op0, comp_code, op1);
2888 /* Add asserts for NAME cmp CST and NAME being defined
2889 as NAME = (int) NAME2. */
2890 if (!TYPE_UNSIGNED (TREE_TYPE (val))
2891 && (comp_code == LE_EXPR || comp_code == LT_EXPR
2892 || comp_code == GT_EXPR || comp_code == GE_EXPR)
2893 && gimple_assign_cast_p (def_stmt))
2895 name2 = gimple_assign_rhs1 (def_stmt);
2896 if (CONVERT_EXPR_CODE_P (rhs_code)
2897 && TREE_CODE (name2) == SSA_NAME
2898 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2899 && TYPE_UNSIGNED (TREE_TYPE (name2))
2900 && prec == TYPE_PRECISION (TREE_TYPE (name2))
2901 && (comp_code == LE_EXPR || comp_code == GT_EXPR
2902 || !tree_int_cst_equal (val,
2903 TYPE_MIN_VALUE (TREE_TYPE (val)))))
2905 tree tmp, cst;
2906 enum tree_code new_comp_code = comp_code;
2908 cst = fold_convert (TREE_TYPE (name2),
2909 TYPE_MIN_VALUE (TREE_TYPE (val)));
2910 /* Build an expression for the range test. */
2911 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
2912 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
2913 fold_convert (TREE_TYPE (name2), val));
2914 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2916 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
2917 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
2918 build_int_cst (TREE_TYPE (name2), 1));
2920 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
2924 /* Add asserts for NAME cmp CST and NAME being defined as
2925 NAME = NAME2 >> CST2.
2927 Extract CST2 from the right shift. */
2928 if (rhs_code == RSHIFT_EXPR)
2930 name2 = gimple_assign_rhs1 (def_stmt);
2931 cst2 = gimple_assign_rhs2 (def_stmt);
2932 if (TREE_CODE (name2) == SSA_NAME
2933 && tree_fits_uhwi_p (cst2)
2934 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2935 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
2936 && type_has_mode_precision_p (TREE_TYPE (val)))
2938 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
2939 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
2942 if (val2 != NULL_TREE
2943 && TREE_CODE (val2) == INTEGER_CST
2944 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
2945 TREE_TYPE (val),
2946 val2, cst2), val))
2948 enum tree_code new_comp_code = comp_code;
2949 tree tmp, new_val;
2951 tmp = name2;
2952 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
2954 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
2956 tree type = build_nonstandard_integer_type (prec, 1);
2957 tmp = build1 (NOP_EXPR, type, name2);
2958 val2 = fold_convert (type, val2);
2960 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
2961 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
2962 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
2964 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2966 wide_int minval
2967 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2968 new_val = val2;
2969 if (minval == wi::to_wide (new_val))
2970 new_val = NULL_TREE;
2972 else
2974 wide_int maxval
2975 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2976 mask |= wi::to_wide (val2);
2977 if (wi::eq_p (mask, maxval))
2978 new_val = NULL_TREE;
2979 else
2980 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
2983 if (new_val)
2984 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
2987 /* If we have a conversion that doesn't change the value of the source
2988 simply register the same assert for it. */
2989 if (CONVERT_EXPR_CODE_P (rhs_code))
2991 wide_int rmin, rmax;
2992 tree rhs1 = gimple_assign_rhs1 (def_stmt);
2993 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
2994 && TREE_CODE (rhs1) == SSA_NAME
2995 /* Make sure the relation preserves the upper/lower boundary of
2996 the range conservatively. */
2997 && (comp_code == NE_EXPR
2998 || comp_code == EQ_EXPR
2999 || (TYPE_SIGN (TREE_TYPE (name))
3000 == TYPE_SIGN (TREE_TYPE (rhs1)))
3001 || ((comp_code == LE_EXPR
3002 || comp_code == LT_EXPR)
3003 && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
3004 || ((comp_code == GE_EXPR
3005 || comp_code == GT_EXPR)
3006 && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
3007 /* And the conversion does not alter the value we compare
3008 against and all values in rhs1 can be represented in
3009 the converted to type. */
3010 && int_fits_type_p (val, TREE_TYPE (rhs1))
3011 && ((TYPE_PRECISION (TREE_TYPE (name))
3012 > TYPE_PRECISION (TREE_TYPE (rhs1)))
3013 || (get_range_info (rhs1, &rmin, &rmax) == VR_RANGE
3014 && wi::fits_to_tree_p (rmin, TREE_TYPE (name))
3015 && wi::fits_to_tree_p (rmax, TREE_TYPE (name)))))
3016 add_assert_info (asserts, rhs1, rhs1,
3017 comp_code, fold_convert (TREE_TYPE (rhs1), val));
3020 /* Add asserts for NAME cmp CST and NAME being defined as
3021 NAME = NAME2 & CST2.
3023 Extract CST2 from the and.
3025 Also handle
3026 NAME = (unsigned) NAME2;
3027 casts where NAME's type is unsigned and has smaller precision
3028 than NAME2's type as if it was NAME = NAME2 & MASK. */
3029 names[0] = NULL_TREE;
3030 names[1] = NULL_TREE;
3031 cst2 = NULL_TREE;
3032 if (rhs_code == BIT_AND_EXPR
3033 || (CONVERT_EXPR_CODE_P (rhs_code)
3034 && INTEGRAL_TYPE_P (TREE_TYPE (val))
3035 && TYPE_UNSIGNED (TREE_TYPE (val))
3036 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
3037 > prec))
3039 name2 = gimple_assign_rhs1 (def_stmt);
3040 if (rhs_code == BIT_AND_EXPR)
3041 cst2 = gimple_assign_rhs2 (def_stmt);
3042 else
3044 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
3045 nprec = TYPE_PRECISION (TREE_TYPE (name2));
3047 if (TREE_CODE (name2) == SSA_NAME
3048 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3049 && TREE_CODE (cst2) == INTEGER_CST
3050 && !integer_zerop (cst2)
3051 && (nprec > 1
3052 || TYPE_UNSIGNED (TREE_TYPE (val))))
3054 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
3055 if (gimple_assign_cast_p (def_stmt2))
3057 names[1] = gimple_assign_rhs1 (def_stmt2);
3058 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
3059 || TREE_CODE (names[1]) != SSA_NAME
3060 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
3061 || (TYPE_PRECISION (TREE_TYPE (name2))
3062 != TYPE_PRECISION (TREE_TYPE (names[1]))))
3063 names[1] = NULL_TREE;
3065 names[0] = name2;
3068 if (names[0] || names[1])
3070 wide_int minv, maxv, valv, cst2v;
3071 wide_int tem, sgnbit;
3072 bool valid_p = false, valn, cst2n;
3073 enum tree_code ccode = comp_code;
3075 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
3076 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
3077 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
3078 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
3079 /* If CST2 doesn't have most significant bit set,
3080 but VAL is negative, we have comparison like
3081 if ((x & 0x123) > -4) (always true). Just give up. */
3082 if (!cst2n && valn)
3083 ccode = ERROR_MARK;
3084 if (cst2n)
3085 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3086 else
3087 sgnbit = wi::zero (nprec);
3088 minv = valv & cst2v;
3089 switch (ccode)
3091 case EQ_EXPR:
3092 /* Minimum unsigned value for equality is VAL & CST2
3093 (should be equal to VAL, otherwise we probably should
3094 have folded the comparison into false) and
3095 maximum unsigned value is VAL | ~CST2. */
3096 maxv = valv | ~cst2v;
3097 valid_p = true;
3098 break;
3100 case NE_EXPR:
3101 tem = valv | ~cst2v;
3102 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
3103 if (valv == 0)
3105 cst2n = false;
3106 sgnbit = wi::zero (nprec);
3107 goto gt_expr;
3109 /* If (VAL | ~CST2) is all ones, handle it as
3110 (X & CST2) < VAL. */
3111 if (tem == -1)
3113 cst2n = false;
3114 valn = false;
3115 sgnbit = wi::zero (nprec);
3116 goto lt_expr;
3118 if (!cst2n && wi::neg_p (cst2v))
3119 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3120 if (sgnbit != 0)
3122 if (valv == sgnbit)
3124 cst2n = true;
3125 valn = true;
3126 goto gt_expr;
3128 if (tem == wi::mask (nprec - 1, false, nprec))
3130 cst2n = true;
3131 goto lt_expr;
3133 if (!cst2n)
3134 sgnbit = wi::zero (nprec);
3136 break;
3138 case GE_EXPR:
3139 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
3140 is VAL and maximum unsigned value is ~0. For signed
3141 comparison, if CST2 doesn't have most significant bit
3142 set, handle it similarly. If CST2 has MSB set,
3143 the minimum is the same, and maximum is ~0U/2. */
3144 if (minv != valv)
3146 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
3147 VAL. */
3148 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3149 if (minv == valv)
3150 break;
3152 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3153 valid_p = true;
3154 break;
3156 case GT_EXPR:
3157 gt_expr:
3158 /* Find out smallest MINV where MINV > VAL
3159 && (MINV & CST2) == MINV, if any. If VAL is signed and
3160 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
3161 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3162 if (minv == valv)
3163 break;
3164 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3165 valid_p = true;
3166 break;
3168 case LE_EXPR:
3169 /* Minimum unsigned value for <= is 0 and maximum
3170 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
3171 Otherwise, find smallest VAL2 where VAL2 > VAL
3172 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3173 as maximum.
3174 For signed comparison, if CST2 doesn't have most
3175 significant bit set, handle it similarly. If CST2 has
3176 MSB set, the maximum is the same and minimum is INT_MIN. */
3177 if (minv == valv)
3178 maxv = valv;
3179 else
3181 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3182 if (maxv == valv)
3183 break;
3184 maxv -= 1;
3186 maxv |= ~cst2v;
3187 minv = sgnbit;
3188 valid_p = true;
3189 break;
3191 case LT_EXPR:
3192 lt_expr:
3193 /* Minimum unsigned value for < is 0 and maximum
3194 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
3195 Otherwise, find smallest VAL2 where VAL2 > VAL
3196 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3197 as maximum.
3198 For signed comparison, if CST2 doesn't have most
3199 significant bit set, handle it similarly. If CST2 has
3200 MSB set, the maximum is the same and minimum is INT_MIN. */
3201 if (minv == valv)
3203 if (valv == sgnbit)
3204 break;
3205 maxv = valv;
3207 else
3209 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3210 if (maxv == valv)
3211 break;
3213 maxv -= 1;
3214 maxv |= ~cst2v;
3215 minv = sgnbit;
3216 valid_p = true;
3217 break;
3219 default:
3220 break;
3222 if (valid_p
3223 && (maxv - minv) != -1)
3225 tree tmp, new_val, type;
3226 int i;
3228 for (i = 0; i < 2; i++)
3229 if (names[i])
3231 wide_int maxv2 = maxv;
3232 tmp = names[i];
3233 type = TREE_TYPE (names[i]);
3234 if (!TYPE_UNSIGNED (type))
3236 type = build_nonstandard_integer_type (nprec, 1);
3237 tmp = build1 (NOP_EXPR, type, names[i]);
3239 if (minv != 0)
3241 tmp = build2 (PLUS_EXPR, type, tmp,
3242 wide_int_to_tree (type, -minv));
3243 maxv2 = maxv - minv;
3245 new_val = wide_int_to_tree (type, maxv2);
3246 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3253 /* OP is an operand of a truth value expression which is known to have
3254 a particular value. Register any asserts for OP and for any
3255 operands in OP's defining statement.
3257 If CODE is EQ_EXPR, then we want to register OP is zero (false),
3258 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
3260 static void
3261 register_edge_assert_for_1 (tree op, enum tree_code code,
3262 edge e, vec<assert_info> &asserts)
3264 gimple *op_def;
3265 tree val;
3266 enum tree_code rhs_code;
3268 /* We only care about SSA_NAMEs. */
3269 if (TREE_CODE (op) != SSA_NAME)
3270 return;
3272 /* We know that OP will have a zero or nonzero value. */
3273 val = build_int_cst (TREE_TYPE (op), 0);
3274 add_assert_info (asserts, op, op, code, val);
3276 /* Now look at how OP is set. If it's set from a comparison,
3277 a truth operation or some bit operations, then we may be able
3278 to register information about the operands of that assignment. */
3279 op_def = SSA_NAME_DEF_STMT (op);
3280 if (gimple_code (op_def) != GIMPLE_ASSIGN)
3281 return;
3283 rhs_code = gimple_assign_rhs_code (op_def);
3285 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3287 bool invert = (code == EQ_EXPR ? true : false);
3288 tree op0 = gimple_assign_rhs1 (op_def);
3289 tree op1 = gimple_assign_rhs2 (op_def);
3291 if (TREE_CODE (op0) == SSA_NAME)
3292 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3293 if (TREE_CODE (op1) == SSA_NAME)
3294 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3296 else if ((code == NE_EXPR
3297 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3298 || (code == EQ_EXPR
3299 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3301 /* Recurse on each operand. */
3302 tree op0 = gimple_assign_rhs1 (op_def);
3303 tree op1 = gimple_assign_rhs2 (op_def);
3304 if (TREE_CODE (op0) == SSA_NAME
3305 && has_single_use (op0))
3306 register_edge_assert_for_1 (op0, code, e, asserts);
3307 if (TREE_CODE (op1) == SSA_NAME
3308 && has_single_use (op1))
3309 register_edge_assert_for_1 (op1, code, e, asserts);
3311 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3312 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3314 /* Recurse, flipping CODE. */
3315 code = invert_tree_comparison (code, false);
3316 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3318 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3320 /* Recurse through the copy. */
3321 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3323 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3325 /* Recurse through the type conversion, unless it is a narrowing
3326 conversion or conversion from non-integral type. */
3327 tree rhs = gimple_assign_rhs1 (op_def);
3328 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3329 && (TYPE_PRECISION (TREE_TYPE (rhs))
3330 <= TYPE_PRECISION (TREE_TYPE (op))))
3331 register_edge_assert_for_1 (rhs, code, e, asserts);
3335 /* Check if comparison
3336 NAME COND_OP INTEGER_CST
3337 has a form of
3338 (X & 11...100..0) COND_OP XX...X00...0
3339 Such comparison can yield assertions like
3340 X >= XX...X00...0
3341 X <= XX...X11...1
3342 in case of COND_OP being EQ_EXPR or
3343 X < XX...X00...0
3344 X > XX...X11...1
3345 in case of NE_EXPR. */
3347 static bool
3348 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3349 tree *new_name, tree *low, enum tree_code *low_code,
3350 tree *high, enum tree_code *high_code)
3352 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3354 if (!is_gimple_assign (def_stmt)
3355 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3356 return false;
3358 tree t = gimple_assign_rhs1 (def_stmt);
3359 tree maskt = gimple_assign_rhs2 (def_stmt);
3360 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3361 return false;
3363 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3364 wide_int inv_mask = ~mask;
3365 /* Must have been removed by now so don't bother optimizing. */
3366 if (mask == 0 || inv_mask == 0)
3367 return false;
3369 /* Assume VALT is INTEGER_CST. */
3370 wi::tree_to_wide_ref val = wi::to_wide (valt);
3372 if ((inv_mask & (inv_mask + 1)) != 0
3373 || (val & mask) != val)
3374 return false;
3376 bool is_range = cond_code == EQ_EXPR;
3378 tree type = TREE_TYPE (t);
3379 wide_int min = wi::min_value (type),
3380 max = wi::max_value (type);
3382 if (is_range)
3384 *low_code = val == min ? ERROR_MARK : GE_EXPR;
3385 *high_code = val == max ? ERROR_MARK : LE_EXPR;
3387 else
3389 /* We can still generate assertion if one of alternatives
3390 is known to always be false. */
3391 if (val == min)
3393 *low_code = (enum tree_code) 0;
3394 *high_code = GT_EXPR;
3396 else if ((val | inv_mask) == max)
3398 *low_code = LT_EXPR;
3399 *high_code = (enum tree_code) 0;
3401 else
3402 return false;
3405 *new_name = t;
3406 *low = wide_int_to_tree (type, val);
3407 *high = wide_int_to_tree (type, val | inv_mask);
3409 return true;
3412 /* Try to register an edge assertion for SSA name NAME on edge E for
3413 the condition COND contributing to the conditional jump pointed to by
3414 SI. */
3416 void
3417 register_edge_assert_for (tree name, edge e,
3418 enum tree_code cond_code, tree cond_op0,
3419 tree cond_op1, vec<assert_info> &asserts)
3421 tree val;
3422 enum tree_code comp_code;
3423 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3425 /* Do not attempt to infer anything in names that flow through
3426 abnormal edges. */
3427 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3428 return;
3430 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3431 cond_op0, cond_op1,
3432 is_else_edge,
3433 &comp_code, &val))
3434 return;
3436 /* Register ASSERT_EXPRs for name. */
3437 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3438 cond_op1, is_else_edge, asserts);
3441 /* If COND is effectively an equality test of an SSA_NAME against
3442 the value zero or one, then we may be able to assert values
3443 for SSA_NAMEs which flow into COND. */
3445 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3446 statement of NAME we can assert both operands of the BIT_AND_EXPR
3447 have nonzero value. */
3448 if (((comp_code == EQ_EXPR && integer_onep (val))
3449 || (comp_code == NE_EXPR && integer_zerop (val))))
3451 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3453 if (is_gimple_assign (def_stmt)
3454 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3456 tree op0 = gimple_assign_rhs1 (def_stmt);
3457 tree op1 = gimple_assign_rhs2 (def_stmt);
3458 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3459 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3463 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3464 statement of NAME we can assert both operands of the BIT_IOR_EXPR
3465 have zero value. */
3466 if (((comp_code == EQ_EXPR && integer_zerop (val))
3467 || (comp_code == NE_EXPR && integer_onep (val))))
3469 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3471 /* For BIT_IOR_EXPR only if NAME == 0 both operands have
3472 necessarily zero value, or if type-precision is one. */
3473 if (is_gimple_assign (def_stmt)
3474 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3475 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3476 || comp_code == EQ_EXPR)))
3478 tree op0 = gimple_assign_rhs1 (def_stmt);
3479 tree op1 = gimple_assign_rhs2 (def_stmt);
3480 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3481 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3485 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
3486 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3487 && TREE_CODE (val) == INTEGER_CST)
3489 enum tree_code low_code, high_code;
3490 tree low, high;
3491 if (is_masked_range_test (name, val, comp_code, &name, &low,
3492 &low_code, &high, &high_code))
3494 if (low_code != ERROR_MARK)
3495 register_edge_assert_for_2 (name, e, low_code, name,
3496 low, /*invert*/false, asserts);
3497 if (high_code != ERROR_MARK)
3498 register_edge_assert_for_2 (name, e, high_code, name,
3499 high, /*invert*/false, asserts);
3504 /* Finish found ASSERTS for E and register them at GSI. */
3506 static void
3507 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
3508 vec<assert_info> &asserts)
3510 for (unsigned i = 0; i < asserts.length (); ++i)
3511 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3512 reachable from E. */
3513 if (live_on_edge (e, asserts[i].name))
3514 register_new_assert_for (asserts[i].name, asserts[i].expr,
3515 asserts[i].comp_code, asserts[i].val,
3516 NULL, e, gsi);
3521 /* Determine whether the outgoing edges of BB should receive an
3522 ASSERT_EXPR for each of the operands of BB's LAST statement.
3523 The last statement of BB must be a COND_EXPR.
3525 If any of the sub-graphs rooted at BB have an interesting use of
3526 the predicate operands, an assert location node is added to the
3527 list of assertions for the corresponding operands. */
3529 static void
3530 find_conditional_asserts (basic_block bb, gcond *last)
3532 gimple_stmt_iterator bsi;
3533 tree op;
3534 edge_iterator ei;
3535 edge e;
3536 ssa_op_iter iter;
3538 bsi = gsi_for_stmt (last);
3540 /* Look for uses of the operands in each of the sub-graphs
3541 rooted at BB. We need to check each of the outgoing edges
3542 separately, so that we know what kind of ASSERT_EXPR to
3543 insert. */
3544 FOR_EACH_EDGE (e, ei, bb->succs)
3546 if (e->dest == bb)
3547 continue;
3549 /* Register the necessary assertions for each operand in the
3550 conditional predicate. */
3551 auto_vec<assert_info, 8> asserts;
3552 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3553 register_edge_assert_for (op, e,
3554 gimple_cond_code (last),
3555 gimple_cond_lhs (last),
3556 gimple_cond_rhs (last), asserts);
3557 finish_register_edge_assert_for (e, bsi, asserts);
3561 struct case_info
3563 tree expr;
3564 basic_block bb;
3567 /* Compare two case labels sorting first by the destination bb index
3568 and then by the case value. */
3570 static int
3571 compare_case_labels (const void *p1, const void *p2)
3573 const struct case_info *ci1 = (const struct case_info *) p1;
3574 const struct case_info *ci2 = (const struct case_info *) p2;
3575 int idx1 = ci1->bb->index;
3576 int idx2 = ci2->bb->index;
3578 if (idx1 < idx2)
3579 return -1;
3580 else if (idx1 == idx2)
3582 /* Make sure the default label is first in a group. */
3583 if (!CASE_LOW (ci1->expr))
3584 return -1;
3585 else if (!CASE_LOW (ci2->expr))
3586 return 1;
3587 else
3588 return tree_int_cst_compare (CASE_LOW (ci1->expr),
3589 CASE_LOW (ci2->expr));
3591 else
3592 return 1;
3595 /* Determine whether the outgoing edges of BB should receive an
3596 ASSERT_EXPR for each of the operands of BB's LAST statement.
3597 The last statement of BB must be a SWITCH_EXPR.
3599 If any of the sub-graphs rooted at BB have an interesting use of
3600 the predicate operands, an assert location node is added to the
3601 list of assertions for the corresponding operands. */
3603 static void
3604 find_switch_asserts (basic_block bb, gswitch *last)
3606 gimple_stmt_iterator bsi;
3607 tree op;
3608 edge e;
3609 struct case_info *ci;
3610 size_t n = gimple_switch_num_labels (last);
3611 #if GCC_VERSION >= 4000
3612 unsigned int idx;
3613 #else
3614 /* Work around GCC 3.4 bug (PR 37086). */
3615 volatile unsigned int idx;
3616 #endif
3618 bsi = gsi_for_stmt (last);
3619 op = gimple_switch_index (last);
3620 if (TREE_CODE (op) != SSA_NAME)
3621 return;
3623 /* Build a vector of case labels sorted by destination label. */
3624 ci = XNEWVEC (struct case_info, n);
3625 for (idx = 0; idx < n; ++idx)
3627 ci[idx].expr = gimple_switch_label (last, idx);
3628 ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
3630 edge default_edge = find_edge (bb, ci[0].bb);
3631 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3633 for (idx = 0; idx < n; ++idx)
3635 tree min, max;
3636 tree cl = ci[idx].expr;
3637 basic_block cbb = ci[idx].bb;
3639 min = CASE_LOW (cl);
3640 max = CASE_HIGH (cl);
3642 /* If there are multiple case labels with the same destination
3643 we need to combine them to a single value range for the edge. */
3644 if (idx + 1 < n && cbb == ci[idx + 1].bb)
3646 /* Skip labels until the last of the group. */
3647 do {
3648 ++idx;
3649 } while (idx < n && cbb == ci[idx].bb);
3650 --idx;
3652 /* Pick up the maximum of the case label range. */
3653 if (CASE_HIGH (ci[idx].expr))
3654 max = CASE_HIGH (ci[idx].expr);
3655 else
3656 max = CASE_LOW (ci[idx].expr);
3659 /* Can't extract a useful assertion out of a range that includes the
3660 default label. */
3661 if (min == NULL_TREE)
3662 continue;
3664 /* Find the edge to register the assert expr on. */
3665 e = find_edge (bb, cbb);
3667 /* Register the necessary assertions for the operand in the
3668 SWITCH_EXPR. */
3669 auto_vec<assert_info, 8> asserts;
3670 register_edge_assert_for (op, e,
3671 max ? GE_EXPR : EQ_EXPR,
3672 op, fold_convert (TREE_TYPE (op), min),
3673 asserts);
3674 if (max)
3675 register_edge_assert_for (op, e, LE_EXPR, op,
3676 fold_convert (TREE_TYPE (op), max),
3677 asserts);
3678 finish_register_edge_assert_for (e, bsi, asserts);
3681 XDELETEVEC (ci);
3683 if (!live_on_edge (default_edge, op))
3684 return;
3686 /* Now register along the default label assertions that correspond to the
3687 anti-range of each label. */
3688 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
3689 if (insertion_limit == 0)
3690 return;
3692 /* We can't do this if the default case shares a label with another case. */
3693 tree default_cl = gimple_switch_default_label (last);
3694 for (idx = 1; idx < n; idx++)
3696 tree min, max;
3697 tree cl = gimple_switch_label (last, idx);
3698 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3699 continue;
3701 min = CASE_LOW (cl);
3702 max = CASE_HIGH (cl);
3704 /* Combine contiguous case ranges to reduce the number of assertions
3705 to insert. */
3706 for (idx = idx + 1; idx < n; idx++)
3708 tree next_min, next_max;
3709 tree next_cl = gimple_switch_label (last, idx);
3710 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3711 break;
3713 next_min = CASE_LOW (next_cl);
3714 next_max = CASE_HIGH (next_cl);
3716 wide_int difference = (wi::to_wide (next_min)
3717 - wi::to_wide (max ? max : min));
3718 if (wi::eq_p (difference, 1))
3719 max = next_max ? next_max : next_min;
3720 else
3721 break;
3723 idx--;
3725 if (max == NULL_TREE)
3727 /* Register the assertion OP != MIN. */
3728 auto_vec<assert_info, 8> asserts;
3729 min = fold_convert (TREE_TYPE (op), min);
3730 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3731 asserts);
3732 finish_register_edge_assert_for (default_edge, bsi, asserts);
3734 else
3736 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3737 which will give OP the anti-range ~[MIN,MAX]. */
3738 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3739 min = fold_convert (TREE_TYPE (uop), min);
3740 max = fold_convert (TREE_TYPE (uop), max);
3742 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3743 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3744 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3745 NULL, default_edge, bsi);
3748 if (--insertion_limit == 0)
3749 break;
3754 /* Traverse all the statements in block BB looking for statements that
3755 may generate useful assertions for the SSA names in their operand.
3756 If a statement produces a useful assertion A for name N_i, then the
3757 list of assertions already generated for N_i is scanned to
3758 determine if A is actually needed.
3760 If N_i already had the assertion A at a location dominating the
3761 current location, then nothing needs to be done. Otherwise, the
3762 new location for A is recorded instead.
3764 1- For every statement S in BB, all the variables used by S are
3765 added to bitmap FOUND_IN_SUBGRAPH.
3767 2- If statement S uses an operand N in a way that exposes a known
3768 value range for N, then if N was not already generated by an
3769 ASSERT_EXPR, create a new assert location for N. For instance,
3770 if N is a pointer and the statement dereferences it, we can
3771 assume that N is not NULL.
3773 3- COND_EXPRs are a special case of #2. We can derive range
3774 information from the predicate but need to insert different
3775 ASSERT_EXPRs for each of the sub-graphs rooted at the
3776 conditional block. If the last statement of BB is a conditional
3777 expression of the form 'X op Y', then
3779 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3781 b) If the conditional is the only entry point to the sub-graph
3782 corresponding to the THEN_CLAUSE, recurse into it. On
3783 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3784 an ASSERT_EXPR is added for the corresponding variable.
3786 c) Repeat step (b) on the ELSE_CLAUSE.
3788 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3790 For instance,
3792 if (a == 9)
3793 b = a;
3794 else
3795 b = c + 1;
3797 In this case, an assertion on the THEN clause is useful to
3798 determine that 'a' is always 9 on that edge. However, an assertion
3799 on the ELSE clause would be unnecessary.
3801 4- If BB does not end in a conditional expression, then we recurse
3802 into BB's dominator children.
3804 At the end of the recursive traversal, every SSA name will have a
3805 list of locations where ASSERT_EXPRs should be added. When a new
3806 location for name N is found, it is registered by calling
3807 register_new_assert_for. That function keeps track of all the
3808 registered assertions to prevent adding unnecessary assertions.
3809 For instance, if a pointer P_4 is dereferenced more than once in a
3810 dominator tree, only the location dominating all the dereference of
3811 P_4 will receive an ASSERT_EXPR. */
3813 static void
3814 find_assert_locations_1 (basic_block bb, sbitmap live)
3816 gimple *last;
3818 last = last_stmt (bb);
3820 /* If BB's last statement is a conditional statement involving integer
3821 operands, determine if we need to add ASSERT_EXPRs. */
3822 if (last
3823 && gimple_code (last) == GIMPLE_COND
3824 && !fp_predicate (last)
3825 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3826 find_conditional_asserts (bb, as_a <gcond *> (last));
3828 /* If BB's last statement is a switch statement involving integer
3829 operands, determine if we need to add ASSERT_EXPRs. */
3830 if (last
3831 && gimple_code (last) == GIMPLE_SWITCH
3832 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3833 find_switch_asserts (bb, as_a <gswitch *> (last));
3835 /* Traverse all the statements in BB marking used names and looking
3836 for statements that may infer assertions for their used operands. */
3837 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3838 gsi_prev (&si))
3840 gimple *stmt;
3841 tree op;
3842 ssa_op_iter i;
3844 stmt = gsi_stmt (si);
3846 if (is_gimple_debug (stmt))
3847 continue;
3849 /* See if we can derive an assertion for any of STMT's operands. */
3850 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3852 tree value;
3853 enum tree_code comp_code;
3855 /* If op is not live beyond this stmt, do not bother to insert
3856 asserts for it. */
3857 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
3858 continue;
3860 /* If OP is used in such a way that we can infer a value
3861 range for it, and we don't find a previous assertion for
3862 it, create a new assertion location node for OP. */
3863 if (infer_value_range (stmt, op, &comp_code, &value))
3865 /* If we are able to infer a nonzero value range for OP,
3866 then walk backwards through the use-def chain to see if OP
3867 was set via a typecast.
3869 If so, then we can also infer a nonzero value range
3870 for the operand of the NOP_EXPR. */
3871 if (comp_code == NE_EXPR && integer_zerop (value))
3873 tree t = op;
3874 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3876 while (is_gimple_assign (def_stmt)
3877 && CONVERT_EXPR_CODE_P
3878 (gimple_assign_rhs_code (def_stmt))
3879 && TREE_CODE
3880 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3881 && POINTER_TYPE_P
3882 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3884 t = gimple_assign_rhs1 (def_stmt);
3885 def_stmt = SSA_NAME_DEF_STMT (t);
3887 /* Note we want to register the assert for the
3888 operand of the NOP_EXPR after SI, not after the
3889 conversion. */
3890 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3891 register_new_assert_for (t, t, comp_code, value,
3892 bb, NULL, si);
3896 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3900 /* Update live. */
3901 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3902 bitmap_set_bit (live, SSA_NAME_VERSION (op));
3903 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3904 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3907 /* Traverse all PHI nodes in BB, updating live. */
3908 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3909 gsi_next (&si))
3911 use_operand_p arg_p;
3912 ssa_op_iter i;
3913 gphi *phi = si.phi ();
3914 tree res = gimple_phi_result (phi);
3916 if (virtual_operand_p (res))
3917 continue;
3919 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3921 tree arg = USE_FROM_PTR (arg_p);
3922 if (TREE_CODE (arg) == SSA_NAME)
3923 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3926 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3930 /* Do an RPO walk over the function computing SSA name liveness
3931 on-the-fly and deciding on assert expressions to insert. */
3933 static void
3934 find_assert_locations (void)
3936 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3937 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3938 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3939 int rpo_cnt, i;
3941 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3942 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3943 for (i = 0; i < rpo_cnt; ++i)
3944 bb_rpo[rpo[i]] = i;
3946 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3947 the order we compute liveness and insert asserts we otherwise
3948 fail to insert asserts into the loop latch. */
3949 loop_p loop;
3950 FOR_EACH_LOOP (loop, 0)
3952 i = loop->latch->index;
3953 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3954 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3955 !gsi_end_p (gsi); gsi_next (&gsi))
3957 gphi *phi = gsi.phi ();
3958 if (virtual_operand_p (gimple_phi_result (phi)))
3959 continue;
3960 tree arg = gimple_phi_arg_def (phi, j);
3961 if (TREE_CODE (arg) == SSA_NAME)
3963 if (live[i] == NULL)
3965 live[i] = sbitmap_alloc (num_ssa_names);
3966 bitmap_clear (live[i]);
3968 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
3973 for (i = rpo_cnt - 1; i >= 0; --i)
3975 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3976 edge e;
3977 edge_iterator ei;
3979 if (!live[rpo[i]])
3981 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
3982 bitmap_clear (live[rpo[i]]);
3985 /* Process BB and update the live information with uses in
3986 this block. */
3987 find_assert_locations_1 (bb, live[rpo[i]]);
3989 /* Merge liveness into the predecessor blocks and free it. */
3990 if (!bitmap_empty_p (live[rpo[i]]))
3992 int pred_rpo = i;
3993 FOR_EACH_EDGE (e, ei, bb->preds)
3995 int pred = e->src->index;
3996 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3997 continue;
3999 if (!live[pred])
4001 live[pred] = sbitmap_alloc (num_ssa_names);
4002 bitmap_clear (live[pred]);
4004 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
4006 if (bb_rpo[pred] < pred_rpo)
4007 pred_rpo = bb_rpo[pred];
4010 /* Record the RPO number of the last visited block that needs
4011 live information from this block. */
4012 last_rpo[rpo[i]] = pred_rpo;
4014 else
4016 sbitmap_free (live[rpo[i]]);
4017 live[rpo[i]] = NULL;
4020 /* We can free all successors live bitmaps if all their
4021 predecessors have been visited already. */
4022 FOR_EACH_EDGE (e, ei, bb->succs)
4023 if (last_rpo[e->dest->index] == i
4024 && live[e->dest->index])
4026 sbitmap_free (live[e->dest->index]);
4027 live[e->dest->index] = NULL;
4031 XDELETEVEC (rpo);
4032 XDELETEVEC (bb_rpo);
4033 XDELETEVEC (last_rpo);
4034 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
4035 if (live[i])
4036 sbitmap_free (live[i]);
4037 XDELETEVEC (live);
4040 /* Create an ASSERT_EXPR for NAME and insert it in the location
4041 indicated by LOC. Return true if we made any edge insertions. */
4043 static bool
4044 process_assert_insertions_for (tree name, assert_locus *loc)
4046 /* Build the comparison expression NAME_i COMP_CODE VAL. */
4047 gimple *stmt;
4048 tree cond;
4049 gimple *assert_stmt;
4050 edge_iterator ei;
4051 edge e;
4053 /* If we have X <=> X do not insert an assert expr for that. */
4054 if (loc->expr == loc->val)
4055 return false;
4057 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
4058 assert_stmt = build_assert_expr_for (cond, name);
4059 if (loc->e)
4061 /* We have been asked to insert the assertion on an edge. This
4062 is used only by COND_EXPR and SWITCH_EXPR assertions. */
4063 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
4064 || (gimple_code (gsi_stmt (loc->si))
4065 == GIMPLE_SWITCH));
4067 gsi_insert_on_edge (loc->e, assert_stmt);
4068 return true;
4071 /* If the stmt iterator points at the end then this is an insertion
4072 at the beginning of a block. */
4073 if (gsi_end_p (loc->si))
4075 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
4076 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
4077 return false;
4080 /* Otherwise, we can insert right after LOC->SI iff the
4081 statement must not be the last statement in the block. */
4082 stmt = gsi_stmt (loc->si);
4083 if (!stmt_ends_bb_p (stmt))
4085 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
4086 return false;
4089 /* If STMT must be the last statement in BB, we can only insert new
4090 assertions on the non-abnormal edge out of BB. Note that since
4091 STMT is not control flow, there may only be one non-abnormal/eh edge
4092 out of BB. */
4093 FOR_EACH_EDGE (e, ei, loc->bb->succs)
4094 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
4096 gsi_insert_on_edge (e, assert_stmt);
4097 return true;
4100 gcc_unreachable ();
4103 /* Qsort helper for sorting assert locations. If stable is true, don't
4104 use iterative_hash_expr because it can be unstable for -fcompare-debug,
4105 on the other side some pointers might be NULL. */
4107 template <bool stable>
4108 static int
4109 compare_assert_loc (const void *pa, const void *pb)
4111 assert_locus * const a = *(assert_locus * const *)pa;
4112 assert_locus * const b = *(assert_locus * const *)pb;
4114 /* If stable, some asserts might be optimized away already, sort
4115 them last. */
4116 if (stable)
4118 if (a == NULL)
4119 return b != NULL;
4120 else if (b == NULL)
4121 return -1;
4124 if (a->e == NULL && b->e != NULL)
4125 return 1;
4126 else if (a->e != NULL && b->e == NULL)
4127 return -1;
4129 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
4130 no need to test both a->e and b->e. */
4132 /* Sort after destination index. */
4133 if (a->e == NULL)
4135 else if (a->e->dest->index > b->e->dest->index)
4136 return 1;
4137 else if (a->e->dest->index < b->e->dest->index)
4138 return -1;
4140 /* Sort after comp_code. */
4141 if (a->comp_code > b->comp_code)
4142 return 1;
4143 else if (a->comp_code < b->comp_code)
4144 return -1;
4146 hashval_t ha, hb;
4148 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
4149 uses DECL_UID of the VAR_DECL, so sorting might differ between
4150 -g and -g0. When doing the removal of redundant assert exprs
4151 and commonization to successors, this does not matter, but for
4152 the final sort needs to be stable. */
4153 if (stable)
4155 ha = 0;
4156 hb = 0;
4158 else
4160 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
4161 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
4164 /* Break the tie using hashing and source/bb index. */
4165 if (ha == hb)
4166 return (a->e != NULL
4167 ? a->e->src->index - b->e->src->index
4168 : a->bb->index - b->bb->index);
4169 return ha > hb ? 1 : -1;
4172 /* Process all the insertions registered for every name N_i registered
4173 in NEED_ASSERT_FOR. The list of assertions to be inserted are
4174 found in ASSERTS_FOR[i]. */
4176 static void
4177 process_assert_insertions (void)
4179 unsigned i;
4180 bitmap_iterator bi;
4181 bool update_edges_p = false;
4182 int num_asserts = 0;
4184 if (dump_file && (dump_flags & TDF_DETAILS))
4185 dump_all_asserts (dump_file);
4187 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4189 assert_locus *loc = asserts_for[i];
4190 gcc_assert (loc);
4192 auto_vec<assert_locus *, 16> asserts;
4193 for (; loc; loc = loc->next)
4194 asserts.safe_push (loc);
4195 asserts.qsort (compare_assert_loc<false>);
4197 /* Push down common asserts to successors and remove redundant ones. */
4198 unsigned ecnt = 0;
4199 assert_locus *common = NULL;
4200 unsigned commonj = 0;
4201 for (unsigned j = 0; j < asserts.length (); ++j)
4203 loc = asserts[j];
4204 if (! loc->e)
4205 common = NULL;
4206 else if (! common
4207 || loc->e->dest != common->e->dest
4208 || loc->comp_code != common->comp_code
4209 || ! operand_equal_p (loc->val, common->val, 0)
4210 || ! operand_equal_p (loc->expr, common->expr, 0))
4212 commonj = j;
4213 common = loc;
4214 ecnt = 1;
4216 else if (loc->e == asserts[j-1]->e)
4218 /* Remove duplicate asserts. */
4219 if (commonj == j - 1)
4221 commonj = j;
4222 common = loc;
4224 free (asserts[j-1]);
4225 asserts[j-1] = NULL;
4227 else
4229 ecnt++;
4230 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
4232 /* We have the same assertion on all incoming edges of a BB.
4233 Insert it at the beginning of that block. */
4234 loc->bb = loc->e->dest;
4235 loc->e = NULL;
4236 loc->si = gsi_none ();
4237 common = NULL;
4238 /* Clear asserts commoned. */
4239 for (; commonj != j; ++commonj)
4240 if (asserts[commonj])
4242 free (asserts[commonj]);
4243 asserts[commonj] = NULL;
4249 /* The asserts vector sorting above might be unstable for
4250 -fcompare-debug, sort again to ensure a stable sort. */
4251 asserts.qsort (compare_assert_loc<true>);
4252 for (unsigned j = 0; j < asserts.length (); ++j)
4254 loc = asserts[j];
4255 if (! loc)
4256 break;
4257 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4258 num_asserts++;
4259 free (loc);
4263 if (update_edges_p)
4264 gsi_commit_edge_inserts ();
4266 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4267 num_asserts);
4271 /* Traverse the flowgraph looking for conditional jumps to insert range
4272 expressions. These range expressions are meant to provide information
4273 to optimizations that need to reason in terms of value ranges. They
4274 will not be expanded into RTL. For instance, given:
4276 x = ...
4277 y = ...
4278 if (x < y)
4279 y = x - 2;
4280 else
4281 x = y + 3;
4283 this pass will transform the code into:
4285 x = ...
4286 y = ...
4287 if (x < y)
4289 x = ASSERT_EXPR <x, x < y>
4290 y = x - 2
4292 else
4294 y = ASSERT_EXPR <y, x >= y>
4295 x = y + 3
4298 The idea is that once copy and constant propagation have run, other
4299 optimizations will be able to determine what ranges of values can 'x'
4300 take in different paths of the code, simply by checking the reaching
4301 definition of 'x'. */
4303 static void
4304 insert_range_assertions (void)
4306 need_assert_for = BITMAP_ALLOC (NULL);
4307 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4309 calculate_dominance_info (CDI_DOMINATORS);
4311 find_assert_locations ();
4312 if (!bitmap_empty_p (need_assert_for))
4314 process_assert_insertions ();
4315 update_ssa (TODO_update_ssa_no_phi);
4318 if (dump_file && (dump_flags & TDF_DETAILS))
4320 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4321 dump_function_to_file (current_function_decl, dump_file, dump_flags);
4324 free (asserts_for);
4325 BITMAP_FREE (need_assert_for);
4328 class vrp_prop : public ssa_propagation_engine
4330 public:
4331 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4332 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4334 void vrp_initialize (void);
4335 void vrp_finalize (bool);
4336 void check_all_array_refs (void);
4337 void check_array_ref (location_t, tree, bool);
4338 void check_mem_ref (location_t, tree, bool);
4339 void search_for_addr_array (tree, location_t);
4341 class vr_values vr_values;
4342 /* Temporary delegator to minimize code churn. */
4343 value_range *get_value_range (const_tree op)
4344 { return vr_values.get_value_range (op); }
4345 void set_defs_to_varying (gimple *stmt)
4346 { return vr_values.set_defs_to_varying (stmt); }
4347 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4348 tree *output_p, value_range *vr)
4349 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4350 bool update_value_range (const_tree op, value_range *vr)
4351 { return vr_values.update_value_range (op, vr); }
4352 void extract_range_basic (value_range *vr, gimple *stmt)
4353 { vr_values.extract_range_basic (vr, stmt); }
4354 void extract_range_from_phi_node (gphi *phi, value_range *vr)
4355 { vr_values.extract_range_from_phi_node (phi, vr); }
4357 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
4358 and "struct" hacks. If VRP can determine that the
4359 array subscript is a constant, check if it is outside valid
4360 range. If the array subscript is a RANGE, warn if it is
4361 non-overlapping with valid range.
4362 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
4364 void
4365 vrp_prop::check_array_ref (location_t location, tree ref,
4366 bool ignore_off_by_one)
4368 const value_range *vr = NULL;
4369 tree low_sub, up_sub;
4370 tree low_bound, up_bound, up_bound_p1;
4372 if (TREE_NO_WARNING (ref))
4373 return;
4375 low_sub = up_sub = TREE_OPERAND (ref, 1);
4376 up_bound = array_ref_up_bound (ref);
4378 if (!up_bound
4379 || TREE_CODE (up_bound) != INTEGER_CST
4380 || (warn_array_bounds < 2
4381 && array_at_struct_end_p (ref)))
4383 /* Accesses to trailing arrays via pointers may access storage
4384 beyond the types array bounds. For such arrays, or for flexible
4385 array members, as well as for other arrays of an unknown size,
4386 replace the upper bound with a more permissive one that assumes
4387 the size of the largest object is PTRDIFF_MAX. */
4388 tree eltsize = array_ref_element_size (ref);
4390 if (TREE_CODE (eltsize) != INTEGER_CST
4391 || integer_zerop (eltsize))
4393 up_bound = NULL_TREE;
4394 up_bound_p1 = NULL_TREE;
4396 else
4398 tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4399 tree arg = TREE_OPERAND (ref, 0);
4400 poly_int64 off;
4402 if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4403 maxbound = wide_int_to_tree (sizetype,
4404 wi::sub (wi::to_wide (maxbound),
4405 off));
4406 else
4407 maxbound = fold_convert (sizetype, maxbound);
4409 up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4411 up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4412 build_int_cst (ptrdiff_type_node, 1));
4415 else
4416 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4417 build_int_cst (TREE_TYPE (up_bound), 1));
4419 low_bound = array_ref_low_bound (ref);
4421 tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4423 bool warned = false;
4425 /* Empty array. */
4426 if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4427 warned = warning_at (location, OPT_Warray_bounds,
4428 "array subscript %E is above array bounds of %qT",
4429 low_bound, artype);
4431 if (TREE_CODE (low_sub) == SSA_NAME)
4433 vr = get_value_range (low_sub);
4434 if (!vr->undefined_p () && !vr->varying_p ())
4436 low_sub = vr->kind () == VR_RANGE ? vr->max () : vr->min ();
4437 up_sub = vr->kind () == VR_RANGE ? vr->min () : vr->max ();
4441 if (vr && vr->kind () == VR_ANTI_RANGE)
4443 if (up_bound
4444 && TREE_CODE (up_sub) == INTEGER_CST
4445 && (ignore_off_by_one
4446 ? tree_int_cst_lt (up_bound, up_sub)
4447 : tree_int_cst_le (up_bound, up_sub))
4448 && TREE_CODE (low_sub) == INTEGER_CST
4449 && tree_int_cst_le (low_sub, low_bound))
4450 warned = warning_at (location, OPT_Warray_bounds,
4451 "array subscript [%E, %E] is outside "
4452 "array bounds of %qT",
4453 low_sub, up_sub, artype);
4455 else if (up_bound
4456 && TREE_CODE (up_sub) == INTEGER_CST
4457 && (ignore_off_by_one
4458 ? !tree_int_cst_le (up_sub, up_bound_p1)
4459 : !tree_int_cst_le (up_sub, up_bound)))
4461 if (dump_file && (dump_flags & TDF_DETAILS))
4463 fprintf (dump_file, "Array bound warning for ");
4464 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4465 fprintf (dump_file, "\n");
4467 warned = warning_at (location, OPT_Warray_bounds,
4468 "array subscript %E is above array bounds of %qT",
4469 up_sub, artype);
4471 else if (TREE_CODE (low_sub) == INTEGER_CST
4472 && tree_int_cst_lt (low_sub, low_bound))
4474 if (dump_file && (dump_flags & TDF_DETAILS))
4476 fprintf (dump_file, "Array bound warning for ");
4477 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4478 fprintf (dump_file, "\n");
4480 warned = warning_at (location, OPT_Warray_bounds,
4481 "array subscript %E is below array bounds of %qT",
4482 low_sub, artype);
4485 if (warned)
4487 ref = TREE_OPERAND (ref, 0);
4489 if (DECL_P (ref))
4490 inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
4492 TREE_NO_WARNING (ref) = 1;
4496 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
4497 references to string constants. If VRP can determine that the array
4498 subscript is a constant, check if it is outside valid range.
4499 If the array subscript is a RANGE, warn if it is non-overlapping
4500 with valid range.
4501 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
4502 (used to allow one-past-the-end indices for code that takes
4503 the address of the just-past-the-end element of an array). */
4505 void
4506 vrp_prop::check_mem_ref (location_t location, tree ref,
4507 bool ignore_off_by_one)
4509 if (TREE_NO_WARNING (ref))
4510 return;
4512 tree arg = TREE_OPERAND (ref, 0);
4513 /* The constant and variable offset of the reference. */
4514 tree cstoff = TREE_OPERAND (ref, 1);
4515 tree varoff = NULL_TREE;
4517 const offset_int maxobjsize = tree_to_shwi (max_object_size ());
4519 /* The array or string constant bounds in bytes. Initially set
4520 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
4521 determined. */
4522 offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
4524 /* The minimum and maximum intermediate offset. For a reference
4525 to be valid, not only does the final offset/subscript must be
4526 in bounds but all intermediate offsets should be as well.
4527 GCC may be able to deal gracefully with such out-of-bounds
4528 offsets so the checking is only enbaled at -Warray-bounds=2
4529 where it may help detect bugs in uses of the intermediate
4530 offsets that could otherwise not be detectable. */
4531 offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
4532 offset_int extrema[2] = { 0, wi::abs (ioff) };
4534 /* The range of the byte offset into the reference. */
4535 offset_int offrange[2] = { 0, 0 };
4537 const value_range *vr = NULL;
4539 /* Determine the offsets and increment OFFRANGE for the bounds of each.
4540 The loop computes the the range of the final offset for expressions
4541 such as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs
4542 in some range. */
4543 while (TREE_CODE (arg) == SSA_NAME)
4545 gimple *def = SSA_NAME_DEF_STMT (arg);
4546 if (!is_gimple_assign (def))
4547 break;
4549 tree_code code = gimple_assign_rhs_code (def);
4550 if (code == POINTER_PLUS_EXPR)
4552 arg = gimple_assign_rhs1 (def);
4553 varoff = gimple_assign_rhs2 (def);
4555 else if (code == ASSERT_EXPR)
4557 arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
4558 continue;
4560 else
4561 return;
4563 /* VAROFF should always be a SSA_NAME here (and not even
4564 INTEGER_CST) but there's no point in taking chances. */
4565 if (TREE_CODE (varoff) != SSA_NAME)
4566 break;
4568 vr = get_value_range (varoff);
4569 if (!vr || vr->undefined_p () || vr->varying_p ())
4570 break;
4572 if (!vr->constant_p ())
4573 break;
4575 if (vr->kind () == VR_RANGE)
4577 if (tree_int_cst_lt (vr->min (), vr->max ()))
4579 offset_int min
4580 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min ()));
4581 offset_int max
4582 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max ()));
4583 if (min < max)
4585 offrange[0] += min;
4586 offrange[1] += max;
4588 else
4590 offrange[0] += max;
4591 offrange[1] += min;
4594 else
4596 /* Conservatively add [-MAXOBJSIZE -1, MAXOBJSIZE]
4597 to OFFRANGE. */
4598 offrange[0] += arrbounds[0];
4599 offrange[1] += arrbounds[1];
4602 else
4604 /* For an anti-range, analogously to the above, conservatively
4605 add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE. */
4606 offrange[0] += arrbounds[0];
4607 offrange[1] += arrbounds[1];
4610 /* Keep track of the minimum and maximum offset. */
4611 if (offrange[1] < 0 && offrange[1] < extrema[0])
4612 extrema[0] = offrange[1];
4613 if (offrange[0] > 0 && offrange[0] > extrema[1])
4614 extrema[1] = offrange[0];
4616 if (offrange[0] < arrbounds[0])
4617 offrange[0] = arrbounds[0];
4619 if (offrange[1] > arrbounds[1])
4620 offrange[1] = arrbounds[1];
4623 if (TREE_CODE (arg) == ADDR_EXPR)
4625 arg = TREE_OPERAND (arg, 0);
4626 if (TREE_CODE (arg) != STRING_CST
4627 && TREE_CODE (arg) != VAR_DECL)
4628 return;
4630 else
4631 return;
4633 /* The type of the object being referred to. It can be an array,
4634 string literal, or a non-array type when the MEM_REF represents
4635 a reference/subscript via a pointer to an object that is not
4636 an element of an array. References to members of structs and
4637 unions are excluded because MEM_REF doesn't make it possible
4638 to identify the member where the reference originated.
4639 Incomplete types are excluded as well because their size is
4640 not known. */
4641 tree reftype = TREE_TYPE (arg);
4642 if (POINTER_TYPE_P (reftype)
4643 || !COMPLETE_TYPE_P (reftype)
4644 || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST
4645 || RECORD_OR_UNION_TYPE_P (reftype))
4646 return;
4648 offset_int eltsize;
4649 if (TREE_CODE (reftype) == ARRAY_TYPE)
4651 eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
4653 if (tree dom = TYPE_DOMAIN (reftype))
4655 tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
4656 if (array_at_struct_end_p (arg)
4657 || !bnds[0] || !bnds[1])
4659 arrbounds[0] = 0;
4660 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4662 else
4664 arrbounds[0] = wi::to_offset (bnds[0]) * eltsize;
4665 arrbounds[1] = (wi::to_offset (bnds[1]) + 1) * eltsize;
4668 else
4670 arrbounds[0] = 0;
4671 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4674 if (TREE_CODE (ref) == MEM_REF)
4676 /* For MEM_REF determine a tighter bound of the non-array
4677 element type. */
4678 tree eltype = TREE_TYPE (reftype);
4679 while (TREE_CODE (eltype) == ARRAY_TYPE)
4680 eltype = TREE_TYPE (eltype);
4681 eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
4684 else
4686 eltsize = 1;
4687 arrbounds[0] = 0;
4688 arrbounds[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype));
4691 offrange[0] += ioff;
4692 offrange[1] += ioff;
4694 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
4695 is set (when taking the address of the one-past-last element
4696 of an array) but always use the stricter bound in diagnostics. */
4697 offset_int ubound = arrbounds[1];
4698 if (ignore_off_by_one)
4699 ubound += 1;
4701 if (offrange[0] >= ubound || offrange[1] < arrbounds[0])
4703 /* Treat a reference to a non-array object as one to an array
4704 of a single element. */
4705 if (TREE_CODE (reftype) != ARRAY_TYPE)
4706 reftype = build_array_type_nelts (reftype, 1);
4708 if (TREE_CODE (ref) == MEM_REF)
4710 /* Extract the element type out of MEM_REF and use its size
4711 to compute the index to print in the diagnostic; arrays
4712 in MEM_REF don't mean anything. */
4713 tree type = TREE_TYPE (ref);
4714 while (TREE_CODE (type) == ARRAY_TYPE)
4715 type = TREE_TYPE (type);
4716 tree size = TYPE_SIZE_UNIT (type);
4717 offrange[0] = offrange[0] / wi::to_offset (size);
4718 offrange[1] = offrange[1] / wi::to_offset (size);
4720 else
4722 /* For anything other than MEM_REF, compute the index to
4723 print in the diagnostic as the offset over element size. */
4724 offrange[0] = offrange[0] / eltsize;
4725 offrange[1] = offrange[1] / eltsize;
4728 bool warned;
4729 if (offrange[0] == offrange[1])
4730 warned = warning_at (location, OPT_Warray_bounds,
4731 "array subscript %wi is outside array bounds "
4732 "of %qT",
4733 offrange[0].to_shwi (), reftype);
4734 else
4735 warned = warning_at (location, OPT_Warray_bounds,
4736 "array subscript [%wi, %wi] is outside "
4737 "array bounds of %qT",
4738 offrange[0].to_shwi (),
4739 offrange[1].to_shwi (), reftype);
4740 if (warned && DECL_P (arg))
4741 inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
4743 TREE_NO_WARNING (ref) = 1;
4744 return;
4747 if (warn_array_bounds < 2)
4748 return;
4750 /* At level 2 check also intermediate offsets. */
4751 int i = 0;
4752 if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
4754 HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
4756 warning_at (location, OPT_Warray_bounds,
4757 "intermediate array offset %wi is outside array bounds "
4758 "of %qT",
4759 tmpidx, reftype);
4760 TREE_NO_WARNING (ref) = 1;
4764 /* Searches if the expr T, located at LOCATION computes
4765 address of an ARRAY_REF, and call check_array_ref on it. */
4767 void
4768 vrp_prop::search_for_addr_array (tree t, location_t location)
4770 /* Check each ARRAY_REF and MEM_REF in the reference chain. */
4773 if (TREE_CODE (t) == ARRAY_REF)
4774 check_array_ref (location, t, true /*ignore_off_by_one*/);
4775 else if (TREE_CODE (t) == MEM_REF)
4776 check_mem_ref (location, t, true /*ignore_off_by_one*/);
4778 t = TREE_OPERAND (t, 0);
4780 while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
4782 if (TREE_CODE (t) != MEM_REF
4783 || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
4784 || TREE_NO_WARNING (t))
4785 return;
4787 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4788 tree low_bound, up_bound, el_sz;
4789 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
4790 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
4791 || !TYPE_DOMAIN (TREE_TYPE (tem)))
4792 return;
4794 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4795 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4796 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
4797 if (!low_bound
4798 || TREE_CODE (low_bound) != INTEGER_CST
4799 || !up_bound
4800 || TREE_CODE (up_bound) != INTEGER_CST
4801 || !el_sz
4802 || TREE_CODE (el_sz) != INTEGER_CST)
4803 return;
4805 offset_int idx;
4806 if (!mem_ref_offset (t).is_constant (&idx))
4807 return;
4809 bool warned = false;
4810 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4811 if (idx < 0)
4813 if (dump_file && (dump_flags & TDF_DETAILS))
4815 fprintf (dump_file, "Array bound warning for ");
4816 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4817 fprintf (dump_file, "\n");
4819 warned = warning_at (location, OPT_Warray_bounds,
4820 "array subscript %wi is below "
4821 "array bounds of %qT",
4822 idx.to_shwi (), TREE_TYPE (tem));
4824 else if (idx > (wi::to_offset (up_bound)
4825 - wi::to_offset (low_bound) + 1))
4827 if (dump_file && (dump_flags & TDF_DETAILS))
4829 fprintf (dump_file, "Array bound warning for ");
4830 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4831 fprintf (dump_file, "\n");
4833 warned = warning_at (location, OPT_Warray_bounds,
4834 "array subscript %wu is above "
4835 "array bounds of %qT",
4836 idx.to_uhwi (), TREE_TYPE (tem));
4839 if (warned)
4841 if (DECL_P (t))
4842 inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
4844 TREE_NO_WARNING (t) = 1;
4848 /* walk_tree() callback that checks if *TP is
4849 an ARRAY_REF inside an ADDR_EXPR (in which an array
4850 subscript one outside the valid range is allowed). Call
4851 check_array_ref for each ARRAY_REF found. The location is
4852 passed in DATA. */
4854 static tree
4855 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4857 tree t = *tp;
4858 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4859 location_t location;
4861 if (EXPR_HAS_LOCATION (t))
4862 location = EXPR_LOCATION (t);
4863 else
4864 location = gimple_location (wi->stmt);
4866 *walk_subtree = TRUE;
4868 vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4869 if (TREE_CODE (t) == ARRAY_REF)
4870 vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
4871 else if (TREE_CODE (t) == MEM_REF)
4872 vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4873 else if (TREE_CODE (t) == ADDR_EXPR)
4875 vrp_prop->search_for_addr_array (t, location);
4876 *walk_subtree = FALSE;
4879 return NULL_TREE;
4882 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4883 to walk over all statements of all reachable BBs and call
4884 check_array_bounds on them. */
4886 class check_array_bounds_dom_walker : public dom_walker
4888 public:
4889 check_array_bounds_dom_walker (vrp_prop *prop)
4890 : dom_walker (CDI_DOMINATORS,
4891 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4892 flags, so that we can merge in information on
4893 non-executable edges from vrp_folder . */
4894 REACHABLE_BLOCKS_PRESERVING_FLAGS),
4895 m_prop (prop) {}
4896 ~check_array_bounds_dom_walker () {}
4898 edge before_dom_children (basic_block) FINAL OVERRIDE;
4900 private:
4901 vrp_prop *m_prop;
4904 /* Implementation of dom_walker::before_dom_children.
4906 Walk over all statements of BB and call check_array_bounds on them,
4907 and determine if there's a unique successor edge. */
4909 edge
4910 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4912 gimple_stmt_iterator si;
4913 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4915 gimple *stmt = gsi_stmt (si);
4916 struct walk_stmt_info wi;
4917 if (!gimple_has_location (stmt)
4918 || is_gimple_debug (stmt))
4919 continue;
4921 memset (&wi, 0, sizeof (wi));
4923 wi.info = m_prop;
4925 walk_gimple_op (stmt, check_array_bounds, &wi);
4928 /* Determine if there's a unique successor edge, and if so, return
4929 that back to dom_walker, ensuring that we don't visit blocks that
4930 became unreachable during the VRP propagation
4931 (PR tree-optimization/83312). */
4932 return find_taken_edge (bb, NULL_TREE);
4935 /* Walk over all statements of all reachable BBs and call check_array_bounds
4936 on them. */
4938 void
4939 vrp_prop::check_all_array_refs ()
4941 check_array_bounds_dom_walker w (this);
4942 w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4945 /* Return true if all imm uses of VAR are either in STMT, or
4946 feed (optionally through a chain of single imm uses) GIMPLE_COND
4947 in basic block COND_BB. */
4949 static bool
4950 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
4952 use_operand_p use_p, use2_p;
4953 imm_use_iterator iter;
4955 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
4956 if (USE_STMT (use_p) != stmt)
4958 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
4959 if (is_gimple_debug (use_stmt))
4960 continue;
4961 while (is_gimple_assign (use_stmt)
4962 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
4963 && single_imm_use (gimple_assign_lhs (use_stmt),
4964 &use2_p, &use_stmt2))
4965 use_stmt = use_stmt2;
4966 if (gimple_code (use_stmt) != GIMPLE_COND
4967 || gimple_bb (use_stmt) != cond_bb)
4968 return false;
4970 return true;
4973 /* Handle
4974 _4 = x_3 & 31;
4975 if (_4 != 0)
4976 goto <bb 6>;
4977 else
4978 goto <bb 7>;
4979 <bb 6>:
4980 __builtin_unreachable ();
4981 <bb 7>:
4982 x_5 = ASSERT_EXPR <x_3, ...>;
4983 If x_3 has no other immediate uses (checked by caller),
4984 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
4985 from the non-zero bitmask. */
4987 void
4988 maybe_set_nonzero_bits (edge e, tree var)
4990 basic_block cond_bb = e->src;
4991 gimple *stmt = last_stmt (cond_bb);
4992 tree cst;
4994 if (stmt == NULL
4995 || gimple_code (stmt) != GIMPLE_COND
4996 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
4997 ? EQ_EXPR : NE_EXPR)
4998 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
4999 || !integer_zerop (gimple_cond_rhs (stmt)))
5000 return;
5002 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
5003 if (!is_gimple_assign (stmt)
5004 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
5005 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
5006 return;
5007 if (gimple_assign_rhs1 (stmt) != var)
5009 gimple *stmt2;
5011 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5012 return;
5013 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
5014 if (!gimple_assign_cast_p (stmt2)
5015 || gimple_assign_rhs1 (stmt2) != var
5016 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
5017 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
5018 != TYPE_PRECISION (TREE_TYPE (var))))
5019 return;
5021 cst = gimple_assign_rhs2 (stmt);
5022 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
5023 wi::to_wide (cst)));
5026 /* Convert range assertion expressions into the implied copies and
5027 copy propagate away the copies. Doing the trivial copy propagation
5028 here avoids the need to run the full copy propagation pass after
5029 VRP.
5031 FIXME, this will eventually lead to copy propagation removing the
5032 names that had useful range information attached to them. For
5033 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5034 then N_i will have the range [3, +INF].
5036 However, by converting the assertion into the implied copy
5037 operation N_i = N_j, we will then copy-propagate N_j into the uses
5038 of N_i and lose the range information. We may want to hold on to
5039 ASSERT_EXPRs a little while longer as the ranges could be used in
5040 things like jump threading.
5042 The problem with keeping ASSERT_EXPRs around is that passes after
5043 VRP need to handle them appropriately.
5045 Another approach would be to make the range information a first
5046 class property of the SSA_NAME so that it can be queried from
5047 any pass. This is made somewhat more complex by the need for
5048 multiple ranges to be associated with one SSA_NAME. */
5050 static void
5051 remove_range_assertions (void)
5053 basic_block bb;
5054 gimple_stmt_iterator si;
5055 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
5056 a basic block preceeded by GIMPLE_COND branching to it and
5057 __builtin_trap, -1 if not yet checked, 0 otherwise. */
5058 int is_unreachable;
5060 /* Note that the BSI iterator bump happens at the bottom of the
5061 loop and no bump is necessary if we're removing the statement
5062 referenced by the current BSI. */
5063 FOR_EACH_BB_FN (bb, cfun)
5064 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
5066 gimple *stmt = gsi_stmt (si);
5068 if (is_gimple_assign (stmt)
5069 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5071 tree lhs = gimple_assign_lhs (stmt);
5072 tree rhs = gimple_assign_rhs1 (stmt);
5073 tree var;
5075 var = ASSERT_EXPR_VAR (rhs);
5077 if (TREE_CODE (var) == SSA_NAME
5078 && !POINTER_TYPE_P (TREE_TYPE (lhs))
5079 && SSA_NAME_RANGE_INFO (lhs))
5081 if (is_unreachable == -1)
5083 is_unreachable = 0;
5084 if (single_pred_p (bb)
5085 && assert_unreachable_fallthru_edge_p
5086 (single_pred_edge (bb)))
5087 is_unreachable = 1;
5089 /* Handle
5090 if (x_7 >= 10 && x_7 < 20)
5091 __builtin_unreachable ();
5092 x_8 = ASSERT_EXPR <x_7, ...>;
5093 if the only uses of x_7 are in the ASSERT_EXPR and
5094 in the condition. In that case, we can copy the
5095 range info from x_8 computed in this pass also
5096 for x_7. */
5097 if (is_unreachable
5098 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
5099 single_pred (bb)))
5101 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
5102 SSA_NAME_RANGE_INFO (lhs)->get_min (),
5103 SSA_NAME_RANGE_INFO (lhs)->get_max ());
5104 maybe_set_nonzero_bits (single_pred_edge (bb), var);
5108 /* Propagate the RHS into every use of the LHS. For SSA names
5109 also propagate abnormals as it merely restores the original
5110 IL in this case (an replace_uses_by would assert). */
5111 if (TREE_CODE (var) == SSA_NAME)
5113 imm_use_iterator iter;
5114 use_operand_p use_p;
5115 gimple *use_stmt;
5116 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
5117 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5118 SET_USE (use_p, var);
5120 else
5121 replace_uses_by (lhs, var);
5123 /* And finally, remove the copy, it is not needed. */
5124 gsi_remove (&si, true);
5125 release_defs (stmt);
5127 else
5129 if (!is_gimple_debug (gsi_stmt (si)))
5130 is_unreachable = 0;
5131 gsi_next (&si);
5136 /* Return true if STMT is interesting for VRP. */
5138 bool
5139 stmt_interesting_for_vrp (gimple *stmt)
5141 if (gimple_code (stmt) == GIMPLE_PHI)
5143 tree res = gimple_phi_result (stmt);
5144 return (!virtual_operand_p (res)
5145 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
5146 || POINTER_TYPE_P (TREE_TYPE (res))));
5148 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5150 tree lhs = gimple_get_lhs (stmt);
5152 /* In general, assignments with virtual operands are not useful
5153 for deriving ranges, with the obvious exception of calls to
5154 builtin functions. */
5155 if (lhs && TREE_CODE (lhs) == SSA_NAME
5156 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5157 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5158 && (is_gimple_call (stmt)
5159 || !gimple_vuse (stmt)))
5160 return true;
5161 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5162 switch (gimple_call_internal_fn (stmt))
5164 case IFN_ADD_OVERFLOW:
5165 case IFN_SUB_OVERFLOW:
5166 case IFN_MUL_OVERFLOW:
5167 case IFN_ATOMIC_COMPARE_EXCHANGE:
5168 /* These internal calls return _Complex integer type,
5169 but are interesting to VRP nevertheless. */
5170 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5171 return true;
5172 break;
5173 default:
5174 break;
5177 else if (gimple_code (stmt) == GIMPLE_COND
5178 || gimple_code (stmt) == GIMPLE_SWITCH)
5179 return true;
5181 return false;
5184 /* Initialization required by ssa_propagate engine. */
5186 void
5187 vrp_prop::vrp_initialize ()
5189 basic_block bb;
5191 FOR_EACH_BB_FN (bb, cfun)
5193 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
5194 gsi_next (&si))
5196 gphi *phi = si.phi ();
5197 if (!stmt_interesting_for_vrp (phi))
5199 tree lhs = PHI_RESULT (phi);
5200 get_value_range (lhs)->set_varying ();
5201 prop_set_simulate_again (phi, false);
5203 else
5204 prop_set_simulate_again (phi, true);
5207 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
5208 gsi_next (&si))
5210 gimple *stmt = gsi_stmt (si);
5212 /* If the statement is a control insn, then we do not
5213 want to avoid simulating the statement once. Failure
5214 to do so means that those edges will never get added. */
5215 if (stmt_ends_bb_p (stmt))
5216 prop_set_simulate_again (stmt, true);
5217 else if (!stmt_interesting_for_vrp (stmt))
5219 set_defs_to_varying (stmt);
5220 prop_set_simulate_again (stmt, false);
5222 else
5223 prop_set_simulate_again (stmt, true);
5228 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
5229 that includes the value VAL. The search is restricted to the range
5230 [START_IDX, n - 1] where n is the size of VEC.
5232 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5233 returned.
5235 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5236 it is placed in IDX and false is returned.
5238 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
5239 returned. */
5241 bool
5242 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5244 size_t n = gimple_switch_num_labels (stmt);
5245 size_t low, high;
5247 /* Find case label for minimum of the value range or the next one.
5248 At each iteration we are searching in [low, high - 1]. */
5250 for (low = start_idx, high = n; high != low; )
5252 tree t;
5253 int cmp;
5254 /* Note that i != high, so we never ask for n. */
5255 size_t i = (high + low) / 2;
5256 t = gimple_switch_label (stmt, i);
5258 /* Cache the result of comparing CASE_LOW and val. */
5259 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5261 if (cmp == 0)
5263 /* Ranges cannot be empty. */
5264 *idx = i;
5265 return true;
5267 else if (cmp > 0)
5268 high = i;
5269 else
5271 low = i + 1;
5272 if (CASE_HIGH (t) != NULL
5273 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5275 *idx = i;
5276 return true;
5281 *idx = high;
5282 return false;
5285 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5286 for values between MIN and MAX. The first index is placed in MIN_IDX. The
5287 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5288 then MAX_IDX < MIN_IDX.
5289 Returns true if the default label is not needed. */
5291 bool
5292 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5293 size_t *max_idx)
5295 size_t i, j;
5296 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5297 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5299 if (i == j
5300 && min_take_default
5301 && max_take_default)
5303 /* Only the default case label reached.
5304 Return an empty range. */
5305 *min_idx = 1;
5306 *max_idx = 0;
5307 return false;
5309 else
5311 bool take_default = min_take_default || max_take_default;
5312 tree low, high;
5313 size_t k;
5315 if (max_take_default)
5316 j--;
5318 /* If the case label range is continuous, we do not need
5319 the default case label. Verify that. */
5320 high = CASE_LOW (gimple_switch_label (stmt, i));
5321 if (CASE_HIGH (gimple_switch_label (stmt, i)))
5322 high = CASE_HIGH (gimple_switch_label (stmt, i));
5323 for (k = i + 1; k <= j; ++k)
5325 low = CASE_LOW (gimple_switch_label (stmt, k));
5326 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5328 take_default = true;
5329 break;
5331 high = low;
5332 if (CASE_HIGH (gimple_switch_label (stmt, k)))
5333 high = CASE_HIGH (gimple_switch_label (stmt, k));
5336 *min_idx = i;
5337 *max_idx = j;
5338 return !take_default;
5342 /* Evaluate statement STMT. If the statement produces a useful range,
5343 return SSA_PROP_INTERESTING and record the SSA name with the
5344 interesting range into *OUTPUT_P.
5346 If STMT is a conditional branch and we can determine its truth
5347 value, the taken edge is recorded in *TAKEN_EDGE_P.
5349 If STMT produces a varying value, return SSA_PROP_VARYING. */
5351 enum ssa_prop_result
5352 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5354 tree lhs = gimple_get_lhs (stmt);
5355 value_range vr;
5356 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5358 if (*output_p)
5360 if (update_value_range (*output_p, &vr))
5362 if (dump_file && (dump_flags & TDF_DETAILS))
5364 fprintf (dump_file, "Found new range for ");
5365 print_generic_expr (dump_file, *output_p);
5366 fprintf (dump_file, ": ");
5367 dump_value_range (dump_file, &vr);
5368 fprintf (dump_file, "\n");
5371 if (vr.varying_p ())
5372 return SSA_PROP_VARYING;
5374 return SSA_PROP_INTERESTING;
5376 return SSA_PROP_NOT_INTERESTING;
5379 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5380 switch (gimple_call_internal_fn (stmt))
5382 case IFN_ADD_OVERFLOW:
5383 case IFN_SUB_OVERFLOW:
5384 case IFN_MUL_OVERFLOW:
5385 case IFN_ATOMIC_COMPARE_EXCHANGE:
5386 /* These internal calls return _Complex integer type,
5387 which VRP does not track, but the immediate uses
5388 thereof might be interesting. */
5389 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5391 imm_use_iterator iter;
5392 use_operand_p use_p;
5393 enum ssa_prop_result res = SSA_PROP_VARYING;
5395 get_value_range (lhs)->set_varying ();
5397 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5399 gimple *use_stmt = USE_STMT (use_p);
5400 if (!is_gimple_assign (use_stmt))
5401 continue;
5402 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5403 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5404 continue;
5405 tree rhs1 = gimple_assign_rhs1 (use_stmt);
5406 tree use_lhs = gimple_assign_lhs (use_stmt);
5407 if (TREE_CODE (rhs1) != rhs_code
5408 || TREE_OPERAND (rhs1, 0) != lhs
5409 || TREE_CODE (use_lhs) != SSA_NAME
5410 || !stmt_interesting_for_vrp (use_stmt)
5411 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5412 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5413 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5414 continue;
5416 /* If there is a change in the value range for any of the
5417 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5418 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
5419 or IMAGPART_EXPR immediate uses, but none of them have
5420 a change in their value ranges, return
5421 SSA_PROP_NOT_INTERESTING. If there are no
5422 {REAL,IMAG}PART_EXPR uses at all,
5423 return SSA_PROP_VARYING. */
5424 value_range new_vr;
5425 extract_range_basic (&new_vr, use_stmt);
5426 const value_range *old_vr = get_value_range (use_lhs);
5427 if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
5428 res = SSA_PROP_INTERESTING;
5429 else
5430 res = SSA_PROP_NOT_INTERESTING;
5431 new_vr.equiv_clear ();
5432 if (res == SSA_PROP_INTERESTING)
5434 *output_p = lhs;
5435 return res;
5439 return res;
5441 break;
5442 default:
5443 break;
5446 /* All other statements produce nothing of interest for VRP, so mark
5447 their outputs varying and prevent further simulation. */
5448 set_defs_to_varying (stmt);
5450 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5453 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5454 { VR1TYPE, VR0MIN, VR0MAX } and store the result
5455 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5456 possible such range. The resulting range is not canonicalized. */
5458 static void
5459 union_ranges (enum value_range_kind *vr0type,
5460 tree *vr0min, tree *vr0max,
5461 enum value_range_kind vr1type,
5462 tree vr1min, tree vr1max)
5464 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5465 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5467 /* [] is vr0, () is vr1 in the following classification comments. */
5468 if (mineq && maxeq)
5470 /* [( )] */
5471 if (*vr0type == vr1type)
5472 /* Nothing to do for equal ranges. */
5474 else if ((*vr0type == VR_RANGE
5475 && vr1type == VR_ANTI_RANGE)
5476 || (*vr0type == VR_ANTI_RANGE
5477 && vr1type == VR_RANGE))
5479 /* For anti-range with range union the result is varying. */
5480 goto give_up;
5482 else
5483 gcc_unreachable ();
5485 else if (operand_less_p (*vr0max, vr1min) == 1
5486 || operand_less_p (vr1max, *vr0min) == 1)
5488 /* [ ] ( ) or ( ) [ ]
5489 If the ranges have an empty intersection, result of the union
5490 operation is the anti-range or if both are anti-ranges
5491 it covers all. */
5492 if (*vr0type == VR_ANTI_RANGE
5493 && vr1type == VR_ANTI_RANGE)
5494 goto give_up;
5495 else if (*vr0type == VR_ANTI_RANGE
5496 && vr1type == VR_RANGE)
5498 else if (*vr0type == VR_RANGE
5499 && vr1type == VR_ANTI_RANGE)
5501 *vr0type = vr1type;
5502 *vr0min = vr1min;
5503 *vr0max = vr1max;
5505 else if (*vr0type == VR_RANGE
5506 && vr1type == VR_RANGE)
5508 /* The result is the convex hull of both ranges. */
5509 if (operand_less_p (*vr0max, vr1min) == 1)
5511 /* If the result can be an anti-range, create one. */
5512 if (TREE_CODE (*vr0max) == INTEGER_CST
5513 && TREE_CODE (vr1min) == INTEGER_CST
5514 && vrp_val_is_min (*vr0min)
5515 && vrp_val_is_max (vr1max))
5517 tree min = int_const_binop (PLUS_EXPR,
5518 *vr0max,
5519 build_int_cst (TREE_TYPE (*vr0max), 1));
5520 tree max = int_const_binop (MINUS_EXPR,
5521 vr1min,
5522 build_int_cst (TREE_TYPE (vr1min), 1));
5523 if (!operand_less_p (max, min))
5525 *vr0type = VR_ANTI_RANGE;
5526 *vr0min = min;
5527 *vr0max = max;
5529 else
5530 *vr0max = vr1max;
5532 else
5533 *vr0max = vr1max;
5535 else
5537 /* If the result can be an anti-range, create one. */
5538 if (TREE_CODE (vr1max) == INTEGER_CST
5539 && TREE_CODE (*vr0min) == INTEGER_CST
5540 && vrp_val_is_min (vr1min)
5541 && vrp_val_is_max (*vr0max))
5543 tree min = int_const_binop (PLUS_EXPR,
5544 vr1max,
5545 build_int_cst (TREE_TYPE (vr1max), 1));
5546 tree max = int_const_binop (MINUS_EXPR,
5547 *vr0min,
5548 build_int_cst (TREE_TYPE (*vr0min), 1));
5549 if (!operand_less_p (max, min))
5551 *vr0type = VR_ANTI_RANGE;
5552 *vr0min = min;
5553 *vr0max = max;
5555 else
5556 *vr0min = vr1min;
5558 else
5559 *vr0min = vr1min;
5562 else
5563 gcc_unreachable ();
5565 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5566 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5568 /* [ ( ) ] or [( ) ] or [ ( )] */
5569 if (*vr0type == VR_RANGE
5570 && vr1type == VR_RANGE)
5572 else if (*vr0type == VR_ANTI_RANGE
5573 && vr1type == VR_ANTI_RANGE)
5575 *vr0type = vr1type;
5576 *vr0min = vr1min;
5577 *vr0max = vr1max;
5579 else if (*vr0type == VR_ANTI_RANGE
5580 && vr1type == VR_RANGE)
5582 /* Arbitrarily choose the right or left gap. */
5583 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5584 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5585 build_int_cst (TREE_TYPE (vr1min), 1));
5586 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5587 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5588 build_int_cst (TREE_TYPE (vr1max), 1));
5589 else
5590 goto give_up;
5592 else if (*vr0type == VR_RANGE
5593 && vr1type == VR_ANTI_RANGE)
5594 /* The result covers everything. */
5595 goto give_up;
5596 else
5597 gcc_unreachable ();
5599 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5600 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5602 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5603 if (*vr0type == VR_RANGE
5604 && vr1type == VR_RANGE)
5606 *vr0type = vr1type;
5607 *vr0min = vr1min;
5608 *vr0max = vr1max;
5610 else if (*vr0type == VR_ANTI_RANGE
5611 && vr1type == VR_ANTI_RANGE)
5613 else if (*vr0type == VR_RANGE
5614 && vr1type == VR_ANTI_RANGE)
5616 *vr0type = VR_ANTI_RANGE;
5617 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5619 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5620 build_int_cst (TREE_TYPE (*vr0min), 1));
5621 *vr0min = vr1min;
5623 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5625 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5626 build_int_cst (TREE_TYPE (*vr0max), 1));
5627 *vr0max = vr1max;
5629 else
5630 goto give_up;
5632 else if (*vr0type == VR_ANTI_RANGE
5633 && vr1type == VR_RANGE)
5634 /* The result covers everything. */
5635 goto give_up;
5636 else
5637 gcc_unreachable ();
5639 else if ((operand_less_p (vr1min, *vr0max) == 1
5640 || operand_equal_p (vr1min, *vr0max, 0))
5641 && operand_less_p (*vr0min, vr1min) == 1
5642 && operand_less_p (*vr0max, vr1max) == 1)
5644 /* [ ( ] ) or [ ]( ) */
5645 if (*vr0type == VR_RANGE
5646 && vr1type == VR_RANGE)
5647 *vr0max = vr1max;
5648 else if (*vr0type == VR_ANTI_RANGE
5649 && vr1type == VR_ANTI_RANGE)
5650 *vr0min = vr1min;
5651 else if (*vr0type == VR_ANTI_RANGE
5652 && vr1type == VR_RANGE)
5654 if (TREE_CODE (vr1min) == INTEGER_CST)
5655 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5656 build_int_cst (TREE_TYPE (vr1min), 1));
5657 else
5658 goto give_up;
5660 else if (*vr0type == VR_RANGE
5661 && vr1type == VR_ANTI_RANGE)
5663 if (TREE_CODE (*vr0max) == INTEGER_CST)
5665 *vr0type = vr1type;
5666 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5667 build_int_cst (TREE_TYPE (*vr0max), 1));
5668 *vr0max = vr1max;
5670 else
5671 goto give_up;
5673 else
5674 gcc_unreachable ();
5676 else if ((operand_less_p (*vr0min, vr1max) == 1
5677 || operand_equal_p (*vr0min, vr1max, 0))
5678 && operand_less_p (vr1min, *vr0min) == 1
5679 && operand_less_p (vr1max, *vr0max) == 1)
5681 /* ( [ ) ] or ( )[ ] */
5682 if (*vr0type == VR_RANGE
5683 && vr1type == VR_RANGE)
5684 *vr0min = vr1min;
5685 else if (*vr0type == VR_ANTI_RANGE
5686 && vr1type == VR_ANTI_RANGE)
5687 *vr0max = vr1max;
5688 else if (*vr0type == VR_ANTI_RANGE
5689 && vr1type == VR_RANGE)
5691 if (TREE_CODE (vr1max) == INTEGER_CST)
5692 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5693 build_int_cst (TREE_TYPE (vr1max), 1));
5694 else
5695 goto give_up;
5697 else if (*vr0type == VR_RANGE
5698 && vr1type == VR_ANTI_RANGE)
5700 if (TREE_CODE (*vr0min) == INTEGER_CST)
5702 *vr0type = vr1type;
5703 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5704 build_int_cst (TREE_TYPE (*vr0min), 1));
5705 *vr0min = vr1min;
5707 else
5708 goto give_up;
5710 else
5711 gcc_unreachable ();
5713 else
5714 goto give_up;
5716 return;
5718 give_up:
5719 *vr0type = VR_VARYING;
5720 *vr0min = NULL_TREE;
5721 *vr0max = NULL_TREE;
5724 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5725 { VR1TYPE, VR0MIN, VR0MAX } and store the result
5726 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5727 possible such range. The resulting range is not canonicalized. */
5729 static void
5730 intersect_ranges (enum value_range_kind *vr0type,
5731 tree *vr0min, tree *vr0max,
5732 enum value_range_kind vr1type,
5733 tree vr1min, tree vr1max)
5735 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5736 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5738 /* [] is vr0, () is vr1 in the following classification comments. */
5739 if (mineq && maxeq)
5741 /* [( )] */
5742 if (*vr0type == vr1type)
5743 /* Nothing to do for equal ranges. */
5745 else if ((*vr0type == VR_RANGE
5746 && vr1type == VR_ANTI_RANGE)
5747 || (*vr0type == VR_ANTI_RANGE
5748 && vr1type == VR_RANGE))
5750 /* For anti-range with range intersection the result is empty. */
5751 *vr0type = VR_UNDEFINED;
5752 *vr0min = NULL_TREE;
5753 *vr0max = NULL_TREE;
5755 else
5756 gcc_unreachable ();
5758 else if (operand_less_p (*vr0max, vr1min) == 1
5759 || operand_less_p (vr1max, *vr0min) == 1)
5761 /* [ ] ( ) or ( ) [ ]
5762 If the ranges have an empty intersection, the result of the
5763 intersect operation is the range for intersecting an
5764 anti-range with a range or empty when intersecting two ranges. */
5765 if (*vr0type == VR_RANGE
5766 && vr1type == VR_ANTI_RANGE)
5768 else if (*vr0type == VR_ANTI_RANGE
5769 && vr1type == VR_RANGE)
5771 *vr0type = vr1type;
5772 *vr0min = vr1min;
5773 *vr0max = vr1max;
5775 else if (*vr0type == VR_RANGE
5776 && vr1type == VR_RANGE)
5778 *vr0type = VR_UNDEFINED;
5779 *vr0min = NULL_TREE;
5780 *vr0max = NULL_TREE;
5782 else if (*vr0type == VR_ANTI_RANGE
5783 && vr1type == VR_ANTI_RANGE)
5785 /* If the anti-ranges are adjacent to each other merge them. */
5786 if (TREE_CODE (*vr0max) == INTEGER_CST
5787 && TREE_CODE (vr1min) == INTEGER_CST
5788 && operand_less_p (*vr0max, vr1min) == 1
5789 && integer_onep (int_const_binop (MINUS_EXPR,
5790 vr1min, *vr0max)))
5791 *vr0max = vr1max;
5792 else if (TREE_CODE (vr1max) == INTEGER_CST
5793 && TREE_CODE (*vr0min) == INTEGER_CST
5794 && operand_less_p (vr1max, *vr0min) == 1
5795 && integer_onep (int_const_binop (MINUS_EXPR,
5796 *vr0min, vr1max)))
5797 *vr0min = vr1min;
5798 /* Else arbitrarily take VR0. */
5801 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5802 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5804 /* [ ( ) ] or [( ) ] or [ ( )] */
5805 if (*vr0type == VR_RANGE
5806 && vr1type == VR_RANGE)
5808 /* If both are ranges the result is the inner one. */
5809 *vr0type = vr1type;
5810 *vr0min = vr1min;
5811 *vr0max = vr1max;
5813 else if (*vr0type == VR_RANGE
5814 && vr1type == VR_ANTI_RANGE)
5816 /* Choose the right gap if the left one is empty. */
5817 if (mineq)
5819 if (TREE_CODE (vr1max) != INTEGER_CST)
5820 *vr0min = vr1max;
5821 else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
5822 && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
5823 *vr0min
5824 = int_const_binop (MINUS_EXPR, vr1max,
5825 build_int_cst (TREE_TYPE (vr1max), -1));
5826 else
5827 *vr0min
5828 = int_const_binop (PLUS_EXPR, vr1max,
5829 build_int_cst (TREE_TYPE (vr1max), 1));
5831 /* Choose the left gap if the right one is empty. */
5832 else if (maxeq)
5834 if (TREE_CODE (vr1min) != INTEGER_CST)
5835 *vr0max = vr1min;
5836 else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
5837 && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
5838 *vr0max
5839 = int_const_binop (PLUS_EXPR, vr1min,
5840 build_int_cst (TREE_TYPE (vr1min), -1));
5841 else
5842 *vr0max
5843 = int_const_binop (MINUS_EXPR, vr1min,
5844 build_int_cst (TREE_TYPE (vr1min), 1));
5846 /* Choose the anti-range if the range is effectively varying. */
5847 else if (vrp_val_is_min (*vr0min)
5848 && vrp_val_is_max (*vr0max))
5850 *vr0type = vr1type;
5851 *vr0min = vr1min;
5852 *vr0max = vr1max;
5854 /* Else choose the range. */
5856 else if (*vr0type == VR_ANTI_RANGE
5857 && vr1type == VR_ANTI_RANGE)
5858 /* If both are anti-ranges the result is the outer one. */
5860 else if (*vr0type == VR_ANTI_RANGE
5861 && vr1type == VR_RANGE)
5863 /* The intersection is empty. */
5864 *vr0type = VR_UNDEFINED;
5865 *vr0min = NULL_TREE;
5866 *vr0max = NULL_TREE;
5868 else
5869 gcc_unreachable ();
5871 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5872 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5874 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5875 if (*vr0type == VR_RANGE
5876 && vr1type == VR_RANGE)
5877 /* Choose the inner range. */
5879 else if (*vr0type == VR_ANTI_RANGE
5880 && vr1type == VR_RANGE)
5882 /* Choose the right gap if the left is empty. */
5883 if (mineq)
5885 *vr0type = VR_RANGE;
5886 if (TREE_CODE (*vr0max) != INTEGER_CST)
5887 *vr0min = *vr0max;
5888 else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
5889 && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
5890 *vr0min
5891 = int_const_binop (MINUS_EXPR, *vr0max,
5892 build_int_cst (TREE_TYPE (*vr0max), -1));
5893 else
5894 *vr0min
5895 = int_const_binop (PLUS_EXPR, *vr0max,
5896 build_int_cst (TREE_TYPE (*vr0max), 1));
5897 *vr0max = vr1max;
5899 /* Choose the left gap if the right is empty. */
5900 else if (maxeq)
5902 *vr0type = VR_RANGE;
5903 if (TREE_CODE (*vr0min) != INTEGER_CST)
5904 *vr0max = *vr0min;
5905 else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
5906 && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
5907 *vr0max
5908 = int_const_binop (PLUS_EXPR, *vr0min,
5909 build_int_cst (TREE_TYPE (*vr0min), -1));
5910 else
5911 *vr0max
5912 = int_const_binop (MINUS_EXPR, *vr0min,
5913 build_int_cst (TREE_TYPE (*vr0min), 1));
5914 *vr0min = vr1min;
5916 /* Choose the anti-range if the range is effectively varying. */
5917 else if (vrp_val_is_min (vr1min)
5918 && vrp_val_is_max (vr1max))
5920 /* Choose the anti-range if it is ~[0,0], that range is special
5921 enough to special case when vr1's range is relatively wide.
5922 At least for types bigger than int - this covers pointers
5923 and arguments to functions like ctz. */
5924 else if (*vr0min == *vr0max
5925 && integer_zerop (*vr0min)
5926 && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
5927 >= TYPE_PRECISION (integer_type_node))
5928 || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
5929 && TREE_CODE (vr1max) == INTEGER_CST
5930 && TREE_CODE (vr1min) == INTEGER_CST
5931 && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
5932 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
5934 /* Else choose the range. */
5935 else
5937 *vr0type = vr1type;
5938 *vr0min = vr1min;
5939 *vr0max = vr1max;
5942 else if (*vr0type == VR_ANTI_RANGE
5943 && vr1type == VR_ANTI_RANGE)
5945 /* If both are anti-ranges the result is the outer one. */
5946 *vr0type = vr1type;
5947 *vr0min = vr1min;
5948 *vr0max = vr1max;
5950 else if (vr1type == VR_ANTI_RANGE
5951 && *vr0type == VR_RANGE)
5953 /* The intersection is empty. */
5954 *vr0type = VR_UNDEFINED;
5955 *vr0min = NULL_TREE;
5956 *vr0max = NULL_TREE;
5958 else
5959 gcc_unreachable ();
5961 else if ((operand_less_p (vr1min, *vr0max) == 1
5962 || operand_equal_p (vr1min, *vr0max, 0))
5963 && operand_less_p (*vr0min, vr1min) == 1)
5965 /* [ ( ] ) or [ ]( ) */
5966 if (*vr0type == VR_ANTI_RANGE
5967 && vr1type == VR_ANTI_RANGE)
5968 *vr0max = vr1max;
5969 else if (*vr0type == VR_RANGE
5970 && vr1type == VR_RANGE)
5971 *vr0min = vr1min;
5972 else if (*vr0type == VR_RANGE
5973 && vr1type == VR_ANTI_RANGE)
5975 if (TREE_CODE (vr1min) == INTEGER_CST)
5976 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5977 build_int_cst (TREE_TYPE (vr1min), 1));
5978 else
5979 *vr0max = vr1min;
5981 else if (*vr0type == VR_ANTI_RANGE
5982 && vr1type == VR_RANGE)
5984 *vr0type = VR_RANGE;
5985 if (TREE_CODE (*vr0max) == INTEGER_CST)
5986 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5987 build_int_cst (TREE_TYPE (*vr0max), 1));
5988 else
5989 *vr0min = *vr0max;
5990 *vr0max = vr1max;
5992 else
5993 gcc_unreachable ();
5995 else if ((operand_less_p (*vr0min, vr1max) == 1
5996 || operand_equal_p (*vr0min, vr1max, 0))
5997 && operand_less_p (vr1min, *vr0min) == 1)
5999 /* ( [ ) ] or ( )[ ] */
6000 if (*vr0type == VR_ANTI_RANGE
6001 && vr1type == VR_ANTI_RANGE)
6002 *vr0min = vr1min;
6003 else if (*vr0type == VR_RANGE
6004 && vr1type == VR_RANGE)
6005 *vr0max = vr1max;
6006 else if (*vr0type == VR_RANGE
6007 && vr1type == VR_ANTI_RANGE)
6009 if (TREE_CODE (vr1max) == INTEGER_CST)
6010 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
6011 build_int_cst (TREE_TYPE (vr1max), 1));
6012 else
6013 *vr0min = vr1max;
6015 else if (*vr0type == VR_ANTI_RANGE
6016 && vr1type == VR_RANGE)
6018 *vr0type = VR_RANGE;
6019 if (TREE_CODE (*vr0min) == INTEGER_CST)
6020 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
6021 build_int_cst (TREE_TYPE (*vr0min), 1));
6022 else
6023 *vr0max = *vr0min;
6024 *vr0min = vr1min;
6026 else
6027 gcc_unreachable ();
6030 /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as
6031 result for the intersection. That's always a conservative
6032 correct estimate unless VR1 is a constant singleton range
6033 in which case we choose that. */
6034 if (vr1type == VR_RANGE
6035 && is_gimple_min_invariant (vr1min)
6036 && vrp_operand_equal_p (vr1min, vr1max))
6038 *vr0type = vr1type;
6039 *vr0min = vr1min;
6040 *vr0max = vr1max;
6045 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
6046 in *VR0. This may not be the smallest possible such range. */
6048 void
6049 value_range::intersect_helper (value_range *vr0, const value_range *vr1)
6051 /* If either range is VR_VARYING the other one wins. */
6052 if (vr1->varying_p ())
6053 return;
6054 if (vr0->varying_p ())
6056 vr0->deep_copy (vr1);
6057 return;
6060 /* When either range is VR_UNDEFINED the resulting range is
6061 VR_UNDEFINED, too. */
6062 if (vr0->undefined_p ())
6063 return;
6064 if (vr1->undefined_p ())
6066 vr0->set_undefined ();
6067 return;
6070 value_range_kind vr0type = vr0->kind ();
6071 tree vr0min = vr0->min ();
6072 tree vr0max = vr0->max ();
6073 intersect_ranges (&vr0type, &vr0min, &vr0max,
6074 vr1->kind (), vr1->min (), vr1->max ());
6075 /* Make sure to canonicalize the result though as the inversion of a
6076 VR_RANGE can still be a VR_RANGE. Work on a temporary so we can
6077 fall back to vr0 when this turns things to varying. */
6078 value_range tem;
6079 tem.set_and_canonicalize (vr0type, vr0min, vr0max);
6080 /* If that failed, use the saved original VR0. */
6081 if (tem.varying_p ())
6082 return;
6083 vr0->update (tem.kind (), tem.min (), tem.max ());
6085 /* If the result is VR_UNDEFINED there is no need to mess with
6086 the equivalencies. */
6087 if (vr0->undefined_p ())
6088 return;
6090 /* The resulting set of equivalences for range intersection is the union of
6091 the two sets. */
6092 if (vr0->m_equiv && vr1->m_equiv && vr0->m_equiv != vr1->m_equiv)
6093 bitmap_ior_into (vr0->m_equiv, vr1->m_equiv);
6094 else if (vr1->m_equiv && !vr0->m_equiv)
6096 /* All equivalence bitmaps are allocated from the same obstack. So
6097 we can use the obstack associated with VR to allocate vr0->equiv. */
6098 vr0->m_equiv = BITMAP_ALLOC (vr1->m_equiv->obstack);
6099 bitmap_copy (m_equiv, vr1->m_equiv);
6103 void
6104 value_range::intersect (const value_range *other)
6106 if (dump_file && (dump_flags & TDF_DETAILS))
6108 fprintf (dump_file, "Intersecting\n ");
6109 dump_value_range (dump_file, this);
6110 fprintf (dump_file, "\nand\n ");
6111 dump_value_range (dump_file, other);
6112 fprintf (dump_file, "\n");
6114 intersect_helper (this, other);
6115 if (dump_file && (dump_flags & TDF_DETAILS))
6117 fprintf (dump_file, "to\n ");
6118 dump_value_range (dump_file, this);
6119 fprintf (dump_file, "\n");
6123 /* Helper for meet operation for value ranges. Given two value ranges VR0 and
6124 VR1, return a range that contains both VR0 and VR1. This may not be the
6125 smallest possible such range. */
6127 value_range_base
6128 value_range_base::union_helper (const value_range_base *vr0,
6129 const value_range_base *vr1)
6131 /* VR0 has the resulting range if VR1 is undefined or VR0 is varying. */
6132 if (vr1->undefined_p ()
6133 || vr0->varying_p ())
6134 return *vr0;
6136 /* VR1 has the resulting range if VR0 is undefined or VR1 is varying. */
6137 if (vr0->undefined_p ()
6138 || vr1->varying_p ())
6139 return *vr1;
6141 value_range_kind vr0type = vr0->kind ();
6142 tree vr0min = vr0->min ();
6143 tree vr0max = vr0->max ();
6144 union_ranges (&vr0type, &vr0min, &vr0max,
6145 vr1->kind (), vr1->min (), vr1->max ());
6147 /* Work on a temporary so we can still use vr0 when union returns varying. */
6148 value_range tem;
6149 tem.set_and_canonicalize (vr0type, vr0min, vr0max);
6151 /* Failed to find an efficient meet. Before giving up and setting
6152 the result to VARYING, see if we can at least derive a useful
6153 anti-range. */
6154 if (tem.varying_p ()
6155 && range_includes_zero_p (vr0) == 0
6156 && range_includes_zero_p (vr1) == 0)
6158 tem.set_nonnull (vr0->type ());
6159 return tem;
6162 return tem;
6166 /* Meet operation for value ranges. Given two value ranges VR0 and
6167 VR1, store in VR0 a range that contains both VR0 and VR1. This
6168 may not be the smallest possible such range. */
6170 void
6171 value_range_base::union_ (const value_range_base *other)
6173 if (dump_file && (dump_flags & TDF_DETAILS))
6175 fprintf (dump_file, "Meeting\n ");
6176 dump_value_range (dump_file, this);
6177 fprintf (dump_file, "\nand\n ");
6178 dump_value_range (dump_file, other);
6179 fprintf (dump_file, "\n");
6182 *this = union_helper (this, other);
6184 if (dump_file && (dump_flags & TDF_DETAILS))
6186 fprintf (dump_file, "to\n ");
6187 dump_value_range (dump_file, this);
6188 fprintf (dump_file, "\n");
6192 void
6193 value_range::union_ (const value_range *other)
6195 if (dump_file && (dump_flags & TDF_DETAILS))
6197 fprintf (dump_file, "Meeting\n ");
6198 dump_value_range (dump_file, this);
6199 fprintf (dump_file, "\nand\n ");
6200 dump_value_range (dump_file, other);
6201 fprintf (dump_file, "\n");
6204 /* If THIS is undefined we want to pick up equivalences from OTHER.
6205 Just special-case this here rather than trying to fixup after the fact. */
6206 if (this->undefined_p ())
6207 this->deep_copy (other);
6208 else
6210 value_range_base tem = union_helper (this, other);
6211 this->update (tem.kind (), tem.min (), tem.max ());
6213 /* The resulting set of equivalences is always the intersection of
6214 the two sets. */
6215 if (this->m_equiv && other->m_equiv && this->m_equiv != other->m_equiv)
6216 bitmap_and_into (this->m_equiv, other->m_equiv);
6217 else if (this->m_equiv && !other->m_equiv)
6218 bitmap_clear (this->m_equiv);
6221 if (dump_file && (dump_flags & TDF_DETAILS))
6223 fprintf (dump_file, "to\n ");
6224 dump_value_range (dump_file, this);
6225 fprintf (dump_file, "\n");
6229 /* Visit all arguments for PHI node PHI that flow through executable
6230 edges. If a valid value range can be derived from all the incoming
6231 value ranges, set a new range for the LHS of PHI. */
6233 enum ssa_prop_result
6234 vrp_prop::visit_phi (gphi *phi)
6236 tree lhs = PHI_RESULT (phi);
6237 value_range vr_result;
6238 extract_range_from_phi_node (phi, &vr_result);
6239 if (update_value_range (lhs, &vr_result))
6241 if (dump_file && (dump_flags & TDF_DETAILS))
6243 fprintf (dump_file, "Found new range for ");
6244 print_generic_expr (dump_file, lhs);
6245 fprintf (dump_file, ": ");
6246 dump_value_range (dump_file, &vr_result);
6247 fprintf (dump_file, "\n");
6250 if (vr_result.varying_p ())
6251 return SSA_PROP_VARYING;
6253 return SSA_PROP_INTERESTING;
6256 /* Nothing changed, don't add outgoing edges. */
6257 return SSA_PROP_NOT_INTERESTING;
6260 class vrp_folder : public substitute_and_fold_engine
6262 public:
6263 tree get_value (tree) FINAL OVERRIDE;
6264 bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
6265 bool fold_predicate_in (gimple_stmt_iterator *);
6267 class vr_values *vr_values;
6269 /* Delegators. */
6270 tree vrp_evaluate_conditional (tree_code code, tree op0,
6271 tree op1, gimple *stmt)
6272 { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
6273 bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
6274 { return vr_values->simplify_stmt_using_ranges (gsi); }
6275 tree op_with_constant_singleton_value_range (tree op)
6276 { return vr_values->op_with_constant_singleton_value_range (op); }
6279 /* If the statement pointed by SI has a predicate whose value can be
6280 computed using the value range information computed by VRP, compute
6281 its value and return true. Otherwise, return false. */
6283 bool
6284 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
6286 bool assignment_p = false;
6287 tree val;
6288 gimple *stmt = gsi_stmt (*si);
6290 if (is_gimple_assign (stmt)
6291 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
6293 assignment_p = true;
6294 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
6295 gimple_assign_rhs1 (stmt),
6296 gimple_assign_rhs2 (stmt),
6297 stmt);
6299 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6300 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6301 gimple_cond_lhs (cond_stmt),
6302 gimple_cond_rhs (cond_stmt),
6303 stmt);
6304 else
6305 return false;
6307 if (val)
6309 if (assignment_p)
6310 val = fold_convert (gimple_expr_type (stmt), val);
6312 if (dump_file)
6314 fprintf (dump_file, "Folding predicate ");
6315 print_gimple_expr (dump_file, stmt, 0);
6316 fprintf (dump_file, " to ");
6317 print_generic_expr (dump_file, val);
6318 fprintf (dump_file, "\n");
6321 if (is_gimple_assign (stmt))
6322 gimple_assign_set_rhs_from_tree (si, val);
6323 else
6325 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
6326 gcond *cond_stmt = as_a <gcond *> (stmt);
6327 if (integer_zerop (val))
6328 gimple_cond_make_false (cond_stmt);
6329 else if (integer_onep (val))
6330 gimple_cond_make_true (cond_stmt);
6331 else
6332 gcc_unreachable ();
6335 return true;
6338 return false;
6341 /* Callback for substitute_and_fold folding the stmt at *SI. */
6343 bool
6344 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
6346 if (fold_predicate_in (si))
6347 return true;
6349 return simplify_stmt_using_ranges (si);
6352 /* If OP has a value range with a single constant value return that,
6353 otherwise return NULL_TREE. This returns OP itself if OP is a
6354 constant.
6356 Implemented as a pure wrapper right now, but this will change. */
6358 tree
6359 vrp_folder::get_value (tree op)
6361 return op_with_constant_singleton_value_range (op);
6364 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
6365 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
6366 BB. If no such ASSERT_EXPR is found, return OP. */
6368 static tree
6369 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
6371 imm_use_iterator imm_iter;
6372 gimple *use_stmt;
6373 use_operand_p use_p;
6375 if (TREE_CODE (op) == SSA_NAME)
6377 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
6379 use_stmt = USE_STMT (use_p);
6380 if (use_stmt != stmt
6381 && gimple_assign_single_p (use_stmt)
6382 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
6383 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
6384 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
6385 return gimple_assign_lhs (use_stmt);
6388 return op;
6391 /* A hack. */
6392 static class vr_values *x_vr_values;
6394 /* A trivial wrapper so that we can present the generic jump threading
6395 code with a simple API for simplifying statements. STMT is the
6396 statement we want to simplify, WITHIN_STMT provides the location
6397 for any overflow warnings. */
6399 static tree
6400 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
6401 class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED,
6402 basic_block bb)
6404 /* First see if the conditional is in the hash table. */
6405 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
6406 if (cached_lhs && is_gimple_min_invariant (cached_lhs))
6407 return cached_lhs;
6409 vr_values *vr_values = x_vr_values;
6410 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6412 tree op0 = gimple_cond_lhs (cond_stmt);
6413 op0 = lhs_of_dominating_assert (op0, bb, stmt);
6415 tree op1 = gimple_cond_rhs (cond_stmt);
6416 op1 = lhs_of_dominating_assert (op1, bb, stmt);
6418 return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6419 op0, op1, within_stmt);
6422 /* We simplify a switch statement by trying to determine which case label
6423 will be taken. If we are successful then we return the corresponding
6424 CASE_LABEL_EXPR. */
6425 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
6427 tree op = gimple_switch_index (switch_stmt);
6428 if (TREE_CODE (op) != SSA_NAME)
6429 return NULL_TREE;
6431 op = lhs_of_dominating_assert (op, bb, stmt);
6433 const value_range *vr = vr_values->get_value_range (op);
6434 if (vr->undefined_p ()
6435 || vr->varying_p ()
6436 || vr->symbolic_p ())
6437 return NULL_TREE;
6439 if (vr->kind () == VR_RANGE)
6441 size_t i, j;
6442 /* Get the range of labels that contain a part of the operand's
6443 value range. */
6444 find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j);
6446 /* Is there only one such label? */
6447 if (i == j)
6449 tree label = gimple_switch_label (switch_stmt, i);
6451 /* The i'th label will be taken only if the value range of the
6452 operand is entirely within the bounds of this label. */
6453 if (CASE_HIGH (label) != NULL_TREE
6454 ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0
6455 && tree_int_cst_compare (CASE_HIGH (label),
6456 vr->max ()) >= 0)
6457 : (tree_int_cst_equal (CASE_LOW (label), vr->min ())
6458 && tree_int_cst_equal (vr->min (), vr->max ())))
6459 return label;
6462 /* If there are no such labels then the default label will be
6463 taken. */
6464 if (i > j)
6465 return gimple_switch_label (switch_stmt, 0);
6468 if (vr->kind () == VR_ANTI_RANGE)
6470 unsigned n = gimple_switch_num_labels (switch_stmt);
6471 tree min_label = gimple_switch_label (switch_stmt, 1);
6472 tree max_label = gimple_switch_label (switch_stmt, n - 1);
6474 /* The default label will be taken only if the anti-range of the
6475 operand is entirely outside the bounds of all the (non-default)
6476 case labels. */
6477 if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0
6478 && (CASE_HIGH (max_label) != NULL_TREE
6479 ? tree_int_cst_compare (vr->max (),
6480 CASE_HIGH (max_label)) >= 0
6481 : tree_int_cst_compare (vr->max (),
6482 CASE_LOW (max_label)) >= 0))
6483 return gimple_switch_label (switch_stmt, 0);
6486 return NULL_TREE;
6489 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
6491 tree lhs = gimple_assign_lhs (assign_stmt);
6492 if (TREE_CODE (lhs) == SSA_NAME
6493 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6494 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6495 && stmt_interesting_for_vrp (stmt))
6497 edge dummy_e;
6498 tree dummy_tree;
6499 value_range new_vr;
6500 vr_values->extract_range_from_stmt (stmt, &dummy_e,
6501 &dummy_tree, &new_vr);
6502 tree singleton;
6503 if (new_vr.singleton_p (&singleton))
6504 return singleton;
6508 return NULL_TREE;
6511 class vrp_dom_walker : public dom_walker
6513 public:
6514 vrp_dom_walker (cdi_direction direction,
6515 class const_and_copies *const_and_copies,
6516 class avail_exprs_stack *avail_exprs_stack)
6517 : dom_walker (direction, REACHABLE_BLOCKS),
6518 m_const_and_copies (const_and_copies),
6519 m_avail_exprs_stack (avail_exprs_stack),
6520 m_dummy_cond (NULL) {}
6522 virtual edge before_dom_children (basic_block);
6523 virtual void after_dom_children (basic_block);
6525 class vr_values *vr_values;
6527 private:
6528 class const_and_copies *m_const_and_copies;
6529 class avail_exprs_stack *m_avail_exprs_stack;
6531 gcond *m_dummy_cond;
6535 /* Called before processing dominator children of BB. We want to look
6536 at ASSERT_EXPRs and record information from them in the appropriate
6537 tables.
6539 We could look at other statements here. It's not seen as likely
6540 to significantly increase the jump threads we discover. */
6542 edge
6543 vrp_dom_walker::before_dom_children (basic_block bb)
6545 gimple_stmt_iterator gsi;
6547 m_avail_exprs_stack->push_marker ();
6548 m_const_and_copies->push_marker ();
6549 for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6551 gimple *stmt = gsi_stmt (gsi);
6552 if (gimple_assign_single_p (stmt)
6553 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
6555 tree rhs1 = gimple_assign_rhs1 (stmt);
6556 tree cond = TREE_OPERAND (rhs1, 1);
6557 tree inverted = invert_truthvalue (cond);
6558 vec<cond_equivalence> p;
6559 p.create (3);
6560 record_conditions (&p, cond, inverted);
6561 for (unsigned int i = 0; i < p.length (); i++)
6562 m_avail_exprs_stack->record_cond (&p[i]);
6564 tree lhs = gimple_assign_lhs (stmt);
6565 m_const_and_copies->record_const_or_copy (lhs,
6566 TREE_OPERAND (rhs1, 0));
6567 p.release ();
6568 continue;
6570 break;
6572 return NULL;
6575 /* Called after processing dominator children of BB. This is where we
6576 actually call into the threader. */
6577 void
6578 vrp_dom_walker::after_dom_children (basic_block bb)
6580 if (!m_dummy_cond)
6581 m_dummy_cond = gimple_build_cond (NE_EXPR,
6582 integer_zero_node, integer_zero_node,
6583 NULL, NULL);
6585 x_vr_values = vr_values;
6586 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
6587 m_avail_exprs_stack, NULL,
6588 simplify_stmt_for_jump_threading);
6589 x_vr_values = NULL;
6591 m_avail_exprs_stack->pop_to_marker ();
6592 m_const_and_copies->pop_to_marker ();
6595 /* Blocks which have more than one predecessor and more than
6596 one successor present jump threading opportunities, i.e.,
6597 when the block is reached from a specific predecessor, we
6598 may be able to determine which of the outgoing edges will
6599 be traversed. When this optimization applies, we are able
6600 to avoid conditionals at runtime and we may expose secondary
6601 optimization opportunities.
6603 This routine is effectively a driver for the generic jump
6604 threading code. It basically just presents the generic code
6605 with edges that may be suitable for jump threading.
6607 Unlike DOM, we do not iterate VRP if jump threading was successful.
6608 While iterating may expose new opportunities for VRP, it is expected
6609 those opportunities would be very limited and the compile time cost
6610 to expose those opportunities would be significant.
6612 As jump threading opportunities are discovered, they are registered
6613 for later realization. */
6615 static void
6616 identify_jump_threads (class vr_values *vr_values)
6618 /* Ugh. When substituting values earlier in this pass we can
6619 wipe the dominance information. So rebuild the dominator
6620 information as we need it within the jump threading code. */
6621 calculate_dominance_info (CDI_DOMINATORS);
6623 /* We do not allow VRP information to be used for jump threading
6624 across a back edge in the CFG. Otherwise it becomes too
6625 difficult to avoid eliminating loop exit tests. Of course
6626 EDGE_DFS_BACK is not accurate at this time so we have to
6627 recompute it. */
6628 mark_dfs_back_edges ();
6630 /* Allocate our unwinder stack to unwind any temporary equivalences
6631 that might be recorded. */
6632 const_and_copies *equiv_stack = new const_and_copies ();
6634 hash_table<expr_elt_hasher> *avail_exprs
6635 = new hash_table<expr_elt_hasher> (1024);
6636 avail_exprs_stack *avail_exprs_stack
6637 = new class avail_exprs_stack (avail_exprs);
6639 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
6640 walker.vr_values = vr_values;
6641 walker.walk (cfun->cfg->x_entry_block_ptr);
6643 /* We do not actually update the CFG or SSA graphs at this point as
6644 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
6645 handle ASSERT_EXPRs gracefully. */
6646 delete equiv_stack;
6647 delete avail_exprs;
6648 delete avail_exprs_stack;
6651 /* Traverse all the blocks folding conditionals with known ranges. */
6653 void
6654 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
6656 size_t i;
6658 /* We have completed propagating through the lattice. */
6659 vr_values.set_lattice_propagation_complete ();
6661 if (dump_file)
6663 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
6664 vr_values.dump_all_value_ranges (dump_file);
6665 fprintf (dump_file, "\n");
6668 /* Set value range to non pointer SSA_NAMEs. */
6669 for (i = 0; i < num_ssa_names; i++)
6671 tree name = ssa_name (i);
6672 if (!name)
6673 continue;
6675 const value_range *vr = get_value_range (name);
6676 if (!name || !vr->constant_p ())
6677 continue;
6679 if (POINTER_TYPE_P (TREE_TYPE (name))
6680 && range_includes_zero_p (vr) == 0)
6681 set_ptr_nonnull (name);
6682 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
6683 set_range_info (name, *vr);
6686 /* If we're checking array refs, we want to merge information on
6687 the executability of each edge between vrp_folder and the
6688 check_array_bounds_dom_walker: each can clear the
6689 EDGE_EXECUTABLE flag on edges, in different ways.
6691 Hence, if we're going to call check_all_array_refs, set
6692 the flag on every edge now, rather than in
6693 check_array_bounds_dom_walker's ctor; vrp_folder may clear
6694 it from some edges. */
6695 if (warn_array_bounds && warn_array_bounds_p)
6696 set_all_edges_as_executable (cfun);
6698 class vrp_folder vrp_folder;
6699 vrp_folder.vr_values = &vr_values;
6700 vrp_folder.substitute_and_fold ();
6702 if (warn_array_bounds && warn_array_bounds_p)
6703 check_all_array_refs ();
6706 /* Main entry point to VRP (Value Range Propagation). This pass is
6707 loosely based on J. R. C. Patterson, ``Accurate Static Branch
6708 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
6709 Programming Language Design and Implementation, pp. 67-78, 1995.
6710 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
6712 This is essentially an SSA-CCP pass modified to deal with ranges
6713 instead of constants.
6715 While propagating ranges, we may find that two or more SSA name
6716 have equivalent, though distinct ranges. For instance,
6718 1 x_9 = p_3->a;
6719 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
6720 3 if (p_4 == q_2)
6721 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
6722 5 endif
6723 6 if (q_2)
6725 In the code above, pointer p_5 has range [q_2, q_2], but from the
6726 code we can also determine that p_5 cannot be NULL and, if q_2 had
6727 a non-varying range, p_5's range should also be compatible with it.
6729 These equivalences are created by two expressions: ASSERT_EXPR and
6730 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
6731 result of another assertion, then we can use the fact that p_5 and
6732 p_4 are equivalent when evaluating p_5's range.
6734 Together with value ranges, we also propagate these equivalences
6735 between names so that we can take advantage of information from
6736 multiple ranges when doing final replacement. Note that this
6737 equivalency relation is transitive but not symmetric.
6739 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
6740 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
6741 in contexts where that assertion does not hold (e.g., in line 6).
6743 TODO, the main difference between this pass and Patterson's is that
6744 we do not propagate edge probabilities. We only compute whether
6745 edges can be taken or not. That is, instead of having a spectrum
6746 of jump probabilities between 0 and 1, we only deal with 0, 1 and
6747 DON'T KNOW. In the future, it may be worthwhile to propagate
6748 probabilities to aid branch prediction. */
6750 static unsigned int
6751 execute_vrp (bool warn_array_bounds_p)
6754 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
6755 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
6756 scev_initialize ();
6758 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
6759 Inserting assertions may split edges which will invalidate
6760 EDGE_DFS_BACK. */
6761 insert_range_assertions ();
6763 threadedge_initialize_values ();
6765 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
6766 mark_dfs_back_edges ();
6768 class vrp_prop vrp_prop;
6769 vrp_prop.vrp_initialize ();
6770 vrp_prop.ssa_propagate ();
6771 vrp_prop.vrp_finalize (warn_array_bounds_p);
6773 /* We must identify jump threading opportunities before we release
6774 the datastructures built by VRP. */
6775 identify_jump_threads (&vrp_prop.vr_values);
6777 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
6778 was set by a type conversion can often be rewritten to use the
6779 RHS of the type conversion.
6781 However, doing so inhibits jump threading through the comparison.
6782 So that transformation is not performed until after jump threading
6783 is complete. */
6784 basic_block bb;
6785 FOR_EACH_BB_FN (bb, cfun)
6787 gimple *last = last_stmt (bb);
6788 if (last && gimple_code (last) == GIMPLE_COND)
6789 vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
6792 free_numbers_of_iterations_estimates (cfun);
6794 /* ASSERT_EXPRs must be removed before finalizing jump threads
6795 as finalizing jump threads calls the CFG cleanup code which
6796 does not properly handle ASSERT_EXPRs. */
6797 remove_range_assertions ();
6799 /* If we exposed any new variables, go ahead and put them into
6800 SSA form now, before we handle jump threading. This simplifies
6801 interactions between rewriting of _DECL nodes into SSA form
6802 and rewriting SSA_NAME nodes into SSA form after block
6803 duplication and CFG manipulation. */
6804 update_ssa (TODO_update_ssa);
6806 /* We identified all the jump threading opportunities earlier, but could
6807 not transform the CFG at that time. This routine transforms the
6808 CFG and arranges for the dominator tree to be rebuilt if necessary.
6810 Note the SSA graph update will occur during the normal TODO
6811 processing by the pass manager. */
6812 thread_through_all_blocks (false);
6814 vrp_prop.vr_values.cleanup_edges_and_switches ();
6815 threadedge_finalize_values ();
6817 scev_finalize ();
6818 loop_optimizer_finalize ();
6819 return 0;
6822 namespace {
6824 const pass_data pass_data_vrp =
6826 GIMPLE_PASS, /* type */
6827 "vrp", /* name */
6828 OPTGROUP_NONE, /* optinfo_flags */
6829 TV_TREE_VRP, /* tv_id */
6830 PROP_ssa, /* properties_required */
6831 0, /* properties_provided */
6832 0, /* properties_destroyed */
6833 0, /* todo_flags_start */
6834 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
6837 class pass_vrp : public gimple_opt_pass
6839 public:
6840 pass_vrp (gcc::context *ctxt)
6841 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
6844 /* opt_pass methods: */
6845 opt_pass * clone () { return new pass_vrp (m_ctxt); }
6846 void set_pass_param (unsigned int n, bool param)
6848 gcc_assert (n == 0);
6849 warn_array_bounds_p = param;
6851 virtual bool gate (function *) { return flag_tree_vrp != 0; }
6852 virtual unsigned int execute (function *)
6853 { return execute_vrp (warn_array_bounds_p); }
6855 private:
6856 bool warn_array_bounds_p;
6857 }; // class pass_vrp
6859 } // anon namespace
6861 gimple_opt_pass *
6862 make_pass_vrp (gcc::context *ctxt)
6864 return new pass_vrp (ctxt);
6868 /* Worker for determine_value_range. */
6870 static void
6871 determine_value_range_1 (value_range_base *vr, tree expr)
6873 if (BINARY_CLASS_P (expr))
6875 value_range_base vr0, vr1;
6876 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6877 determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
6878 extract_range_from_binary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
6879 &vr0, &vr1);
6881 else if (UNARY_CLASS_P (expr))
6883 value_range_base vr0;
6884 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6885 extract_range_from_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
6886 &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
6888 else if (TREE_CODE (expr) == INTEGER_CST)
6889 vr->set (expr);
6890 else
6892 value_range_kind kind;
6893 wide_int min, max;
6894 /* For SSA names try to extract range info computed by VRP. Otherwise
6895 fall back to varying. */
6896 if (TREE_CODE (expr) == SSA_NAME
6897 && INTEGRAL_TYPE_P (TREE_TYPE (expr))
6898 && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
6899 vr->set (kind, wide_int_to_tree (TREE_TYPE (expr), min),
6900 wide_int_to_tree (TREE_TYPE (expr), max));
6901 else
6902 vr->set_varying ();
6906 /* Compute a value-range for EXPR and set it in *MIN and *MAX. Return
6907 the determined range type. */
6909 value_range_kind
6910 determine_value_range (tree expr, wide_int *min, wide_int *max)
6912 value_range_base vr;
6913 determine_value_range_1 (&vr, expr);
6914 if (vr.constant_p ())
6916 *min = wi::to_wide (vr.min ());
6917 *max = wi::to_wide (vr.max ());
6918 return vr.kind ();
6921 return VR_VARYING;