1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2018 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimple-walk.h"
44 #include "tree-cfg.h"
45 #include "tree-dfa.h"
46 #include "tree-ssa-loop-manip.h"
47 #include "tree-ssa-loop-niter.h"
48 #include "tree-ssa-loop.h"
49 #include "tree-into-ssa.h"
50 #include "tree-ssa.h"
51 #include "intl.h"
52 #include "cfgloop.h"
53 #include "tree-scalar-evolution.h"
54 #include "tree-ssa-propagate.h"
55 #include "tree-chrec.h"
56 #include "tree-ssa-threadupdate.h"
57 #include "tree-ssa-scopedtables.h"
58 #include "tree-ssa-threadedge.h"
59 #include "omp-general.h"
60 #include "target.h"
61 #include "case-cfn-macros.h"
62 #include "params.h"
63 #include "alloc-pool.h"
64 #include "domwalk.h"
65 #include "tree-cfgcleanup.h"
66 #include "stringpool.h"
67 #include "attribs.h"
68 #include "vr-values.h"
69 #include "builtins.h"
70 #include "wide-int-range.h"
72 /* Set of SSA names found live during the RPO traversal of the function
73 for still active basic-blocks. */
74 static sbitmap *live;
76 /* Return true if the SSA name NAME is live on the edge E. */
78 static bool
79 live_on_edge (edge e, tree name)
81 return (live[e->dest->index]
82 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
85 /* Location information for ASSERT_EXPRs. Each instance of this
86 structure describes an ASSERT_EXPR for an SSA name. Since a single
87 SSA name may have more than one assertion associated with it, these
88 locations are kept in a linked list attached to the corresponding
89 SSA name. */
90 struct assert_locus
92 /* Basic block where the assertion would be inserted. */
93 basic_block bb;
95 /* Some assertions need to be inserted on an edge (e.g., assertions
96 generated by COND_EXPRs). In those cases, BB will be NULL. */
97 edge e;
99 /* Iterator pointing to the statement that generated this assertion. */
100 gimple_stmt_iterator si;
102 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
103 enum tree_code comp_code;
105 /* Value being compared against. */
106 tree val;
108 /* Expression to compare. */
109 tree expr;
111 /* Next node in the linked list. */
112 assert_locus *next;
115 /* If bit I is present, it means that SSA name N_i has a list of
116 assertions that should be inserted in the IL. */
117 static bitmap need_assert_for;
119 /* Array of location lists describing where to insert assertions. ASSERTS_FOR[I]
120 holds a list of ASSERT_LOCUS_T nodes that describe where
121 ASSERT_EXPRs for SSA name N_I should be inserted. */
122 static assert_locus **asserts_for;
124 /* Return the maximum value for TYPE. */
126 tree
127 vrp_val_max (const_tree type)
129 if (!INTEGRAL_TYPE_P (type))
130 return NULL_TREE;
132 return TYPE_MAX_VALUE (type);
135 /* Return the minimum value for TYPE. */
137 tree
138 vrp_val_min (const_tree type)
140 if (!INTEGRAL_TYPE_P (type))
141 return NULL_TREE;
143 return TYPE_MIN_VALUE (type);
146 /* Return whether VAL is equal to the maximum value of its type.
147 We can't do a simple equality comparison with TYPE_MAX_VALUE because
148 C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
149 is not == to the integer constant with the same value in the type. */
151 bool
152 vrp_val_is_max (const_tree val)
154 tree type_max = vrp_val_max (TREE_TYPE (val));
155 return (val == type_max
156 || (type_max != NULL_TREE
157 && operand_equal_p (val, type_max, 0)));
160 /* Return whether VAL is equal to the minimum value of its type. */
162 bool
163 vrp_val_is_min (const_tree val)
165 tree type_min = vrp_val_min (TREE_TYPE (val));
166 return (val == type_min
167 || (type_min != NULL_TREE
168 && operand_equal_p (val, type_min, 0)));
171 /* VR_TYPE describes a range with minimum value *MIN and maximum
172 value *MAX. Restrict the range to the set of values that have
173 no bits set outside NONZERO_BITS. Update *MIN and *MAX and
174 return the new range type.
176 SGN gives the sign of the values described by the range. */
178 enum value_range_type
179 intersect_range_with_nonzero_bits (enum value_range_type vr_type,
180 wide_int *min, wide_int *max,
181 const wide_int &nonzero_bits,
182 signop sgn)
184 if (vr_type == VR_ANTI_RANGE)
186 /* The VR_ANTI_RANGE is equivalent to the union of the ranges
187 A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS
188 to create an inclusive upper bound for A and an inclusive lower
189 bound for B. */
190 wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
191 wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);
193 /* If the calculation of A_MAX wrapped, A is effectively empty
194 and A_MAX is the highest value that satisfies NONZERO_BITS.
195 Likewise if the calculation of B_MIN wrapped, B is effectively
196 empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */
197 bool a_empty = wi::ge_p (a_max, *min, sgn);
198 bool b_empty = wi::le_p (b_min, *max, sgn);
200 /* If both A and B are empty, there are no valid values. */
201 if (a_empty && b_empty)
202 return VR_UNDEFINED;
204 /* If exactly one of A or B is empty, return a VR_RANGE for the
205 other one. */
206 if (a_empty || b_empty)
208 *min = b_min;
209 *max = a_max;
210 gcc_checking_assert (wi::le_p (*min, *max, sgn));
211 return VR_RANGE;
214 /* Update the VR_ANTI_RANGE bounds. */
215 *min = a_max + 1;
216 *max = b_min - 1;
217 gcc_checking_assert (wi::le_p (*min, *max, sgn));
219 /* Now check whether the excluded range includes any values that
220 satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */
221 if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
223 unsigned int precision = min->get_precision ();
224 *min = wi::min_value (precision, sgn);
225 *max = wi::max_value (precision, sgn);
226 vr_type = VR_RANGE;
229 if (vr_type == VR_RANGE)
231 *max = wi::round_down_for_mask (*max, nonzero_bits);
233 /* Check that the range contains at least one valid value. */
234 if (wi::gt_p (*min, *max, sgn))
235 return VR_UNDEFINED;
237 *min = wi::round_up_for_mask (*min, nonzero_bits);
238 gcc_checking_assert (wi::le_p (*min, *max, sgn));
240 return vr_type;
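/* Worked example (illustrative, 8-bit unsigned): for the anti-range
   ~[5, 12] with NONZERO_BITS == 0xf0, A_MAX == round_down_for_mask (4, 0xf0)
   == 0 and B_MIN == round_up_for_mask (13, 0xf0) == 16.  Neither side is
   empty, so the anti-range first becomes ~[1, 15]; but since
   round_up_for_mask (1, 0xf0) == 16 == B_MIN, the excluded range holds no
   value satisfying the mask, so we widen to the full VR_RANGE [0, 255],
   which the VR_RANGE path then tightens to [0, 240].  */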
243 /* Set value range VR to VR_UNDEFINED. */
245 static inline void
246 set_value_range_to_undefined (value_range *vr)
248 vr->type = VR_UNDEFINED;
249 vr->min = vr->max = NULL_TREE;
250 if (vr->equiv)
251 bitmap_clear (vr->equiv);
254 /* Set value range VR to VR_VARYING. */
256 void
257 set_value_range_to_varying (value_range *vr)
259 vr->type = VR_VARYING;
260 vr->min = vr->max = NULL_TREE;
261 if (vr->equiv)
262 bitmap_clear (vr->equiv);
265 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
267 void
268 set_value_range (value_range *vr, enum value_range_type t, tree min,
269 tree max, bitmap equiv)
271 /* Check the validity of the range. */
272 if (flag_checking
273 && (t == VR_RANGE || t == VR_ANTI_RANGE))
275 int cmp;
277 gcc_assert (min && max);
279 gcc_assert (!TREE_OVERFLOW_P (min) && !TREE_OVERFLOW_P (max));
281 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
282 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
284 cmp = compare_values (min, max);
285 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
288 if (flag_checking
289 && (t == VR_UNDEFINED || t == VR_VARYING))
291 gcc_assert (min == NULL_TREE && max == NULL_TREE);
292 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
295 vr->type = t;
296 vr->min = min;
297 vr->max = max;
299 /* Since updating the equivalence set involves deep copying the
300 bitmaps, only do it if absolutely necessary.
302 All equivalence bitmaps are allocated from the same obstack. So
303 we can use the obstack associated with EQUIV to allocate vr->equiv. */
304 if (vr->equiv == NULL
305 && equiv != NULL)
306 vr->equiv = BITMAP_ALLOC (equiv->obstack);
308 if (equiv != vr->equiv)
310 if (equiv && !bitmap_empty_p (equiv))
311 bitmap_copy (vr->equiv, equiv);
312 else
313 bitmap_clear (vr->equiv);
318 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
319 This means adjusting T, MIN and MAX representing the case of a
320 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
321 as the anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
322 In corner cases where MAX+1 or MIN-1 wraps this will fall back
323 to varying.
324 This routine exists to ease canonicalization in the case where we
325 extract ranges from var + CST op limit. */
327 void
328 set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
329 tree min, tree max, bitmap equiv)
331 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
332 if (t == VR_UNDEFINED)
334 set_value_range_to_undefined (vr);
335 return;
337 else if (t == VR_VARYING)
339 set_value_range_to_varying (vr);
340 return;
343 /* Nothing to canonicalize for symbolic ranges. */
344 if (TREE_CODE (min) != INTEGER_CST
345 || TREE_CODE (max) != INTEGER_CST)
347 set_value_range (vr, t, min, max, equiv);
348 return;
351 /* MIN and MAX are in the wrong order; to swap them we also need to
352 invert the range type and adjust the bounds. */
353 if (tree_int_cst_lt (max, min))
355 tree one, tmp;
357 /* For one-bit precision, if max < min the swapped range covers
358 all values: for VR_RANGE that is varying and for VR_ANTI_RANGE
359 it is the empty range, so drop to varying in both cases. */
360 if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
362 set_value_range_to_varying (vr);
363 return;
366 one = build_int_cst (TREE_TYPE (min), 1);
367 tmp = int_const_binop (PLUS_EXPR, max, one);
368 max = int_const_binop (MINUS_EXPR, min, one);
369 min = tmp;
371 /* There's one corner case, if we had [C+1, C] before we now have
372 that again. But this represents an empty value range, so drop
373 to varying in this case. */
374 if (tree_int_cst_lt (max, min))
376 set_value_range_to_varying (vr);
377 return;
380 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
383 /* Anti-ranges that can be represented as ranges should be so. */
384 if (t == VR_ANTI_RANGE)
386 /* For -fstrict-enums we may receive out-of-range ranges so consider
387 values < -INF and values > INF as -INF/INF as well. */
388 tree type = TREE_TYPE (min);
389 bool is_min = (INTEGRAL_TYPE_P (type)
390 && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
391 bool is_max = (INTEGRAL_TYPE_P (type)
392 && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);
394 if (is_min && is_max)
396 /* We cannot deal with empty ranges, drop to varying.
397 ??? This could be VR_UNDEFINED instead. */
398 set_value_range_to_varying (vr);
399 return;
401 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
402 && (is_min || is_max))
404 /* Non-empty boolean ranges can always be represented
405 as a singleton range. */
406 if (is_min)
407 min = max = vrp_val_max (TREE_TYPE (min));
408 else
409 min = max = vrp_val_min (TREE_TYPE (min));
410 t = VR_RANGE;
412 else if (is_min
413 /* As a special exception preserve non-null ranges. */
414 && !(TYPE_UNSIGNED (TREE_TYPE (min))
415 && integer_zerop (max)))
417 tree one = build_int_cst (TREE_TYPE (max), 1);
418 min = int_const_binop (PLUS_EXPR, max, one);
419 max = vrp_val_max (TREE_TYPE (max));
420 t = VR_RANGE;
422 else if (is_max)
424 tree one = build_int_cst (TREE_TYPE (min), 1);
425 max = int_const_binop (MINUS_EXPR, min, one);
426 min = vrp_val_min (TREE_TYPE (min));
427 t = VR_RANGE;
431 /* Do not drop [-INF(OVF), +INF(OVF)] to varying. (OVF) has to be sticky
432 to make sure VRP iteration terminates, otherwise we can get into
433 oscillations. */
435 set_value_range (vr, t, min, max, equiv);
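/* Worked example (illustrative, 8-bit unsigned): the wrapping input
   { VR_RANGE, 200, 10 } has MAX < MIN, so the bounds are swapped to
   [11, 199] and the kind is inverted, giving the canonical
   ~[11, 199], i.e. [200, 255] U [0, 10].  */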
438 /* Copy value range FROM into value range TO. */
440 void
441 copy_value_range (value_range *to, const value_range *from)
443 set_value_range (to, from->type, from->min, from->max, from->equiv);
446 /* Set value range VR to a single value. This function is only called
447 with values we get from statements, and exists to clear the
448 TREE_OVERFLOW flag. */
450 void
451 set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
453 gcc_assert (is_gimple_min_invariant (val));
454 if (TREE_OVERFLOW_P (val))
455 val = drop_tree_overflow (val);
456 set_value_range (vr, VR_RANGE, val, val, equiv);
459 /* Set value range VR to a non-NULL range of type TYPE. */
461 void
462 set_value_range_to_nonnull (value_range *vr, tree type)
464 tree zero = build_int_cst (type, 0);
465 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
469 /* Set value range VR to a NULL range of type TYPE. */
471 void
472 set_value_range_to_null (value_range *vr, tree type)
474 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
477 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
479 bool
480 vrp_operand_equal_p (const_tree val1, const_tree val2)
482 if (val1 == val2)
483 return true;
484 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
485 return false;
486 return true;
489 /* Return true, if the bitmaps B1 and B2 are equal. */
491 bool
492 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
494 return (b1 == b2
495 || ((!b1 || bitmap_empty_p (b1))
496 && (!b2 || bitmap_empty_p (b2)))
497 || (b1 && b2
498 && bitmap_equal_p (b1, b2)));
501 /* Return true if VR is [0, 0]. */
503 static inline bool
504 range_is_null (const value_range *vr)
506 return vr->type == VR_RANGE
507 && integer_zerop (vr->min)
508 && integer_zerop (vr->max);
511 /* Return true if min and max of VR are INTEGER_CST. It need not
512 be a singleton. */
514 bool
515 range_int_cst_p (const value_range *vr)
517 return (vr->type == VR_RANGE
518 && TREE_CODE (vr->max) == INTEGER_CST
519 && TREE_CODE (vr->min) == INTEGER_CST);
522 /* Return true if VR is a INTEGER_CST singleton. */
524 bool
525 range_int_cst_singleton_p (const value_range *vr)
527 return (range_int_cst_p (vr)
528 && tree_int_cst_equal (vr->min, vr->max));
531 /* Return true if value range VR involves at least one symbol. */
533 bool
534 symbolic_range_p (const value_range *vr)
536 return (!is_gimple_min_invariant (vr->min)
537 || !is_gimple_min_invariant (vr->max));
540 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
541 otherwise. We only handle additive operations and set NEG to true if the
542 symbol is negated and INV to the invariant part, if any. */
544 tree
545 get_single_symbol (tree t, bool *neg, tree *inv)
547 bool neg_;
548 tree inv_;
550 *inv = NULL_TREE;
551 *neg = false;
553 if (TREE_CODE (t) == PLUS_EXPR
554 || TREE_CODE (t) == POINTER_PLUS_EXPR
555 || TREE_CODE (t) == MINUS_EXPR)
557 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
559 neg_ = (TREE_CODE (t) == MINUS_EXPR);
560 inv_ = TREE_OPERAND (t, 0);
561 t = TREE_OPERAND (t, 1);
563 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
565 neg_ = false;
566 inv_ = TREE_OPERAND (t, 1);
567 t = TREE_OPERAND (t, 0);
569 else
570 return NULL_TREE;
572 else
574 neg_ = false;
575 inv_ = NULL_TREE;
578 if (TREE_CODE (t) == NEGATE_EXPR)
580 t = TREE_OPERAND (t, 0);
581 neg_ = !neg_;
584 if (TREE_CODE (t) != SSA_NAME)
585 return NULL_TREE;
587 if (inv_ && TREE_OVERFLOW_P (inv_))
588 inv_ = drop_tree_overflow (inv_);
590 *neg = neg_;
591 *inv = inv_;
592 return t;
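/* Usage sketch (illustrative; x_1 is an arbitrary SSA name):

     bool neg;
     tree inv;
     tree sym = get_single_symbol (t, &neg, &inv);

   For T == 'x_1 + 16' this sets SYM == x_1, NEG == false, INV == 16;
   for T == '3 - x_1' it sets SYM == x_1, NEG == true, INV == 3; and
   for T == '-x_1' it sets SYM == x_1, NEG == true, INV == NULL_TREE.  */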
595 /* The reverse operation: build a symbolic expression with TYPE
596 from symbol SYM, negated according to NEG, and invariant INV. */
598 static tree
599 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
601 const bool pointer_p = POINTER_TYPE_P (type);
602 tree t = sym;
604 if (neg)
605 t = build1 (NEGATE_EXPR, type, t);
607 if (integer_zerop (inv))
608 return t;
610 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
613 /* Return
614 1 if VAL < VAL2
615 0 if !(VAL < VAL2)
616 -2 if those are incomparable. */
617 int
618 operand_less_p (tree val, tree val2)
620 /* LT is folded faster than GE and others. Inline the common case. */
621 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
622 return tree_int_cst_lt (val, val2);
623 else
625 tree tcmp;
627 fold_defer_overflow_warnings ();
629 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
631 fold_undefer_and_ignore_overflow_warnings ();
633 if (!tcmp
634 || TREE_CODE (tcmp) != INTEGER_CST)
635 return -2;
637 if (!integer_zerop (tcmp))
638 return 1;
641 return 0;
644 /* Compare two values VAL1 and VAL2. Return
646 -2 if VAL1 and VAL2 cannot be compared at compile-time,
647 -1 if VAL1 < VAL2,
648 0 if VAL1 == VAL2,
649 +1 if VAL1 > VAL2, and
650 +2 if VAL1 != VAL2
652 This is similar to tree_int_cst_compare but supports pointer values
653 and values that cannot be compared at compile time.
655 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
656 true if the return value is only valid if we assume that signed
657 overflow is undefined. */
659 int
660 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
662 if (val1 == val2)
663 return 0;
665 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
666 both integers. */
667 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
668 == POINTER_TYPE_P (TREE_TYPE (val2)));
670 /* Convert the two values into the same type. This is needed because
671 sizetype causes sign extension even for unsigned types. */
672 val2 = fold_convert (TREE_TYPE (val1), val2);
673 STRIP_USELESS_TYPE_CONVERSION (val2);
675 const bool overflow_undefined
676 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
677 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
678 tree inv1, inv2;
679 bool neg1, neg2;
680 tree sym1 = get_single_symbol (val1, &neg1, &inv1);
681 tree sym2 = get_single_symbol (val2, &neg2, &inv2);
683 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
684 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
685 if (sym1 && sym2)
687 /* Both values must use the same name with the same sign. */
688 if (sym1 != sym2 || neg1 != neg2)
689 return -2;
691 /* [-]NAME + CST == [-]NAME + CST. */
692 if (inv1 == inv2)
693 return 0;
695 /* If overflow is defined we cannot simplify more. */
696 if (!overflow_undefined)
697 return -2;
699 if (strict_overflow_p != NULL
700 /* Symbolic range building sets TREE_NO_WARNING to declare
701 that overflow doesn't happen. */
702 && (!inv1 || !TREE_NO_WARNING (val1))
703 && (!inv2 || !TREE_NO_WARNING (val2)))
704 *strict_overflow_p = true;
706 if (!inv1)
707 inv1 = build_int_cst (TREE_TYPE (val1), 0);
708 if (!inv2)
709 inv2 = build_int_cst (TREE_TYPE (val2), 0);
711 return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
712 TYPE_SIGN (TREE_TYPE (val1)));
715 const bool cst1 = is_gimple_min_invariant (val1);
716 const bool cst2 = is_gimple_min_invariant (val2);
718 /* If one is of the form '[-]NAME + CST' and the other is constant, then
719 it might be possible to say something depending on the constants. */
720 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
722 if (!overflow_undefined)
723 return -2;
725 if (strict_overflow_p != NULL
726 /* Symbolic range building sets TREE_NO_WARNING to declare
727 that overflow doesn't happen. */
728 && (!sym1 || !TREE_NO_WARNING (val1))
729 && (!sym2 || !TREE_NO_WARNING (val2)))
730 *strict_overflow_p = true;
732 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
733 tree cst = cst1 ? val1 : val2;
734 tree inv = cst1 ? inv2 : inv1;
736 /* Compute the difference between the constants. If it overflows or
737 underflows, this means that we can trivially compare the NAME with
738 it and, consequently, the two values with each other. */
739 wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
740 if (wi::cmp (0, wi::to_wide (inv), sgn)
741 != wi::cmp (diff, wi::to_wide (cst), sgn))
743 const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
744 return cst1 ? res : -res;
747 return -2;
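/* Worked example (illustrative, signed char with undefined overflow):
   comparing VAL1 == 'n_5 + (-100)' with VAL2 == 120 computes
   DIFF == 120 - (-100), which wraps to -36 at 8-bit precision.
   Because wi::cmp (0, -100) == 1 while wi::cmp (-36, 120) == -1, the
   subtraction overflowed, so 'n_5 - 100' is comparable with 120 and
   the result is -1: 'n_5 - 100' < 120 for every n_5 that does not
   itself trigger undefined overflow.  */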
750 /* We cannot say anything more for non-constants. */
751 if (!cst1 || !cst2)
752 return -2;
754 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
756 /* We cannot compare overflowed values. */
757 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
758 return -2;
760 if (TREE_CODE (val1) == INTEGER_CST
761 && TREE_CODE (val2) == INTEGER_CST)
762 return tree_int_cst_compare (val1, val2);
764 if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
766 if (known_eq (wi::to_poly_widest (val1),
767 wi::to_poly_widest (val2)))
768 return 0;
769 if (known_lt (wi::to_poly_widest (val1),
770 wi::to_poly_widest (val2)))
771 return -1;
772 if (known_gt (wi::to_poly_widest (val1),
773 wi::to_poly_widest (val2)))
774 return 1;
777 return -2;
779 else
781 tree t;
783 /* First see if VAL1 and VAL2 are not the same. */
784 if (val1 == val2 || operand_equal_p (val1, val2, 0))
785 return 0;
787 /* If VAL1 is a lower address than VAL2, return -1. */
788 if (operand_less_p (val1, val2) == 1)
789 return -1;
791 /* If VAL1 is a higher address than VAL2, return +1. */
792 if (operand_less_p (val2, val1) == 1)
793 return 1;
795 /* If VAL1 is different than VAL2, return +2.
796 For integer constants we either have already returned -1 or 1
797 or they are equivalent. We still might succeed in proving
798 something about non-trivial operands. */
799 if (TREE_CODE (val1) != INTEGER_CST
800 || TREE_CODE (val2) != INTEGER_CST)
802 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
803 if (t && integer_onep (t))
804 return 2;
807 return -2;
811 /* Compare values like compare_values_warnv. */
813 int
814 compare_values (tree val1, tree val2)
816 bool sop;
817 return compare_values_warnv (val1, val2, &sop);
821 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
822 0 if VAL is not inside [MIN, MAX],
823 -2 if we cannot tell either way.
825 Benchmark compile/20001226-1.c compilation time after changing this
826 function. */
828 int
829 value_inside_range (tree val, tree min, tree max)
831 int cmp1, cmp2;
833 cmp1 = operand_less_p (val, min);
834 if (cmp1 == -2)
835 return -2;
836 if (cmp1 == 1)
837 return 0;
839 cmp2 = operand_less_p (max, val);
840 if (cmp2 == -2)
841 return -2;
843 return !cmp2;
847 /* Return TRUE if *VR includes the value zero. */
849 bool
850 range_includes_zero_p (const value_range *vr)
852 if (vr->type == VR_VARYING)
853 return true;
855 /* Ughh, we don't know. We choose not to optimize. */
856 if (vr->type == VR_UNDEFINED)
857 return true;
859 tree zero = build_int_cst (TREE_TYPE (vr->min), 0);
860 if (vr->type == VR_ANTI_RANGE)
862 int res = value_inside_range (zero, vr->min, vr->max);
863 return res == 0 || res == -2;
865 return value_inside_range (zero, vr->min, vr->max) != 0;
868 /* If *VR has a value range that is a single constant value return that,
869 otherwise return NULL_TREE. */
871 tree
872 value_range_constant_singleton (const value_range *vr)
874 if (vr->type == VR_RANGE
875 && vrp_operand_equal_p (vr->min, vr->max)
876 && is_gimple_min_invariant (vr->min))
877 return vr->min;
879 return NULL_TREE;
882 /* Value range wrapper for wide_int_range_set_zero_nonzero_bits.
884 Compute MAY_BE_NONZERO and MUST_BE_NONZERO bit masks for range in VR.
886 Return TRUE if VR was a constant range and we were able to compute
887 the bit masks. */
889 bool
890 vrp_set_zero_nonzero_bits (const tree expr_type,
891 const value_range *vr,
892 wide_int *may_be_nonzero,
893 wide_int *must_be_nonzero)
895 if (!range_int_cst_p (vr))
897 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
898 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
899 return false;
901 wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
902 wi::to_wide (vr->min),
903 wi::to_wide (vr->max),
904 *may_be_nonzero, *must_be_nonzero);
905 return true;
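/* Worked example (illustrative): for the 8-bit unsigned range [4, 7]
   every value has the form 0b1xx, so *MAY_BE_NONZERO == 0x7 (bits 0-2
   may be set) and *MUST_BE_NONZERO == 0x4 (bit 2 is set in every
   value).  */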
908 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
909 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
910 false otherwise. If *AR can be represented with a single range
911 *VR1 will be VR_UNDEFINED. */
913 static bool
914 ranges_from_anti_range (const value_range *ar,
915 value_range *vr0, value_range *vr1)
917 tree type = TREE_TYPE (ar->min);
919 vr0->type = VR_UNDEFINED;
920 vr1->type = VR_UNDEFINED;
922 /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
923 [A+1, +INF]. Not sure if this helps in practice, though. */
925 if (ar->type != VR_ANTI_RANGE
926 || TREE_CODE (ar->min) != INTEGER_CST
927 || TREE_CODE (ar->max) != INTEGER_CST
928 || !vrp_val_min (type)
929 || !vrp_val_max (type))
930 return false;
932 if (!vrp_val_is_min (ar->min))
934 vr0->type = VR_RANGE;
935 vr0->min = vrp_val_min (type);
936 vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1);
938 if (!vrp_val_is_max (ar->max))
940 vr1->type = VR_RANGE;
941 vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1);
942 vr1->max = vrp_val_max (type);
944 if (vr0->type == VR_UNDEFINED)
946 *vr0 = *vr1;
947 vr1->type = VR_UNDEFINED;
950 return vr0->type != VR_UNDEFINED;
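/* Examples (illustrative, 32-bit int): ~[3, 5] splits into
   *VR0 == [INT_MIN, 2] and *VR1 == [6, INT_MAX], whereas ~[INT_MIN, 5]
   needs only one range, so *VR0 == [6, INT_MAX] and *VR1 is left
   VR_UNDEFINED.  */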
953 /* Extract the components of a value range into a pair of wide ints in
954 [WMIN, WMAX].
956 If the value range is anything but a VR_*RANGE of constants, the
957 resulting wide ints are set to the type's [MIN, MAX]. */
959 static void inline
960 extract_range_into_wide_ints (const value_range *vr,
961 signop sign, unsigned prec,
962 wide_int &wmin, wide_int &wmax)
964 if ((vr->type == VR_RANGE
965 || vr->type == VR_ANTI_RANGE)
966 && TREE_CODE (vr->min) == INTEGER_CST
967 && TREE_CODE (vr->max) == INTEGER_CST)
969 wmin = wi::to_wide (vr->min);
970 wmax = wi::to_wide (vr->max);
972 else
974 wmin = wi::min_value (prec, sign);
975 wmax = wi::max_value (prec, sign);
979 /* Value range wrapper for wide_int_range_multiplicative_op:
981 *VR = *VR0 .CODE. *VR1. */
983 static void
984 extract_range_from_multiplicative_op (value_range *vr,
985 enum tree_code code,
986 const value_range *vr0,
987 const value_range *vr1)
989 gcc_assert (code == MULT_EXPR
990 || code == TRUNC_DIV_EXPR
991 || code == FLOOR_DIV_EXPR
992 || code == CEIL_DIV_EXPR
993 || code == EXACT_DIV_EXPR
994 || code == ROUND_DIV_EXPR
995 || code == RSHIFT_EXPR
996 || code == LSHIFT_EXPR);
997 gcc_assert (vr0->type == VR_RANGE && vr0->type == vr1->type);
999 tree type = TREE_TYPE (vr0->min);
1000 wide_int res_lb, res_ub;
1001 wide_int vr0_lb = wi::to_wide (vr0->min);
1002 wide_int vr0_ub = wi::to_wide (vr0->max);
1003 wide_int vr1_lb = wi::to_wide (vr1->min);
1004 wide_int vr1_ub = wi::to_wide (vr1->max);
1005 bool overflow_undefined = TYPE_OVERFLOW_UNDEFINED (type);
1006 bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
1007 unsigned prec = TYPE_PRECISION (type);
1009 if (wide_int_range_multiplicative_op (res_lb, res_ub,
1010 code, TYPE_SIGN (type), prec,
1011 vr0_lb, vr0_ub, vr1_lb, vr1_ub,
1012 overflow_undefined, overflow_wraps))
1013 set_and_canonicalize_value_range (vr, VR_RANGE,
1014 wide_int_to_tree (type, res_lb),
1015 wide_int_to_tree (type, res_ub), NULL);
1016 else
1017 set_value_range_to_varying (vr);
1020 /* If BOUND will include a symbolic bound, adjust it accordingly,
1021 otherwise leave it as is.
1023 CODE is the original operation that combined the bounds (PLUS_EXPR
1024 or MINUS_EXPR).
1026 TYPE is the type of the original operation.
1028 SYM_OPn is the symbolic part of OPn, if it has one.
1030 NEG_OPn is TRUE if OPn was negated. */
1032 static void
1033 adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
1034 tree sym_op0, tree sym_op1,
1035 bool neg_op0, bool neg_op1)
1037 bool minus_p = (code == MINUS_EXPR);
1038 /* If the result bound is constant, we're done; otherwise, build the
1039 symbolic bound. */
1040 if (sym_op0 == sym_op1)
1042 else if (sym_op0)
1043 bound = build_symbolic_expr (type, sym_op0,
1044 neg_op0, bound);
1045 else if (sym_op1)
1047 /* We may not negate if that might introduce
1048 undefined overflow. */
1049 if (!minus_p
1050 || neg_op1
1051 || TYPE_OVERFLOW_WRAPS (type))
1052 bound = build_symbolic_expr (type, sym_op1,
1053 neg_op1 ^ minus_p, bound);
1054 else
1055 bound = NULL_TREE;
1059 /* Combine OP0 and OP1, which are two parts of a bound, into one wide
1060 int bound according to CODE. CODE is the operation combining the
1061 bound (either a PLUS_EXPR or a MINUS_EXPR).
1063 TYPE is the type of the combine operation.
1065 WI is the wide int to store the result.
1067 OVF is set to OVF_UNDERFLOW if an underflow occurred, OVF_OVERFLOW
1068 if an overflow occurred, or OVF_NONE if neither occurred. */
1070 static void
1071 combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
1072 tree type, tree op0, tree op1)
1074 bool minus_p = (code == MINUS_EXPR);
1075 const signop sgn = TYPE_SIGN (type);
1076 const unsigned int prec = TYPE_PRECISION (type);
1078 /* Combine the bounds, if any. */
1079 if (op0 && op1)
1081 if (minus_p)
1082 wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
1083 else
1084 wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
1086 else if (op0)
1087 wi = wi::to_wide (op0);
1088 else if (op1)
1090 if (minus_p)
1091 wi = wi::neg (wi::to_wide (op1), &ovf);
1092 else
1093 wi = wi::to_wide (op1);
1095 else
1096 wi = wi::shwi (0, prec);
1099 /* Given a range in [WMIN, WMAX], adjust it for possible overflow and
1100 put the result in VR.
1102 TYPE is the type of the range.
1104 MIN_OVF and MAX_OVF indicate what type of overflow, if any,
1105 occurred while originally calculating WMIN or WMAX. OVF_UNDERFLOW
1106 indicates underflow, OVF_OVERFLOW overflow, and OVF_NONE neither. */
1108 static void
1109 set_value_range_with_overflow (value_range &vr,
1110 tree type,
1111 const wide_int &wmin, const wide_int &wmax,
1112 wi::overflow_type min_ovf,
1113 wi::overflow_type max_ovf)
1115 const signop sgn = TYPE_SIGN (type);
1116 const unsigned int prec = TYPE_PRECISION (type);
1117 vr.type = VR_RANGE;
1118 vr.equiv = NULL;
1119 if (TYPE_OVERFLOW_WRAPS (type))
1121 /* If overflow wraps, truncate the values and adjust the
1122 range kind and bounds appropriately. */
1123 wide_int tmin = wide_int::from (wmin, prec, sgn);
1124 wide_int tmax = wide_int::from (wmax, prec, sgn);
1125 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
1127 /* No overflow or both overflow or underflow. The
1128 range kind stays VR_RANGE. */
1129 vr.min = wide_int_to_tree (type, tmin);
1130 vr.max = wide_int_to_tree (type, tmax);
1132 else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
1133 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
1135 /* Min underflow or max overflow. The range kind
1136 changes to VR_ANTI_RANGE. */
1137 bool covers = false;
1138 wide_int tem = tmin;
1139 vr.type = VR_ANTI_RANGE;
1140 tmin = tmax + 1;
1141 if (wi::cmp (tmin, tmax, sgn) < 0)
1142 covers = true;
1143 tmax = tem - 1;
1144 if (wi::cmp (tmax, tem, sgn) > 0)
1145 covers = true;
1146 /* If the anti-range would cover nothing, drop to varying.
1147 Likewise if the anti-range bounds are outside of the
1148 type's values. */
1149 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
1151 set_value_range_to_varying (&vr);
1152 return;
1154 vr.min = wide_int_to_tree (type, tmin);
1155 vr.max = wide_int_to_tree (type, tmax);
1157 else
1159 /* Other underflow and/or overflow, drop to VR_VARYING. */
1160 set_value_range_to_varying (&vr);
1161 return;
1164 else
1166 /* If overflow does not wrap, saturate to the type's min/max
1167 value. */
1168 wide_int type_min = wi::min_value (prec, sgn);
1169 wide_int type_max = wi::max_value (prec, sgn);
1170 if (min_ovf == wi::OVF_UNDERFLOW)
1171 vr.min = wide_int_to_tree (type, type_min);
1172 else if (min_ovf == wi::OVF_OVERFLOW)
1173 vr.min = wide_int_to_tree (type, type_max);
1174 else
1175 vr.min = wide_int_to_tree (type, wmin);
1177 if (max_ovf == wi::OVF_UNDERFLOW)
1178 vr.max = wide_int_to_tree (type, type_min);
1179 else if (max_ovf == wi::OVF_OVERFLOW)
1180 vr.max = wide_int_to_tree (type, type_max);
1181 else
1182 vr.max = wide_int_to_tree (type, wmax);
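/* Worked example (illustrative, 8-bit unsigned with wrapping
   overflow): for [200, 250] + [10, 20], combine_bound produces
   WMIN == 210 with MIN_OVF == OVF_NONE and WMAX == 270 with
   MAX_OVF == OVF_OVERFLOW.  Only the upper bound overflowed, so the
   result is the anti-range ~[15, 209], i.e. [210, 255] U [0, 14].  */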
1186 /* Extract range information from a binary operation CODE based on
1187 the ranges of each of its operands *VR0 and *VR1 with resulting
1188 type EXPR_TYPE. The resulting range is stored in *VR. */
1190 void
1191 extract_range_from_binary_expr_1 (value_range *vr,
1192 enum tree_code code, tree expr_type,
1193 const value_range *vr0_,
1194 const value_range *vr1_)
1196 signop sign = TYPE_SIGN (expr_type);
1197 unsigned int prec = TYPE_PRECISION (expr_type);
1198 value_range vr0 = *vr0_, vr1 = *vr1_;
1199 value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
1200 enum value_range_type type;
1201 tree min = NULL_TREE, max = NULL_TREE;
1202 int cmp;
1204 if (!INTEGRAL_TYPE_P (expr_type)
1205 && !POINTER_TYPE_P (expr_type))
1207 set_value_range_to_varying (vr);
1208 return;
1211 /* Not all binary expressions can be applied to ranges in a
1212 meaningful way. Handle only arithmetic operations. */
1213 if (code != PLUS_EXPR
1214 && code != MINUS_EXPR
1215 && code != POINTER_PLUS_EXPR
1216 && code != MULT_EXPR
1217 && code != TRUNC_DIV_EXPR
1218 && code != FLOOR_DIV_EXPR
1219 && code != CEIL_DIV_EXPR
1220 && code != EXACT_DIV_EXPR
1221 && code != ROUND_DIV_EXPR
1222 && code != TRUNC_MOD_EXPR
1223 && code != RSHIFT_EXPR
1224 && code != LSHIFT_EXPR
1225 && code != MIN_EXPR
1226 && code != MAX_EXPR
1227 && code != BIT_AND_EXPR
1228 && code != BIT_IOR_EXPR
1229 && code != BIT_XOR_EXPR)
1231 set_value_range_to_varying (vr);
1232 return;
1235 /* If both ranges are UNDEFINED, so is the result. */
1236 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
1238 set_value_range_to_undefined (vr);
1239 return;
1241 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
1242 code. At some point we may want to special-case operations that
1243 have UNDEFINED result for all or some value-ranges of the not UNDEFINED
1244 operand. */
1245 else if (vr0.type == VR_UNDEFINED)
1246 set_value_range_to_varying (&vr0);
1247 else if (vr1.type == VR_UNDEFINED)
1248 set_value_range_to_varying (&vr1);
1250 /* We get imprecise results from ranges_from_anti_range when
1251 code is EXACT_DIV_EXPR. We could mask out bits in the resulting
1252 range, but then we also need to hack up vrp_meet. It's just
1253 easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR. */
1254 if (code == EXACT_DIV_EXPR
1255 && vr0.type == VR_ANTI_RANGE
1256 && vr0.min == vr0.max
1257 && integer_zerop (vr0.min))
1259 set_value_range_to_nonnull (vr, expr_type);
1260 return;
1263 /* Now canonicalize anti-ranges to ranges when they are not symbolic
1264 and express ~[] op X as ([]' op X) U ([]'' op X). */
1265 if (vr0.type == VR_ANTI_RANGE
1266 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
1268 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
1269 if (vrtem1.type != VR_UNDEFINED)
1271 value_range vrres = VR_INITIALIZER;
1272 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
1273 &vrtem1, vr1_);
1274 vrp_meet (vr, &vrres);
1276 return;
1278 /* Likewise for X op ~[]. */
1279 if (vr1.type == VR_ANTI_RANGE
1280 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
1282 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
1283 if (vrtem1.type != VR_UNDEFINED)
1285 value_range vrres = VR_INITIALIZER;
1286 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
1287 vr0_, &vrtem1);
1288 vrp_meet (vr, &vrres);
1290 return;
1293 /* The type of the resulting value range defaults to VR0.TYPE. */
1294 type = vr0.type;
1296 /* Refuse to operate on VARYING ranges, ranges of different kinds
1297 and symbolic ranges. As an exception, we allow BIT_{AND,IOR}
1298 because we may be able to derive a useful range even if one of
1299 the operands is VR_VARYING or symbolic range. Similarly for
1300 divisions, MIN/MAX and PLUS/MINUS.
1302 TODO, we may be able to derive anti-ranges in some cases. */
1303 if (code != BIT_AND_EXPR
1304 && code != BIT_IOR_EXPR
1305 && code != TRUNC_DIV_EXPR
1306 && code != FLOOR_DIV_EXPR
1307 && code != CEIL_DIV_EXPR
1308 && code != EXACT_DIV_EXPR
1309 && code != ROUND_DIV_EXPR
1310 && code != TRUNC_MOD_EXPR
1311 && code != MIN_EXPR
1312 && code != MAX_EXPR
1313 && code != PLUS_EXPR
1314 && code != MINUS_EXPR
1315 && code != RSHIFT_EXPR
1316 && code != POINTER_PLUS_EXPR
1317 && (vr0.type == VR_VARYING
1318 || vr1.type == VR_VARYING
1319 || vr0.type != vr1.type
1320 || symbolic_range_p (&vr0)
1321 || symbolic_range_p (&vr1)))
1323 set_value_range_to_varying (vr);
1324 return;
1327 /* Now evaluate the expression to determine the new range. */
1328 if (POINTER_TYPE_P (expr_type))
1330 if (code == MIN_EXPR || code == MAX_EXPR)
1332 /* For MIN/MAX expressions with pointers, we only care about
1333 nullness: if both operands are non-null, the result is non-null;
1334 if both are null, the result is null; otherwise the result
1335 is varying. */
1336 if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
1337 set_value_range_to_nonnull (vr, expr_type);
1338 else if (range_is_null (&vr0) && range_is_null (&vr1))
1339 set_value_range_to_null (vr, expr_type);
1340 else
1341 set_value_range_to_varying (vr);
1343 else if (code == POINTER_PLUS_EXPR)
1345 /* For pointer types, we are really only interested in asserting
1346 whether the expression evaluates to non-NULL. */
1347 if (!range_includes_zero_p (&vr0)
1348 || !range_includes_zero_p (&vr1))
1349 set_value_range_to_nonnull (vr, expr_type);
1350 else if (range_is_null (&vr0) && range_is_null (&vr1))
1351 set_value_range_to_null (vr, expr_type);
1352 else
1353 set_value_range_to_varying (vr);
1355 else if (code == BIT_AND_EXPR)
1357 /* For pointer types, we are really only interested in asserting
1358 whether the expression evaluates to non-NULL. */
1359 if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
1360 set_value_range_to_nonnull (vr, expr_type);
1361 else if (range_is_null (&vr0) || range_is_null (&vr1))
1362 set_value_range_to_null (vr, expr_type);
1363 else
1364 set_value_range_to_varying (vr);
1366 else
1367 set_value_range_to_varying (vr);
1369 return;
1372 /* For integer ranges, apply the operation to each end of the
1373 range and see what we end up with. */
1374 if (code == PLUS_EXPR || code == MINUS_EXPR)
1376 /* This will normalize things such that calculating
1377 [0,0] - VR_VARYING is not dropped to varying, but is
1378 calculated as [MIN+1, MAX]. */
1379 if (vr0.type == VR_VARYING)
1381 vr0.type = VR_RANGE;
1382 vr0.min = vrp_val_min (expr_type);
1383 vr0.max = vrp_val_max (expr_type);
1385 if (vr1.type == VR_VARYING)
1387 vr1.type = VR_RANGE;
1388 vr1.min = vrp_val_min (expr_type);
1389 vr1.max = vrp_val_max (expr_type);
1392 const bool minus_p = (code == MINUS_EXPR);
1393 tree min_op0 = vr0.min;
1394 tree min_op1 = minus_p ? vr1.max : vr1.min;
1395 tree max_op0 = vr0.max;
1396 tree max_op1 = minus_p ? vr1.min : vr1.max;
1397 tree sym_min_op0 = NULL_TREE;
1398 tree sym_min_op1 = NULL_TREE;
1399 tree sym_max_op0 = NULL_TREE;
1400 tree sym_max_op1 = NULL_TREE;
1401 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
1403 neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
1405 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
1406 single-symbolic ranges, try to compute the precise resulting range,
1407 but only if we know that this resulting range will also be constant
1408 or single-symbolic. */
1409 if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
1410 && (TREE_CODE (min_op0) == INTEGER_CST
1411 || (sym_min_op0
1412 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
1413 && (TREE_CODE (min_op1) == INTEGER_CST
1414 || (sym_min_op1
1415 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
1416 && (!(sym_min_op0 && sym_min_op1)
1417 || (sym_min_op0 == sym_min_op1
1418 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
1419 && (TREE_CODE (max_op0) == INTEGER_CST
1420 || (sym_max_op0
1421 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
1422 && (TREE_CODE (max_op1) == INTEGER_CST
1423 || (sym_max_op1
1424 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
1425 && (!(sym_max_op0 && sym_max_op1)
1426 || (sym_max_op0 == sym_max_op1
1427 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
1429 wide_int wmin, wmax;
1430 wi::overflow_type min_ovf = wi::OVF_NONE;
1431 wi::overflow_type max_ovf = wi::OVF_NONE;
1433 /* Build the bounds. */
1434 combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
1435 combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
1437 /* If we have overflow for the constant part and the resulting
1438 range will be symbolic, drop to VR_VARYING. */
1439 if (((bool)min_ovf && sym_min_op0 != sym_min_op1)
1440 || ((bool)max_ovf && sym_max_op0 != sym_max_op1))
1442 set_value_range_to_varying (vr);
1443 return;
1446 /* Adjust the range for possible overflow. */
1447 set_value_range_with_overflow (*vr, expr_type,
1448 wmin, wmax, min_ovf, max_ovf);
1449 if (vr->type == VR_VARYING)
1450 return;
1452 /* Build the symbolic bounds if needed. */
1453 adjust_symbolic_bound (vr->min, code, expr_type,
1454 sym_min_op0, sym_min_op1,
1455 neg_min_op0, neg_min_op1);
1456 adjust_symbolic_bound (vr->max, code, expr_type,
1457 sym_max_op0, sym_max_op1,
1458 neg_max_op0, neg_max_op1);
1459 /* ?? It would probably be cleaner to eliminate min/max/type
1460 entirely and hold these values in VR directly. */
1461 min = vr->min;
1462 max = vr->max;
1463 type = vr->type;
1465 else
1467 /* For other cases, for example if we have a PLUS_EXPR with two
1468 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
1469 to compute a precise range for such a case.
1470 ??? Even general mixed range kind operations could be expressed,
1471 for example, by transforming ~[3, 5] + [1, 2] into range-only
1472 operations and a union primitive:
1473 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
1474 [-INF+1, 4] U [6, +INF(OVF)]
1475 though usually the union is not exactly representable with
1476 a single range or anti-range as the above is
1477 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
1478 but one could use a scheme similar to equivalences for this. */
1479 set_value_range_to_varying (vr);
1480 return;
1483 else if (code == MIN_EXPR
1484 || code == MAX_EXPR)
1486 wide_int wmin, wmax;
1487 wide_int vr0_min, vr0_max;
1488 wide_int vr1_min, vr1_max;
1489 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1490 extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
1491 if (wide_int_range_min_max (wmin, wmax, code, sign, prec,
1492 vr0_min, vr0_max, vr1_min, vr1_max))
1493 set_value_range (vr, VR_RANGE,
1494 wide_int_to_tree (expr_type, wmin),
1495 wide_int_to_tree (expr_type, wmax), NULL);
1496 else
1497 set_value_range_to_varying (vr);
1498 return;
1500 else if (code == MULT_EXPR)
1502 if (!range_int_cst_p (&vr0)
1503 || !range_int_cst_p (&vr1))
1505 set_value_range_to_varying (vr);
1506 return;
1508 extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
1509 return;
1511 else if (code == RSHIFT_EXPR
1512 || code == LSHIFT_EXPR)
1514 if (range_int_cst_p (&vr1)
1515 && !wide_int_range_shift_undefined_p (prec,
1516 wi::to_wide (vr1.min),
1517 wi::to_wide (vr1.max)))
1519 if (code == RSHIFT_EXPR)
1521 /* Even if vr0 is VARYING or otherwise not usable, we can derive
1522 useful ranges just from the shift count. E.g.
1523 x >> 63 for signed 64-bit x is always [-1, 0]. */
1524 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
1526 vr0.type = type = VR_RANGE;
1527 vr0.min = vrp_val_min (expr_type);
1528 vr0.max = vrp_val_max (expr_type);
1530 extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
1531 return;
1533 else if (code == LSHIFT_EXPR
1534 && range_int_cst_p (&vr0))
1536 wide_int res_lb, res_ub;
1537 if (wide_int_range_lshift (res_lb, res_ub, sign, prec,
1538 wi::to_wide (vr0.min),
1539 wi::to_wide (vr0.max),
1540 wi::to_wide (vr1.min),
1541 wi::to_wide (vr1.max),
1542 TYPE_OVERFLOW_UNDEFINED (expr_type),
1543 TYPE_OVERFLOW_WRAPS (expr_type)))
1545 min = wide_int_to_tree (expr_type, res_lb);
1546 max = wide_int_to_tree (expr_type, res_ub);
1547 set_and_canonicalize_value_range (vr, VR_RANGE,
1548 min, max, NULL);
1549 return;
1553 set_value_range_to_varying (vr);
1554 return;
1556 else if (code == TRUNC_DIV_EXPR
1557 || code == FLOOR_DIV_EXPR
1558 || code == CEIL_DIV_EXPR
1559 || code == EXACT_DIV_EXPR
1560 || code == ROUND_DIV_EXPR)
1562 wide_int dividend_min, dividend_max, divisor_min, divisor_max;
1563 wide_int wmin, wmax, extra_min, extra_max;
1564 bool extra_range_p;
1566 /* Special case explicit division by zero as undefined. */
1567 if (range_is_null (&vr1))
1569 set_value_range_to_undefined (vr);
1570 return;
1573 /* First, normalize ranges into constants we can handle. Note
1574 that VR_ANTI_RANGE's of constants were already normalized
1575 before arriving here.
1577 NOTE: As a future improvement, we may be able to do better
1578 with mixed symbolic (anti-)ranges like [0, A]. See note in
1579 ranges_from_anti_range. */
1580 extract_range_into_wide_ints (&vr0, sign, prec,
1581 dividend_min, dividend_max);
1582 extract_range_into_wide_ints (&vr1, sign, prec,
1583 divisor_min, divisor_max);
1584 if (!wide_int_range_div (wmin, wmax, code, sign, prec,
1585 dividend_min, dividend_max,
1586 divisor_min, divisor_max,
1587 TYPE_OVERFLOW_UNDEFINED (expr_type),
1588 TYPE_OVERFLOW_WRAPS (expr_type),
1589 extra_range_p, extra_min, extra_max))
1591 set_value_range_to_varying (vr);
1592 return;
1594 set_value_range (vr, VR_RANGE,
1595 wide_int_to_tree (expr_type, wmin),
1596 wide_int_to_tree (expr_type, wmax), NULL);
1597 if (extra_range_p)
1599 value_range extra_range = VR_INITIALIZER;
1600 set_value_range (&extra_range, VR_RANGE,
1601 wide_int_to_tree (expr_type, extra_min),
1602 wide_int_to_tree (expr_type, extra_max), NULL);
1603 vrp_meet (vr, &extra_range);
1605 return;
1607 else if (code == TRUNC_MOD_EXPR)
1609 if (range_is_null (&vr1))
1611 set_value_range_to_undefined (vr);
1612 return;
1614 wide_int wmin, wmax, tmp;
1615 wide_int vr0_min, vr0_max, vr1_min, vr1_max;
1616 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1617 extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
1618 wide_int_range_trunc_mod (wmin, wmax, sign, prec,
1619 vr0_min, vr0_max, vr1_min, vr1_max);
1620 min = wide_int_to_tree (expr_type, wmin);
1621 max = wide_int_to_tree (expr_type, wmax);
1622 set_value_range (vr, VR_RANGE, min, max, NULL);
1623 return;
1625 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
1627 wide_int may_be_nonzero0, may_be_nonzero1;
1628 wide_int must_be_nonzero0, must_be_nonzero1;
1629 wide_int wmin, wmax;
1630 wide_int vr0_min, vr0_max, vr1_min, vr1_max;
1631 vrp_set_zero_nonzero_bits (expr_type, &vr0,
1632 &may_be_nonzero0, &must_be_nonzero0);
1633 vrp_set_zero_nonzero_bits (expr_type, &vr1,
1634 &may_be_nonzero1, &must_be_nonzero1);
1635 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1636 extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
1637 if (code == BIT_AND_EXPR)
1639 if (wide_int_range_bit_and (wmin, wmax, sign, prec,
1640 vr0_min, vr0_max,
1641 vr1_min, vr1_max,
1642 must_be_nonzero0,
1643 may_be_nonzero0,
1644 must_be_nonzero1,
1645 may_be_nonzero1))
1647 min = wide_int_to_tree (expr_type, wmin);
1648 max = wide_int_to_tree (expr_type, wmax);
1649 set_value_range (vr, VR_RANGE, min, max, NULL);
1651 else
1652 set_value_range_to_varying (vr);
1653 return;
1655 else if (code == BIT_IOR_EXPR)
1657 if (wide_int_range_bit_ior (wmin, wmax, sign,
1658 vr0_min, vr0_max,
1659 vr1_min, vr1_max,
1660 must_be_nonzero0,
1661 may_be_nonzero0,
1662 must_be_nonzero1,
1663 may_be_nonzero1))
1665 min = wide_int_to_tree (expr_type, wmin);
1666 max = wide_int_to_tree (expr_type, wmax);
1667 set_value_range (vr, VR_RANGE, min, max, NULL);
1669 else
1670 set_value_range_to_varying (vr);
1671 return;
1673 else if (code == BIT_XOR_EXPR)
1675 if (wide_int_range_bit_xor (wmin, wmax, sign, prec,
1676 must_be_nonzero0,
1677 may_be_nonzero0,
1678 must_be_nonzero1,
1679 may_be_nonzero1))
1681 min = wide_int_to_tree (expr_type, wmin);
1682 max = wide_int_to_tree (expr_type, wmax);
1683 set_value_range (vr, VR_RANGE, min, max, NULL);
1685 else
1686 set_value_range_to_varying (vr);
1687 return;
1690 else
1691 gcc_unreachable ();
1693 /* If either MIN or MAX overflowed, then set the resulting range to
1694 VARYING. */
1695 if (min == NULL_TREE
1696 || TREE_OVERFLOW_P (min)
1697 || max == NULL_TREE
1698 || TREE_OVERFLOW_P (max))
1700 set_value_range_to_varying (vr);
1701 return;
1704 /* We punt for [-INF, +INF].
1705 We learn nothing when we have INF on both sides.
1706 Note that we do accept [-INF, -INF] and [+INF, +INF]. */
1707 if (vrp_val_is_min (min) && vrp_val_is_max (max))
1709 set_value_range_to_varying (vr);
1710 return;
1713 cmp = compare_values (min, max);
1714 if (cmp == -2 || cmp == 1)
1716 /* If the new range has its limits swapped around (MIN > MAX),
1717 then the operation caused one of them to wrap around; mark
1718 the new range VARYING. */
1719 set_value_range_to_varying (vr);
1721 else
1722 set_value_range (vr, type, min, max, NULL);
1725 /* Extract range information from a unary operation CODE based on
1726 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
1727 The resulting range is stored in *VR. */
1729 void
1730 extract_range_from_unary_expr (value_range *vr,
1731 enum tree_code code, tree type,
1732 const value_range *vr0_, tree op0_type)
1734 signop sign = TYPE_SIGN (type);
1735 unsigned int prec = TYPE_PRECISION (type);
1736 value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
1738 /* VRP only operates on integral and pointer types. */
1739 if (!(INTEGRAL_TYPE_P (op0_type)
1740 || POINTER_TYPE_P (op0_type))
1741 || !(INTEGRAL_TYPE_P (type)
1742 || POINTER_TYPE_P (type)))
1744 set_value_range_to_varying (vr);
1745 return;
1748 /* If VR0 is UNDEFINED, so is the result. */
1749 if (vr0.type == VR_UNDEFINED)
1751 set_value_range_to_undefined (vr);
1752 return;
1755 /* Handle operations that we express in terms of others. */
1756 if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
1758 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */
1759 copy_value_range (vr, &vr0);
1760 return;
1762 else if (code == NEGATE_EXPR)
1764 /* -X is simply 0 - X, so re-use existing code that also handles
1765 anti-ranges fine. */
1766 value_range zero = VR_INITIALIZER;
1767 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
1768 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
1769 return;
1771 else if (code == BIT_NOT_EXPR)
1773 /* ~X is simply -1 - X, so re-use existing code that also handles
1774 anti-ranges fine. */
1775 value_range minusone = VR_INITIALIZER;
1776 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
1777 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
1778 type, &minusone, &vr0);
1779 return;
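/* Example (illustrative): for X in [3, 5], BIT_NOT gives
   ~X == -1 - X in [-6, -4], and NEGATE gives -X == 0 - X in [-5, -3].  */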
1782 /* Now canonicalize anti-ranges to ranges when they are not symbolic
1783 and express op ~[] as (op []') U (op []''). */
1784 if (vr0.type == VR_ANTI_RANGE
1785 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
1787 extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
1788 if (vrtem1.type != VR_UNDEFINED)
1790 value_range vrres = VR_INITIALIZER;
1791 extract_range_from_unary_expr (&vrres, code, type,
1792 &vrtem1, op0_type);
1793 vrp_meet (vr, &vrres);
1795 return;
1798 if (CONVERT_EXPR_CODE_P (code))
1800 tree inner_type = op0_type;
1801 tree outer_type = type;
1803 /* If the expression evaluates to a pointer, we are only interested in
1804 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
1805 if (POINTER_TYPE_P (type))
1807 if (!range_includes_zero_p (&vr0))
1808 set_value_range_to_nonnull (vr, type);
1809 else if (range_is_null (&vr0))
1810 set_value_range_to_null (vr, type);
1811 else
1812 set_value_range_to_varying (vr);
1813 return;
1816 /* We normalize everything to a VR_RANGE, but for constant
1817 anti-ranges we must handle them by leaving the final result
1818 as an anti range. This allows us to convert things like
1819 ~[0,5] seamlessly. */
1820 value_range_type vr_type = VR_RANGE;
1821 if (vr0.type == VR_ANTI_RANGE
1822 && TREE_CODE (vr0.min) == INTEGER_CST
1823 && TREE_CODE (vr0.max) == INTEGER_CST)
1824 vr_type = VR_ANTI_RANGE;
1826 /* NOTES: Previously we were returning VARYING for all symbolics, but
1827 we can do better by treating them as [TYPE_MIN, TYPE_MAX]. For
1828 example, converting [SYM, SYM] from INT to LONG UNSIGNED,
1829 we can return: ~[0x80000000, 0xffffffff7fffffff].
1831 We were also failing to convert ~[0,0] from char* to unsigned,
1832 instead choosing to return VR_VARYING. Now we return ~[0,0]. */
1833 wide_int vr0_min, vr0_max, wmin, wmax;
1834 signop inner_sign = TYPE_SIGN (inner_type);
1835 signop outer_sign = TYPE_SIGN (outer_type);
1836 unsigned inner_prec = TYPE_PRECISION (inner_type);
1837 unsigned outer_prec = TYPE_PRECISION (outer_type);
1838 extract_range_into_wide_ints (&vr0, inner_sign, inner_prec,
1839 vr0_min, vr0_max);
1840 if (wide_int_range_convert (wmin, wmax,
1841 inner_sign, inner_prec,
1842 outer_sign, outer_prec,
1843 vr0_min, vr0_max))
1845 tree min = wide_int_to_tree (outer_type, wmin);
1846 tree max = wide_int_to_tree (outer_type, wmax);
1847 set_and_canonicalize_value_range (vr, vr_type, min, max, NULL);
1849 else
1850 set_value_range_to_varying (vr);
1851 return;
1853 else if (code == ABS_EXPR)
1855 wide_int wmin, wmax;
1856 wide_int vr0_min, vr0_max;
1857 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1858 if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max,
1859 TYPE_OVERFLOW_UNDEFINED (type)))
1860 set_value_range (vr, VR_RANGE,
1861 wide_int_to_tree (type, wmin),
1862 wide_int_to_tree (type, wmax), NULL);
1863 else
1864 set_value_range_to_varying (vr);
1865 return;
1868 /* For unhandled operations fall back to varying. */
1869 set_value_range_to_varying (vr);
1870 return;
1873 /* Debugging dumps. */
1875 void dump_value_range (FILE *, const value_range *);
1876 void debug_value_range (const value_range *);
1877 void dump_all_value_ranges (FILE *);
1878 void dump_vr_equiv (FILE *, bitmap);
1879 void debug_vr_equiv (bitmap);
1882 /* Dump value range VR to FILE. */
1884 void
1885 dump_value_range (FILE *file, const value_range *vr)
1887 if (vr == NULL)
1888 fprintf (file, "[]");
1889 else if (vr->type == VR_UNDEFINED)
1890 fprintf (file, "UNDEFINED");
1891 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
1893 tree type = TREE_TYPE (vr->min);
1895 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
1897 if (INTEGRAL_TYPE_P (type)
1898 && !TYPE_UNSIGNED (type)
1899 && vrp_val_is_min (vr->min))
1900 fprintf (file, "-INF");
1901 else
1902 print_generic_expr (file, vr->min);
1904 fprintf (file, ", ");
1906 if (INTEGRAL_TYPE_P (type)
1907 && vrp_val_is_max (vr->max))
1908 fprintf (file, "+INF");
1909 else
1910 print_generic_expr (file, vr->max);
1912 fprintf (file, "]");
1914 if (vr->equiv)
1916 bitmap_iterator bi;
1917 unsigned i, c = 0;
1919 fprintf (file, " EQUIVALENCES: { ");
1921 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
1923 print_generic_expr (file, ssa_name (i));
1924 fprintf (file, " ");
1925 c++;
1928 fprintf (file, "} (%u elements)", c);
1931 else if (vr->type == VR_VARYING)
1932 fprintf (file, "VARYING");
1933 else
1934 fprintf (file, "INVALID RANGE");
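/* Example output (illustrative): a non-NULL pointer range prints as
   "~[0B, 0B]", a signed range bounded below by the type minimum prints
   as "[-INF, 42]", and equivalences are appended as
   " EQUIVALENCES: { x_3 } (1 elements)".  */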
1938 /* Dump value range VR to stderr. */
1940 DEBUG_FUNCTION void
1941 debug_value_range (const value_range *vr)
1943 dump_value_range (stderr, vr);
1944 fprintf (stderr, "\n");
1947 void
1948 value_range::dump () const
1950 debug_value_range (this);
1954 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
1955 create a new SSA name N and return the assertion assignment
1956 'N = ASSERT_EXPR <V, V OP W>'. */
1958 static gimple *
1959 build_assert_expr_for (tree cond, tree v)
1961 tree a;
1962 gassign *assertion;
1964 gcc_assert (TREE_CODE (v) == SSA_NAME
1965 && COMPARISON_CLASS_P (cond));
1967 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
1968 assertion = gimple_build_assign (NULL_TREE, a);
1970 /* The new ASSERT_EXPR creates a new SSA name that replaces the
1971 operand of the ASSERT_EXPR. Create it so the new name and the old one
1972 are registered in the replacement table so that we can fix the SSA web
1973 after adding all the ASSERT_EXPRs. */
1974 tree new_def = create_new_def_for (v, assertion, NULL);
1975 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
1976 given we have to be able to fully propagate those out to re-create
1977 valid SSA when removing the asserts. */
1978 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
1979 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
1981 return assertion;
1985 /* Return true if STMT is a GIMPLE_COND whose predicate involves floating
1986 point values.  */
1988 static inline bool
1989 fp_predicate (gimple *stmt)
1991 GIMPLE_CHECK (stmt, GIMPLE_COND);
1993 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
1996 /* If the range of values taken by OP can be inferred after STMT executes,
1997 return the comparison code (COMP_CODE_P) and value (VAL_P) that
1998 describes the inferred range. Return true if a range could be
1999 inferred. */
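/* Worked example (editorial addition; names are hypothetical): for a
   statement such as '*p_1 = 5;', the dereference lets us infer that
   p_1 is non-null, so *COMP_CODE_P is set to NE_EXPR and *VAL_P to
   the zero constant of p_1's type.  */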
2001 bool
2002 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2004 *val_p = NULL_TREE;
2005 *comp_code_p = ERROR_MARK;
2007 /* Do not attempt to infer anything in names that flow through
2008 abnormal edges. */
2009 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2010 return false;
2012 /* If STMT is the last statement of a basic block with no normal
2013 successors, there is no point inferring anything about any of its
2014 operands. We would not be able to find a proper insertion point
2015 for the assertion, anyway. */
2016 if (stmt_ends_bb_p (stmt))
2018 edge_iterator ei;
2019 edge e;
2021 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2022 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2023 break;
2024 if (e == NULL)
2025 return false;
2028 if (infer_nonnull_range (stmt, op))
2030 *val_p = build_int_cst (TREE_TYPE (op), 0);
2031 *comp_code_p = NE_EXPR;
2032 return true;
2035 return false;
2039 void dump_asserts_for (FILE *, tree);
2040 void debug_asserts_for (tree);
2041 void dump_all_asserts (FILE *);
2042 void debug_all_asserts (void);
2044 /* Dump all the registered assertions for NAME to FILE. */
2046 void
2047 dump_asserts_for (FILE *file, tree name)
2049 assert_locus *loc;
2051 fprintf (file, "Assertions to be inserted for ");
2052 print_generic_expr (file, name);
2053 fprintf (file, "\n");
2055 loc = asserts_for[SSA_NAME_VERSION (name)];
2056 while (loc)
2058 fprintf (file, "\t");
2059 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2060 fprintf (file, "\n\tBB #%d", loc->bb->index);
2061 if (loc->e)
2063 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2064 loc->e->dest->index);
2065 dump_edge_info (file, loc->e, dump_flags, 0);
2067 fprintf (file, "\n\tPREDICATE: ");
2068 print_generic_expr (file, loc->expr);
2069 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2070 print_generic_expr (file, loc->val);
2071 fprintf (file, "\n\n");
2072 loc = loc->next;
2075 fprintf (file, "\n");
2079 /* Dump all the registered assertions for NAME to stderr. */
2081 DEBUG_FUNCTION void
2082 debug_asserts_for (tree name)
2084 dump_asserts_for (stderr, name);
2088 /* Dump all the registered assertions for all the names to FILE. */
2090 void
2091 dump_all_asserts (FILE *file)
2093 unsigned i;
2094 bitmap_iterator bi;
2096 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2097 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2098 dump_asserts_for (file, ssa_name (i));
2099 fprintf (file, "\n");
2103 /* Dump all the registered assertions for all the names to stderr. */
2105 DEBUG_FUNCTION void
2106 debug_all_asserts (void)
2108 dump_all_asserts (stderr);
2111 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
2113 static void
2114 add_assert_info (vec<assert_info> &asserts,
2115 tree name, tree expr, enum tree_code comp_code, tree val)
2117 assert_info info;
2118 info.comp_code = comp_code;
2119 info.name = name;
2120 if (TREE_OVERFLOW_P (val))
2121 val = drop_tree_overflow (val);
2122 info.val = val;
2123 info.expr = expr;
2124 asserts.safe_push (info);
2127 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2128 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2129 E->DEST, then register this location as a possible insertion point
2130 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2132 BB, E and SI provide the exact insertion point for the new
2133 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2134 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2135 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2136 must not be NULL. */
2138 static void
2139 register_new_assert_for (tree name, tree expr,
2140 enum tree_code comp_code,
2141 tree val,
2142 basic_block bb,
2143 edge e,
2144 gimple_stmt_iterator si)
2146 assert_locus *n, *loc, *last_loc;
2147 basic_block dest_bb;
2149 gcc_checking_assert (bb == NULL || e == NULL);
2151 if (e == NULL)
2152 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2153 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2155 /* Never build an assert comparing against an integer constant with
2156 TREE_OVERFLOW set. This confuses our undefined overflow warning
2157 machinery. */
2158 if (TREE_OVERFLOW_P (val))
2159 val = drop_tree_overflow (val);
2161 /* The new assertion A will be inserted at BB or E. We need to
2162 determine if the new location is dominated by a previously
2163 registered location for A. If we are doing an edge insertion,
2164 assume that A will be inserted at E->DEST. Note that this is not
2165 necessarily true.
2167 If E is a critical edge, it will be split. But even if E is
2168 split, the new block will dominate the same set of blocks that
2169 E->DEST dominates.
2171 The reverse, however, is not true: blocks dominated by E->DEST
2172 will not be dominated by the new block created to split E. So,
2173 if the insertion location is on a critical edge, we will not use
2174 the new location to move another assertion previously registered
2175 at a block dominated by E->DEST. */
2176 dest_bb = (bb) ? bb : e->dest;
2178 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2179 VAL at a block dominating DEST_BB, then we don't need to insert a new
2180 one. Similarly, if the same assertion already exists at a block
2181 dominated by DEST_BB and the new location is not on a critical
2182 edge, then update the existing location for the assertion (i.e.,
2183 move the assertion up in the dominance tree).
2185 Note, this is implemented as a simple linked list because there
2186 should not be more than a handful of assertions registered per
2187 name. If this becomes a performance problem, a table hashed by
2188 COMP_CODE and VAL could be implemented. */
2189 loc = asserts_for[SSA_NAME_VERSION (name)];
2190 last_loc = loc;
2191 while (loc)
2193 if (loc->comp_code == comp_code
2194 && (loc->val == val
2195 || operand_equal_p (loc->val, val, 0))
2196 && (loc->expr == expr
2197 || operand_equal_p (loc->expr, expr, 0)))
2199 /* If E is not a critical edge and DEST_BB
2200 dominates the existing location for the assertion, move
2201 the assertion up in the dominance tree by updating its
2202 location information. */
2203 if ((e == NULL || !EDGE_CRITICAL_P (e))
2204 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2206 loc->bb = dest_bb;
2207 loc->e = e;
2208 loc->si = si;
2209 return;
2213 /* Update the last node of the list and move to the next one. */
2214 last_loc = loc;
2215 loc = loc->next;
2218 /* If we didn't find an assertion already registered for
2219 NAME COMP_CODE VAL, add a new one at the end of the list of
2220 assertions associated with NAME. */
2221 n = XNEW (struct assert_locus);
2222 n->bb = dest_bb;
2223 n->e = e;
2224 n->si = si;
2225 n->comp_code = comp_code;
2226 n->val = val;
2227 n->expr = expr;
2228 n->next = NULL;
2230 if (last_loc)
2231 last_loc->next = n;
2232 else
2233 asserts_for[SSA_NAME_VERSION (name)] = n;
2235 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2238 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2239 Extract a suitable test code and value and store them into *CODE_P and
2240 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2242 If no extraction was possible, return FALSE, otherwise return TRUE.
2244 If INVERT is true, then we invert the result stored into *CODE_P. */
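/* Worked example (editorial addition; names are hypothetical): for
   the predicate '10 < x_2' with NAME == x_2, the comparison is
   flipped to 'x_2 > 10'; with INVERT true (the else edge) it is
   further inverted to 'x_2 <= 10'.  */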
2246 static bool
2247 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2248 tree cond_op0, tree cond_op1,
2249 bool invert, enum tree_code *code_p,
2250 tree *val_p)
2252 enum tree_code comp_code;
2253 tree val;
2255 /* Otherwise, we have a comparison of the form NAME COMP VAL
2256 or VAL COMP NAME. */
2257 if (name == cond_op1)
2259 /* If the predicate is of the form VAL COMP NAME, flip
2260 COMP around because we need to register NAME as the
2261 first operand in the predicate. */
2262 comp_code = swap_tree_comparison (cond_code);
2263 val = cond_op0;
2265 else if (name == cond_op0)
2267 /* The comparison is of the form NAME COMP VAL, so the
2268 comparison code remains unchanged. */
2269 comp_code = cond_code;
2270 val = cond_op1;
2272 else
2273 gcc_unreachable ();
2275 /* Invert the comparison code as necessary. */
2276 if (invert)
2277 comp_code = invert_tree_comparison (comp_code, 0);
2279 /* VRP only handles integral and pointer types. */
2280 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
2281 && ! POINTER_TYPE_P (TREE_TYPE (val)))
2282 return false;
2284 /* Do not register always-false predicates.
2285 FIXME: this works around a limitation in fold() when dealing with
2286 enumerations. Given 'enum { N1, N2 } x;', fold will not
2287 fold 'if (x > N2)' to 'if (0)'. */
2288 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
2289 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
2291 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
2292 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
2294 if (comp_code == GT_EXPR
2295 && (!max
2296 || compare_values (val, max) == 0))
2297 return false;
2299 if (comp_code == LT_EXPR
2300 && (!min
2301 || compare_values (val, min) == 0))
2302 return false;
2304 *code_p = comp_code;
2305 *val_p = val;
2306 return true;
2309 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
2310 (otherwise return VAL). VAL and MASK must be zero-extended for
2311 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
2312 (to transform signed values into unsigned) and at the end xor
2313 SGNBIT back. */
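/* Worked example (editorial addition): with PREC = 8, VAL = 0b0101
   and MASK = 0b1100, the values satisfying (RES & MASK) == RES are
   the submasks 0, 0b0100, 0b1000 and 0b1100; the smallest one
   greater than VAL is 0b1000, so the function returns 8.  */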
2315 static wide_int
2316 masked_increment (const wide_int &val_in, const wide_int &mask,
2317 const wide_int &sgnbit, unsigned int prec)
2319 wide_int bit = wi::one (prec), res;
2320 unsigned int i;
2322 wide_int val = val_in ^ sgnbit;
2323 for (i = 0; i < prec; i++, bit += bit)
2325 res = mask;
2326 if ((res & bit) == 0)
2327 continue;
2328 res = bit - 1;
2329 res = wi::bit_and_not (val + bit, res);
2330 res &= mask;
2331 if (wi::gtu_p (res, val))
2332 return res ^ sgnbit;
2334 return val ^ sgnbit;
2337 /* Helper for overflow_comparison_p
2339 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2340 OP1's defining statement to see if it ultimately has the form
2341 OP0 CODE (OP0 PLUS INTEGER_CST)
2343 If so, return TRUE indicating this is an overflow test and store into
2344 *NEW_CST an updated constant that can be used in a narrowed range test.
2346 REVERSED indicates if the comparison was originally:
2348 OP1 CODE' OP0.
2350 This affects how we build the updated constant. */
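/* Worked example (editorial addition; names are hypothetical): for
   unsigned x_1 and

     y_2 = x_1 + 10;
     if (x_1 > y_2) ...

   the comparison is true exactly when x_1 + 10 wraps around, i.e.
   when x_1 > UINT_MAX - 10, so *NEW_CST is set to UINT_MAX - 10.  */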
2352 static bool
2353 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
2354 bool follow_assert_exprs, bool reversed, tree *new_cst)
2356 /* See if this is a relational operation between two SSA_NAMES with
2357 unsigned, overflow wrapping values. If so, check it more deeply. */
2358 if ((code == LT_EXPR || code == LE_EXPR
2359 || code == GE_EXPR || code == GT_EXPR)
2360 && TREE_CODE (op0) == SSA_NAME
2361 && TREE_CODE (op1) == SSA_NAME
2362 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
2363 && TYPE_UNSIGNED (TREE_TYPE (op0))
2364 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
2366 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
2368 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
2369 if (follow_assert_exprs)
2371 while (gimple_assign_single_p (op1_def)
2372 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
2374 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
2375 if (TREE_CODE (op1) != SSA_NAME)
2376 break;
2377 op1_def = SSA_NAME_DEF_STMT (op1);
2381 /* Now look at the defining statement of OP1 to see if it adds
2382 or subtracts a nonzero constant from another operand. */
2383 if (op1_def
2384 && is_gimple_assign (op1_def)
2385 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
2386 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
2387 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
2389 tree target = gimple_assign_rhs1 (op1_def);
2391 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
2392 for one where TARGET appears on the RHS. */
2393 if (follow_assert_exprs)
2395 /* Now see if that "other operand" is op0, following the chain
2396 of ASSERT_EXPRs if necessary. */
2397 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
2398 while (op0 != target
2399 && gimple_assign_single_p (op0_def)
2400 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
2402 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
2403 if (TREE_CODE (op0) != SSA_NAME)
2404 break;
2405 op0_def = SSA_NAME_DEF_STMT (op0);
2409 /* If we did not find our target SSA_NAME, then this is not
2410 an overflow test. */
2411 if (op0 != target)
2412 return false;
2414 tree type = TREE_TYPE (op0);
2415 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
2416 tree inc = gimple_assign_rhs2 (op1_def);
2417 if (reversed)
2418 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
2419 else
2420 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
2421 return true;
2424 return false;
2427 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2428 OP1's defining statement to see if it ultimately has the form
2429 OP0 CODE (OP0 PLUS INTEGER_CST)
2431 If so, return TRUE indicating this is an overflow test and store into
2432 *NEW_CST an updated constant that can be used in a narrowed range test.
2434 These statements are left as-is in the IL to facilitate discovery of
2435 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
2436 the alternate range representation is often useful within VRP. */
2438 bool
2439 overflow_comparison_p (tree_code code, tree name, tree val,
2440 bool use_equiv_p, tree *new_cst)
2442 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
2443 return true;
2444 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
2445 use_equiv_p, true, new_cst);
2449 /* Try to register an edge assertion for SSA name NAME on edge E for
2450 the condition COND_OP0 COND_CODE COND_OP1 contributing to the conditional
2451 jump guarding E.  Invert the condition if INVERT is true.  */
2453 static void
2454 register_edge_assert_for_2 (tree name, edge e,
2455 enum tree_code cond_code,
2456 tree cond_op0, tree cond_op1, bool invert,
2457 vec<assert_info> &asserts)
2459 tree val;
2460 enum tree_code comp_code;
2462 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2463 cond_op0,
2464 cond_op1,
2465 invert, &comp_code, &val))
2466 return;
2468 /* Queue the assert. */
2469 tree x;
2470 if (overflow_comparison_p (comp_code, name, val, false, &x))
2472 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
2473 ? GT_EXPR : LE_EXPR);
2474 add_assert_info (asserts, name, name, new_code, x);
2476 add_assert_info (asserts, name, name, comp_code, val);
2478 /* In the case of NAME <= CST and NAME being defined as
2479 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
2480 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
2481 This catches range and anti-range tests. */
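/* Worked example (editorial addition; names are hypothetical): the
   classic range-check idiom 'if ((unsigned) (n_2 - 10) <= 5)'
   arrives here as NAME <= 5 with NAME = n_2 + CST2, where CST2 is
   the unsigned representation of -10; asserting the whole expression
   'n_2 + CST2 <= 5' lets VRP recover n_2 in [10, 15] on the true
   edge.  */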
2482 if ((comp_code == LE_EXPR
2483 || comp_code == GT_EXPR)
2484 && TREE_CODE (val) == INTEGER_CST
2485 && TYPE_UNSIGNED (TREE_TYPE (val)))
2487 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2488 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
2490 /* Extract CST2 from the (optional) addition. */
2491 if (is_gimple_assign (def_stmt)
2492 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
2494 name2 = gimple_assign_rhs1 (def_stmt);
2495 cst2 = gimple_assign_rhs2 (def_stmt);
2496 if (TREE_CODE (name2) == SSA_NAME
2497 && TREE_CODE (cst2) == INTEGER_CST)
2498 def_stmt = SSA_NAME_DEF_STMT (name2);
2501 /* Extract NAME2 from the (optional) sign-changing cast. */
2502 if (gimple_assign_cast_p (def_stmt))
2504 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
2505 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2506 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
2507 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
2508 name3 = gimple_assign_rhs1 (def_stmt);
2511 /* If name3 is used later, create an ASSERT_EXPR for it. */
2512 if (name3 != NULL_TREE
2513 && TREE_CODE (name3) == SSA_NAME
2514 && (cst2 == NULL_TREE
2515 || TREE_CODE (cst2) == INTEGER_CST)
2516 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
2518 tree tmp;
2520 /* Build an expression for the range test. */
2521 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
2522 if (cst2 != NULL_TREE)
2523 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2525 if (dump_file)
2527 fprintf (dump_file, "Adding assert for ");
2528 print_generic_expr (dump_file, name3);
2529 fprintf (dump_file, " from ");
2530 print_generic_expr (dump_file, tmp);
2531 fprintf (dump_file, "\n");
2534 add_assert_info (asserts, name3, tmp, comp_code, val);
2537 /* If name2 is used later, create an ASSERT_EXPR for it. */
2538 if (name2 != NULL_TREE
2539 && TREE_CODE (name2) == SSA_NAME
2540 && TREE_CODE (cst2) == INTEGER_CST
2541 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
2543 tree tmp;
2545 /* Build an expression for the range test. */
2546 tmp = name2;
2547 if (TREE_TYPE (name) != TREE_TYPE (name2))
2548 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
2549 if (cst2 != NULL_TREE)
2550 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2552 if (dump_file)
2554 fprintf (dump_file, "Adding assert for ");
2555 print_generic_expr (dump_file, name2);
2556 fprintf (dump_file, " from ");
2557 print_generic_expr (dump_file, tmp);
2558 fprintf (dump_file, "\n");
2561 add_assert_info (asserts, name2, tmp, comp_code, val);
2565 /* In the case of post-in/decrement tests like if (i++) ... and uses
2566 of the in/decremented value on the edge, the extra name we want to
2567 assert for is not on the def chain of the name compared.  Instead
2568 it is in the set of use stmts.
2569 Similar cases happen for conversions that were simplified through
2570 fold_{sign_changed,widened}_comparison. */
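/* Worked example (editorial addition; names are hypothetical): given

     i_2 = i_1 + 1;
     if (i_1 != 0) ...

   the use statement defining i_2 lets us additionally assert
   i_2 != 1 on the true edge (the constant 0 + 1 is folded here).  */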
2571 if ((comp_code == NE_EXPR
2572 || comp_code == EQ_EXPR)
2573 && TREE_CODE (val) == INTEGER_CST)
2575 imm_use_iterator ui;
2576 gimple *use_stmt;
2577 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
2579 if (!is_gimple_assign (use_stmt))
2580 continue;
2582 /* Cut off to use-stmts that are dominating the predecessor. */
2583 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
2584 continue;
2586 tree name2 = gimple_assign_lhs (use_stmt);
2587 if (TREE_CODE (name2) != SSA_NAME)
2588 continue;
2590 enum tree_code code = gimple_assign_rhs_code (use_stmt);
2591 tree cst;
2592 if (code == PLUS_EXPR
2593 || code == MINUS_EXPR)
2595 cst = gimple_assign_rhs2 (use_stmt);
2596 if (TREE_CODE (cst) != INTEGER_CST)
2597 continue;
2598 cst = int_const_binop (code, val, cst);
2600 else if (CONVERT_EXPR_CODE_P (code))
2602 /* For truncating conversions we cannot record
2603 an inequality. */
2604 if (comp_code == NE_EXPR
2605 && (TYPE_PRECISION (TREE_TYPE (name2))
2606 < TYPE_PRECISION (TREE_TYPE (name))))
2607 continue;
2608 cst = fold_convert (TREE_TYPE (name2), val);
2610 else
2611 continue;
2613 if (TREE_OVERFLOW_P (cst))
2614 cst = drop_tree_overflow (cst);
2615 add_assert_info (asserts, name2, name2, comp_code, cst);
2619 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
2620 && TREE_CODE (val) == INTEGER_CST)
2622 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2623 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
2624 tree val2 = NULL_TREE;
2625 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
2626 wide_int mask = wi::zero (prec);
2627 unsigned int nprec = prec;
2628 enum tree_code rhs_code = ERROR_MARK;
2630 if (is_gimple_assign (def_stmt))
2631 rhs_code = gimple_assign_rhs_code (def_stmt);
2633 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
2634 assert that A != CST1 -+ CST2. */
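/* Worked example (editorial addition; names are hypothetical): given
   a_1 = b_2 + 3 and the test a_1 != 10, we can record the equivalent
   assertion b_2 != 7 (CST1 -+ CST2 = 10 - 3).  */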
2635 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2636 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
2638 tree op0 = gimple_assign_rhs1 (def_stmt);
2639 tree op1 = gimple_assign_rhs2 (def_stmt);
2640 if (TREE_CODE (op0) == SSA_NAME
2641 && TREE_CODE (op1) == INTEGER_CST)
2643 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
2644 ? MINUS_EXPR : PLUS_EXPR);
2645 op1 = int_const_binop (reverse_op, val, op1);
2646 if (TREE_OVERFLOW (op1))
2647 op1 = drop_tree_overflow (op1);
2648 add_assert_info (asserts, op0, op0, comp_code, op1);
2652 /* Add asserts for NAME cmp CST and NAME being defined
2653 as NAME = (int) NAME2. */
2654 if (!TYPE_UNSIGNED (TREE_TYPE (val))
2655 && (comp_code == LE_EXPR || comp_code == LT_EXPR
2656 || comp_code == GT_EXPR || comp_code == GE_EXPR)
2657 && gimple_assign_cast_p (def_stmt))
2659 name2 = gimple_assign_rhs1 (def_stmt);
2660 if (CONVERT_EXPR_CODE_P (rhs_code)
2661 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2662 && TYPE_UNSIGNED (TREE_TYPE (name2))
2663 && prec == TYPE_PRECISION (TREE_TYPE (name2))
2664 && (comp_code == LE_EXPR || comp_code == GT_EXPR
2665 || !tree_int_cst_equal (val,
2666 TYPE_MIN_VALUE (TREE_TYPE (val)))))
2668 tree tmp, cst;
2669 enum tree_code new_comp_code = comp_code;
2671 cst = fold_convert (TREE_TYPE (name2),
2672 TYPE_MIN_VALUE (TREE_TYPE (val)));
2673 /* Build an expression for the range test. */
2674 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
2675 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
2676 fold_convert (TREE_TYPE (name2), val));
2677 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2679 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
2680 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
2681 build_int_cst (TREE_TYPE (name2), 1));
2684 if (dump_file)
2686 fprintf (dump_file, "Adding assert for ");
2687 print_generic_expr (dump_file, name2);
2688 fprintf (dump_file, " from ");
2689 print_generic_expr (dump_file, tmp);
2690 fprintf (dump_file, "\n");
2693 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
2697 /* Add asserts for NAME cmp CST and NAME being defined as
2698 NAME = NAME2 >> CST2.
2700 Extract CST2 from the right shift. */
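/* Worked example (editorial addition; names are hypothetical): for
   unsigned 32-bit x_1, n_2 = x_1 >> 4 together with the test
   n_2 == 0x5 gives MASK = 0xf and VAL2 = 0x50, so below we assert
   x_1 - 0x50 <= 0xf, i.e. x_1 is in [0x50, 0x5f].  */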
2701 if (rhs_code == RSHIFT_EXPR)
2703 name2 = gimple_assign_rhs1 (def_stmt);
2704 cst2 = gimple_assign_rhs2 (def_stmt);
2705 if (TREE_CODE (name2) == SSA_NAME
2706 && tree_fits_uhwi_p (cst2)
2707 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2708 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
2709 && type_has_mode_precision_p (TREE_TYPE (val)))
2711 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
2712 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
2715 if (val2 != NULL_TREE
2716 && TREE_CODE (val2) == INTEGER_CST
2717 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
2718 TREE_TYPE (val),
2719 val2, cst2), val))
2721 enum tree_code new_comp_code = comp_code;
2722 tree tmp, new_val;
2724 tmp = name2;
2725 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
2727 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
2729 tree type = build_nonstandard_integer_type (prec, 1);
2730 tmp = build1 (NOP_EXPR, type, name2);
2731 val2 = fold_convert (type, val2);
2733 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
2734 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
2735 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
2737 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2739 wide_int minval
2740 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2741 new_val = val2;
2742 if (minval == wi::to_wide (new_val))
2743 new_val = NULL_TREE;
2745 else
2747 wide_int maxval
2748 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2749 mask |= wi::to_wide (val2);
2750 if (wi::eq_p (mask, maxval))
2751 new_val = NULL_TREE;
2752 else
2753 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
2756 if (new_val)
2758 if (dump_file)
2760 fprintf (dump_file, "Adding assert for ");
2761 print_generic_expr (dump_file, name2);
2762 fprintf (dump_file, " from ");
2763 print_generic_expr (dump_file, tmp);
2764 fprintf (dump_file, "\n");
2767 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
2771 /* Add asserts for NAME cmp CST and NAME being defined as
2772 NAME = NAME2 & CST2.
2774 Extract CST2 from the and.
2776 Also handle
2777 NAME = (unsigned) NAME2;
2778 casts where NAME's type is unsigned and has smaller precision
2779 than NAME2's type as if it was NAME = NAME2 & MASK. */
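/* Worked example (editorial addition; names are hypothetical): for
   x_5 = y_3 & 0xfc and the test x_5 == 0x40, the EQ_EXPR case below
   computes MINV = 0x40 & 0xfc = 0x40 and MAXV = 0x40 | ~0xfc = 0x43,
   so y_3 is asserted to lie in [0x40, 0x43].  */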
2780 names[0] = NULL_TREE;
2781 names[1] = NULL_TREE;
2782 cst2 = NULL_TREE;
2783 if (rhs_code == BIT_AND_EXPR
2784 || (CONVERT_EXPR_CODE_P (rhs_code)
2785 && INTEGRAL_TYPE_P (TREE_TYPE (val))
2786 && TYPE_UNSIGNED (TREE_TYPE (val))
2787 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2788 > prec))
2790 name2 = gimple_assign_rhs1 (def_stmt);
2791 if (rhs_code == BIT_AND_EXPR)
2792 cst2 = gimple_assign_rhs2 (def_stmt);
2793 else
2795 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
2796 nprec = TYPE_PRECISION (TREE_TYPE (name2));
2798 if (TREE_CODE (name2) == SSA_NAME
2799 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2800 && TREE_CODE (cst2) == INTEGER_CST
2801 && !integer_zerop (cst2)
2802 && (nprec > 1
2803 || TYPE_UNSIGNED (TREE_TYPE (val))))
2805 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
2806 if (gimple_assign_cast_p (def_stmt2))
2808 names[1] = gimple_assign_rhs1 (def_stmt2);
2809 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
2810 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
2811 || (TYPE_PRECISION (TREE_TYPE (name2))
2812 != TYPE_PRECISION (TREE_TYPE (names[1]))))
2813 names[1] = NULL_TREE;
2815 names[0] = name2;
2818 if (names[0] || names[1])
2820 wide_int minv, maxv, valv, cst2v;
2821 wide_int tem, sgnbit;
2822 bool valid_p = false, valn, cst2n;
2823 enum tree_code ccode = comp_code;
2825 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
2826 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
2827 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
2828 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
2829 /* If CST2 doesn't have the most significant bit set,
2830 but VAL is negative, we have a comparison like
2831 if ((x & 0x123) > -4) (always true).  Just give up.  */
2832 if (!cst2n && valn)
2833 ccode = ERROR_MARK;
2834 if (cst2n)
2835 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2836 else
2837 sgnbit = wi::zero (nprec);
2838 minv = valv & cst2v;
2839 switch (ccode)
2841 case EQ_EXPR:
2842 /* Minimum unsigned value for equality is VAL & CST2
2843 (should be equal to VAL, otherwise we probably should
2844 have folded the comparison into false) and
2845 maximum unsigned value is VAL | ~CST2. */
2846 maxv = valv | ~cst2v;
2847 valid_p = true;
2848 break;
2850 case NE_EXPR:
2851 tem = valv | ~cst2v;
2852 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
2853 if (valv == 0)
2855 cst2n = false;
2856 sgnbit = wi::zero (nprec);
2857 goto gt_expr;
2859 /* If (VAL | ~CST2) is all ones, handle it as
2860 (X & CST2) < VAL. */
2861 if (tem == -1)
2863 cst2n = false;
2864 valn = false;
2865 sgnbit = wi::zero (nprec);
2866 goto lt_expr;
2868 if (!cst2n && wi::neg_p (cst2v))
2869 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2870 if (sgnbit != 0)
2872 if (valv == sgnbit)
2874 cst2n = true;
2875 valn = true;
2876 goto gt_expr;
2878 if (tem == wi::mask (nprec - 1, false, nprec))
2880 cst2n = true;
2881 goto lt_expr;
2883 if (!cst2n)
2884 sgnbit = wi::zero (nprec);
2886 break;
2888 case GE_EXPR:
2889 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
2890 is VAL and maximum unsigned value is ~0. For signed
2891 comparison, if CST2 doesn't have most significant bit
2892 set, handle it similarly. If CST2 has MSB set,
2893 the minimum is the same, and maximum is ~0U/2. */
2894 if (minv != valv)
2896 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
2897 VAL. */
2898 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2899 if (minv == valv)
2900 break;
2902 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2903 valid_p = true;
2904 break;
2906 case GT_EXPR:
2907 gt_expr:
2908 /* Find out smallest MINV where MINV > VAL
2909 && (MINV & CST2) == MINV, if any. If VAL is signed and
2910 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
2911 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2912 if (minv == valv)
2913 break;
2914 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2915 valid_p = true;
2916 break;
2918 case LE_EXPR:
2919 /* Minimum unsigned value for <= is 0 and maximum
2920 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
2921 Otherwise, find smallest VAL2 where VAL2 > VAL
2922 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2923 as maximum.
2924 For signed comparison, if CST2 doesn't have most
2925 significant bit set, handle it similarly. If CST2 has
2926 MSB set, the maximum is the same and minimum is INT_MIN. */
2927 if (minv == valv)
2928 maxv = valv;
2929 else
2931 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2932 if (maxv == valv)
2933 break;
2934 maxv -= 1;
2936 maxv |= ~cst2v;
2937 minv = sgnbit;
2938 valid_p = true;
2939 break;
2941 case LT_EXPR:
2942 lt_expr:
2943 /* Minimum unsigned value for < is 0 and maximum
2944 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
2945 Otherwise, find smallest VAL2 where VAL2 > VAL
2946 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2947 as maximum.
2948 For signed comparison, if CST2 doesn't have most
2949 significant bit set, handle it similarly. If CST2 has
2950 MSB set, the maximum is the same and minimum is INT_MIN. */
2951 if (minv == valv)
2953 if (valv == sgnbit)
2954 break;
2955 maxv = valv;
2957 else
2959 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2960 if (maxv == valv)
2961 break;
2963 maxv -= 1;
2964 maxv |= ~cst2v;
2965 minv = sgnbit;
2966 valid_p = true;
2967 break;
2969 default:
2970 break;
2972 if (valid_p
2973 && (maxv - minv) != -1)
2975 tree tmp, new_val, type;
2976 int i;
2978 for (i = 0; i < 2; i++)
2979 if (names[i])
2981 wide_int maxv2 = maxv;
2982 tmp = names[i];
2983 type = TREE_TYPE (names[i]);
2984 if (!TYPE_UNSIGNED (type))
2986 type = build_nonstandard_integer_type (nprec, 1);
2987 tmp = build1 (NOP_EXPR, type, names[i]);
2989 if (minv != 0)
2991 tmp = build2 (PLUS_EXPR, type, tmp,
2992 wide_int_to_tree (type, -minv));
2993 maxv2 = maxv - minv;
2995 new_val = wide_int_to_tree (type, maxv2);
2997 if (dump_file)
2999 fprintf (dump_file, "Adding assert for ");
3000 print_generic_expr (dump_file, names[i]);
3001 fprintf (dump_file, " from ");
3002 print_generic_expr (dump_file, tmp);
3003 fprintf (dump_file, "\n");
3006 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3013 /* OP is an operand of a truth value expression which is known to have
3014 a particular value. Register any asserts for OP and for any
3015 operands in OP's defining statement.
3017 If CODE is EQ_EXPR, then we want to register OP is zero (false),
3018 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
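/* Worked example (editorial addition; names are hypothetical): for

     t_4 = a_1 > 5;
     if (t_4 != 0) ...

   we register t_4 != 0 on the true edge and then recurse into the
   defining comparison to also register the assertion a_1 > 5.  */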
3020 static void
3021 register_edge_assert_for_1 (tree op, enum tree_code code,
3022 edge e, vec<assert_info> &asserts)
3024 gimple *op_def;
3025 tree val;
3026 enum tree_code rhs_code;
3028 /* We only care about SSA_NAMEs. */
3029 if (TREE_CODE (op) != SSA_NAME)
3030 return;
3032 /* We know that OP will have a zero or nonzero value. */
3033 val = build_int_cst (TREE_TYPE (op), 0);
3034 add_assert_info (asserts, op, op, code, val);
3036 /* Now look at how OP is set. If it's set from a comparison,
3037 a truth operation or some bit operations, then we may be able
3038 to register information about the operands of that assignment. */
3039 op_def = SSA_NAME_DEF_STMT (op);
3040 if (gimple_code (op_def) != GIMPLE_ASSIGN)
3041 return;
3043 rhs_code = gimple_assign_rhs_code (op_def);
3045 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3047 bool invert = (code == EQ_EXPR ? true : false);
3048 tree op0 = gimple_assign_rhs1 (op_def);
3049 tree op1 = gimple_assign_rhs2 (op_def);
3051 if (TREE_CODE (op0) == SSA_NAME)
3052 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3053 if (TREE_CODE (op1) == SSA_NAME)
3054 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3056 else if ((code == NE_EXPR
3057 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3058 || (code == EQ_EXPR
3059 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3061 /* Recurse on each operand. */
3062 tree op0 = gimple_assign_rhs1 (op_def);
3063 tree op1 = gimple_assign_rhs2 (op_def);
3064 if (TREE_CODE (op0) == SSA_NAME
3065 && has_single_use (op0))
3066 register_edge_assert_for_1 (op0, code, e, asserts);
3067 if (TREE_CODE (op1) == SSA_NAME
3068 && has_single_use (op1))
3069 register_edge_assert_for_1 (op1, code, e, asserts);
3071 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3072 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3074 /* Recurse, flipping CODE. */
3075 code = invert_tree_comparison (code, false);
3076 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3078 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3080 /* Recurse through the copy. */
3081 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3083 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3085 /* Recurse through the type conversion, unless it is a narrowing
3086 conversion or conversion from non-integral type. */
3087 tree rhs = gimple_assign_rhs1 (op_def);
3088 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3089 && (TYPE_PRECISION (TREE_TYPE (rhs))
3090 <= TYPE_PRECISION (TREE_TYPE (op))))
3091 register_edge_assert_for_1 (rhs, code, e, asserts);
3095 /* Check if comparison
3096 NAME COND_OP INTEGER_CST
3097 has the form
3098 (X & 11...100..0) COND_OP XX...X00...0
3099 Such a comparison can yield assertions like
3100 X >= XX...X00...0
3101 X <= XX...X11...1
3102 in case of COND_OP being EQ_EXPR or
3103 X < XX...X00...0
3104 X > XX...X11...1
3105 in case of NE_EXPR. */
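/* Worked example (editorial addition; names are hypothetical): for
   (x_2 & 0xf0) == 0x30 we can assert x_2 >= 0x30 and x_2 <= 0x3f,
   since the low four bits are unconstrained.  For NE_EXPR the bounds
   invert to x_2 < 0x30 || x_2 > 0x3f, which is only representable
   here when one of the two alternatives is statically false.  */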
3107 static bool
3108 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3109 tree *new_name, tree *low, enum tree_code *low_code,
3110 tree *high, enum tree_code *high_code)
3112 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3114 if (!is_gimple_assign (def_stmt)
3115 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3116 return false;
3118 tree t = gimple_assign_rhs1 (def_stmt);
3119 tree maskt = gimple_assign_rhs2 (def_stmt);
3120 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3121 return false;
3123 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3124 wide_int inv_mask = ~mask;
3125 /* An all-zero or all-one mask would have been folded away by now.  */
3126 if (mask == 0 || inv_mask == 0)
3127 return false;
3129 /* Assume VALT is INTEGER_CST. */
3130 wi::tree_to_wide_ref val = wi::to_wide (valt);
3132 if ((inv_mask & (inv_mask + 1)) != 0
3133 || (val & mask) != val)
3134 return false;
3136 bool is_range = cond_code == EQ_EXPR;
3138 tree type = TREE_TYPE (t);
3139 wide_int min = wi::min_value (type),
3140 max = wi::max_value (type);
3142 if (is_range)
3144 *low_code = val == min ? ERROR_MARK : GE_EXPR;
3145 *high_code = val == max ? ERROR_MARK : LE_EXPR;
3147 else
3149 /* We can still generate assertion if one of alternatives
3150 is known to always be false. */
3151 if (val == min)
3153 *low_code = (enum tree_code) 0;
3154 *high_code = GT_EXPR;
3156 else if ((val | inv_mask) == max)
3158 *low_code = LT_EXPR;
3159 *high_code = (enum tree_code) 0;
3161 else
3162 return false;
3165 *new_name = t;
3166 *low = wide_int_to_tree (type, val);
3167 *high = wide_int_to_tree (type, val | inv_mask);
3169 return true;
3172 /* Try to register an edge assertion for SSA name NAME on edge E for
3173 the condition COND_OP0 COND_CODE COND_OP1 contributing to the conditional
3174 jump at the end of E->src.  */
3176 void
3177 register_edge_assert_for (tree name, edge e,
3178 enum tree_code cond_code, tree cond_op0,
3179 tree cond_op1, vec<assert_info> &asserts)
3181 tree val;
3182 enum tree_code comp_code;
3183 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3185 /* Do not attempt to infer anything in names that flow through
3186 abnormal edges. */
3187 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3188 return;
3190 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3191 cond_op0, cond_op1,
3192 is_else_edge,
3193 &comp_code, &val))
3194 return;
3196 /* Register ASSERT_EXPRs for name. */
3197 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3198 cond_op1, is_else_edge, asserts);
3201 /* If COND is effectively an equality test of an SSA_NAME against
3202 the value zero or one, then we may be able to assert values
3203 for SSA_NAMEs which flow into COND. */
3205 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3206 statement of NAME we can assert both operands of the BIT_AND_EXPR
3207 have nonzero value. */
3208 if (((comp_code == EQ_EXPR && integer_onep (val))
3209 || (comp_code == NE_EXPR && integer_zerop (val))))
3211 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3213 if (is_gimple_assign (def_stmt)
3214 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3216 tree op0 = gimple_assign_rhs1 (def_stmt);
3217 tree op1 = gimple_assign_rhs2 (def_stmt);
3218 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3219 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3223 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3224 statement of NAME we can assert both operands of the BIT_IOR_EXPR
3225 have zero value. */
3226 if (((comp_code == EQ_EXPR && integer_zerop (val))
3227 || (comp_code == NE_EXPR && integer_onep (val))))
3229 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3231 /* For BIT_IOR_EXPR only if NAME == 0 both operands have
3232 necessarily zero value, or if type-precision is one. */
3233 if (is_gimple_assign (def_stmt)
3234 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3235 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3236 || comp_code == EQ_EXPR)))
3238 tree op0 = gimple_assign_rhs1 (def_stmt);
3239 tree op1 = gimple_assign_rhs2 (def_stmt);
3240 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3241 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3245 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
3246 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3247 && TREE_CODE (val) == INTEGER_CST)
3249 enum tree_code low_code, high_code;
3250 tree low, high;
3251 if (is_masked_range_test (name, val, comp_code, &name, &low,
3252 &low_code, &high, &high_code))
3254 if (low_code != ERROR_MARK)
3255 register_edge_assert_for_2 (name, e, low_code, name,
3256 low, /*invert*/false, asserts);
3257 if (high_code != ERROR_MARK)
3258 register_edge_assert_for_2 (name, e, high_code, name,
3259 high, /*invert*/false, asserts);
3264 /* Finish found ASSERTS for E and register them at GSI. */
3266 static void
3267 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
3268 vec<assert_info> &asserts)
3270 for (unsigned i = 0; i < asserts.length (); ++i)
3271 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3272 reachable from E. */
3273 if (live_on_edge (e, asserts[i].name))
3274 register_new_assert_for (asserts[i].name, asserts[i].expr,
3275 asserts[i].comp_code, asserts[i].val,
3276 NULL, e, gsi);
3281 /* Determine whether the outgoing edges of BB should receive an
3282 ASSERT_EXPR for each of the operands of BB's LAST statement.
3283 The last statement of BB must be a COND_EXPR.
3285 If any of the sub-graphs rooted at BB have an interesting use of
3286 the predicate operands, an assert location node is added to the
3287 list of assertions for the corresponding operands. */
3289 static void
3290 find_conditional_asserts (basic_block bb, gcond *last)
3292 gimple_stmt_iterator bsi;
3293 tree op;
3294 edge_iterator ei;
3295 edge e;
3296 ssa_op_iter iter;
3298 bsi = gsi_for_stmt (last);
3300 /* Look for uses of the operands in each of the sub-graphs
3301 rooted at BB. We need to check each of the outgoing edges
3302 separately, so that we know what kind of ASSERT_EXPR to
3303 insert. */
3304 FOR_EACH_EDGE (e, ei, bb->succs)
3306 if (e->dest == bb)
3307 continue;
3309 /* Register the necessary assertions for each operand in the
3310 conditional predicate. */
3311 auto_vec<assert_info, 8> asserts;
3312 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3313 register_edge_assert_for (op, e,
3314 gimple_cond_code (last),
3315 gimple_cond_lhs (last),
3316 gimple_cond_rhs (last), asserts);
3317 finish_register_edge_assert_for (e, bsi, asserts);
3321 struct case_info
3323 tree expr;
3324 basic_block bb;
3327 /* Compare two case labels sorting first by the destination bb index
3328 and then by the case value. */
3330 static int
3331 compare_case_labels (const void *p1, const void *p2)
3333 const struct case_info *ci1 = (const struct case_info *) p1;
3334 const struct case_info *ci2 = (const struct case_info *) p2;
3335 int idx1 = ci1->bb->index;
3336 int idx2 = ci2->bb->index;
3338 if (idx1 < idx2)
3339 return -1;
3340 else if (idx1 == idx2)
3342 /* Make sure the default label is first in a group. */
3343 if (!CASE_LOW (ci1->expr))
3344 return -1;
3345 else if (!CASE_LOW (ci2->expr))
3346 return 1;
3347 else
3348 return tree_int_cst_compare (CASE_LOW (ci1->expr),
3349 CASE_LOW (ci2->expr));
3351 else
3352 return 1;
3355 /* Determine whether the outgoing edges of BB should receive an
3356 ASSERT_EXPR for each of the operands of BB's LAST statement.
3357 The last statement of BB must be a SWITCH_EXPR.
3359 If any of the sub-graphs rooted at BB have an interesting use of
3360 the predicate operands, an assert location node is added to the
3361 list of assertions for the corresponding operands. */
3363 static void
3364 find_switch_asserts (basic_block bb, gswitch *last)
3366 gimple_stmt_iterator bsi;
3367 tree op;
3368 edge e;
3369 struct case_info *ci;
3370 size_t n = gimple_switch_num_labels (last);
3371 #if GCC_VERSION >= 4000
3372 unsigned int idx;
3373 #else
3374 /* Work around GCC 3.4 bug (PR 37086). */
3375 volatile unsigned int idx;
3376 #endif
3378 bsi = gsi_for_stmt (last);
3379 op = gimple_switch_index (last);
3380 if (TREE_CODE (op) != SSA_NAME)
3381 return;
3383 /* Build a vector of case labels sorted by destination label. */
3384 ci = XNEWVEC (struct case_info, n);
3385 for (idx = 0; idx < n; ++idx)
3387 ci[idx].expr = gimple_switch_label (last, idx);
3388 ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
3390 edge default_edge = find_edge (bb, ci[0].bb);
3391 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3393 for (idx = 0; idx < n; ++idx)
3395 tree min, max;
3396 tree cl = ci[idx].expr;
3397 basic_block cbb = ci[idx].bb;
3399 min = CASE_LOW (cl);
3400 max = CASE_HIGH (cl);
3402 /* If there are multiple case labels with the same destination
3403 we need to combine them to a single value range for the edge. */
3404 if (idx + 1 < n && cbb == ci[idx + 1].bb)
3406 /* Skip labels until the last of the group. */
3407 do {
3408 ++idx;
3409 } while (idx < n && cbb == ci[idx].bb);
3410 --idx;
3412 /* Pick up the maximum of the case label range. */
3413 if (CASE_HIGH (ci[idx].expr))
3414 max = CASE_HIGH (ci[idx].expr);
3415 else
3416 max = CASE_LOW (ci[idx].expr);
3419 /* Can't extract a useful assertion out of a range that includes the
3420 default label. */
3421 if (min == NULL_TREE)
3422 continue;
3424 /* Find the edge to register the assert expr on. */
3425 e = find_edge (bb, cbb);
3427 /* Register the necessary assertions for the operand in the
3428 SWITCH_EXPR. */
3429 auto_vec<assert_info, 8> asserts;
3430 register_edge_assert_for (op, e,
3431 max ? GE_EXPR : EQ_EXPR,
3432 op, fold_convert (TREE_TYPE (op), min),
3433 asserts);
3434 if (max)
3435 register_edge_assert_for (op, e, LE_EXPR, op,
3436 fold_convert (TREE_TYPE (op), max),
3437 asserts);
3438 finish_register_edge_assert_for (e, bsi, asserts);
3441 XDELETEVEC (ci);
3443 if (!live_on_edge (default_edge, op))
3444 return;
3446 /* Now register along the default label assertions that correspond to the
3447 anti-range of each label. */
3448 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
3449 if (insertion_limit == 0)
3450 return;
3452 /* We can't do this if the default case shares a label with another case. */
3453 tree default_cl = gimple_switch_default_label (last);
3454 for (idx = 1; idx < n; idx++)
3456 tree min, max;
3457 tree cl = gimple_switch_label (last, idx);
3458 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3459 continue;
3461 min = CASE_LOW (cl);
3462 max = CASE_HIGH (cl);
3464 /* Combine contiguous case ranges to reduce the number of assertions
3465 to insert. */
3466 for (idx = idx + 1; idx < n; idx++)
3468 tree next_min, next_max;
3469 tree next_cl = gimple_switch_label (last, idx);
3470 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3471 break;
3473 next_min = CASE_LOW (next_cl);
3474 next_max = CASE_HIGH (next_cl);
3476 wide_int difference = (wi::to_wide (next_min)
3477 - wi::to_wide (max ? max : min));
3478 if (wi::eq_p (difference, 1))
3479 max = next_max ? next_max : next_min;
3480 else
3481 break;
3483 idx--;
3485 if (max == NULL_TREE)
3487 /* Register the assertion OP != MIN. */
3488 auto_vec<assert_info, 8> asserts;
3489 min = fold_convert (TREE_TYPE (op), min);
3490 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3491 asserts);
3492 finish_register_edge_assert_for (default_edge, bsi, asserts);
3494 else
3496 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3497 which will give OP the anti-range ~[MIN,MAX]. */
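/* Worked example (editorial addition): with MIN = 3 and MAX = 7,
   the assertion (unsigned) op - 3 > 4 holds exactly when op < 3 or
   op > 7, i.e. op takes a value in the anti-range ~[3, 7] on the
   default edge.  */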
3498 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3499 min = fold_convert (TREE_TYPE (uop), min);
3500 max = fold_convert (TREE_TYPE (uop), max);
3502 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3503 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3504 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3505 NULL, default_edge, bsi);
3508 if (--insertion_limit == 0)
3509 break;
3514 /* Traverse all the statements in block BB looking for statements that
3515 may generate useful assertions for the SSA names in their operand.
3516 If a statement produces a useful assertion A for name N_i, then the
3517 list of assertions already generated for N_i is scanned to
3518 determine if A is actually needed.
3520 If N_i already had the assertion A at a location dominating the
3521 current location, then nothing needs to be done. Otherwise, the
3522 new location for A is recorded instead.
3524 1- For every statement S in BB, all the variables used by S are
3525 added to bitmap FOUND_IN_SUBGRAPH.
3527 2- If statement S uses an operand N in a way that exposes a known
3528 value range for N, then if N was not already generated by an
3529 ASSERT_EXPR, create a new assert location for N. For instance,
3530 if N is a pointer and the statement dereferences it, we can
3531 assume that N is not NULL.
3533 3- COND_EXPRs are a special case of #2. We can derive range
3534 information from the predicate but need to insert different
3535 ASSERT_EXPRs for each of the sub-graphs rooted at the
3536 conditional block. If the last statement of BB is a conditional
3537 expression of the form 'X op Y', then
3539 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3541 b) If the conditional is the only entry point to the sub-graph
3542 corresponding to the THEN_CLAUSE, recurse into it. On
3543 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3544 an ASSERT_EXPR is added for the corresponding variable.
3546 c) Repeat step (b) on the ELSE_CLAUSE.
3548 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3550 For instance,
3552 if (a == 9)
3553 b = a;
3554 else
3555 b = c + 1;
3557 In this case, an assertion on the THEN clause is useful to
3558 determine that 'a' is always 9 on that edge. However, an assertion
3559 on the ELSE clause would be unnecessary.
3561 4- If BB does not end in a conditional expression, then we recurse
3562 into BB's dominator children.
3564 At the end of the recursive traversal, every SSA name will have a
3565 list of locations where ASSERT_EXPRs should be added. When a new
3566 location for name N is found, it is registered by calling
3567 register_new_assert_for. That function keeps track of all the
3568 registered assertions to prevent adding unnecessary assertions.
3569 For instance, if a pointer P_4 is dereferenced more than once in a
3570 dominator tree, only the location dominating all the dereference of
3571 P_4 will receive an ASSERT_EXPR. */
3573 static void
3574 find_assert_locations_1 (basic_block bb, sbitmap live)
3576 gimple *last;
3578 last = last_stmt (bb);
3580 /* If BB's last statement is a conditional statement involving integer
3581 operands, determine if we need to add ASSERT_EXPRs. */
3582 if (last
3583 && gimple_code (last) == GIMPLE_COND
3584 && !fp_predicate (last)
3585 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3586 find_conditional_asserts (bb, as_a <gcond *> (last));
3588 /* If BB's last statement is a switch statement involving integer
3589 operands, determine if we need to add ASSERT_EXPRs. */
3590 if (last
3591 && gimple_code (last) == GIMPLE_SWITCH
3592 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3593 find_switch_asserts (bb, as_a <gswitch *> (last));
3595 /* Traverse all the statements in BB marking used names and looking
3596 for statements that may infer assertions for their used operands. */
3597 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3598 gsi_prev (&si))
3600 gimple *stmt;
3601 tree op;
3602 ssa_op_iter i;
3604 stmt = gsi_stmt (si);
3606 if (is_gimple_debug (stmt))
3607 continue;
3609 /* See if we can derive an assertion for any of STMT's operands. */
3610 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3612 tree value;
3613 enum tree_code comp_code;
3615 /* If op is not live beyond this stmt, do not bother to insert
3616 asserts for it. */
3617 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
3618 continue;
3620 /* If OP is used in such a way that we can infer a value
3621 range for it, and we don't find a previous assertion for
3622 it, create a new assertion location node for OP. */
3623 if (infer_value_range (stmt, op, &comp_code, &value))
3625 /* If we are able to infer a nonzero value range for OP,
3626 then walk backwards through the use-def chain to see if OP
3627 was set via a typecast.
3629 If so, then we can also infer a nonzero value range
3630 for the operand of the NOP_EXPR. */
3631 if (comp_code == NE_EXPR && integer_zerop (value))
3633 tree t = op;
3634 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3636 while (is_gimple_assign (def_stmt)
3637 && CONVERT_EXPR_CODE_P
3638 (gimple_assign_rhs_code (def_stmt))
3639 && TREE_CODE
3640 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3641 && POINTER_TYPE_P
3642 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3644 t = gimple_assign_rhs1 (def_stmt);
3645 def_stmt = SSA_NAME_DEF_STMT (t);
3647 /* Note we want to register the assert for the
3648 operand of the NOP_EXPR after SI, not after the
3649 conversion. */
3650 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3651 register_new_assert_for (t, t, comp_code, value,
3652 bb, NULL, si);
3656 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3660 /* Update live. */
3661 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3662 bitmap_set_bit (live, SSA_NAME_VERSION (op));
3663 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3664 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3667 /* Traverse all PHI nodes in BB, updating live. */
3668 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3669 gsi_next (&si))
3671 use_operand_p arg_p;
3672 ssa_op_iter i;
3673 gphi *phi = si.phi ();
3674 tree res = gimple_phi_result (phi);
3676 if (virtual_operand_p (res))
3677 continue;
3679 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3681 tree arg = USE_FROM_PTR (arg_p);
3682 if (TREE_CODE (arg) == SSA_NAME)
3683 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3686 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3690 /* Do an RPO walk over the function computing SSA name liveness
3691 on-the-fly and deciding on assert expressions to insert. */
3693 static void
3694 find_assert_locations (void)
3696 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3697 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3698 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3699 int rpo_cnt, i;
3701 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3702 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3703 for (i = 0; i < rpo_cnt; ++i)
3704 bb_rpo[rpo[i]] = i;
3706 /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
3707 the order in which we compute liveness and insert asserts, we would
3708 otherwise fail to insert asserts into the loop latch.  */
3709 loop_p loop;
3710 FOR_EACH_LOOP (loop, 0)
3712 i = loop->latch->index;
3713 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3714 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3715 !gsi_end_p (gsi); gsi_next (&gsi))
3717 gphi *phi = gsi.phi ();
3718 if (virtual_operand_p (gimple_phi_result (phi)))
3719 continue;
3720 tree arg = gimple_phi_arg_def (phi, j);
3721 if (TREE_CODE (arg) == SSA_NAME)
3723 if (live[i] == NULL)
3725 live[i] = sbitmap_alloc (num_ssa_names);
3726 bitmap_clear (live[i]);
3728 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
3733 for (i = rpo_cnt - 1; i >= 0; --i)
3735 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3736 edge e;
3737 edge_iterator ei;
3739 if (!live[rpo[i]])
3741 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
3742 bitmap_clear (live[rpo[i]]);
3745 /* Process BB and update the live information with uses in
3746 this block. */
3747 find_assert_locations_1 (bb, live[rpo[i]]);
3749 /* Merge liveness into the predecessor blocks and free it. */
3750 if (!bitmap_empty_p (live[rpo[i]]))
3752 int pred_rpo = i;
3753 FOR_EACH_EDGE (e, ei, bb->preds)
3755 int pred = e->src->index;
3756 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3757 continue;
3759 if (!live[pred])
3761 live[pred] = sbitmap_alloc (num_ssa_names);
3762 bitmap_clear (live[pred]);
3764 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
3766 if (bb_rpo[pred] < pred_rpo)
3767 pred_rpo = bb_rpo[pred];
3770 /* Record the RPO number of the last visited block that needs
3771 live information from this block. */
3772 last_rpo[rpo[i]] = pred_rpo;
3774 else
3776 sbitmap_free (live[rpo[i]]);
3777 live[rpo[i]] = NULL;
3780 /* We can free all successors live bitmaps if all their
3781 predecessors have been visited already. */
3782 FOR_EACH_EDGE (e, ei, bb->succs)
3783 if (last_rpo[e->dest->index] == i
3784 && live[e->dest->index])
3786 sbitmap_free (live[e->dest->index]);
3787 live[e->dest->index] = NULL;
3791 XDELETEVEC (rpo);
3792 XDELETEVEC (bb_rpo);
3793 XDELETEVEC (last_rpo);
3794 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
3795 if (live[i])
3796 sbitmap_free (live[i]);
3797 XDELETEVEC (live);
3800 /* Create an ASSERT_EXPR for NAME and insert it in the location
3801 indicated by LOC. Return true if we made any edge insertions. */
3803 static bool
3804 process_assert_insertions_for (tree name, assert_locus *loc)
3806 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3807 gimple *stmt;
3808 tree cond;
3809 gimple *assert_stmt;
3810 edge_iterator ei;
3811 edge e;
3813 /* If we have X <=> X do not insert an assert expr for that. */
3814 if (loc->expr == loc->val)
3815 return false;
3817 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3818 assert_stmt = build_assert_expr_for (cond, name);
3819 if (loc->e)
3821 /* We have been asked to insert the assertion on an edge. This
3822 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3823 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3824 || (gimple_code (gsi_stmt (loc->si))
3825 == GIMPLE_SWITCH));
3827 gsi_insert_on_edge (loc->e, assert_stmt);
3828 return true;
3831 /* If the stmt iterator points at the end then this is an insertion
3832 at the beginning of a block. */
3833 if (gsi_end_p (loc->si))
3835 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3836 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3837 return false;
3840 /* Otherwise, we can insert right after LOC->SI iff the
3841 statement is not one that must end the basic block.  */
3842 stmt = gsi_stmt (loc->si);
3843 if (!stmt_ends_bb_p (stmt))
3845 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3846 return false;
3849 /* If STMT must be the last statement in BB, we can only insert new
3850 assertions on the non-abnormal edge out of BB. Note that since
3851 STMT is not control flow, there may only be one non-abnormal/eh edge
3852 out of BB. */
3853 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3854 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3856 gsi_insert_on_edge (e, assert_stmt);
3857 return true;
3860 gcc_unreachable ();
3863 /* Qsort helper for sorting assert locations. If stable is true, don't
3864 use iterative_hash_expr because it can be unstable for -fcompare-debug;
3865 moreover, in the stable variant some pointers might be NULL.  */
3867 template <bool stable>
3868 static int
3869 compare_assert_loc (const void *pa, const void *pb)
3871 assert_locus * const a = *(assert_locus * const *)pa;
3872 assert_locus * const b = *(assert_locus * const *)pb;
3874 /* If stable, some asserts might be optimized away already, sort
3875 them last. */
3876 if (stable)
3878 if (a == NULL)
3879 return b != NULL;
3880 else if (b == NULL)
3881 return -1;
3884 if (a->e == NULL && b->e != NULL)
3885 return 1;
3886 else if (a->e != NULL && b->e == NULL)
3887 return -1;
3889 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3890 no need to test both a->e and b->e. */
3892 /* Sort after destination index. */
3893 if (a->e == NULL)
3895 else if (a->e->dest->index > b->e->dest->index)
3896 return 1;
3897 else if (a->e->dest->index < b->e->dest->index)
3898 return -1;
3900 /* Sort after comp_code. */
3901 if (a->comp_code > b->comp_code)
3902 return 1;
3903 else if (a->comp_code < b->comp_code)
3904 return -1;
3906 hashval_t ha, hb;
3908 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3909 uses DECL_UID of the VAR_DECL, so sorting might differ between
3910 -g and -g0. When doing the removal of redundant assert exprs
3911 and commonization to successors, this does not matter, but
3912 the final sort needs to be stable.  */
3913 if (stable)
3915 ha = 0;
3916 hb = 0;
3918 else
3920 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3921 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3924 /* Break the tie using hashing and source/bb index. */
3925 if (ha == hb)
3926 return (a->e != NULL
3927 ? a->e->src->index - b->e->src->index
3928 : a->bb->index - b->bb->index);
3929 return ha > hb ? 1 : -1;
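/* A minimal sketch (not part of GCC) of the two-level tie-breaking used
   above: sort by edge destination and comparison code first, then by a
   hash that the stable variant forces to zero so that only the src/bb
   index decides, keeping -g and -g0 orders identical.  The struct and
   its field names are hypothetical.  */

struct sketch_assert
{
  int dest, code, index;
  unsigned hash;
};

static int ATTRIBUTE_UNUSED
sketch_compare_assert (const sketch_assert *a, const sketch_assert *b,
		       bool stable)
{
  if (a->dest != b->dest)
    return a->dest > b->dest ? 1 : -1;
  if (a->code != b->code)
    return a->code > b->code ? 1 : -1;
  unsigned ha = stable ? 0 : a->hash;
  unsigned hb = stable ? 0 : b->hash;
  if (ha != hb)
    return ha > hb ? 1 : -1;
  /* With equal (or zeroed) hashes, fall back to the index.  */
  return a->index - b->index;
}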
3932 /* Process all the insertions registered for every name N_i registered
3933 in NEED_ASSERT_FOR. The list of assertions to be inserted are
3934 found in ASSERTS_FOR[i]. */
3936 static void
3937 process_assert_insertions (void)
3939 unsigned i;
3940 bitmap_iterator bi;
3941 bool update_edges_p = false;
3942 int num_asserts = 0;
3944 if (dump_file && (dump_flags & TDF_DETAILS))
3945 dump_all_asserts (dump_file);
3947 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3949 assert_locus *loc = asserts_for[i];
3950 gcc_assert (loc);
3952 auto_vec<assert_locus *, 16> asserts;
3953 for (; loc; loc = loc->next)
3954 asserts.safe_push (loc);
3955 asserts.qsort (compare_assert_loc<false>);
3957 /* Push down common asserts to successors and remove redundant ones. */
3958 unsigned ecnt = 0;
3959 assert_locus *common = NULL;
3960 unsigned commonj = 0;
3961 for (unsigned j = 0; j < asserts.length (); ++j)
3963 loc = asserts[j];
3964 if (! loc->e)
3965 common = NULL;
3966 else if (! common
3967 || loc->e->dest != common->e->dest
3968 || loc->comp_code != common->comp_code
3969 || ! operand_equal_p (loc->val, common->val, 0)
3970 || ! operand_equal_p (loc->expr, common->expr, 0))
3972 commonj = j;
3973 common = loc;
3974 ecnt = 1;
3976 else if (loc->e == asserts[j-1]->e)
3978 /* Remove duplicate asserts. */
3979 if (commonj == j - 1)
3981 commonj = j;
3982 common = loc;
3984 free (asserts[j-1]);
3985 asserts[j-1] = NULL;
3987 else
3989 ecnt++;
3990 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
3992 /* We have the same assertion on all incoming edges of a BB.
3993 Insert it at the beginning of that block. */
3994 loc->bb = loc->e->dest;
3995 loc->e = NULL;
3996 loc->si = gsi_none ();
3997 common = NULL;
3998 /* Clear asserts commoned. */
3999 for (; commonj != j; ++commonj)
4000 if (asserts[commonj])
4002 free (asserts[commonj]);
4003 asserts[commonj] = NULL;
4009 /* The asserts vector sorting above might be unstable for
4010 -fcompare-debug, sort again to ensure a stable sort. */
4011 asserts.qsort (compare_assert_loc<true>);
4012 for (unsigned j = 0; j < asserts.length (); ++j)
4014 loc = asserts[j];
4015 if (! loc)
4016 break;
4017 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4018 num_asserts++;
4019 free (loc);
4023 if (update_edges_p)
4024 gsi_commit_edge_inserts ();
4026 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4027 num_asserts);
4031 /* Traverse the flowgraph looking for conditional jumps to insert range
4032 expressions. These range expressions are meant to provide information
4033 to optimizations that need to reason in terms of value ranges. They
4034 will not be expanded into RTL. For instance, given:
4036 x = ...
4037 y = ...
4038 if (x < y)
4039 y = x - 2;
4040 else
4041 x = y + 3;
4043 this pass will transform the code into:
4045 x = ...
4046 y = ...
4047 if (x < y)
4049 x = ASSERT_EXPR <x, x < y>
4050 y = x - 2
4052 else
4054 y = ASSERT_EXPR <y, x >= y>
4055 x = y + 3
4058 The idea is that once copy and constant propagation have run, other
4059 optimizations will be able to determine what ranges of values can 'x'
4060 take in different paths of the code, simply by checking the reaching
4061 definition of 'x'. */
4063 static void
4064 insert_range_assertions (void)
4066 need_assert_for = BITMAP_ALLOC (NULL);
4067 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4069 calculate_dominance_info (CDI_DOMINATORS);
4071 find_assert_locations ();
4072 if (!bitmap_empty_p (need_assert_for))
4074 process_assert_insertions ();
4075 update_ssa (TODO_update_ssa_no_phi);
4078 if (dump_file && (dump_flags & TDF_DETAILS))
4080 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4081 dump_function_to_file (current_function_decl, dump_file, dump_flags);
4084 free (asserts_for);
4085 BITMAP_FREE (need_assert_for);
4088 class vrp_prop : public ssa_propagation_engine
4090 public:
4091 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4092 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4094 void vrp_initialize (void);
4095 void vrp_finalize (bool);
4096 void check_all_array_refs (void);
4097 void check_array_ref (location_t, tree, bool);
4098 void check_mem_ref (location_t, tree, bool);
4099 void search_for_addr_array (tree, location_t);
4101 class vr_values vr_values;
4102 /* Temporary delegator to minimize code churn. */
4103 value_range *get_value_range (const_tree op)
4104 { return vr_values.get_value_range (op); }
4105 void set_defs_to_varying (gimple *stmt)
4106 { return vr_values.set_defs_to_varying (stmt); }
4107 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4108 tree *output_p, value_range *vr)
4109 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4110 bool update_value_range (const_tree op, value_range *vr)
4111 { return vr_values.update_value_range (op, vr); }
4112 void extract_range_basic (value_range *vr, gimple *stmt)
4113 { vr_values.extract_range_basic (vr, stmt); }
4114 void extract_range_from_phi_node (gphi *phi, value_range *vr)
4115 { vr_values.extract_range_from_phi_node (phi, vr); }
4117 /* Checks one ARRAY_REF in REF, located at LOCATION.  Ignores flexible arrays
4118 and "struct" hacks. If VRP can determine that the
4119 array subscript is a constant, check if it is outside valid
4120 range. If the array subscript is a RANGE, warn if it is
4121 non-overlapping with valid range.
4122 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.  */
4124 void
4125 vrp_prop::check_array_ref (location_t location, tree ref,
4126 bool ignore_off_by_one)
4128 const value_range *vr = NULL;
4129 tree low_sub, up_sub;
4130 tree low_bound, up_bound, up_bound_p1;
4132 if (TREE_NO_WARNING (ref))
4133 return;
4135 low_sub = up_sub = TREE_OPERAND (ref, 1);
4136 up_bound = array_ref_up_bound (ref);
4138 if (!up_bound
4139 || TREE_CODE (up_bound) != INTEGER_CST
4140 || (warn_array_bounds < 2
4141 && array_at_struct_end_p (ref)))
4143 /* Accesses to trailing arrays via pointers may access storage
4144 beyond the type's array bounds.  For such arrays, or for flexible
4145 array members, as well as for other arrays of an unknown size,
4146 replace the upper bound with a more permissive one that assumes
4147 the size of the largest object is PTRDIFF_MAX. */
4148 tree eltsize = array_ref_element_size (ref);
4150 if (TREE_CODE (eltsize) != INTEGER_CST
4151 || integer_zerop (eltsize))
4153 up_bound = NULL_TREE;
4154 up_bound_p1 = NULL_TREE;
4156 else
4158 tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4159 tree arg = TREE_OPERAND (ref, 0);
4160 poly_int64 off;
4162 if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4163 maxbound = wide_int_to_tree (sizetype,
4164 wi::sub (wi::to_wide (maxbound),
4165 off));
4166 else
4167 maxbound = fold_convert (sizetype, maxbound);
4169 up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4171 up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4172 build_int_cst (ptrdiff_type_node, 1));
4175 else
4176 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4177 build_int_cst (TREE_TYPE (up_bound), 1));
4179 low_bound = array_ref_low_bound (ref);
4181 tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4183 bool warned = false;
4185 /* Empty array. */
4186 if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4187 warned = warning_at (location, OPT_Warray_bounds,
4188 "array subscript %E is above array bounds of %qT",
4189 low_bound, artype);
4191 if (TREE_CODE (low_sub) == SSA_NAME)
4193 vr = get_value_range (low_sub);
4194 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4196 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
4197 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
4201 if (vr && vr->type == VR_ANTI_RANGE)
4203 if (up_bound
4204 && TREE_CODE (up_sub) == INTEGER_CST
4205 && (ignore_off_by_one
4206 ? tree_int_cst_lt (up_bound, up_sub)
4207 : tree_int_cst_le (up_bound, up_sub))
4208 && TREE_CODE (low_sub) == INTEGER_CST
4209 && tree_int_cst_le (low_sub, low_bound))
4210 warned = warning_at (location, OPT_Warray_bounds,
4211 "array subscript [%E, %E] is outside "
4212 "array bounds of %qT",
4213 low_sub, up_sub, artype);
4215 else if (up_bound
4216 && TREE_CODE (up_sub) == INTEGER_CST
4217 && (ignore_off_by_one
4218 ? !tree_int_cst_le (up_sub, up_bound_p1)
4219 : !tree_int_cst_le (up_sub, up_bound)))
4221 if (dump_file && (dump_flags & TDF_DETAILS))
4223 fprintf (dump_file, "Array bound warning for ");
4224 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4225 fprintf (dump_file, "\n");
4227 warned = warning_at (location, OPT_Warray_bounds,
4228 "array subscript %E is above array bounds of %qT",
4229 up_sub, artype);
4231 else if (TREE_CODE (low_sub) == INTEGER_CST
4232 && tree_int_cst_lt (low_sub, low_bound))
4234 if (dump_file && (dump_flags & TDF_DETAILS))
4236 fprintf (dump_file, "Array bound warning for ");
4237 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4238 fprintf (dump_file, "\n");
4240 warned = warning_at (location, OPT_Warray_bounds,
4241 "array subscript %E is below array bounds of %qT",
4242 low_sub, artype);
4245 if (warned)
4247 ref = TREE_OPERAND (ref, 0);
4249 if (DECL_P (ref))
4250 inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
4252 TREE_NO_WARNING (ref) = 1;
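/* A minimal sketch (not part of GCC) of the permissive bound computed
   above for flexible and trailing arrays: assume the object is at most
   PTRDIFF_MAX bytes, so one past the largest valid index is
   PTRDIFF_MAX / eltsize.  The long parameters stand in for the
   wide-int values used above.  */

static long ATTRIBUTE_UNUSED
sketch_permissive_up_bound_p1 (long ptrdiff_max, long eltsize)
{
  /* E.g. for 4-byte elements the bound is PTRDIFF_MAX / 4; the code
     above additionally subtracts 1 to obtain UP_BOUND itself.  */
  return ptrdiff_max / eltsize;
}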
4256 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
4257 references to string constants. If VRP can determine that the array
4258 subscript is a constant, check if it is outside valid range.
4259 If the array subscript is a RANGE, warn if it is non-overlapping
4260 with valid range.
4261 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
4262 (used to allow one-past-the-end indices for code that takes
4263 the address of the just-past-the-end element of an array). */
4265 void
4266 vrp_prop::check_mem_ref (location_t location, tree ref,
4267 bool ignore_off_by_one)
4269 if (TREE_NO_WARNING (ref))
4270 return;
4272 tree arg = TREE_OPERAND (ref, 0);
4273 /* The constant and variable offset of the reference. */
4274 tree cstoff = TREE_OPERAND (ref, 1);
4275 tree varoff = NULL_TREE;
4277 const offset_int maxobjsize = tree_to_shwi (max_object_size ());
4279 /* The array or string constant bounds in bytes. Initially set
4280 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
4281 determined. */
4282 offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
4284 /* The minimum and maximum intermediate offset. For a reference
4285 to be valid, not only must the final offset/subscript be
4286 in bounds but all intermediate offsets should be as well.
4287 GCC may be able to deal gracefully with such out-of-bounds
4288 offsets, so the checking is only enabled at -Warray-bounds=2
4289 where it may help detect bugs in uses of the intermediate
4290 offsets that could otherwise not be detectable. */
4291 offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
4292 offset_int extrema[2] = { 0, wi::abs (ioff) };
4294 /* The range of the byte offset into the reference. */
4295 offset_int offrange[2] = { 0, 0 };
4297 const value_range *vr = NULL;
4299 /* Determine the offsets and increment OFFRANGE for the bounds of each.
4300 The loop computes the range of the final offset for expressions
4301 such as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs
4302 in some range. */
4303 while (TREE_CODE (arg) == SSA_NAME)
4305 gimple *def = SSA_NAME_DEF_STMT (arg);
4306 if (!is_gimple_assign (def))
4307 break;
4309 tree_code code = gimple_assign_rhs_code (def);
4310 if (code == POINTER_PLUS_EXPR)
4312 arg = gimple_assign_rhs1 (def);
4313 varoff = gimple_assign_rhs2 (def);
4315 else if (code == ASSERT_EXPR)
4317 arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
4318 continue;
4320 else
4321 return;
4323 /* VAROFF should always be an SSA_NAME here (and not even
4324 INTEGER_CST) but there's no point in taking chances. */
4325 if (TREE_CODE (varoff) != SSA_NAME)
4326 break;
4328 vr = get_value_range (varoff);
4329 if (!vr || vr->type == VR_UNDEFINED || !vr->min || !vr->max)
4330 break;
4332 if (TREE_CODE (vr->min) != INTEGER_CST
4333 || TREE_CODE (vr->max) != INTEGER_CST)
4334 break;
4336 if (vr->type == VR_RANGE)
4338 if (tree_int_cst_lt (vr->min, vr->max))
4340 offset_int min
4341 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min));
4342 offset_int max
4343 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max));
4344 if (min < max)
4346 offrange[0] += min;
4347 offrange[1] += max;
4349 else
4351 offrange[0] += max;
4352 offrange[1] += min;
4355 else
4357 /* Conservatively add [-MAXOBJSIZE -1, MAXOBJSIZE]
4358 to OFFRANGE. */
4359 offrange[0] += arrbounds[0];
4360 offrange[1] += arrbounds[1];
4363 else
4365 /* For an anti-range, analogously to the above, conservatively
4366 add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE. */
4367 offrange[0] += arrbounds[0];
4368 offrange[1] += arrbounds[1];
4371 /* Keep track of the minimum and maximum offset. */
4372 if (offrange[1] < 0 && offrange[1] < extrema[0])
4373 extrema[0] = offrange[1];
4374 if (offrange[0] > 0 && offrange[0] > extrema[1])
4375 extrema[1] = offrange[0];
4377 if (offrange[0] < arrbounds[0])
4378 offrange[0] = arrbounds[0];
4380 if (offrange[1] > arrbounds[1])
4381 offrange[1] = arrbounds[1];
4384 if (TREE_CODE (arg) == ADDR_EXPR)
4386 arg = TREE_OPERAND (arg, 0);
4387 if (TREE_CODE (arg) != STRING_CST
4388 && TREE_CODE (arg) != VAR_DECL)
4389 return;
4391 else
4392 return;
4394 /* The type of the object being referred to. It can be an array,
4395 string literal, or a non-array type when the MEM_REF represents
4396 a reference/subscript via a pointer to an object that is not
4397 an element of an array. References to members of structs and
4398 unions are excluded because MEM_REF doesn't make it possible
4399 to identify the member where the reference originated.
4400 Incomplete types are excluded as well because their size is
4401 not known. */
4402 tree reftype = TREE_TYPE (arg);
4403 if (POINTER_TYPE_P (reftype)
4404 || !COMPLETE_TYPE_P (reftype)
4405 || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST
4406 || RECORD_OR_UNION_TYPE_P (reftype))
4407 return;
4409 offset_int eltsize;
4410 if (TREE_CODE (reftype) == ARRAY_TYPE)
4412 eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
4414 if (tree dom = TYPE_DOMAIN (reftype))
4416 tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
4417 if (array_at_struct_end_p (arg)
4418 || !bnds[0] || !bnds[1])
4420 arrbounds[0] = 0;
4421 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4423 else
4425 arrbounds[0] = wi::to_offset (bnds[0]) * eltsize;
4426 arrbounds[1] = (wi::to_offset (bnds[1]) + 1) * eltsize;
4429 else
4431 arrbounds[0] = 0;
4432 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4435 if (TREE_CODE (ref) == MEM_REF)
4437 /* For MEM_REF determine a tighter bound of the non-array
4438 element type. */
4439 tree eltype = TREE_TYPE (reftype);
4440 while (TREE_CODE (eltype) == ARRAY_TYPE)
4441 eltype = TREE_TYPE (eltype);
4442 eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
4445 else
4447 eltsize = 1;
4448 arrbounds[0] = 0;
4449 arrbounds[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype));
4452 offrange[0] += ioff;
4453 offrange[1] += ioff;
4455 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
4456 is set (when taking the address of the one-past-last element
4457 of an array) but always use the stricter bound in diagnostics. */
4458 offset_int ubound = arrbounds[1];
4459 if (ignore_off_by_one)
4460 ubound += 1;
4462 if (offrange[0] >= ubound || offrange[1] < arrbounds[0])
4464 /* Treat a reference to a non-array object as one to an array
4465 of a single element. */
4466 if (TREE_CODE (reftype) != ARRAY_TYPE)
4467 reftype = build_array_type_nelts (reftype, 1);
4469 if (TREE_CODE (ref) == MEM_REF)
4471 /* Extract the element type out of MEM_REF and use its size
4472 to compute the index to print in the diagnostic; arrays
4473 in MEM_REF don't mean anything. */
4474 tree type = TREE_TYPE (ref);
4475 while (TREE_CODE (type) == ARRAY_TYPE)
4476 type = TREE_TYPE (type);
4477 tree size = TYPE_SIZE_UNIT (type);
4478 offrange[0] = offrange[0] / wi::to_offset (size);
4479 offrange[1] = offrange[1] / wi::to_offset (size);
4481 else
4483 /* For anything other than MEM_REF, compute the index to
4484 print in the diagnostic as the offset over element size. */
4485 offrange[0] = offrange[0] / eltsize;
4486 offrange[1] = offrange[1] / eltsize;
4489 bool warned;
4490 if (offrange[0] == offrange[1])
4491 warned = warning_at (location, OPT_Warray_bounds,
4492 "array subscript %wi is outside array bounds "
4493 "of %qT",
4494 offrange[0].to_shwi (), reftype);
4495 else
4496 warned = warning_at (location, OPT_Warray_bounds,
4497 "array subscript [%wi, %wi] is outside "
4498 "array bounds of %qT",
4499 offrange[0].to_shwi (),
4500 offrange[1].to_shwi (), reftype);
4501 if (warned && DECL_P (arg))
4502 inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
4504 TREE_NO_WARNING (ref) = 1;
4505 return;
4508 if (warn_array_bounds < 2)
4509 return;
4511 /* At level 2 check also intermediate offsets. */
4512 int i = 0;
4513 if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
4515 HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
4517 warning_at (location, OPT_Warray_bounds,
4518 "intermediate array offset %wi is outside array bounds "
4519 "of %qT",
4520 tmpidx, reftype);
4521 TREE_NO_WARNING (ref) = 1;
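/* A minimal sketch (not part of GCC) of the interval arithmetic used
   above to accumulate the variable offsets into OFFRANGE: each
   POINTER_PLUS_EXPR adds its operand's value range endpoint-wise,
   swapping the endpoints if the conversion to ptrdiff reversed their
   order.  Plain longs model the offset_int values.  */

static void ATTRIBUTE_UNUSED
sketch_accumulate_offset_range (long offrange[2], long vmin, long vmax)
{
  if (vmin <= vmax)
    {
      offrange[0] += vmin;
      offrange[1] += vmax;
    }
  else
    {
      offrange[0] += vmax;
      offrange[1] += vmin;
    }
}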
4525 /* Search the expression T, located at LOCATION, for the computed
4526 address of an ARRAY_REF or MEM_REF, and check each one found.  */
4528 void
4529 vrp_prop::search_for_addr_array (tree t, location_t location)
4531 /* Check each ARRAY_REF and MEM_REF in the reference chain. */
4534 if (TREE_CODE (t) == ARRAY_REF)
4535 check_array_ref (location, t, true /*ignore_off_by_one*/);
4536 else if (TREE_CODE (t) == MEM_REF)
4537 check_mem_ref (location, t, true /*ignore_off_by_one*/);
4539 t = TREE_OPERAND (t, 0);
4541 while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
4543 if (TREE_CODE (t) != MEM_REF
4544 || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
4545 || TREE_NO_WARNING (t))
4546 return;
4548 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4549 tree low_bound, up_bound, el_sz;
4550 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
4551 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
4552 || !TYPE_DOMAIN (TREE_TYPE (tem)))
4553 return;
4555 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4556 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4557 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
4558 if (!low_bound
4559 || TREE_CODE (low_bound) != INTEGER_CST
4560 || !up_bound
4561 || TREE_CODE (up_bound) != INTEGER_CST
4562 || !el_sz
4563 || TREE_CODE (el_sz) != INTEGER_CST)
4564 return;
4566 offset_int idx;
4567 if (!mem_ref_offset (t).is_constant (&idx))
4568 return;
4570 bool warned = false;
4571 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4572 if (idx < 0)
4574 if (dump_file && (dump_flags & TDF_DETAILS))
4576 fprintf (dump_file, "Array bound warning for ");
4577 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4578 fprintf (dump_file, "\n");
4580 warned = warning_at (location, OPT_Warray_bounds,
4581 "array subscript %wi is below "
4582 "array bounds of %qT",
4583 idx.to_shwi (), TREE_TYPE (tem));
4585 else if (idx > (wi::to_offset (up_bound)
4586 - wi::to_offset (low_bound) + 1))
4588 if (dump_file && (dump_flags & TDF_DETAILS))
4590 fprintf (dump_file, "Array bound warning for ");
4591 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4592 fprintf (dump_file, "\n");
4594 warned = warning_at (location, OPT_Warray_bounds,
4595 "array subscript %wu is above "
4596 "array bounds of %qT",
4597 idx.to_uhwi (), TREE_TYPE (tem));
4600 if (warned)
4602 if (DECL_P (t))
4603 inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
4605 TREE_NO_WARNING (t) = 1;
4609 /* walk_tree() callback that checks whether *TP is an ARRAY_REF,
4610 a MEM_REF or an ADDR_EXPR (inside which a subscript one past
4611 the valid range is allowed) and calls check_array_ref,
4612 check_mem_ref or search_for_addr_array accordingly.  The
4613 location is passed in DATA.  */
4615 static tree
4616 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4618 tree t = *tp;
4619 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4620 location_t location;
4622 if (EXPR_HAS_LOCATION (t))
4623 location = EXPR_LOCATION (t);
4624 else
4625 location = gimple_location (wi->stmt);
4627 *walk_subtree = TRUE;
4629 vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4630 if (TREE_CODE (t) == ARRAY_REF)
4631 vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
4632 else if (TREE_CODE (t) == MEM_REF)
4633 vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4634 else if (TREE_CODE (t) == ADDR_EXPR)
4636 vrp_prop->search_for_addr_array (t, location);
4637 *walk_subtree = FALSE;
4640 return NULL_TREE;
4643 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4644 to walk over all statements of all reachable BBs and call
4645 check_array_bounds on them. */
4647 class check_array_bounds_dom_walker : public dom_walker
4649 public:
4650 check_array_bounds_dom_walker (vrp_prop *prop)
4651 : dom_walker (CDI_DOMINATORS,
4652 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4653 flags, so that we can merge in information on
4654 non-executable edges from vrp_folder.  */
4655 REACHABLE_BLOCKS_PRESERVING_FLAGS),
4656 m_prop (prop) {}
4657 ~check_array_bounds_dom_walker () {}
4659 edge before_dom_children (basic_block) FINAL OVERRIDE;
4661 private:
4662 vrp_prop *m_prop;
4665 /* Implementation of dom_walker::before_dom_children.
4667 Walk over all statements of BB and call check_array_bounds on them,
4668 and determine if there's a unique successor edge. */
4670 edge
4671 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4673 gimple_stmt_iterator si;
4674 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4676 gimple *stmt = gsi_stmt (si);
4677 struct walk_stmt_info wi;
4678 if (!gimple_has_location (stmt)
4679 || is_gimple_debug (stmt))
4680 continue;
4682 memset (&wi, 0, sizeof (wi));
4684 wi.info = m_prop;
4686 walk_gimple_op (stmt, check_array_bounds, &wi);
4689 /* Determine if there's a unique successor edge, and if so, return
4690 that back to dom_walker, ensuring that we don't visit blocks that
4691 became unreachable during the VRP propagation
4692 (PR tree-optimization/83312). */
4693 return find_taken_edge (bb, NULL_TREE);
4696 /* Walk over all statements of all reachable BBs and call check_array_bounds
4697 on them. */
4699 void
4700 vrp_prop::check_all_array_refs ()
4702 check_array_bounds_dom_walker w (this);
4703 w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4706 /* Return true if all imm uses of VAR are either in STMT, or
4707 feed (optionally through a chain of single imm uses) GIMPLE_COND
4708 in basic block COND_BB. */
4710 static bool
4711 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
4713 use_operand_p use_p, use2_p;
4714 imm_use_iterator iter;
4716 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
4717 if (USE_STMT (use_p) != stmt)
4719 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
4720 if (is_gimple_debug (use_stmt))
4721 continue;
4722 while (is_gimple_assign (use_stmt)
4723 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
4724 && single_imm_use (gimple_assign_lhs (use_stmt),
4725 &use2_p, &use_stmt2))
4726 use_stmt = use_stmt2;
4727 if (gimple_code (use_stmt) != GIMPLE_COND
4728 || gimple_bb (use_stmt) != cond_bb)
4729 return false;
4731 return true;
4734 /* Handle
4735 _4 = x_3 & 31;
4736 if (_4 != 0)
4737 goto <bb 6>;
4738 else
4739 goto <bb 7>;
4740 <bb 6>:
4741 __builtin_unreachable ();
4742 <bb 7>:
4743 x_5 = ASSERT_EXPR <x_3, ...>;
4744 If x_3 has no other immediate uses (checked by caller) and
4745 VAR is the x_3 var from the ASSERT_EXPR, we can clear the low
4746 5 bits from the non-zero bitmask.  */
4748 void
4749 maybe_set_nonzero_bits (edge e, tree var)
4751 basic_block cond_bb = e->src;
4752 gimple *stmt = last_stmt (cond_bb);
4753 tree cst;
4755 if (stmt == NULL
4756 || gimple_code (stmt) != GIMPLE_COND
4757 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
4758 ? EQ_EXPR : NE_EXPR)
4759 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
4760 || !integer_zerop (gimple_cond_rhs (stmt)))
4761 return;
4763 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
4764 if (!is_gimple_assign (stmt)
4765 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
4766 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
4767 return;
4768 if (gimple_assign_rhs1 (stmt) != var)
4770 gimple *stmt2;
4772 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
4773 return;
4774 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
4775 if (!gimple_assign_cast_p (stmt2)
4776 || gimple_assign_rhs1 (stmt2) != var
4777 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
4778 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
4779 != TYPE_PRECISION (TREE_TYPE (var))))
4780 return;
4782 cst = gimple_assign_rhs2 (stmt);
4783 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
4784 wi::to_wide (cst)));
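/* A minimal sketch (not part of GCC) of the bit clearing above: on the
   edge where (x & cst) == 0 is known to hold, no bit of CST can be set
   in x, so those bits are removed from the non-zero-bits mask, e.g.
   0xff & ~31 == 0xe0.  */

static unsigned ATTRIBUTE_UNUSED
sketch_clear_known_zero_bits (unsigned nonzero_bits, unsigned cst)
{
  return nonzero_bits & ~cst;	/* the wi::bit_and_not analogue.  */
}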
4787 /* Convert range assertion expressions into the implied copies and
4788 copy propagate away the copies. Doing the trivial copy propagation
4789 here avoids the need to run the full copy propagation pass after
4790 VRP.
4792 FIXME, this will eventually lead to copy propagation removing the
4793 names that had useful range information attached to them. For
4794 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
4795 then N_i will have the range [3, +INF].
4797 However, by converting the assertion into the implied copy
4798 operation N_i = N_j, we will then copy-propagate N_j into the uses
4799 of N_i and lose the range information. We may want to hold on to
4800 ASSERT_EXPRs a little while longer as the ranges could be used in
4801 things like jump threading.
4803 The problem with keeping ASSERT_EXPRs around is that passes after
4804 VRP need to handle them appropriately.
4806 Another approach would be to make the range information a first
4807 class property of the SSA_NAME so that it can be queried from
4808 any pass. This is made somewhat more complex by the need for
4809 multiple ranges to be associated with one SSA_NAME. */
4811 static void
4812 remove_range_assertions (void)
4814 basic_block bb;
4815 gimple_stmt_iterator si;
4816 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
4817 a basic block preceded by GIMPLE_COND branching to it and
4818 __builtin_trap, -1 if not yet checked, 0 otherwise. */
4819 int is_unreachable;
4821 /* Note that the BSI iterator bump happens at the bottom of the
4822 loop and no bump is necessary if we're removing the statement
4823 referenced by the current BSI. */
4824 FOR_EACH_BB_FN (bb, cfun)
4825 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
4827 gimple *stmt = gsi_stmt (si);
4829 if (is_gimple_assign (stmt)
4830 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
4832 tree lhs = gimple_assign_lhs (stmt);
4833 tree rhs = gimple_assign_rhs1 (stmt);
4834 tree var;
4836 var = ASSERT_EXPR_VAR (rhs);
4838 if (TREE_CODE (var) == SSA_NAME
4839 && !POINTER_TYPE_P (TREE_TYPE (lhs))
4840 && SSA_NAME_RANGE_INFO (lhs))
4842 if (is_unreachable == -1)
4844 is_unreachable = 0;
4845 if (single_pred_p (bb)
4846 && assert_unreachable_fallthru_edge_p
4847 (single_pred_edge (bb)))
4848 is_unreachable = 1;
4850 /* Handle
4851 if (x_7 >= 10 && x_7 < 20)
4852 __builtin_unreachable ();
4853 x_8 = ASSERT_EXPR <x_7, ...>;
4854 if the only uses of x_7 are in the ASSERT_EXPR and
4855 in the condition. In that case, we can copy the
4856 range info from x_8 computed in this pass also
4857 for x_7. */
4858 if (is_unreachable
4859 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
4860 single_pred (bb)))
4862 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
4863 SSA_NAME_RANGE_INFO (lhs)->get_min (),
4864 SSA_NAME_RANGE_INFO (lhs)->get_max ());
4865 maybe_set_nonzero_bits (single_pred_edge (bb), var);
4869 /* Propagate the RHS into every use of the LHS. For SSA names
4870 also propagate abnormals as it merely restores the original
4871 IL in this case (a replace_uses_by would assert).  */
4872 if (TREE_CODE (var) == SSA_NAME)
4874 imm_use_iterator iter;
4875 use_operand_p use_p;
4876 gimple *use_stmt;
4877 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4878 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4879 SET_USE (use_p, var);
4881 else
4882 replace_uses_by (lhs, var);
4884 /* And finally, remove the copy, it is not needed. */
4885 gsi_remove (&si, true);
4886 release_defs (stmt);
4888 else
4890 if (!is_gimple_debug (gsi_stmt (si)))
4891 is_unreachable = 0;
4892 gsi_next (&si);
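/* A minimal sketch (not part of GCC) of what removing an ASSERT_EXPR
   amounts to: every use of the assertion's LHS is rewritten back to the
   asserted variable, as the SET_USE loop above does.  Uses are modeled
   as an array of SSA version numbers; all names are hypothetical.  */

static void ATTRIBUTE_UNUSED
sketch_propagate_assert_copy (int *use_slots, int nuses, int lhs, int var)
{
  for (int i = 0; i < nuses; ++i)
    if (use_slots[i] == lhs)
      use_slots[i] = var;
}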
4897 /* Return true if STMT is interesting for VRP. */
4899 bool
4900 stmt_interesting_for_vrp (gimple *stmt)
4902 if (gimple_code (stmt) == GIMPLE_PHI)
4904 tree res = gimple_phi_result (stmt);
4905 return (!virtual_operand_p (res)
4906 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
4907 || POINTER_TYPE_P (TREE_TYPE (res))));
4909 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
4911 tree lhs = gimple_get_lhs (stmt);
4913 /* In general, assignments with virtual operands are not useful
4914 for deriving ranges, with the obvious exception of calls to
4915 builtin functions. */
4916 if (lhs && TREE_CODE (lhs) == SSA_NAME
4917 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4918 || POINTER_TYPE_P (TREE_TYPE (lhs)))
4919 && (is_gimple_call (stmt)
4920 || !gimple_vuse (stmt)))
4921 return true;
4922 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
4923 switch (gimple_call_internal_fn (stmt))
4925 case IFN_ADD_OVERFLOW:
4926 case IFN_SUB_OVERFLOW:
4927 case IFN_MUL_OVERFLOW:
4928 case IFN_ATOMIC_COMPARE_EXCHANGE:
4929 /* These internal calls return _Complex integer type,
4930 but are interesting to VRP nevertheless. */
4931 if (lhs && TREE_CODE (lhs) == SSA_NAME)
4932 return true;
4933 break;
4934 default:
4935 break;
4938 else if (gimple_code (stmt) == GIMPLE_COND
4939 || gimple_code (stmt) == GIMPLE_SWITCH)
4940 return true;
4942 return false;
4945 /* Initialization required by ssa_propagate engine. */
4947 void
4948 vrp_prop::vrp_initialize ()
4950 basic_block bb;
4952 FOR_EACH_BB_FN (bb, cfun)
4954 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
4955 gsi_next (&si))
4957 gphi *phi = si.phi ();
4958 if (!stmt_interesting_for_vrp (phi))
4960 tree lhs = PHI_RESULT (phi);
4961 set_value_range_to_varying (get_value_range (lhs));
4962 prop_set_simulate_again (phi, false);
4964 else
4965 prop_set_simulate_again (phi, true);
4968 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
4969 gsi_next (&si))
4971 gimple *stmt = gsi_stmt (si);
4973 /* If the statement is a control insn, then we must
4974 simulate it at least once.  Failing to do so means
4975 that its outgoing edges will never get added.  */
4976 if (stmt_ends_bb_p (stmt))
4977 prop_set_simulate_again (stmt, true);
4978 else if (!stmt_interesting_for_vrp (stmt))
4980 set_defs_to_varying (stmt);
4981 prop_set_simulate_again (stmt, false);
4983 else
4984 prop_set_simulate_again (stmt, true);
4989 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
4990 that includes the value VAL. The search is restricted to the range
4991 [START_IDX, n - 1] where n is the size of VEC.
4993 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
4994 returned.
4996 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
4997 it is placed in IDX and false is returned.
4999 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5000 returned. */
5002 bool
5003 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5005 size_t n = gimple_switch_num_labels (stmt);
5006 size_t low, high;
5008 /* Find case label for minimum of the value range or the next one.
5009 At each iteration we are searching in [low, high - 1]. */
5011 for (low = start_idx, high = n; high != low; )
5013 tree t;
5014 int cmp;
5015 /* Note that i != high, so we never ask for n. */
5016 size_t i = (high + low) / 2;
5017 t = gimple_switch_label (stmt, i);
5019 /* Cache the result of comparing CASE_LOW and val. */
5020 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5022 if (cmp == 0)
5024 /* Ranges cannot be empty. */
5025 *idx = i;
5026 return true;
5028 else if (cmp > 0)
5029 high = i;
5030 else
5032 low = i + 1;
5033 if (CASE_HIGH (t) != NULL
5034 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5036 *idx = i;
5037 return true;
5042 *idx = high;
5043 return false;
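/* A minimal sketch (not part of GCC) of the binary search above, over
   sorted, non-overlapping case ranges modeled as parallel LO/HI arrays.
   Returns true and sets *IDX when VAL is covered by range IDX;
   otherwise *IDX is the first range above VAL (or N) and the result is
   false.  */

static bool ATTRIBUTE_UNUSED
sketch_find_case (const int *lo, const int *hi, size_t n, int val,
		  size_t *idx)
{
  size_t low = 0, high = n;
  while (high != low)
    {
      /* Note i != high, so we never read index n.  */
      size_t i = (high + low) / 2;
      if (lo[i] == val)
	{
	  *idx = i;
	  return true;
	}
      else if (lo[i] > val)
	high = i;
      else
	{
	  low = i + 1;
	  /* VAL is above this range's low bound; it is covered
	     if it is also at or below the high bound.  */
	  if (hi[i] >= val)
	    {
	      *idx = i;
	      return true;
	    }
	}
    }
  *idx = high;
  return false;
}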
5046 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5047 for values between MIN and MAX. The first index is placed in MIN_IDX. The
5048 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5049 then MAX_IDX < MIN_IDX.
5050 Returns true if the default label is not needed. */
5052 bool
5053 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5054 size_t *max_idx)
5056 size_t i, j;
5057 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5058 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5060 if (i == j
5061 && min_take_default
5062 && max_take_default)
5064 /* Only the default case label reached.
5065 Return an empty range. */
5066 *min_idx = 1;
5067 *max_idx = 0;
5068 return false;
5070 else
5072 bool take_default = min_take_default || max_take_default;
5073 tree low, high;
5074 size_t k;
5076 if (max_take_default)
5077 j--;
5079 /* If the case label range is continuous, we do not need
5080 the default case label. Verify that. */
5081 high = CASE_LOW (gimple_switch_label (stmt, i));
5082 if (CASE_HIGH (gimple_switch_label (stmt, i)))
5083 high = CASE_HIGH (gimple_switch_label (stmt, i));
5084 for (k = i + 1; k <= j; ++k)
5086 low = CASE_LOW (gimple_switch_label (stmt, k));
5087 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5089 take_default = true;
5090 break;
5092 high = low;
5093 if (CASE_HIGH (gimple_switch_label (stmt, k)))
5094 high = CASE_HIGH (gimple_switch_label (stmt, k));
5097 *min_idx = i;
5098 *max_idx = j;
5099 return !take_default;
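/* A minimal sketch (not part of GCC) of the contiguity test above: the
   default label is redundant only when the selected labels form an
   unbroken run, i.e. each low bound is exactly one above the previous
   label's high bound.  LO/HI model the case bounds.  */

static bool ATTRIBUTE_UNUSED
sketch_labels_contiguous (const int *lo, const int *hi, size_t i, size_t j)
{
  for (size_t k = i + 1; k <= j; ++k)
    if (lo[k] != hi[k - 1] + 1)
      return false;
  return true;
}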
5103 /* Evaluate statement STMT. If the statement produces a useful range,
5104 return SSA_PROP_INTERESTING and record the SSA name with the
5105 interesting range into *OUTPUT_P.
5107 If STMT is a conditional branch and we can determine its truth
5108 value, the taken edge is recorded in *TAKEN_EDGE_P.
5110 If STMT produces a varying value, return SSA_PROP_VARYING. */
5112 enum ssa_prop_result
5113 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5115 value_range vr = VR_INITIALIZER;
5116 tree lhs = gimple_get_lhs (stmt);
5117 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5119 if (*output_p)
5121 if (update_value_range (*output_p, &vr))
5123 if (dump_file && (dump_flags & TDF_DETAILS))
5125 fprintf (dump_file, "Found new range for ");
5126 print_generic_expr (dump_file, *output_p);
5127 fprintf (dump_file, ": ");
5128 dump_value_range (dump_file, &vr);
5129 fprintf (dump_file, "\n");
5132 if (vr.type == VR_VARYING)
5133 return SSA_PROP_VARYING;
5135 return SSA_PROP_INTERESTING;
5137 return SSA_PROP_NOT_INTERESTING;
5140 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5141 switch (gimple_call_internal_fn (stmt))
5143 case IFN_ADD_OVERFLOW:
5144 case IFN_SUB_OVERFLOW:
5145 case IFN_MUL_OVERFLOW:
5146 case IFN_ATOMIC_COMPARE_EXCHANGE:
5147 /* These internal calls return _Complex integer type,
5148 which VRP does not track, but the immediate uses
5149 thereof might be interesting. */
5150 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5152 imm_use_iterator iter;
5153 use_operand_p use_p;
5154 enum ssa_prop_result res = SSA_PROP_VARYING;
5156 set_value_range_to_varying (get_value_range (lhs));
5158 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5160 gimple *use_stmt = USE_STMT (use_p);
5161 if (!is_gimple_assign (use_stmt))
5162 continue;
5163 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5164 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5165 continue;
5166 tree rhs1 = gimple_assign_rhs1 (use_stmt);
5167 tree use_lhs = gimple_assign_lhs (use_stmt);
5168 if (TREE_CODE (rhs1) != rhs_code
5169 || TREE_OPERAND (rhs1, 0) != lhs
5170 || TREE_CODE (use_lhs) != SSA_NAME
5171 || !stmt_interesting_for_vrp (use_stmt)
5172 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5173 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5174 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5175 continue;
5177 /* If there is a change in the value range for any of the
5178 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5179 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
5180 or IMAGPART_EXPR immediate uses, but none of them have
5181 a change in their value ranges, return
5182 SSA_PROP_NOT_INTERESTING. If there are no
5183 {REAL,IMAG}PART_EXPR uses at all,
5184 return SSA_PROP_VARYING. */
5185 value_range new_vr = VR_INITIALIZER;
5186 extract_range_basic (&new_vr, use_stmt);
5187 const value_range *old_vr = get_value_range (use_lhs);
5188 if (old_vr->type != new_vr.type
5189 || !vrp_operand_equal_p (old_vr->min, new_vr.min)
5190 || !vrp_operand_equal_p (old_vr->max, new_vr.max)
5191 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
5192 res = SSA_PROP_INTERESTING;
5193 else
5194 res = SSA_PROP_NOT_INTERESTING;
5195 BITMAP_FREE (new_vr.equiv);
5196 if (res == SSA_PROP_INTERESTING)
5198 *output_p = lhs;
5199 return res;
5203 return res;
5205 break;
5206 default:
5207 break;
5210 /* All other statements produce nothing of interest for VRP, so mark
5211 their outputs varying and prevent further simulation. */
5212 set_defs_to_varying (stmt);
5214 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5217 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5218 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5219 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5220 possible such range. The resulting range is not canonicalized. */
5222 static void
5223 union_ranges (enum value_range_type *vr0type,
5224 tree *vr0min, tree *vr0max,
5225 enum value_range_type vr1type,
5226 tree vr1min, tree vr1max)
5228 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5229 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5231 /* [] is vr0, () is vr1 in the following classification comments. */
5232 if (mineq && maxeq)
5234 /* [( )] */
5235 if (*vr0type == vr1type)
5236 /* Nothing to do for equal ranges. */
5238 else if ((*vr0type == VR_RANGE
5239 && vr1type == VR_ANTI_RANGE)
5240 || (*vr0type == VR_ANTI_RANGE
5241 && vr1type == VR_RANGE))
5243 /* For anti-range with range union the result is varying. */
5244 goto give_up;
5246 else
5247 gcc_unreachable ();
5249 else if (operand_less_p (*vr0max, vr1min) == 1
5250 || operand_less_p (vr1max, *vr0min) == 1)
5252 /* [ ] ( ) or ( ) [ ]
5253 If the ranges have an empty intersection, the result of the
5254 union operation is the anti-range, or, if both are anti-ranges,
5255 it covers everything.  */
5256 if (*vr0type == VR_ANTI_RANGE
5257 && vr1type == VR_ANTI_RANGE)
5258 goto give_up;
5259 else if (*vr0type == VR_ANTI_RANGE
5260 && vr1type == VR_RANGE)
5262 else if (*vr0type == VR_RANGE
5263 && vr1type == VR_ANTI_RANGE)
5265 *vr0type = vr1type;
5266 *vr0min = vr1min;
5267 *vr0max = vr1max;
5269 else if (*vr0type == VR_RANGE
5270 && vr1type == VR_RANGE)
5272 /* The result is the convex hull of both ranges. */
5273 if (operand_less_p (*vr0max, vr1min) == 1)
5275 /* If the result can be an anti-range, create one. */
5276 if (TREE_CODE (*vr0max) == INTEGER_CST
5277 && TREE_CODE (vr1min) == INTEGER_CST
5278 && vrp_val_is_min (*vr0min)
5279 && vrp_val_is_max (vr1max))
5281 tree min = int_const_binop (PLUS_EXPR,
5282 *vr0max,
5283 build_int_cst (TREE_TYPE (*vr0max), 1));
5284 tree max = int_const_binop (MINUS_EXPR,
5285 vr1min,
5286 build_int_cst (TREE_TYPE (vr1min), 1));
5287 if (!operand_less_p (max, min))
5289 *vr0type = VR_ANTI_RANGE;
5290 *vr0min = min;
5291 *vr0max = max;
5293 else
5294 *vr0max = vr1max;
5296 else
5297 *vr0max = vr1max;
5299 else
5301 /* If the result can be an anti-range, create one. */
5302 if (TREE_CODE (vr1max) == INTEGER_CST
5303 && TREE_CODE (*vr0min) == INTEGER_CST
5304 && vrp_val_is_min (vr1min)
5305 && vrp_val_is_max (*vr0max))
5307 tree min = int_const_binop (PLUS_EXPR,
5308 vr1max,
5309 build_int_cst (TREE_TYPE (vr1max), 1));
5310 tree max = int_const_binop (MINUS_EXPR,
5311 *vr0min,
5312 build_int_cst (TREE_TYPE (*vr0min), 1));
5313 if (!operand_less_p (max, min))
5315 *vr0type = VR_ANTI_RANGE;
5316 *vr0min = min;
5317 *vr0max = max;
5319 else
5320 *vr0min = vr1min;
5322 else
5323 *vr0min = vr1min;
5326 else
5327 gcc_unreachable ();
5329 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5330 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5332 /* [ ( ) ] or [( ) ] or [ ( )] */
5333 if (*vr0type == VR_RANGE
5334 && vr1type == VR_RANGE)
5336 else if (*vr0type == VR_ANTI_RANGE
5337 && vr1type == VR_ANTI_RANGE)
5339 *vr0type = vr1type;
5340 *vr0min = vr1min;
5341 *vr0max = vr1max;
5343 else if (*vr0type == VR_ANTI_RANGE
5344 && vr1type == VR_RANGE)
5346 /* Arbitrarily choose the right or left gap. */
5347 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5348 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5349 build_int_cst (TREE_TYPE (vr1min), 1));
5350 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5351 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5352 build_int_cst (TREE_TYPE (vr1max), 1));
5353 else
5354 goto give_up;
5356 else if (*vr0type == VR_RANGE
5357 && vr1type == VR_ANTI_RANGE)
5358 /* The result covers everything. */
5359 goto give_up;
5360 else
5361 gcc_unreachable ();
5363 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5364 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5366 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5367 if (*vr0type == VR_RANGE
5368 && vr1type == VR_RANGE)
5370 *vr0type = vr1type;
5371 *vr0min = vr1min;
5372 *vr0max = vr1max;
5374 else if (*vr0type == VR_ANTI_RANGE
5375 && vr1type == VR_ANTI_RANGE)
5377 else if (*vr0type == VR_RANGE
5378 && vr1type == VR_ANTI_RANGE)
5380 *vr0type = VR_ANTI_RANGE;
5381 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5383 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5384 build_int_cst (TREE_TYPE (*vr0min), 1));
5385 *vr0min = vr1min;
5387 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5389 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5390 build_int_cst (TREE_TYPE (*vr0max), 1));
5391 *vr0max = vr1max;
5393 else
5394 goto give_up;
5396 else if (*vr0type == VR_ANTI_RANGE
5397 && vr1type == VR_RANGE)
5398 /* The result covers everything. */
5399 goto give_up;
5400 else
5401 gcc_unreachable ();
5403 else if ((operand_less_p (vr1min, *vr0max) == 1
5404 || operand_equal_p (vr1min, *vr0max, 0))
5405 && operand_less_p (*vr0min, vr1min) == 1
5406 && operand_less_p (*vr0max, vr1max) == 1)
5408 /* [ ( ] ) or [ ]( ) */
5409 if (*vr0type == VR_RANGE
5410 && vr1type == VR_RANGE)
5411 *vr0max = vr1max;
5412 else if (*vr0type == VR_ANTI_RANGE
5413 && vr1type == VR_ANTI_RANGE)
5414 *vr0min = vr1min;
5415 else if (*vr0type == VR_ANTI_RANGE
5416 && vr1type == VR_RANGE)
5418 if (TREE_CODE (vr1min) == INTEGER_CST)
5419 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5420 build_int_cst (TREE_TYPE (vr1min), 1));
5421 else
5422 goto give_up;
5424 else if (*vr0type == VR_RANGE
5425 && vr1type == VR_ANTI_RANGE)
5427 if (TREE_CODE (*vr0max) == INTEGER_CST)
5429 *vr0type = vr1type;
5430 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5431 build_int_cst (TREE_TYPE (*vr0max), 1));
5432 *vr0max = vr1max;
5434 else
5435 goto give_up;
5437 else
5438 gcc_unreachable ();
5440 else if ((operand_less_p (*vr0min, vr1max) == 1
5441 || operand_equal_p (*vr0min, vr1max, 0))
5442 && operand_less_p (vr1min, *vr0min) == 1
5443 && operand_less_p (vr1max, *vr0max) == 1)
5445 /* ( [ ) ] or ( )[ ] */
5446 if (*vr0type == VR_RANGE
5447 && vr1type == VR_RANGE)
5448 *vr0min = vr1min;
5449 else if (*vr0type == VR_ANTI_RANGE
5450 && vr1type == VR_ANTI_RANGE)
5451 *vr0max = vr1max;
5452 else if (*vr0type == VR_ANTI_RANGE
5453 && vr1type == VR_RANGE)
5455 if (TREE_CODE (vr1max) == INTEGER_CST)
5456 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5457 build_int_cst (TREE_TYPE (vr1max), 1));
5458 else
5459 goto give_up;
5461 else if (*vr0type == VR_RANGE
5462 && vr1type == VR_ANTI_RANGE)
5464 if (TREE_CODE (*vr0min) == INTEGER_CST)
5466 *vr0type = vr1type;
5467 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5468 build_int_cst (TREE_TYPE (*vr0min), 1));
5469 *vr0min = vr1min;
5471 else
5472 goto give_up;
5474 else
5475 gcc_unreachable ();
5477 else
5478 goto give_up;
5480 return;
5482 give_up:
5483 *vr0type = VR_VARYING;
5484 *vr0min = NULL_TREE;
5485 *vr0max = NULL_TREE;
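/* A minimal sketch (not part of GCC) of the anti-range construction in
   the convex-hull case above: when the two ranges together touch both
   type extremes but leave a gap, the union is the anti-range of the
   gap.  E.g. for signed char, [-128, 3] U [7, 127] == ~[4, 6].  */

static void ATTRIBUTE_UNUSED
sketch_union_to_anti_range (int vr0max, int vr1min, int *amin, int *amax)
{
  /* Assumes vr0 starts at the type minimum and vr1 ends at the type
     maximum, the case guarded by vrp_val_is_min/vrp_val_is_max.  */
  *amin = vr0max + 1;
  *amax = vr1min - 1;
}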
5488 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5489 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5490 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5491 possible such range. The resulting range is not canonicalized. */
5493 static void
5494 intersect_ranges (enum value_range_type *vr0type,
5495 tree *vr0min, tree *vr0max,
5496 enum value_range_type vr1type,
5497 tree vr1min, tree vr1max)
5499 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5500 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5502 /* [] is vr0, () is vr1 in the following classification comments. */
5503 if (mineq && maxeq)
5505 /* [( )] */
5506 if (*vr0type == vr1type)
5507 /* Nothing to do for equal ranges. */
5509 else if ((*vr0type == VR_RANGE
5510 && vr1type == VR_ANTI_RANGE)
5511 || (*vr0type == VR_ANTI_RANGE
5512 && vr1type == VR_RANGE))
5514 /* For anti-range with range intersection the result is empty. */
5515 *vr0type = VR_UNDEFINED;
5516 *vr0min = NULL_TREE;
5517 *vr0max = NULL_TREE;
5519 else
5520 gcc_unreachable ();
5522 else if (operand_less_p (*vr0max, vr1min) == 1
5523 || operand_less_p (vr1max, *vr0min) == 1)
5525 /* [ ] ( ) or ( ) [ ]
5526 If the ranges have an empty intersection, the result of the
5527 intersect operation is the plain range when intersecting an
5528 anti-range with a range, and empty when intersecting two ranges.  */
5529 if (*vr0type == VR_RANGE
5530 && vr1type == VR_ANTI_RANGE)
5532 else if (*vr0type == VR_ANTI_RANGE
5533 && vr1type == VR_RANGE)
5535 *vr0type = vr1type;
5536 *vr0min = vr1min;
5537 *vr0max = vr1max;
5539 else if (*vr0type == VR_RANGE
5540 && vr1type == VR_RANGE)
5542 *vr0type = VR_UNDEFINED;
5543 *vr0min = NULL_TREE;
5544 *vr0max = NULL_TREE;
5546 else if (*vr0type == VR_ANTI_RANGE
5547 && vr1type == VR_ANTI_RANGE)
5549 /* If the anti-ranges are adjacent to each other merge them. */
5550 if (TREE_CODE (*vr0max) == INTEGER_CST
5551 && TREE_CODE (vr1min) == INTEGER_CST
5552 && operand_less_p (*vr0max, vr1min) == 1
5553 && integer_onep (int_const_binop (MINUS_EXPR,
5554 vr1min, *vr0max)))
5555 *vr0max = vr1max;
5556 else if (TREE_CODE (vr1max) == INTEGER_CST
5557 && TREE_CODE (*vr0min) == INTEGER_CST
5558 && operand_less_p (vr1max, *vr0min) == 1
5559 && integer_onep (int_const_binop (MINUS_EXPR,
5560 *vr0min, vr1max)))
5561 *vr0min = vr1min;
5562 /* Else arbitrarily take VR0. */
5565 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5566 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5568 /* [ ( ) ] or [( ) ] or [ ( )] */
5569 if (*vr0type == VR_RANGE
5570 && vr1type == VR_RANGE)
5572 /* If both are ranges the result is the inner one. */
5573 *vr0type = vr1type;
5574 *vr0min = vr1min;
5575 *vr0max = vr1max;
5577 else if (*vr0type == VR_RANGE
5578 && vr1type == VR_ANTI_RANGE)
5580 /* Choose the right gap if the left one is empty. */
5581 if (mineq)
5583 if (TREE_CODE (vr1max) != INTEGER_CST)
5584 *vr0min = vr1max;
5585 else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
5586 && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
5587 *vr0min
5588 = int_const_binop (MINUS_EXPR, vr1max,
5589 build_int_cst (TREE_TYPE (vr1max), -1));
5590 else
5591 *vr0min
5592 = int_const_binop (PLUS_EXPR, vr1max,
5593 build_int_cst (TREE_TYPE (vr1max), 1));
5595 /* Choose the left gap if the right one is empty. */
5596 else if (maxeq)
5598 if (TREE_CODE (vr1min) != INTEGER_CST)
5599 *vr0max = vr1min;
5600 else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
5601 && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
5602 *vr0max
5603 = int_const_binop (PLUS_EXPR, vr1min,
5604 build_int_cst (TREE_TYPE (vr1min), -1));
5605 else
5606 *vr0max
5607 = int_const_binop (MINUS_EXPR, vr1min,
5608 build_int_cst (TREE_TYPE (vr1min), 1));
5610 /* Choose the anti-range if the range is effectively varying. */
5611 else if (vrp_val_is_min (*vr0min)
5612 && vrp_val_is_max (*vr0max))
5614 *vr0type = vr1type;
5615 *vr0min = vr1min;
5616 *vr0max = vr1max;
5618 /* Else choose the range. */
5620 else if (*vr0type == VR_ANTI_RANGE
5621 && vr1type == VR_ANTI_RANGE)
5622 /* If both are anti-ranges the result is the outer one. */
5624 else if (*vr0type == VR_ANTI_RANGE
5625 && vr1type == VR_RANGE)
5627 /* The intersection is empty. */
5628 *vr0type = VR_UNDEFINED;
5629 *vr0min = NULL_TREE;
5630 *vr0max = NULL_TREE;
5632 else
5633 gcc_unreachable ();
5635 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5636 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5638 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5639 if (*vr0type == VR_RANGE
5640 && vr1type == VR_RANGE)
5641 /* Choose the inner range. */
5643 else if (*vr0type == VR_ANTI_RANGE
5644 && vr1type == VR_RANGE)
5646 /* Choose the right gap if the left is empty. */
5647 if (mineq)
5649 *vr0type = VR_RANGE;
5650 if (TREE_CODE (*vr0max) != INTEGER_CST)
5651 *vr0min = *vr0max;
5652 else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
5653 && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
5654 *vr0min
5655 = int_const_binop (MINUS_EXPR, *vr0max,
5656 build_int_cst (TREE_TYPE (*vr0max), -1));
5657 else
5658 *vr0min
5659 = int_const_binop (PLUS_EXPR, *vr0max,
5660 build_int_cst (TREE_TYPE (*vr0max), 1));
5661 *vr0max = vr1max;
5663 /* Choose the left gap if the right is empty. */
5664 else if (maxeq)
5666 *vr0type = VR_RANGE;
5667 if (TREE_CODE (*vr0min) != INTEGER_CST)
5668 *vr0max = *vr0min;
5669 else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
5670 && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
5671 *vr0max
5672 = int_const_binop (PLUS_EXPR, *vr0min,
5673 build_int_cst (TREE_TYPE (*vr0min), -1));
5674 else
5675 *vr0max
5676 = int_const_binop (MINUS_EXPR, *vr0min,
5677 build_int_cst (TREE_TYPE (*vr0min), 1));
5678 *vr0min = vr1min;
5680 /* Choose the anti-range if the range is effectively varying. */
5681 else if (vrp_val_is_min (vr1min)
5682 && vrp_val_is_max (vr1max))
5684 /* Choose the anti-range if it is ~[0,0]; that range is special
5685 enough to special-case when vr1's range is relatively wide,
5686 at least for types as big as int or bigger - this covers
5687 pointers and arguments to functions like ctz.  */
5688 else if (*vr0min == *vr0max
5689 && integer_zerop (*vr0min)
5690 && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
5691 >= TYPE_PRECISION (integer_type_node))
5692 || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
5693 && TREE_CODE (vr1max) == INTEGER_CST
5694 && TREE_CODE (vr1min) == INTEGER_CST
5695 && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
5696 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
5698 /* Else choose the range. */
5699 else
5701 *vr0type = vr1type;
5702 *vr0min = vr1min;
5703 *vr0max = vr1max;
5706 else if (*vr0type == VR_ANTI_RANGE
5707 && vr1type == VR_ANTI_RANGE)
5709 /* If both are anti-ranges the result is the outer one. */
5710 *vr0type = vr1type;
5711 *vr0min = vr1min;
5712 *vr0max = vr1max;
5714 else if (vr1type == VR_ANTI_RANGE
5715 && *vr0type == VR_RANGE)
5717 /* The intersection is empty. */
5718 *vr0type = VR_UNDEFINED;
5719 *vr0min = NULL_TREE;
5720 *vr0max = NULL_TREE;
5722 else
5723 gcc_unreachable ();
5725 else if ((operand_less_p (vr1min, *vr0max) == 1
5726 || operand_equal_p (vr1min, *vr0max, 0))
5727 && operand_less_p (*vr0min, vr1min) == 1)
5729 /* [ ( ] ) or [ ]( ) */
5730 if (*vr0type == VR_ANTI_RANGE
5731 && vr1type == VR_ANTI_RANGE)
5732 *vr0max = vr1max;
5733 else if (*vr0type == VR_RANGE
5734 && vr1type == VR_RANGE)
5735 *vr0min = vr1min;
5736 else if (*vr0type == VR_RANGE
5737 && vr1type == VR_ANTI_RANGE)
5739 if (TREE_CODE (vr1min) == INTEGER_CST)
5740 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5741 build_int_cst (TREE_TYPE (vr1min), 1));
5742 else
5743 *vr0max = vr1min;
5745 else if (*vr0type == VR_ANTI_RANGE
5746 && vr1type == VR_RANGE)
5748 *vr0type = VR_RANGE;
5749 if (TREE_CODE (*vr0max) == INTEGER_CST)
5750 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5751 build_int_cst (TREE_TYPE (*vr0max), 1));
5752 else
5753 *vr0min = *vr0max;
5754 *vr0max = vr1max;
5756 else
5757 gcc_unreachable ();
5759 else if ((operand_less_p (*vr0min, vr1max) == 1
5760 || operand_equal_p (*vr0min, vr1max, 0))
5761 && operand_less_p (vr1min, *vr0min) == 1)
5763 /* ( [ ) ] or ( )[ ] */
5764 if (*vr0type == VR_ANTI_RANGE
5765 && vr1type == VR_ANTI_RANGE)
5766 *vr0min = vr1min;
5767 else if (*vr0type == VR_RANGE
5768 && vr1type == VR_RANGE)
5769 *vr0max = vr1max;
5770 else if (*vr0type == VR_RANGE
5771 && vr1type == VR_ANTI_RANGE)
5773 if (TREE_CODE (vr1max) == INTEGER_CST)
5774 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5775 build_int_cst (TREE_TYPE (vr1max), 1));
5776 else
5777 *vr0min = vr1max;
5779 else if (*vr0type == VR_ANTI_RANGE
5780 && vr1type == VR_RANGE)
5782 *vr0type = VR_RANGE;
5783 if (TREE_CODE (*vr0min) == INTEGER_CST)
5784 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5785 build_int_cst (TREE_TYPE (*vr0min), 1));
5786 else
5787 *vr0max = *vr0min;
5788 *vr0min = vr1min;
5790 else
5791 gcc_unreachable ();
5794 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
5795 result for the intersection.  That's always a conservatively
5796 correct estimate unless VR1 is a constant singleton range
5797 in which case we choose that. */
5798 if (vr1type == VR_RANGE
5799 && is_gimple_min_invariant (vr1min)
5800 && vrp_operand_equal_p (vr1min, vr1max))
5802 *vr0type = vr1type;
5803 *vr0min = vr1min;
5804 *vr0max = vr1max;
5807 return;
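
/* Some worked examples of the classification above (the concrete
   values here are invented for illustration and are not from the
   original sources).  With [] as vr0 and () as vr1:

     [0, 10] intersect [5, 20]  -> [5, 10]  ("[ ( ] )": both ranges,
                                             so *vr0min becomes vr1min)
     [0, 10] intersect ~[0, 5]  -> [6, 10]  ("[ ( ) ]": the left gap is
                                             empty, choose the right gap)
     ~[0, 0] intersect [0, 10]  -> [1, 10]  ("( [ ] )": the anti-range
                                             excises zero from the range)
     [1, 10] intersect ~[5, 5]  -> [1, 10]  (the punched hole cannot be
                                             represented, so vr0 is kept
                                             as the conservative fallback)  */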

/* Intersect the two value-ranges *VR0 and *VR1 and store the result
   in *VR0.  This may not be the smallest possible such range.  */

static void
vrp_intersect_ranges_1 (value_range *vr0, const value_range *vr1)
{
  value_range saved;

  /* If either range is VR_VARYING the other one wins.  */
  if (vr1->type == VR_VARYING)
    return;
  if (vr0->type == VR_VARYING)
    {
      copy_value_range (vr0, vr1);
      return;
    }

  /* When either range is VR_UNDEFINED the resulting range is
     VR_UNDEFINED, too.  */
  if (vr0->type == VR_UNDEFINED)
    return;
  if (vr1->type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr0);
      return;
    }

  /* Save the original vr0 so we can return it as conservative intersection
     result when our worker turns things to varying.  */
  saved = *vr0;
  intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
                    vr1->type, vr1->min, vr1->max);
  /* Make sure to canonicalize the result though as the inversion of a
     VR_RANGE can still be a VR_RANGE.  */
  set_and_canonicalize_value_range (vr0, vr0->type,
                                    vr0->min, vr0->max, vr0->equiv);
  /* If that failed, use the saved original VR0.  */
  if (vr0->type == VR_VARYING)
    {
      *vr0 = saved;
      return;
    }

  /* If the result is VR_UNDEFINED there is no need to mess with
     the equivalencies.  */
  if (vr0->type == VR_UNDEFINED)
    return;

  /* The resulting set of equivalences for range intersection is the union of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_ior_into (vr0->equiv, vr1->equiv);
  else if (vr1->equiv && !vr0->equiv)
    {
      /* All equivalence bitmaps are allocated from the same obstack.  So
         we can use the obstack associated with VR1's bitmap to allocate
         vr0->equiv.  */
      vr0->equiv = BITMAP_ALLOC (vr1->equiv->obstack);
      bitmap_copy (vr0->equiv, vr1->equiv);
    }
}
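
/* An illustrative example (values invented for exposition): intersecting
   vr0 = [0, 20] with equivalence set { x_1 } and vr1 = [5, 30] with
   equivalence set { y_2 } yields [5, 20] with equivalence set
   { x_1, y_2 }.  Both names are known to hold the intersected value,
   so the equivalence sets are unioned here, unlike the meet operation
   below where they are intersected.  */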

void
vrp_intersect_ranges (value_range *vr0, const value_range *vr1)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Intersecting\n  ");
      dump_value_range (dump_file, vr0);
      fprintf (dump_file, "\nand\n  ");
      dump_value_range (dump_file, vr1);
      fprintf (dump_file, "\n");
    }
  vrp_intersect_ranges_1 (vr0, vr1);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "to\n  ");
      dump_value_range (dump_file, vr0);
      fprintf (dump_file, "\n");
    }
}

/* Meet operation for value ranges.  Given two value ranges VR0 and
   VR1, store in VR0 a range that contains both VR0 and VR1.  This
   may not be the smallest possible such range.  */

static void
vrp_meet_1 (value_range *vr0, const value_range *vr1)
{
  value_range saved;

  if (vr0->type == VR_UNDEFINED)
    {
      set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
      return;
    }

  if (vr1->type == VR_UNDEFINED)
    {
      /* VR0 already has the resulting range.  */
      return;
    }

  if (vr0->type == VR_VARYING)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr1->type == VR_VARYING)
    {
      set_value_range_to_varying (vr0);
      return;
    }

  saved = *vr0;
  union_ranges (&vr0->type, &vr0->min, &vr0->max,
                vr1->type, vr1->min, vr1->max);
  if (vr0->type == VR_VARYING)
    {
      /* Failed to find an efficient meet.  Before giving up and setting
         the result to VARYING, see if we can at least derive a useful
         anti-range.  */
      if (range_includes_zero_p (&saved) == 0
          && range_includes_zero_p (vr1) == 0)
        {
          set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));

          /* Since this meet operation did not result from the meeting of
             two equivalent names, VR0 cannot have any equivalences.  */
          if (vr0->equiv)
            bitmap_clear (vr0->equiv);
          return;
        }

      set_value_range_to_varying (vr0);
      return;
    }

  set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
                                    vr0->equiv);
  if (vr0->type == VR_VARYING)
    return;

  /* The resulting set of equivalences is always the intersection of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_and_into (vr0->equiv, vr1->equiv);
  else if (vr0->equiv && !vr1->equiv)
    bitmap_clear (vr0->equiv);
}

void
vrp_meet (value_range *vr0, const value_range *vr1)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Meeting\n  ");
      dump_value_range (dump_file, vr0);
      fprintf (dump_file, "\nand\n  ");
      dump_value_range (dump_file, vr1);
      fprintf (dump_file, "\n");
    }
  vrp_meet_1 (vr0, vr1);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "to\n  ");
      dump_value_range (dump_file, vr0);
      fprintf (dump_file, "\n");
    }
}
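
/* An illustrative example (values invented for exposition): a
   value_range cannot represent a union of disjoint intervals, so the
   meet of [1, 5] and [10, 20] is the convex hull [1, 20].  And when
   union_ranges gives up entirely but neither input contains zero,
   e.g. for a pointer known non-null along both incoming paths, the
   fallback above still salvages the nonnull anti-range ~[0, 0].  */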

/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.  */

enum ssa_prop_result
vrp_prop::visit_phi (gphi *phi)
{
  tree lhs = PHI_RESULT (phi);
  value_range vr_result = VR_INITIALIZER;
  extract_range_from_phi_node (phi, &vr_result);
  if (update_value_range (lhs, &vr_result))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Found new range for ");
          print_generic_expr (dump_file, lhs);
          fprintf (dump_file, ": ");
          dump_value_range (dump_file, &vr_result);
          fprintf (dump_file, "\n");
        }

      if (vr_result.type == VR_VARYING)
        return SSA_PROP_VARYING;

      return SSA_PROP_INTERESTING;
    }

  /* Nothing changed, don't add outgoing edges.  */
  return SSA_PROP_NOT_INTERESTING;
}
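
/* For instance (an illustrative sketch): for the PHI node

     i_3 = PHI <0(2), i_14(4)>

   where i_14 has range [1, 10] on the executable edge from block 4,
   extract_range_from_phi_node meets [0, 0] with [1, 10], and
   update_value_range records [0, 10] for i_3, returning
   SSA_PROP_INTERESTING so statements using i_3 are revisited.  */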

class vrp_folder : public substitute_and_fold_engine
{
 public:
  tree get_value (tree) FINAL OVERRIDE;
  bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
  bool fold_predicate_in (gimple_stmt_iterator *);

  class vr_values *vr_values;

  /* Delegators.  */
  tree vrp_evaluate_conditional (tree_code code, tree op0,
                                 tree op1, gimple *stmt)
    { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
  bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
    { return vr_values->simplify_stmt_using_ranges (gsi); }
  tree op_with_constant_singleton_value_range (tree op)
    { return vr_values->op_with_constant_singleton_value_range (op); }
};

/* If the statement pointed by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  */

bool
vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple *stmt = gsi_stmt (*si);

  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
                                      gimple_assign_rhs1 (stmt),
                                      gimple_assign_rhs2 (stmt),
                                      stmt);
    }
  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
                                    gimple_cond_lhs (cond_stmt),
                                    gimple_cond_rhs (cond_stmt),
                                    stmt);
  else
    return false;

  if (val)
    {
      if (assignment_p)
        val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
        {
          fprintf (dump_file, "Folding predicate ");
          print_gimple_expr (dump_file, stmt, 0);
          fprintf (dump_file, " to ");
          print_generic_expr (dump_file, val);
          fprintf (dump_file, "\n");
        }

      if (is_gimple_assign (stmt))
        gimple_assign_set_rhs_from_tree (si, val);
      else
        {
          gcc_assert (gimple_code (stmt) == GIMPLE_COND);
          gcond *cond_stmt = as_a <gcond *> (stmt);
          if (integer_zerop (val))
            gimple_cond_make_false (cond_stmt);
          else if (integer_onep (val))
            gimple_cond_make_true (cond_stmt);
          else
            gcc_unreachable ();
        }

      return true;
    }

  return false;
}
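
/* For example (an illustrative sketch): if VRP computed the range
   [0, 9] for i_2, then for the statement

     if (i_2 > 20)

   vrp_evaluate_conditional evaluates the predicate to the constant 0,
   the dump (when enabled) shows "Folding predicate i_2 > 20 to 0",
   and the GIMPLE_COND is rewritten via gimple_cond_make_false so CFG
   cleanup can later remove the dead arm.  */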

/* Callback for substitute_and_fold folding the stmt at *SI.  */

bool
vrp_folder::fold_stmt (gimple_stmt_iterator *si)
{
  if (fold_predicate_in (si))
    return true;

  return simplify_stmt_using_ranges (si);
}

/* If OP has a value range with a single constant value return that,
   otherwise return NULL_TREE.  This returns OP itself if OP is a
   constant.

   Implemented as a pure wrapper right now, but this will change.  */

tree
vrp_folder::get_value (tree op)
{
  return op_with_constant_singleton_value_range (op);
}

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
{
  imm_use_iterator imm_iter;
  gimple *use_stmt;
  use_operand_p use_p;

  if (TREE_CODE (op) == SSA_NAME)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
        {
          use_stmt = USE_STMT (use_p);
          if (use_stmt != stmt
              && gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
              && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
              && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
            return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}
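
/* For instance (an illustrative sketch):

     x_2 = ASSERT_EXPR <x_1, x_1 != 0>;
     ...
     if (x_1 == y_3)        <-- STMT, in a block dominated by the assert

   lhs_of_dominating_assert (x_1, bb, stmt) returns x_2, whose value
   range also reflects the x_1 != 0 predicate, giving the jump
   threading simplifier more precise information than x_1 itself.  */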

/* A hack.  */
static class vr_values *x_vr_values;

/* A trivial wrapper so that we can present the generic jump threading
   code with a simple API for simplifying statements.  STMT is the
   statement we want to simplify, WITHIN_STMT provides the location
   for any overflow warnings.  */

static tree
simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
                                  class avail_exprs_stack *avail_exprs_stack,
                                  basic_block bb)
{
  /* First see if the conditional is in the hash table.  */
  tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
  if (cached_lhs && is_gimple_min_invariant (cached_lhs))
    return cached_lhs;

  vr_values *vr_values = x_vr_values;
  if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    {
      tree op0 = gimple_cond_lhs (cond_stmt);
      op0 = lhs_of_dominating_assert (op0, bb, stmt);

      tree op1 = gimple_cond_rhs (cond_stmt);
      op1 = lhs_of_dominating_assert (op1, bb, stmt);

      return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
                                                  op0, op1, within_stmt);
    }

  /* We simplify a switch statement by trying to determine which case label
     will be taken.  If we are successful then we return the corresponding
     CASE_LABEL_EXPR.  */
  if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
    {
      tree op = gimple_switch_index (switch_stmt);
      if (TREE_CODE (op) != SSA_NAME)
        return NULL_TREE;

      op = lhs_of_dominating_assert (op, bb, stmt);

      const value_range *vr = vr_values->get_value_range (op);
      if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
          || symbolic_range_p (vr))
        return NULL_TREE;

      if (vr->type == VR_RANGE)
        {
          size_t i, j;

          /* Get the range of labels that contain a part of the operand's
             value range.  */
          find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);

          /* Is there only one such label?  */
          if (i == j)
            {
              tree label = gimple_switch_label (switch_stmt, i);

              /* The i'th label will be taken only if the value range of the
                 operand is entirely within the bounds of this label.  */
              if (CASE_HIGH (label) != NULL_TREE
                  ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
                     && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
                  : (tree_int_cst_equal (CASE_LOW (label), vr->min)
                     && tree_int_cst_equal (vr->min, vr->max)))
                return label;
            }

          /* If there are no such labels then the default label will be
             taken.  */
          if (i > j)
            return gimple_switch_label (switch_stmt, 0);
        }

      if (vr->type == VR_ANTI_RANGE)
        {
          unsigned n = gimple_switch_num_labels (switch_stmt);
          tree min_label = gimple_switch_label (switch_stmt, 1);
          tree max_label = gimple_switch_label (switch_stmt, n - 1);

          /* The default label will be taken only if the anti-range of the
             operand is entirely outside the bounds of all the (non-default)
             case labels.  */
          if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
              && (CASE_HIGH (max_label) != NULL_TREE
                  ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
                  : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
            return gimple_switch_label (switch_stmt, 0);
        }

      return NULL_TREE;
    }

  if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
    {
      tree lhs = gimple_assign_lhs (assign_stmt);
      if (TREE_CODE (lhs) == SSA_NAME
          && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
              || POINTER_TYPE_P (TREE_TYPE (lhs)))
          && stmt_interesting_for_vrp (stmt))
        {
          edge dummy_e;
          tree dummy_tree;
          value_range new_vr = VR_INITIALIZER;
          vr_values->extract_range_from_stmt (stmt, &dummy_e,
                                              &dummy_tree, &new_vr);
          if (range_int_cst_singleton_p (&new_vr))
            return new_vr.min;
        }
    }

  return NULL_TREE;
}
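
/* A worked example for the switch handling above (values invented for
   illustration): if the switch index OP has range [3, 4] and the
   statement is

     switch (op) { case 0: ...; case 3 ... 5: ...; default: ... }

   then find_case_label_range returns a single label (i == j) whose
   bounds CASE_LOW = 3 and CASE_HIGH = 5 enclose [3, 4], so that
   CASE_LABEL_EXPR is returned and the threader knows which edge the
   switch must take.  */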

class vrp_dom_walker : public dom_walker
{
 public:
  vrp_dom_walker (cdi_direction direction,
                  class const_and_copies *const_and_copies,
                  class avail_exprs_stack *avail_exprs_stack)
    : dom_walker (direction, REACHABLE_BLOCKS),
      m_const_and_copies (const_and_copies),
      m_avail_exprs_stack (avail_exprs_stack),
      m_dummy_cond (NULL) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

  class vr_values *vr_values;

 private:
  class const_and_copies *m_const_and_copies;
  class avail_exprs_stack *m_avail_exprs_stack;

  gcond *m_dummy_cond;
};

/* Called before processing dominator children of BB.  We want to look
   at ASSERT_EXPRs and record information from them in the appropriate
   tables.

   We could look at other statements here.  It's not seen as likely
   to significantly increase the jump threads we discover.  */

edge
vrp_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();
  for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        {
          tree rhs1 = gimple_assign_rhs1 (stmt);
          tree cond = TREE_OPERAND (rhs1, 1);
          tree inverted = invert_truthvalue (cond);
          vec<cond_equivalence> p;
          p.create (3);
          record_conditions (&p, cond, inverted);
          for (unsigned int i = 0; i < p.length (); i++)
            m_avail_exprs_stack->record_cond (&p[i]);

          tree lhs = gimple_assign_lhs (stmt);
          m_const_and_copies->record_const_or_copy (lhs,
                                                    TREE_OPERAND (rhs1, 0));
          p.release ();
          continue;
        }
      break;
    }
  return NULL;
}

/* Called after processing dominator children of BB.  This is where we
   actually call into the threader.  */

void
vrp_dom_walker::after_dom_children (basic_block bb)
{
  if (!m_dummy_cond)
    m_dummy_cond = gimple_build_cond (NE_EXPR,
                                      integer_zero_node, integer_zero_node,
                                      NULL, NULL);

  x_vr_values = vr_values;
  thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
                         m_avail_exprs_stack, NULL,
                         simplify_stmt_for_jump_threading);
  x_vr_values = NULL;

  m_avail_exprs_stack->pop_to_marker ();
  m_const_and_copies->pop_to_marker ();
}

/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities, i.e.,
   when the block is reached from a specific predecessor, we
   may be able to determine which of the outgoing edges will
   be traversed.  When this optimization applies, we are able
   to avoid conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */

static void
identify_jump_threads (class vr_values *vr_values)
{
  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  */
  const_and_copies *equiv_stack = new const_and_copies ();

  hash_table<expr_elt_hasher> *avail_exprs
    = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack *avail_exprs_stack
    = new class avail_exprs_stack (avail_exprs);

  vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
  walker.vr_values = vr_values;
  walker.walk (cfun->cfg->x_entry_block_ptr);

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
  delete equiv_stack;
  delete avail_exprs;
  delete avail_exprs_stack;
}
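
/* A sketch of a threadable situation (invented for illustration): in

     if (a_1 > 10)       <-- block B2
       goto B4;
     ...
     B4: if (a_1 > 5)    <-- reached from B2's true edge

   the walker's simplify callback evaluates a_1 > 5 under the range
   [11, +INF] implied by the path through B2's true edge, so the pair
   of edges through B4 is registered for threading and the second test
   disappears on that path.  */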

/* Traverse all the blocks folding conditionals with known ranges.  */

void
vrp_prop::vrp_finalize (bool warn_array_bounds_p)
{
  size_t i;

  /* We have completed propagating through the lattice.  */
  vr_values.set_lattice_propagation_complete ();

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      vr_values.dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  /* Export the computed information: range info for integral
     SSA_NAMEs, non-null-ness for pointer SSA_NAMEs.  */
  for (i = 0; i < num_ssa_names; i++)
    {
      tree name = ssa_name (i);
      if (!name)
        continue;

      const value_range *vr = get_value_range (name);
      if ((vr->type == VR_VARYING)
          || (vr->type == VR_UNDEFINED)
          || (TREE_CODE (vr->min) != INTEGER_CST)
          || (TREE_CODE (vr->max) != INTEGER_CST))
        continue;

      if (POINTER_TYPE_P (TREE_TYPE (name))
          && range_includes_zero_p (vr) == 0)
        set_ptr_nonnull (name);
      else if (!POINTER_TYPE_P (TREE_TYPE (name)))
        set_range_info (name, vr->type,
                        wi::to_wide (vr->min),
                        wi::to_wide (vr->max));
    }

  /* If we're checking array refs, we want to merge information on
     the executability of each edge between vrp_folder and the
     check_array_bounds_dom_walker: each can clear the
     EDGE_EXECUTABLE flag on edges, in different ways.

     Hence, if we're going to call check_all_array_refs, set
     the flag on every edge now, rather than in
     check_array_bounds_dom_walker's ctor; vrp_folder may clear
     it from some edges.  */
  if (warn_array_bounds && warn_array_bounds_p)
    set_all_edges_as_executable (cfun);

  class vrp_folder vrp_folder;
  vrp_folder.vr_values = &vr_values;
  vrp_folder.substitute_and_fold ();

  if (warn_array_bounds && warn_array_bounds_p)
    check_all_array_refs ();
}
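
/* For instance (an illustrative sketch): if VRP proved n_7 has the
   range [0, 15] and n_7 is not a pointer, set_range_info stores that
   range in the SSA_NAME itself, so a later pass can still recover it
   with get_range_info (n_7, &min, &max) after the VRP lattice is
   freed; likewise set_ptr_nonnull preserves the non-null fact for
   pointer names.  */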

/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA names
   have equivalent, though distinct, ranges.  For instance,

     1	x_9 = p_3->a;
     2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
     3	if (p_4 == q_2)
     4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
     5	endif
     6	if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO, the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */

static unsigned int
execute_vrp (bool warn_array_bounds_p)
{
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ???  This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  class vrp_prop vrp_prop;
  vrp_prop.vrp_initialize ();
  vrp_prop.ssa_propagate ();
  vrp_prop.vrp_finalize (warn_array_bounds_p);

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads (&vrp_prop.vr_values);

  /* A comparison of an SSA_NAME against a constant where the SSA_NAME
     was set by a type conversion can often be rewritten to use the
     RHS of the type conversion.

     However, doing so inhibits jump threading through the comparison.
     So that transformation is not performed until after jump threading
     is complete.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple *last = last_stmt (bb);
      if (last && gimple_code (last) == GIMPLE_COND)
        vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
    }

  free_numbers_of_iterations_estimates (cfun);

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  /* We identified all the jump threading opportunities earlier, but could
     not transform the CFG at that time.  This routine transforms the
     CFG and arranges for the dominator tree to be rebuilt if necessary.

     Note the SSA graph update will occur during the normal TODO
     processing by the pass manager.  */
  thread_through_all_blocks (false);

  vrp_prop.vr_values.cleanup_edges_and_switches ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}

namespace {

const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_vrp : public gimple_opt_pass
{
 public:
  pass_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_vrp (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      warn_array_bounds_p = param;
    }
  virtual bool gate (function *) { return flag_tree_vrp != 0; }
  virtual unsigned int execute (function *)
    { return execute_vrp (warn_array_bounds_p); }

 private:
  bool warn_array_bounds_p;
}; // class pass_vrp

} // anon namespace

gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt);
}

/* Worker for determine_value_range.  */

static void
determine_value_range_1 (value_range *vr, tree expr)
{
  if (BINARY_CLASS_P (expr))
    {
      value_range vr0 = VR_INITIALIZER, vr1 = VR_INITIALIZER;
      determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
      determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
      extract_range_from_binary_expr_1 (vr, TREE_CODE (expr), TREE_TYPE (expr),
                                        &vr0, &vr1);
    }
  else if (UNARY_CLASS_P (expr))
    {
      value_range vr0 = VR_INITIALIZER;
      determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
      extract_range_from_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
                                     &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
    }
  else if (TREE_CODE (expr) == INTEGER_CST)
    set_value_range_to_value (vr, expr, NULL);
  else
    {
      value_range_type kind;
      wide_int min, max;
      /* For SSA names try to extract range info computed by VRP.  Otherwise
         fall back to varying.  */
      if (TREE_CODE (expr) == SSA_NAME
          && INTEGRAL_TYPE_P (TREE_TYPE (expr))
          && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
        set_value_range (vr, kind, wide_int_to_tree (TREE_TYPE (expr), min),
                         wide_int_to_tree (TREE_TYPE (expr), max), NULL);
      else
        set_value_range_to_varying (vr);
    }
}

/* Compute a value-range for EXPR and set it in *MIN and *MAX.  Return
   the determined range type.  */

value_range_type
determine_value_range (tree expr, wide_int *min, wide_int *max)
{
  value_range vr = VR_INITIALIZER;
  determine_value_range_1 (&vr, expr);
  if ((vr.type == VR_RANGE
       || vr.type == VR_ANTI_RANGE)
      && !symbolic_range_p (&vr))
    {
      *min = wi::to_wide (vr.min);
      *max = wi::to_wide (vr.max);
      return vr.type;
    }

  return VR_VARYING;
}
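
/* A hypothetical use of the above (values invented for illustration):
   for EXPR = a_1 + 7 where earlier VRP recorded a_1 in [0, 10],
   something like

     wide_int min, max;
     if (determine_value_range (expr, &min, &max) == VR_RANGE)
       ... here min == 7 and max == 17 ...

   lets a caller bound the expression without re-running
   propagation.  */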