/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2022 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "basic-block.h"
#include "bitmap.h"
#include "sbitmap.h"
#include "options.h"
#include "dominance.h"
#include "function.h"
#include "cfg.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "domwalk.h"
#include "vr-values.h"
#include "gimple-array-bounds.h"
#include "gimple-range.h"
#include "gimple-range-path.h"
#include "value-pointer-equiv.h"
#include "gimple-fold.h"

/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
class live_names
{
public:
  live_names ();
  ~live_names ();
  void set (tree, basic_block);
  void clear (tree, basic_block);
  void merge (basic_block dest, basic_block src);
  bool live_on_block_p (tree, basic_block);
  bool live_on_edge_p (tree, edge);
  bool block_has_live_names_p (basic_block);
  void clear_block (basic_block);

private:
  sbitmap *live;
  unsigned num_blocks;
  void init_bitmap_if_needed (basic_block);
};

void
live_names::init_bitmap_if_needed (basic_block bb)
{
  unsigned i = bb->index;
  if (!live[i])
    {
      live[i] = sbitmap_alloc (num_ssa_names);
      bitmap_clear (live[i]);
    }
}

bool
live_names::block_has_live_names_p (basic_block bb)
{
  unsigned i = bb->index;
  return live[i] && !bitmap_empty_p (live[i]);
}

void
live_names::clear_block (basic_block bb)
{
  unsigned i = bb->index;
  if (live[i])
    {
      sbitmap_free (live[i]);
      live[i] = NULL;
    }
}

void
live_names::merge (basic_block dest, basic_block src)
{
  init_bitmap_if_needed (dest);
  init_bitmap_if_needed (src);
  bitmap_ior (live[dest->index], live[dest->index], live[src->index]);
}

void
live_names::set (tree name, basic_block bb)
{
  init_bitmap_if_needed (bb);
  bitmap_set_bit (live[bb->index], SSA_NAME_VERSION (name));
}

void
live_names::clear (tree name, basic_block bb)
{
  unsigned i = bb->index;
  if (live[i])
    bitmap_clear_bit (live[i], SSA_NAME_VERSION (name));
}

live_names::live_names ()
{
  num_blocks = last_basic_block_for_fn (cfun);
  live = XCNEWVEC (sbitmap, num_blocks);
}

live_names::~live_names ()
{
  for (unsigned i = 0; i < num_blocks; ++i)
    if (live[i])
      sbitmap_free (live[i]);
  XDELETEVEC (live);
}

bool
live_names::live_on_block_p (tree name, basic_block bb)
{
  return (live[bb->index]
          && bitmap_bit_p (live[bb->index], SSA_NAME_VERSION (name)));
}

/* Return true if the SSA name NAME is live on the edge E.  */

bool
live_names::live_on_edge_p (tree name, edge e)
{
  return live_on_block_p (name, e->dest);
}

/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */
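/* For example, with NONZERO_BITS == 0xc (so the only achievable values
   are 0, 4, 8 and 12), a VR_RANGE [1, 10] tightens to [4, 8], since
   wi::round_up_for_mask (1, 0xc) == 4 and
   wi::round_down_for_mask (10, 0xc) == 8.  */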
enum value_range_kind
intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
                                   wide_int *min, wide_int *max,
                                   const wide_int &nonzero_bits,
                                   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
         A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
         to create an inclusive upper bound for A and an inclusive lower
         bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
         and A_MAX is the highest value that satisfies NONZERO_BITS.
         Likewise if the calculation of B_MIN wrapped, B is effectively
         empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
        return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
         other one.  */
      if (a_empty || b_empty)
        {
          *min = b_min;
          *max = a_max;
          gcc_checking_assert (wi::le_p (*min, *max, sgn));
          return VR_RANGE;
        }

      /* Update the VR_ANTI_RANGE bounds.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
         satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
        {
          unsigned int precision = min->get_precision ();
          *min = wi::min_value (precision, sgn);
          *max = wi::max_value (precision, sgn);
          vr_type = VR_RANGE;
        }
    }
  if (vr_type == VR_RANGE || vr_type == VR_VARYING)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
        return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}

/* Return true if max and min of VR are INTEGER_CST.  It need not be
   a singleton.  */

bool
range_int_cst_p (const value_range *vr)
{
  return (vr->kind () == VR_RANGE && range_has_numeric_bounds_p (vr));
}

/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */
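/* For example, given T = x_1 + 16 this returns x_1 with *INV = 16 and
   *NEG = false, and given T = 42 - x_1 it returns x_1 with *INV = 42
   and *NEG = true.  A plain SSA_NAME is returned as itself with
   *INV = NULL_TREE.  */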
tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
        {
          neg_ = (TREE_CODE (t) == MINUS_EXPR);
          inv_ = TREE_OPERAND (t, 0);
          t = TREE_OPERAND (t, 1);
        }
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
        {
          neg_ = false;
          inv_ = TREE_OPERAND (t, 1);
          t = TREE_OPERAND (t, 0);
        }
      else
        return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}

/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}

/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */

int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
    return val == val2 ? 0 : -2;
  else
    {
      int cmp = compare_values (val, val2);
      if (cmp == -1)
        return 1;
      else if (cmp == 0 || cmp == 1)
        return 0;
      else
        return -2;
    }
}

/* Compare two values VAL1 and VAL2.  Return

     -2 if VAL1 and VAL2 cannot be compared at compile-time,
     -1 if VAL1 < VAL2,
      0 if VAL1 == VAL2,
     +1 if VAL1 > VAL2, and
     +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */
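/* For example, with undefined signed overflow, x_1 + 1 compares less
   than x_1 + 3 (the function returns -1) even though x_1 itself is
   unknown, and *STRICT_OVERFLOW_P is set because the result is only
   valid when neither sum wraps.  */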
int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
              == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
    val2 = fold_convert (TREE_TYPE (val1), val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
        return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
        return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets the no-warning bit to declare
             that overflow doesn't happen.  */
          && (!inv1 || !warning_suppressed_p (val1, OPT_Woverflow))
          && (!inv2 || !warning_suppressed_p (val2, OPT_Woverflow)))
        *strict_overflow_p = true;

      if (!inv1)
        inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
        inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
                      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets the no-warning bit to declare
             that overflow doesn't happen.  */
          && (!sym1 || !warning_suppressed_p (val1, OPT_Woverflow))
          && (!sym2 || !warning_suppressed_p (val2, OPT_Woverflow)))
        *strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
         underflows, this means that we can trivially compare the NAME with
         it and, consequently, the two values with each other.  */
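      /* E.g. for signed char, comparing x_1 + 100 against -100 asks
         whether x_1 compares against -200; that difference underflows
         the type, so every possible x_1 lies above it and the whole
         comparison folds to "greater than".  */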
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
          != wi::cmp (diff, wi::to_wide (cst), sgn))
        {
          const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
          return cst1 ? res : -res;
        }

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
        return -2;

      if (TREE_CODE (val1) == INTEGER_CST
          && TREE_CODE (val2) == INTEGER_CST)
        return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
        {
          if (known_eq (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 0;
          if (known_lt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return -1;
          if (known_gt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 1;
        }

      return -2;
    }
  else
    {
      if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
        {
          /* We cannot compare overflowed values.  */
          if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
            return -2;

          return tree_int_cst_compare (val1, val2);
        }

      /* First see if VAL1 and VAL2 are not the same.  */
      if (operand_equal_p (val1, val2, 0))
        return 0;

      fold_defer_overflow_warnings ();

      /* If VAL1 is a lower address than VAL2, return -1.  */
      tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
      if (t && integer_onep (t))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return -1;
        }

      /* If VAL1 is a higher address than VAL2, return +1.  */
      t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
      if (t && integer_onep (t))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return 1;
        }

      /* If VAL1 is different than VAL2, return +2.  */
      t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
      fold_undefer_and_ignore_overflow_warnings ();
      if (t && integer_onep (t))
        return 2;

      return -2;
    }
}

/* Compare values like compare_values_warnv.  */

int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}

/* If BOUND will include a symbolic bound, adjust it accordingly,
   otherwise leave it as is.

   CODE is the original operation that combined the bounds (PLUS_EXPR
   or MINUS_EXPR).

   TYPE is the type of the original operation.

   SYM_OPn is the symbolic for OPn if it has a symbolic.

   NEG_OPn is TRUE if the OPn was negated.  */

static void
adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
                       tree sym_op0, tree sym_op1,
                       bool neg_op0, bool neg_op1)
{
  bool minus_p = (code == MINUS_EXPR);
  /* If the result bound is constant, we're done; otherwise, build the
     symbolic bound.  */
  if (sym_op0 == sym_op1)
    ;
  else if (sym_op0)
    bound = build_symbolic_expr (type, sym_op0,
                                 neg_op0, bound);
  else if (sym_op1)
    {
      /* We may not negate if that might introduce
         undefined overflow.  */
      if (!minus_p
          || neg_op1
          || TYPE_OVERFLOW_WRAPS (type))
        bound = build_symbolic_expr (type, sym_op1,
                                     neg_op1 ^ minus_p, bound);
      else
        bound = NULL_TREE;
    }
}

/* Combine OP0 and OP1, which are two parts of a bound, into one wide
   int bound according to CODE.  CODE is the operation combining the
   bound (either a PLUS_EXPR or a MINUS_EXPR).

   TYPE is the type of the combine operation.

   WI is the wide int to store the result.

   OVF is -1 if an underflow occurred, +1 if an overflow occurred and 0
   if neither over- nor underflow occurred.  */

static void
combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
               tree type, tree op0, tree op1)
{
  bool minus_p = (code == MINUS_EXPR);
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* Combine the bounds, if any.  */
  if (op0 && op1)
    {
      if (minus_p)
        wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
      else
        wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
    }
  else if (op0)
    wi = wi::to_wide (op0);
  else if (op1)
    {
      if (minus_p)
        wi = wi::neg (wi::to_wide (op1), &ovf);
      else
        wi = wi::to_wide (op1);
    }
  else
    wi = wi::shwi (0, prec);
}

/* Given a range in [WMIN, WMAX], adjust it for possible overflow and
   put the result in VR.

   TYPE is the type of the range.

   MIN_OVF and MAX_OVF indicate what type of overflow, if any,
   occurred while originally calculating WMIN or WMAX.  -1 indicates
   underflow.  +1 indicates overflow.  0 indicates neither.  */
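/* For example, for a wrapping unsigned char type, WMIN = 250 with no
   overflow and a WMAX that overflowed to 9 (MAX_OVF == +1) describe
   the wrapped range [250, 9], which is stored as the anti-range
   ~[10, 249].  If the type did not wrap on overflow, the upper bound
   would instead saturate to the type maximum, giving [250, 255].  */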
static void
set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
                               tree type,
                               const wide_int &wmin, const wide_int &wmax,
                               wi::overflow_type min_ovf,
                               wi::overflow_type max_ovf)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* For one bit precision if max < min, then the swapped
     range covers all values.  */
  if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
    {
      kind = VR_VARYING;
      return;
    }

  if (TYPE_OVERFLOW_WRAPS (type))
    {
      /* If overflow wraps, truncate the values and adjust the
         range kind and bounds appropriately.  */
      wide_int tmin = wide_int::from (wmin, prec, sgn);
      wide_int tmax = wide_int::from (wmax, prec, sgn);
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
        {
          /* If the limits are swapped, we wrapped around and cover
             the entire range.  */
          if (wi::gt_p (tmin, tmax, sgn))
            kind = VR_VARYING;
          else
            {
              kind = VR_RANGE;
              /* No overflow or both overflow or underflow.  The
                 range kind stays VR_RANGE.  */
              min = wide_int_to_tree (type, tmin);
              max = wide_int_to_tree (type, tmax);
            }
          return;
        }
      else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
               || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
        {
          /* Min underflow or max overflow.  The range kind
             changes to VR_ANTI_RANGE.  */
          bool covers = false;
          wide_int tem = tmin;
          tmin = tmax + 1;
          if (wi::cmp (tmin, tmax, sgn) < 0)
            covers = true;
          tmax = tem - 1;
          if (wi::cmp (tmax, tem, sgn) > 0)
            covers = true;
          /* If the anti-range would cover nothing, drop to varying.
             Likewise if the anti-range bounds are outside of the
             type's values.  */
          if (covers || wi::cmp (tmin, tmax, sgn) > 0)
            {
              kind = VR_VARYING;
              return;
            }
          kind = VR_ANTI_RANGE;
          min = wide_int_to_tree (type, tmin);
          max = wide_int_to_tree (type, tmax);
          return;
        }
      else
        {
          /* Other underflow and/or overflow, drop to VR_VARYING.  */
          kind = VR_VARYING;
          return;
        }
    }
  else
    {
      /* If overflow does not wrap, saturate to the type's min/max
         value.  */
      wide_int type_min = wi::min_value (prec, sgn);
      wide_int type_max = wi::max_value (prec, sgn);
      kind = VR_RANGE;
      if (min_ovf == wi::OVF_UNDERFLOW)
        min = wide_int_to_tree (type, type_min);
      else if (min_ovf == wi::OVF_OVERFLOW)
        min = wide_int_to_tree (type, type_max);
      else
        min = wide_int_to_tree (type, wmin);

      if (max_ovf == wi::OVF_UNDERFLOW)
        max = wide_int_to_tree (type, type_min);
      else if (max_ovf == wi::OVF_OVERFLOW)
        max = wide_int_to_tree (type, type_max);
      else
        max = wide_int_to_tree (type, wmax);
    }
}

/* Fold two value ranges of a POINTER_PLUS_EXPR into VR.  */

static void
extract_range_from_pointer_plus_expr (value_range *vr,
                                      enum tree_code code,
                                      tree expr_type,
                                      const value_range *vr0,
                                      const value_range *vr1)
{
  gcc_checking_assert (POINTER_TYPE_P (expr_type)
                       && code == POINTER_PLUS_EXPR);
  /* For pointer types, we are really only interested in asserting
     whether the expression evaluates to non-NULL.
     With -fno-delete-null-pointer-checks we need to be more
     conservative.  As some object might reside at address 0,
     then some offset could be added to it and the same offset
     subtracted again and the result would be NULL.
     E.g.
     static int a[12]; where &a[0] is NULL and
     ptr = &a[6];
     ptr -= 6;
     ptr will be NULL here, even when there is POINTER_PLUS_EXPR
     where the first range doesn't include zero and the second one
     doesn't either.  As the second operand is sizetype (unsigned),
     consider all ranges where the MSB could be set as possible
     subtractions where the result might be NULL.  */
  if ((!range_includes_zero_p (vr0)
       || !range_includes_zero_p (vr1))
      && !TYPE_OVERFLOW_WRAPS (expr_type)
      && (flag_delete_null_pointer_checks
          || (range_int_cst_p (vr1)
              && !tree_int_cst_sign_bit (vr1->max ()))))
    vr->set_nonzero (expr_type);
  else if (vr0->zero_p () && vr1->zero_p ())
    vr->set_zero (expr_type);
  else
    vr->set_varying (expr_type);
}

/* Extract range information from a PLUS/MINUS_EXPR and store the
   result in *VR.  */

static void
extract_range_from_plus_minus_expr (value_range *vr,
                                    enum tree_code code,
                                    tree expr_type,
                                    const value_range *vr0_,
                                    const value_range *vr1_)
{
  gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);

  value_range vr0 = *vr0_, vr1 = *vr1_;
  value_range vrtem0, vrtem1;

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  */
  if (vr0.kind () == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_plus_minus_expr (vr, code, expr_type, &vrtem0, vr1_);
      if (!vrtem1.undefined_p ())
        {
          value_range vrres;
          extract_range_from_plus_minus_expr (&vrres, code, expr_type,
                                              &vrtem1, vr1_);
          vr->union_ (vrres);
        }
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.kind () == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_plus_minus_expr (vr, code, expr_type, vr0_, &vrtem0);
      if (!vrtem1.undefined_p ())
        {
          value_range vrres;
          extract_range_from_plus_minus_expr (&vrres, code, expr_type,
                                              vr0_, &vrtem1);
          vr->union_ (vrres);
        }
      return;
    }

  value_range_kind kind;
  value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
  tree vr0_min = vr0.min (), vr0_max = vr0.max ();
  tree vr1_min = vr1.min (), vr1_max = vr1.max ();
  tree min = NULL_TREE, max = NULL_TREE;

  /* This will normalize things such that calculating
     [0,0] - VR_VARYING is not dropped to varying, but is
     calculated as [MIN+1, MAX].  */
  if (vr0.varying_p ())
    {
      vr0_kind = VR_RANGE;
      vr0_min = vrp_val_min (expr_type);
      vr0_max = vrp_val_max (expr_type);
    }
  if (vr1.varying_p ())
    {
      vr1_kind = VR_RANGE;
      vr1_min = vrp_val_min (expr_type);
      vr1_max = vrp_val_max (expr_type);
    }

  const bool minus_p = (code == MINUS_EXPR);
  tree min_op0 = vr0_min;
  tree min_op1 = minus_p ? vr1_max : vr1_min;
  tree max_op0 = vr0_max;
  tree max_op1 = minus_p ? vr1_min : vr1_max;
  tree sym_min_op0 = NULL_TREE;
  tree sym_min_op1 = NULL_TREE;
  tree sym_max_op0 = NULL_TREE;
  tree sym_max_op1 = NULL_TREE;
  bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;

  neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;

  /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
     single-symbolic ranges, try to compute the precise resulting range,
     but only if we know that this resulting range will also be constant
     or single-symbolic.  */
  if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
      && (TREE_CODE (min_op0) == INTEGER_CST
          || (sym_min_op0
              = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
      && (TREE_CODE (min_op1) == INTEGER_CST
          || (sym_min_op1
              = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
      && (!(sym_min_op0 && sym_min_op1)
          || (sym_min_op0 == sym_min_op1
              && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
      && (TREE_CODE (max_op0) == INTEGER_CST
          || (sym_max_op0
              = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
      && (TREE_CODE (max_op1) == INTEGER_CST
          || (sym_max_op1
              = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
      && (!(sym_max_op0 && sym_max_op1)
          || (sym_max_op0 == sym_max_op1
              && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
    {
      wide_int wmin, wmax;
      wi::overflow_type min_ovf = wi::OVF_NONE;
      wi::overflow_type max_ovf = wi::OVF_NONE;

      /* Build the bounds.  */
      combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
      combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);

      /* If the resulting range will be symbolic, we need to eliminate any
         explicit or implicit overflow introduced in the above computation
         because compare_values could make an incorrect use of it.  That's
         why we require one of the ranges to be a singleton.  */
      if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1)
          && ((bool)min_ovf || (bool)max_ovf
              || (min_op0 != max_op0 && min_op1 != max_op1)))
        {
          vr->set_varying (expr_type);
          return;
        }

      /* Adjust the range for possible overflow.  */
      set_value_range_with_overflow (kind, min, max, expr_type,
                                     wmin, wmax, min_ovf, max_ovf);
      if (kind == VR_VARYING)
        {
          vr->set_varying (expr_type);
          return;
        }

      /* Build the symbolic bounds if needed.  */
      adjust_symbolic_bound (min, code, expr_type,
                             sym_min_op0, sym_min_op1,
                             neg_min_op0, neg_min_op1);
      adjust_symbolic_bound (max, code, expr_type,
                             sym_max_op0, sym_max_op1,
                             neg_max_op0, neg_max_op1);
    }
  else
    {
      /* For other cases, for example if we have a PLUS_EXPR with two
         VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
         to compute a precise range for such a case.
         ???  General even mixed range kind operations can be expressed
         by for example transforming ~[3, 5] + [1, 2] to range-only
         operations and a union primitive:
           [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
               [-INF+1, 4]     U    [6, +INF(OVF)]
         though usually the union is not exactly representable with
         a single range or anti-range as the above is
         [-INF+1, +INF(OVF)] intersected with ~[5, 5]
         but one could use a scheme similar to equivalences for this.  */
      vr->set_varying (expr_type);
      return;
    }

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  */
  if (min == NULL_TREE
      || TREE_OVERFLOW_P (min)
      || max == NULL_TREE
      || TREE_OVERFLOW_P (max))
    {
      vr->set_varying (expr_type);
      return;
    }

  int cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
         then the operation caused one of them to wrap around, mark
         the new range VARYING.  */
      vr->set_varying (expr_type);
    }
  else
    vr->set (min, max, kind);
}

/* If the types passed are supported, return TRUE, otherwise set VR to
   VARYING and return FALSE.  */

static bool
supported_types_p (value_range *vr,
                   tree type0,
                   tree type1 = NULL)
{
  if (!value_range::supports_type_p (type0)
      || (type1 && !value_range::supports_type_p (type1)))
    {
      vr->set_varying (type0);
      return false;
    }
  return true;
}

/* If any of the ranges passed are defined, return TRUE, otherwise set
   VR to UNDEFINED and return FALSE.  */

static bool
defined_ranges_p (value_range *vr,
                  const value_range *vr0, const value_range *vr1 = NULL)
{
  if (vr0->undefined_p () && (!vr1 || vr1->undefined_p ()))
    {
      vr->set_undefined ();
      return false;
    }
  return true;
}

static value_range
drop_undefines_to_varying (const value_range *vr, tree expr_type)
{
  if (vr->undefined_p ())
    return value_range (expr_type);
  else
    return *vr;
}

/* If any operand is symbolic, perform a binary operation on them and
   return TRUE, otherwise return FALSE.  */

static bool
range_fold_binary_symbolics_p (value_range *vr,
                               tree_code code,
                               tree expr_type,
                               const value_range *vr0_,
                               const value_range *vr1_)
{
  if (vr0_->symbolic_p () || vr1_->symbolic_p ())
    {
      value_range vr0 = drop_undefines_to_varying (vr0_, expr_type);
      value_range vr1 = drop_undefines_to_varying (vr1_, expr_type);
      if ((code == PLUS_EXPR || code == MINUS_EXPR))
        {
          extract_range_from_plus_minus_expr (vr, code, expr_type,
                                              &vr0, &vr1);
          return true;
        }
      if (POINTER_TYPE_P (expr_type) && code == POINTER_PLUS_EXPR)
        {
          extract_range_from_pointer_plus_expr (vr, code, expr_type,
                                                &vr0, &vr1);
          return true;
        }
      range_op_handler op (code, expr_type);
      if (!op)
        vr->set_varying (expr_type);
      vr0.normalize_symbolics ();
      vr1.normalize_symbolics ();
      return op.fold_range (*vr, expr_type, vr0, vr1);
    }
  return false;
}

/* If operand is symbolic, perform a unary operation on it and return
   TRUE, otherwise return FALSE.  */

static bool
range_fold_unary_symbolics_p (value_range *vr,
                              tree_code code,
                              tree expr_type,
                              const value_range *vr0)
{
  if (vr0->symbolic_p ())
    {
      if (code == NEGATE_EXPR)
        {
          /* -X is simply 0 - X.  */
          value_range zero;
          zero.set_zero (vr0->type ());
          range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &zero, vr0);
          return true;
        }
      if (code == BIT_NOT_EXPR)
        {
          /* ~X is simply -1 - X.  */
          value_range minusone;
          minusone.set (build_int_cst (vr0->type (), -1));
          range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &minusone, vr0);
          return true;
        }
      range_op_handler op (code, expr_type);
      if (!op)
        vr->set_varying (expr_type);
      value_range vr0_cst (*vr0);
      vr0_cst.normalize_symbolics ();
      return op.fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
    }
  return false;
}

/* Perform a binary operation on a pair of ranges.  */

void
range_fold_binary_expr (value_range *vr,
                        enum tree_code code,
                        tree expr_type,
                        const value_range *vr0_,
                        const value_range *vr1_)
{
  if (!supported_types_p (vr, expr_type)
      || !defined_ranges_p (vr, vr0_, vr1_))
    return;
  range_op_handler op (code, expr_type);
  if (!op)
    {
      vr->set_varying (expr_type);
      return;
    }

  if (range_fold_binary_symbolics_p (vr, code, expr_type, vr0_, vr1_))
    return;

  value_range vr0 (*vr0_);
  value_range vr1 (*vr1_);
  if (vr0.undefined_p ())
    vr0.set_varying (expr_type);
  if (vr1.undefined_p ())
    vr1.set_varying (expr_type);
  vr0.normalize_addresses ();
  vr1.normalize_addresses ();
  op.fold_range (*vr, expr_type, vr0, vr1);
}

/* Perform a unary operation on a range.  */

void
range_fold_unary_expr (value_range *vr,
                       enum tree_code code, tree expr_type,
                       const value_range *vr0,
                       tree vr0_type)
{
  if (!supported_types_p (vr, expr_type, vr0_type)
      || !defined_ranges_p (vr, vr0))
    return;
  range_op_handler op (code, expr_type);
  if (!op)
    {
      vr->set_varying (expr_type);
      return;
    }

  if (range_fold_unary_symbolics_p (vr, code, expr_type, vr0))
    return;

  value_range vr0_cst (*vr0);
  vr0_cst.normalize_addresses ();
  op.fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
}

/* If the range of values taken by OP can be inferred after STMT executes,
   return the comparison code (COMP_CODE_P) and value (VAL_P) that
   describes the inferred range.  Return true if a range could be
   inferred.  */

bool
infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
{
  *val_p = NULL_TREE;
  *comp_code_p = ERROR_MARK;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
    return false;

  /* If STMT is the last statement of a basic block with no normal
     successors, there is no point inferring anything about any of its
     operands.  We would not be able to find a proper insertion point
     for the assertion, anyway.  */
  if (stmt_ends_bb_p (stmt))
    {
      edge_iterator ei;
      edge e;

      FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
        if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
          break;
      if (e == NULL)
        return false;
    }

  if (infer_nonnull_range (stmt, op))
    {
      *val_p = build_int_cst (TREE_TYPE (op), 0);
      *comp_code_p = NE_EXPR;
      return true;
    }

  return false;
}

/* Dump assert_info structure.  */

void
dump_assert_info (FILE *file, const assert_info &assert)
{
  fprintf (file, "Assert for: ");
  print_generic_expr (file, assert.name);
  fprintf (file, "\n\tPREDICATE: expr=[");
  print_generic_expr (file, assert.expr);
  fprintf (file, "] %s ", get_tree_code_name (assert.comp_code));
  fprintf (file, "val=[");
  print_generic_expr (file, assert.val);
  fprintf (file, "]\n\n");
}

DEBUG_FUNCTION void
debug (const assert_info &assert)
{
  dump_assert_info (stderr, assert);
}

/* Dump a vector of assert_info's.  */

void
dump_asserts_info (FILE *file, const vec<assert_info> &asserts)
{
  for (unsigned i = 0; i < asserts.length (); ++i)
    {
      dump_assert_info (file, asserts[i]);
      fprintf (file, "\n");
    }
}

DEBUG_FUNCTION void
debug (const vec<assert_info> &asserts)
{
  dump_asserts_info (stderr, asserts);
}

/* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS.  */

static void
add_assert_info (vec<assert_info> &asserts,
                 tree name, tree expr, enum tree_code comp_code, tree val)
{
  assert_info info;
  info.comp_code = comp_code;
  info.name = name;
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  info.val = val;
  info.expr = expr;
  asserts.safe_push (info);
  if (dump_enabled_p ())
    dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
                 "Adding assert for %T from %T %s %T\n",
                 name, expr, op_symbol_code (comp_code), val);
}

/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
   Extract a suitable test code and value and store them into *CODE_P and
   *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.

   If no extraction was possible, return FALSE, otherwise return TRUE.

   If INVERT is true, then we invert the result stored into *CODE_P.  */

static bool
extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
                                         tree cond_op0, tree cond_op1,
                                         bool invert, enum tree_code *code_p,
                                         tree *val_p)
{
  enum tree_code comp_code;
  tree val;

  /* Otherwise, we have a comparison of the form NAME COMP VAL
     or VAL COMP NAME.  */
  if (name == cond_op1)
    {
      /* If the predicate is of the form VAL COMP NAME, flip
         COMP around because we need to register NAME as the
         first operand in the predicate.  */
      comp_code = swap_tree_comparison (cond_code);
      val = cond_op0;
    }
  else if (name == cond_op0)
    {
      /* The comparison is of the form NAME COMP VAL, so the
         comparison code remains unchanged.  */
      comp_code = cond_code;
      val = cond_op1;
    }
  else
    gcc_unreachable ();

  /* Invert the comparison code as necessary.  */
  if (invert)
    comp_code = invert_tree_comparison (comp_code, 0);

  /* VRP only handles integral and pointer types.  */
  if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
      && ! POINTER_TYPE_P (TREE_TYPE (val)))
    return false;

  /* Do not register always-false predicates.
     FIXME:  this works around a limitation in fold() when dealing with
     enumerations.  Given 'enum { N1, N2 } x;', fold will not
     fold 'if (x > N2)' to 'if (0)'.  */
  if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (val)))
    {
      tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
      tree max = TYPE_MAX_VALUE (TREE_TYPE (val));

      if (comp_code == GT_EXPR
          && (!max
              || compare_values (val, max) == 0))
        return false;

      if (comp_code == LT_EXPR
          && (!min
              || compare_values (val, min) == 0))
        return false;
    }
  *code_p = comp_code;
  *val_p = val;
  return true;
}

/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
   (otherwise return VAL).  VAL and MASK must be zero-extended for
   precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
   (to transform signed values into unsigned) and at the end xor
   SGNBIT back.  */
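/* For example, with VAL == 5 (0b0101), MASK == 0b1010, SGNBIT == 0 and
   PREC == 4, the values satisfying (RES & MASK) == RES are 0, 2, 8 and
   10, so the smallest one above VAL is 8.  */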
wide_int
masked_increment (const wide_int &val_in, const wide_int &mask,
                  const wide_int &sgnbit, unsigned int prec)
{
  wide_int bit = wi::one (prec), res;
  unsigned int i;

  wide_int val = val_in ^ sgnbit;
  for (i = 0; i < prec; i++, bit += bit)
    {
      res = mask;
      if ((res & bit) == 0)
        continue;
      res = bit - 1;
      res = wi::bit_and_not (val + bit, res);
      res &= mask;
      if (wi::gtu_p (res, val))
        return res ^ sgnbit;
    }
  return val ^ sgnbit;
}

/* Helper for overflow_comparison_p

   OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
   OP1's defining statement to see if it ultimately has the form
   OP0 CODE (OP0 PLUS INTEGER_CST)

   If so, return TRUE indicating this is an overflow test and store into
   *NEW_CST an updated constant that can be used in a narrowed range test.

   REVERSED indicates if the comparison was originally:

   OP1 CODE' OP0.

   This affects how we build the updated constant.  */
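/* For example, for unsigned OP0 and _1 = OP0 + 10, the test OP0 > _1
   is true exactly when OP0 + 10 wraps around, i.e. when OP0 > MAX - 10,
   so *NEW_CST is set to MAX - 10 and the test can be narrowed to a
   simple range test against that constant.  */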
static bool
overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
                         bool follow_assert_exprs, bool reversed, tree *new_cst)
{
  /* See if this is a relational operation between two SSA_NAMES with
     unsigned, overflow wrapping values.  If so, check it more deeply.  */
  if ((code == LT_EXPR || code == LE_EXPR
       || code == GE_EXPR || code == GT_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
    {
      gimple *op1_def = SSA_NAME_DEF_STMT (op1);

      /* If requested, follow any ASSERT_EXPRs backwards for OP1.  */
      if (follow_assert_exprs)
        {
          while (gimple_assign_single_p (op1_def)
                 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
            {
              op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
              if (TREE_CODE (op1) != SSA_NAME)
                break;
              op1_def = SSA_NAME_DEF_STMT (op1);
            }
        }

      /* Now look at the defining statement of OP1 to see if it adds
         or subtracts a nonzero constant from another operand.  */
      if (op1_def
          && is_gimple_assign (op1_def)
          && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
          && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
          && !integer_zerop (gimple_assign_rhs2 (op1_def)))
        {
          tree target = gimple_assign_rhs1 (op1_def);

          /* If requested, follow ASSERT_EXPRs backwards for op0 looking
             for one where TARGET appears on the RHS.  */
          if (follow_assert_exprs)
            {
              /* Now see if that "other operand" is op0, following the chain
                 of ASSERT_EXPRs if necessary.  */
              gimple *op0_def = SSA_NAME_DEF_STMT (op0);
              while (op0 != target
                     && gimple_assign_single_p (op0_def)
                     && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
                {
                  op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
                  if (TREE_CODE (op0) != SSA_NAME)
                    break;
                  op0_def = SSA_NAME_DEF_STMT (op0);
                }
            }

          /* If we did not find our target SSA_NAME, then this is not
             an overflow test.  */
          if (op0 != target)
            return false;

          tree type = TREE_TYPE (op0);
          wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
          tree inc = gimple_assign_rhs2 (op1_def);
          if (reversed)
            *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
          else
            *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
          return true;
        }
    }
  return false;
}

/* OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
   OP1's defining statement to see if it ultimately has the form
   OP0 CODE (OP0 PLUS INTEGER_CST)

   If so, return TRUE indicating this is an overflow test and store into
   *NEW_CST an updated constant that can be used in a narrowed range test.

   These statements are left as-is in the IL to facilitate discovery of
   {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline.  But
   the alternate range representation is often useful within VRP.  */

bool
overflow_comparison_p (tree_code code, tree name, tree val,
                       bool use_equiv_p, tree *new_cst)
{
  if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
    return true;
  return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
                                  use_equiv_p, true, new_cst);
}

/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump.  Invert the
   condition COND if INVERT is true.  */
static void
register_edge_assert_for_2 (tree name, edge e,
                            enum tree_code cond_code,
                            tree cond_op0, tree cond_op1, bool invert,
                            vec<assert_info> &asserts)
{
  tree val;
  enum tree_code comp_code;

  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
                                                cond_op0,
                                                cond_op1,
                                                invert, &comp_code, &val))
    return;

  /* Queue the assert.  */
  tree x;
  if (overflow_comparison_p (comp_code, name, val, false, &x))
    {
      enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
                                 ? GT_EXPR : LE_EXPR);
      add_assert_info (asserts, name, name, new_code, x);
    }
  add_assert_info (asserts, name, name, comp_code, val);

  /* In the case of NAME <= CST and NAME being defined as
     NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
     and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
     This catches range and anti-range tests.  */
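  /* E.g. for unsigned char, NAME = NAME2 + 20 and NAME <= 10 imply
     NAME2 lies in the wrapped interval [-20, -10], i.e. [236, 246],
     which is exactly what the two asserts NAME2 >= 236 and
     NAME2 <= 246 express.  */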
  if ((comp_code == LE_EXPR
       || comp_code == GT_EXPR)
      && TREE_CODE (val) == INTEGER_CST
      && TYPE_UNSIGNED (TREE_TYPE (val)))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);
      tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;

      /* Extract CST2 from the (optional) addition.  */
      if (is_gimple_assign (def_stmt)
          && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
        {
          name2 = gimple_assign_rhs1 (def_stmt);
          cst2 = gimple_assign_rhs2 (def_stmt);
          if (TREE_CODE (name2) == SSA_NAME
              && TREE_CODE (cst2) == INTEGER_CST)
            def_stmt = SSA_NAME_DEF_STMT (name2);
        }

      /* Extract NAME2 from the (optional) sign-changing cast.  */
      if (gassign *ass = dyn_cast <gassign *> (def_stmt))
        {
          if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (ass))
              && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (ass)))
              && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (ass)))
                  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (ass)))))
            name3 = gimple_assign_rhs1 (ass);
        }

      /* If name3 is used later, create an ASSERT_EXPR for it.  */
      if (name3 != NULL_TREE
          && TREE_CODE (name3) == SSA_NAME
          && (cst2 == NULL_TREE
              || TREE_CODE (cst2) == INTEGER_CST)
          && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
        {
          tree tmp;

          /* Build an expression for the range test.  */
          tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
          if (cst2 != NULL_TREE)
            tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
          add_assert_info (asserts, name3, tmp, comp_code, val);
        }

      /* If name2 is used later, create an ASSERT_EXPR for it.  */
      if (name2 != NULL_TREE
          && TREE_CODE (name2) == SSA_NAME
          && TREE_CODE (cst2) == INTEGER_CST
          && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
        {
          tree tmp;

          /* Build an expression for the range test.  */
          tmp = name2;
          if (TREE_TYPE (name) != TREE_TYPE (name2))
            tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
          if (cst2 != NULL_TREE)
            tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
          add_assert_info (asserts, name2, tmp, comp_code, val);
        }
    }

  /* In the case of post-in/decrement tests like if (i++) ... and uses
     of the in/decremented value on the edge the extra name we want to
     assert for is not on the def chain of the name compared.  Instead
     it is in the set of use stmts.
     Similar cases happen for conversions that were simplified through
     fold_{sign_changed,widened}_comparison.  */
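  /* E.g. given the test x_1 != 0 and a use y_2 = x_1 + 1 in a block
     dominating the edge's source, we can additionally record the
     assert y_2 != 1 here.  */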
  if ((comp_code == NE_EXPR
       || comp_code == EQ_EXPR)
      && TREE_CODE (val) == INTEGER_CST)
    {
      imm_use_iterator ui;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
        {
          if (!is_gimple_assign (use_stmt))
            continue;

          /* Cut off to use-stmts that are dominating the predecessor.  */
          if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
            continue;

          tree name2 = gimple_assign_lhs (use_stmt);
          if (TREE_CODE (name2) != SSA_NAME)
            continue;

          enum tree_code code = gimple_assign_rhs_code (use_stmt);
          tree cst;
          if (code == PLUS_EXPR
              || code == MINUS_EXPR)
            {
              cst = gimple_assign_rhs2 (use_stmt);
              if (TREE_CODE (cst) != INTEGER_CST)
                continue;
              cst = int_const_binop (code, val, cst);
            }
          else if (CONVERT_EXPR_CODE_P (code))
            {
              /* For truncating conversions we cannot record
                 an inequality.  */
              if (comp_code == NE_EXPR
                  && (TYPE_PRECISION (TREE_TYPE (name2))
                      < TYPE_PRECISION (TREE_TYPE (name))))
                continue;
              cst = fold_convert (TREE_TYPE (name2), val);
            }
          else
            continue;

          if (TREE_OVERFLOW_P (cst))
            cst = drop_tree_overflow (cst);
          add_assert_info (asserts, name2, name2, comp_code, cst);
        }
    }

  if (TREE_CODE_CLASS (comp_code) == tcc_comparison
      && TREE_CODE (val) == INTEGER_CST)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);
      tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
      tree val2 = NULL_TREE;
      unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
      wide_int mask = wi::zero (prec);
      unsigned int nprec = prec;
      enum tree_code rhs_code = ERROR_MARK;

      if (is_gimple_assign (def_stmt))
        rhs_code = gimple_assign_rhs_code (def_stmt);

      /* In the case of NAME != CST1 where NAME = A +- CST2 we can
         assert that A != CST1 -+ CST2.  */
      if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
          && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
        {
          tree op0 = gimple_assign_rhs1 (def_stmt);
          tree op1 = gimple_assign_rhs2 (def_stmt);
          if (TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (op1) == INTEGER_CST)
            {
              enum tree_code reverse_op = (rhs_code == PLUS_EXPR
                                           ? MINUS_EXPR : PLUS_EXPR);
              op1 = int_const_binop (reverse_op, val, op1);
              if (TREE_OVERFLOW (op1))
                op1 = drop_tree_overflow (op1);
              add_assert_info (asserts, op0, op0, comp_code, op1);
            }
        }

      /* Add asserts for NAME cmp CST and NAME being defined
         as NAME = (int) NAME2.  */
      if (!TYPE_UNSIGNED (TREE_TYPE (val))
          && (comp_code == LE_EXPR || comp_code == LT_EXPR
              || comp_code == GT_EXPR || comp_code == GE_EXPR)
          && gimple_assign_cast_p (def_stmt))
        {
          name2 = gimple_assign_rhs1 (def_stmt);
          if (CONVERT_EXPR_CODE_P (rhs_code)
              && TREE_CODE (name2) == SSA_NAME
              && INTEGRAL_TYPE_P (TREE_TYPE (name2))
              && TYPE_UNSIGNED (TREE_TYPE (name2))
              && prec == TYPE_PRECISION (TREE_TYPE (name2))
              && (comp_code == LE_EXPR || comp_code == GT_EXPR
                  || !tree_int_cst_equal (val,
                                          TYPE_MIN_VALUE (TREE_TYPE (val)))))
            {
              tree tmp, cst;
              enum tree_code new_comp_code = comp_code;

              cst = fold_convert (TREE_TYPE (name2),
                                  TYPE_MIN_VALUE (TREE_TYPE (val)));
              /* Build an expression for the range test.  */
              tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
              cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
                                 fold_convert (TREE_TYPE (name2), val));
              if (comp_code == LT_EXPR || comp_code == GE_EXPR)
                {
                  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
                  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
                                     build_int_cst (TREE_TYPE (name2), 1));
                }
              add_assert_info (asserts, name2, tmp, new_comp_code, cst);
            }
        }

      /* Add asserts for NAME cmp CST and NAME being defined as
         NAME = NAME2 >> CST2.

         Extract CST2 from the right shift.  */
      if (rhs_code == RSHIFT_EXPR)
        {
          name2 = gimple_assign_rhs1 (def_stmt);
          cst2 = gimple_assign_rhs2 (def_stmt);
          if (TREE_CODE (name2) == SSA_NAME
              && tree_fits_uhwi_p (cst2)
              && INTEGRAL_TYPE_P (TREE_TYPE (name2))
              && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
              && type_has_mode_precision_p (TREE_TYPE (val)))
            {
              mask = wi::mask (tree_to_uhwi (cst2), false, prec);
              val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
            }
        }
      if (val2 != NULL_TREE
          && TREE_CODE (val2) == INTEGER_CST
          && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
                                            TREE_TYPE (val),
                                            val2, cst2), val))
        {
          enum tree_code new_comp_code = comp_code;
          tree tmp, new_val;

          tmp = name2;
          if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
            {
              if (!TYPE_UNSIGNED (TREE_TYPE (val)))
                {
                  tree type = build_nonstandard_integer_type (prec, 1);
                  tmp = build1 (NOP_EXPR, type, name2);
                  val2 = fold_convert (type, val2);
                }
              tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
              new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
              new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
            }
          else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
            {
              wide_int minval
                = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
              new_val = val2;
              if (minval == wi::to_wide (new_val))
                new_val = NULL_TREE;
            }
          else
            {
              wide_int maxval
                = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
              mask |= wi::to_wide (val2);
              if (wi::eq_p (mask, maxval))
                new_val = NULL_TREE;
              else
                new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
            }

          if (new_val)
            add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
        }

      /* If we have a conversion that doesn't change the value of the source
         simply register the same assert for it.  */
      if (CONVERT_EXPR_CODE_P (rhs_code))
        {
          value_range vr;
          tree rhs1 = gimple_assign_rhs1 (def_stmt);
          if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
              && TREE_CODE (rhs1) == SSA_NAME
              /* Make sure the relation preserves the upper/lower boundary of
                 the range conservatively.  */
              && (comp_code == NE_EXPR
                  || comp_code == EQ_EXPR
                  || (TYPE_SIGN (TREE_TYPE (name))
                      == TYPE_SIGN (TREE_TYPE (rhs1)))
                  || ((comp_code == LE_EXPR
                       || comp_code == LT_EXPR)
                      && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
                  || ((comp_code == GE_EXPR
                       || comp_code == GT_EXPR)
                      && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
              /* And the conversion does not alter the value we compare
                 against and all values in rhs1 can be represented in
                 the converted to type.  */
              && int_fits_type_p (val, TREE_TYPE (rhs1))
              && ((TYPE_PRECISION (TREE_TYPE (name))
                   > TYPE_PRECISION (TREE_TYPE (rhs1)))
                  || ((get_range_query (cfun)->range_of_expr (vr, rhs1)
                       && vr.kind () == VR_RANGE)
                      && wi::fits_to_tree_p
                           (widest_int::from (vr.lower_bound (),
                                              TYPE_SIGN (TREE_TYPE (rhs1))),
                            TREE_TYPE (name))
                      && wi::fits_to_tree_p
                           (widest_int::from (vr.upper_bound (),
                                              TYPE_SIGN (TREE_TYPE (rhs1))),
                            TREE_TYPE (name)))))
            add_assert_info (asserts, rhs1, rhs1,
                             comp_code, fold_convert (TREE_TYPE (rhs1), val));
        }

      /* Add asserts for NAME cmp CST and NAME being defined as
         NAME = NAME2 & CST2.

         Extract CST2 from the and.

         Also handle
         NAME = (unsigned) NAME2;
         casts where NAME's type is unsigned and has smaller precision
         than NAME2's type as if it was NAME = NAME2 & MASK.  */
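      /* E.g. NAME = (unsigned char) NAME2 with a wider NAME2 is
         handled as NAME = NAME2 & 0xff, so a test such as NAME == 0x21
         constrains the low bits of NAME2 as well.  */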
1743 names[0] = NULL_TREE;
1744 names[1] = NULL_TREE;
1745 cst2 = NULL_TREE;
1746 if (rhs_code == BIT_AND_EXPR
1747 || (CONVERT_EXPR_CODE_P (rhs_code)
1748 && INTEGRAL_TYPE_P (TREE_TYPE (val))
1749 && TYPE_UNSIGNED (TREE_TYPE (val))
1750 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
1751 > prec))
1753 name2 = gimple_assign_rhs1 (def_stmt);
1754 if (rhs_code == BIT_AND_EXPR)
1755 cst2 = gimple_assign_rhs2 (def_stmt);
1756 else
1758 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
1759 nprec = TYPE_PRECISION (TREE_TYPE (name2));
1761 if (TREE_CODE (name2) == SSA_NAME
1762 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1763 && TREE_CODE (cst2) == INTEGER_CST
1764 && !integer_zerop (cst2)
1765 && (nprec > 1
1766 || TYPE_UNSIGNED (TREE_TYPE (val))))
1768 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
1769 if (gimple_assign_cast_p (def_stmt2))
1771 names[1] = gimple_assign_rhs1 (def_stmt2);
1772 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
1773 || TREE_CODE (names[1]) != SSA_NAME
1774 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
1775 || (TYPE_PRECISION (TREE_TYPE (name2))
1776 != TYPE_PRECISION (TREE_TYPE (names[1]))))
1777 names[1] = NULL_TREE;
1779 names[0] = name2;
1782 if (names[0] || names[1])
1784 wide_int minv, maxv, valv, cst2v;
1785 wide_int tem, sgnbit;
1786 bool valid_p = false, valn, cst2n;
1787 enum tree_code ccode = comp_code;
1789 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
1790 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
1791 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
1792 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
1793 /* If CST2 doesn't have most significant bit set,
1794 but VAL is negative, we have comparison like
1795 if ((x & 0x123) > -4) (always true). Just give up. */
1796 if (!cst2n && valn)
1797 ccode = ERROR_MARK;
1798 if (cst2n)
1799 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
1800 else
1801 sgnbit = wi::zero (nprec);
1802 minv = valv & cst2v;
1803 switch (ccode)
1805 case EQ_EXPR:
1806 /* Minimum unsigned value for equality is VAL & CST2
1807 (should be equal to VAL, otherwise we probably should
1808 have folded the comparison into false) and
1809 maximum unsigned value is VAL | ~CST2. */
1810 maxv = valv | ~cst2v;
1811 valid_p = true;
1812 break;
1814 case NE_EXPR:
1815 tem = valv | ~cst2v;
1816 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
1817 if (valv == 0)
1819 cst2n = false;
1820 sgnbit = wi::zero (nprec);
1821 goto gt_expr;
1823 /* If (VAL | ~CST2) is all ones, handle it as
1824 (X & CST2) < VAL. */
1825 if (tem == -1)
1827 cst2n = false;
1828 valn = false;
1829 sgnbit = wi::zero (nprec);
1830 goto lt_expr;
1832 if (!cst2n && wi::neg_p (cst2v))
1833 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
1834 if (sgnbit != 0)
1836 if (valv == sgnbit)
1838 cst2n = true;
1839 valn = true;
1840 goto gt_expr;
1842 if (tem == wi::mask (nprec - 1, false, nprec))
1844 cst2n = true;
1845 goto lt_expr;
1847 if (!cst2n)
1848 sgnbit = wi::zero (nprec);
1850 break;
1852 case GE_EXPR:
1853 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
1854 is VAL and maximum unsigned value is ~0. For signed
1855 comparison, if CST2 doesn't have most significant bit
1856 set, handle it similarly. If CST2 has MSB set,
1857 the minimum is the same, and maximum is ~0U/2. */
1858 if (minv != valv)
1860 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
1861 VAL. */
1862 minv = masked_increment (valv, cst2v, sgnbit, nprec);
1863 if (minv == valv)
1864 break;
1866 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
1867 valid_p = true;
1868 break;
1870 case GT_EXPR:
1871 gt_expr:
1872 /* Find out smallest MINV where MINV > VAL
1873 && (MINV & CST2) == MINV, if any. If VAL is signed and
1874 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
1875 minv = masked_increment (valv, cst2v, sgnbit, nprec);
1876 if (minv == valv)
1877 break;
1878 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
1879 valid_p = true;
1880 break;
1882 case LE_EXPR:
1883 /* Minimum unsigned value for <= is 0 and maximum
1884 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
1885 Otherwise, find smallest VAL2 where VAL2 > VAL
1886 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
1887 as maximum.
1888 For signed comparison, if CST2 doesn't have most
1889 significant bit set, handle it similarly. If CST2 has
1890 MSB set, the maximum is the same and minimum is INT_MIN. */
1891 if (minv == valv)
1892 maxv = valv;
1893 else
1895 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
1896 if (maxv == valv)
1897 break;
1898 maxv -= 1;
1900 maxv |= ~cst2v;
1901 minv = sgnbit;
1902 valid_p = true;
1903 break;
1905 case LT_EXPR:
1906 lt_expr:
1907 /* Minimum unsigned value for < is 0 and maximum
1908 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
1909 Otherwise, find smallest VAL2 where VAL2 > VAL
1910 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
1911 as maximum.
1912 For signed comparison, if CST2 doesn't have most
1913 significant bit set, handle it similarly. If CST2 has
1914 MSB set, the maximum is the same and minimum is INT_MIN. */
1915 if (minv == valv)
1917 if (valv == sgnbit)
1918 break;
1919 maxv = valv;
1921 else
1923 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
1924 if (maxv == valv)
1925 break;
1927 maxv -= 1;
1928 maxv |= ~cst2v;
1929 minv = sgnbit;
1930 valid_p = true;
1931 break;
1933 default:
1934 break;
1936 if (valid_p
1937 && (maxv - minv) != -1)
1939 tree tmp, new_val, type;
1940 int i;
1942 for (i = 0; i < 2; i++)
1943 if (names[i])
1945 wide_int maxv2 = maxv;
1946 tmp = names[i];
1947 type = TREE_TYPE (names[i]);
1948 if (!TYPE_UNSIGNED (type))
1950 type = build_nonstandard_integer_type (nprec, 1);
1951 tmp = build1 (NOP_EXPR, type, names[i]);
1953 if (minv != 0)
1955 tmp = build2 (PLUS_EXPR, type, tmp,
1956 wide_int_to_tree (type, -minv));
1957 maxv2 = maxv - minv;
1959 new_val = wide_int_to_tree (type, maxv2);
1960 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
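/* As a worked example (the SSA name and constants here are
   illustrative, not from the sources): for if ((x_3 & 0xfc) == 0x10)
   the EQ_EXPR case above computes MINV = 0x10 & 0xfc = 0x10 and
   MAXV = 0x10 | ~0xfc = 0x13, and the loop over NAMES then records
   the single unsigned assertion

     (unsigned) x_3 - 0x10 <= 0x3

   which is equivalent to x_3 being in the range [0x10, 0x13].  */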
1967 /* OP is an operand of a truth value expression which is known to have
1968 a particular value. Register any asserts for OP and for any
1969 operands in OP's defining statement.
1971 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
1972 if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */
1974 static void
1975 register_edge_assert_for_1 (tree op, enum tree_code code,
1976 edge e, vec<assert_info> &asserts)
1978 gimple *op_def;
1979 tree val;
1980 enum tree_code rhs_code;
1982 /* We only care about SSA_NAMEs. */
1983 if (TREE_CODE (op) != SSA_NAME)
1984 return;
1986 /* Depending on CODE, we know that OP has a zero or a nonzero value. */
1987 val = build_int_cst (TREE_TYPE (op), 0);
1988 add_assert_info (asserts, op, op, code, val);
1990 /* Now look at how OP is set. If it's set from a comparison,
1991 a truth operation or some bit operations, then we may be able
1992 to register information about the operands of that assignment. */
1993 op_def = SSA_NAME_DEF_STMT (op);
1994 if (gimple_code (op_def) != GIMPLE_ASSIGN)
1995 return;
1997 rhs_code = gimple_assign_rhs_code (op_def);
1999 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
2001 bool invert = (code == EQ_EXPR);
2002 tree op0 = gimple_assign_rhs1 (op_def);
2003 tree op1 = gimple_assign_rhs2 (op_def);
2005 if (TREE_CODE (op0) == SSA_NAME)
2006 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
2007 if (TREE_CODE (op1) == SSA_NAME)
2008 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
2010 else if ((code == NE_EXPR
2011 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
2012 || (code == EQ_EXPR
2013 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
2015 /* Recurse on each operand. */
2016 tree op0 = gimple_assign_rhs1 (op_def);
2017 tree op1 = gimple_assign_rhs2 (op_def);
2018 if (TREE_CODE (op0) == SSA_NAME
2019 && has_single_use (op0))
2020 register_edge_assert_for_1 (op0, code, e, asserts);
2021 if (TREE_CODE (op1) == SSA_NAME
2022 && has_single_use (op1))
2023 register_edge_assert_for_1 (op1, code, e, asserts);
2025 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
2026 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
2028 /* Recurse, flipping CODE. */
2029 code = invert_tree_comparison (code, false);
2030 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2032 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
2034 /* Recurse through the copy. */
2035 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2037 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
2039 /* Recurse through the type conversion, unless it is a narrowing
2040 conversion or a conversion from a non-integral type. */
2041 tree rhs = gimple_assign_rhs1 (op_def);
2042 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2043 && (TYPE_PRECISION (TREE_TYPE (rhs))
2044 <= TYPE_PRECISION (TREE_TYPE (op))))
2045 register_edge_assert_for_1 (rhs, code, e, asserts);
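/* For instance, for a condition lowered as

     _1 = a_5 < b_6;
     _2 = c_7 != 0;
     _3 = _1 & _2;
     if (_3 != 0) ...

   a call with OP == _3 and CODE == NE_EXPR recurses through the
   BIT_AND_EXPR into _1 and _2 (both single-use), and their comparison
   definitions then register asserts for a_5, b_6 and c_7 on the true
   edge as well.  */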
2049 /* Check if the comparison
2050 NAME COND_OP INTEGER_CST
2051 has the form
2052 (X & 11...100..0) COND_OP XX...X00...0
2053 Such a comparison can yield assertions like
2054 X >= XX...X00...0
2055 X <= XX...X11...1
2056 when COND_OP is EQ_EXPR, or
2057 X < XX...X00...0
2058 X > XX...X11...1
2059 when it is NE_EXPR. */
2061 static bool
2062 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
2063 tree *new_name, tree *low, enum tree_code *low_code,
2064 tree *high, enum tree_code *high_code)
2066 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2068 if (!is_gimple_assign (def_stmt)
2069 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
2070 return false;
2072 tree t = gimple_assign_rhs1 (def_stmt);
2073 tree maskt = gimple_assign_rhs2 (def_stmt);
2074 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
2075 return false;
2077 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
2078 wide_int inv_mask = ~mask;
2079 /* A mask of all zeros or all ones must have been folded away by now,
2080 so don't bother optimizing. */
2080 if (mask == 0 || inv_mask == 0)
2081 return false;
2083 /* Assume VALT is INTEGER_CST. */
2084 wi::tree_to_wide_ref val = wi::to_wide (valt);
2086 if ((inv_mask & (inv_mask + 1)) != 0
2087 || (val & mask) != val)
2088 return false;
2090 bool is_range = cond_code == EQ_EXPR;
2092 tree type = TREE_TYPE (t);
2093 wide_int min = wi::min_value (type),
2094 max = wi::max_value (type);
2096 if (is_range)
2098 *low_code = val == min ? ERROR_MARK : GE_EXPR;
2099 *high_code = val == max ? ERROR_MARK : LE_EXPR;
2101 else
2103 /* We can still generate an assertion if one of the two
2104 alternatives is known to be always false. */
2105 if (val == min)
2107 *low_code = (enum tree_code) 0;
2108 *high_code = GT_EXPR;
2110 else if ((val | inv_mask) == max)
2112 *low_code = LT_EXPR;
2113 *high_code = (enum tree_code) 0;
2115 else
2116 return false;
2119 *new_name = t;
2120 *low = wide_int_to_tree (type, val);
2121 *high = wide_int_to_tree (type, val | inv_mask);
2123 return true;
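/* For instance, for an unsigned char X, (x_5 & 0xf8) == 0x20 passes
   the checks above: the inverted mask 0x07 is a contiguous run of low
   bits and 0x20 has none of them set.  We return *LOW = 0x20 with
   GE_EXPR and *HIGH = 0x27 with LE_EXPR, i.e. the test is equivalent
   to 0x20 <= x_5 && x_5 <= 0x27.  For (x_5 & 0xf8) != 0, VAL is the
   type minimum, so only the single assertion x_5 > 0x07 survives.  */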
2126 /* Try to register an edge assertion for SSA name NAME on edge E for
2127 the condition COND contributing to the conditional jump pointed to by
2128 SI. */
2130 void
2131 register_edge_assert_for (tree name, edge e,
2132 enum tree_code cond_code, tree cond_op0,
2133 tree cond_op1, vec<assert_info> &asserts)
2135 tree val;
2136 enum tree_code comp_code;
2137 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
2139 /* Do not attempt to infer anything for names that flow through
2140 abnormal edges. */
2141 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
2142 return;
2144 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2145 cond_op0, cond_op1,
2146 is_else_edge,
2147 &comp_code, &val))
2148 return;
2150 /* Register ASSERT_EXPRs for name. */
2151 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
2152 cond_op1, is_else_edge, asserts);
2155 /* If COND is effectively an equality test of an SSA_NAME against
2156 the value zero or one, then we may be able to assert values
2157 for SSA_NAMEs which flow into COND. */
2159 /* In the case of NAME == 1 or NAME != 0, if the defining statement
2160 of NAME is a BIT_AND_EXPR, we can assert that both operands of
2161 the BIT_AND_EXPR are nonzero. */
2162 if ((comp_code == EQ_EXPR && integer_onep (val))
2163 || (comp_code == NE_EXPR && integer_zerop (val)))
2165 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2167 if (is_gimple_assign (def_stmt)
2168 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
2170 tree op0 = gimple_assign_rhs1 (def_stmt);
2171 tree op1 = gimple_assign_rhs2 (def_stmt);
2172 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
2173 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
2175 else if (is_gimple_assign (def_stmt)
2176 && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2177 == tcc_comparison))
2178 register_edge_assert_for_1 (name, NE_EXPR, e, asserts);
2181 /* In the case of NAME == 0 or NAME != 1, if the defining statement
2182 of NAME is a BIT_IOR_EXPR, we can assert that both operands of
2183 the BIT_IOR_EXPR are zero. */
2184 if ((comp_code == EQ_EXPR && integer_zerop (val))
2185 || (comp_code == NE_EXPR
2186 && integer_onep (val)
2187 && TYPE_PRECISION (TREE_TYPE (name)) == 1))
2189 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2191 /* For BIT_IOR_EXPR, both operands necessarily have zero value
2192 only if NAME == 0, or if the type precision is one. */
2193 if (is_gimple_assign (def_stmt)
2194 && gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
2196 tree op0 = gimple_assign_rhs1 (def_stmt);
2197 tree op1 = gimple_assign_rhs2 (def_stmt);
2198 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
2199 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
2201 else if (is_gimple_assign (def_stmt)
2202 && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2203 == tcc_comparison))
2204 register_edge_assert_for_1 (name, EQ_EXPR, e, asserts);
2207 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
2208 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2209 && TREE_CODE (val) == INTEGER_CST)
2211 enum tree_code low_code, high_code;
2212 tree low, high;
2213 if (is_masked_range_test (name, val, comp_code, &name, &low,
2214 &low_code, &high, &high_code))
2216 if (low_code != ERROR_MARK)
2217 register_edge_assert_for_2 (name, e, low_code, name,
2218 low, /*invert*/false, asserts);
2219 if (high_code != ERROR_MARK)
2220 register_edge_assert_for_2 (name, e, high_code, name,
2221 high, /*invert*/false, asserts);
2226 /* Handle
2227 _4 = x_3 & 31;
2228 if (_4 != 0)
2229 goto <bb 6>;
2230 else
2231 goto <bb 7>;
2232 <bb 6>:
2233 __builtin_unreachable ();
2234 <bb 7>:
2235 x_5 = ASSERT_EXPR <x_3, ...>;
2236 If x_3 has no other immediate uses (checked by the caller) and
2237 VAR is the x_3 var from the ASSERT_EXPR, we can clear the low 5
2238 bits from the nonzero bitmask. */
2240 void
2241 maybe_set_nonzero_bits (edge e, tree var)
2243 basic_block cond_bb = e->src;
2244 gimple *stmt = last_stmt (cond_bb);
2245 tree cst;
2247 if (stmt == NULL
2248 || gimple_code (stmt) != GIMPLE_COND
2249 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
2250 ? EQ_EXPR : NE_EXPR)
2251 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
2252 || !integer_zerop (gimple_cond_rhs (stmt)))
2253 return;
2255 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2256 if (!is_gimple_assign (stmt)
2257 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
2258 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
2259 return;
2260 if (gimple_assign_rhs1 (stmt) != var)
2262 gimple *stmt2;
2264 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
2265 return;
2266 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
2267 if (!gimple_assign_cast_p (stmt2)
2268 || gimple_assign_rhs1 (stmt2) != var
2269 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
2270 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
2271 != TYPE_PRECISION (TREE_TYPE (var))))
2272 return;
2274 cst = gimple_assign_rhs2 (stmt);
2275 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
2276 wi::to_wide (cst)));
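/* For instance, if the nonzero-bits mask of VAR is 0xff (all bits may
   be set) and the branch tests (x_3 & 0x1f) == 0, then on the edge E
   the low five bits of VAR are known to be zero and the call above
   narrows the mask to 0xff & ~0x1f == 0xe0.  */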
2279 /* Return true if STMT is interesting for VRP. */
2281 bool
2282 stmt_interesting_for_vrp (gimple *stmt)
2284 if (gimple_code (stmt) == GIMPLE_PHI)
2286 tree res = gimple_phi_result (stmt);
2287 return (!virtual_operand_p (res)
2288 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
2289 || POINTER_TYPE_P (TREE_TYPE (res))));
2291 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
2293 tree lhs = gimple_get_lhs (stmt);
2295 /* In general, assignments with virtual operands are not useful
2296 for deriving ranges, with the obvious exception of calls to
2297 builtin functions. */
2298 if (lhs && TREE_CODE (lhs) == SSA_NAME
2299 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
2300 || POINTER_TYPE_P (TREE_TYPE (lhs)))
2301 && (is_gimple_call (stmt)
2302 || !gimple_vuse (stmt)))
2303 return true;
2304 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
2305 switch (gimple_call_internal_fn (stmt))
2307 case IFN_ADD_OVERFLOW:
2308 case IFN_SUB_OVERFLOW:
2309 case IFN_MUL_OVERFLOW:
2310 case IFN_ATOMIC_COMPARE_EXCHANGE:
2311 /* These internal calls return a _Complex integer type,
2312 but are interesting to VRP nevertheless. */
2313 if (lhs && TREE_CODE (lhs) == SSA_NAME)
2314 return true;
2315 break;
2316 default:
2317 break;
2320 else if (gimple_code (stmt) == GIMPLE_COND
2321 || gimple_code (stmt) == GIMPLE_SWITCH)
2322 return true;
2324 return false;
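/* For instance, an assignment x_2 = y_3 + 1 with integral x_2 and a
   GIMPLE_COND are interesting, while a store *p_5 = x_2 (its LHS is
   not an SSA name) or an assignment producing a floating-point result
   are not.  */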
2327 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
2328 that includes the value VAL. The search is restricted to the range
2329 [START_IDX, n - 1] where n is the size of VEC.
2331 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
2332 returned.
2334 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
2335 it is placed in IDX and false is returned.
2337 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
2338 returned. */
2340 bool
2341 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
2343 size_t n = gimple_switch_num_labels (stmt);
2344 size_t low, high;
2346 /* Find the case label for the minimum of the value range or the next one.
2347 At each iteration we are searching in [low, high - 1]. */
2349 for (low = start_idx, high = n; high != low; )
2351 tree t;
2352 int cmp;
2353 /* Note that i != high, so we never ask for n. */
2354 size_t i = (high + low) / 2;
2355 t = gimple_switch_label (stmt, i);
2357 /* Cache the result of comparing CASE_LOW and val. */
2358 cmp = tree_int_cst_compare (CASE_LOW (t), val);
2360 if (cmp == 0)
2362 /* Ranges cannot be empty. */
2363 *idx = i;
2364 return true;
2366 else if (cmp > 0)
2367 high = i;
2368 else
2370 low = i + 1;
2371 if (CASE_HIGH (t) != NULL
2372 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
2374 *idx = i;
2375 return true;
2380 *idx = high;
2381 return false;
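/* For instance, for a switch whose labels at indices 1, 2 and 3 have
   CASE_LOWs 1, 4 and 7 (index 0 being the default), searching for
   VAL == 4 sets *IDX to 2 and returns true, while searching for
   VAL == 5 returns false with *IDX == 3, the first label whose
   CASE_LOW is larger than VAL.  */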
2384 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
2385 for values between MIN and MAX. The first index is placed in MIN_IDX. The
2386 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
2387 then MAX_IDX < MIN_IDX.
2388 Returns true if the default label is not needed. */
2390 bool
2391 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
2392 size_t *max_idx)
2394 size_t i, j;
2395 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
2396 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
2398 if (i == j
2399 && min_take_default
2400 && max_take_default)
2402 /* Only the default case label reached.
2403 Return an empty range. */
2404 *min_idx = 1;
2405 *max_idx = 0;
2406 return false;
2408 else
2410 bool take_default = min_take_default || max_take_default;
2411 tree low, high;
2412 size_t k;
2414 if (max_take_default)
2415 j--;
2417 /* If the case label range is contiguous, we do not need
2418 the default case label. Verify that. */
2419 high = CASE_LOW (gimple_switch_label (stmt, i));
2420 if (CASE_HIGH (gimple_switch_label (stmt, i)))
2421 high = CASE_HIGH (gimple_switch_label (stmt, i));
2422 for (k = i + 1; k <= j; ++k)
2424 low = CASE_LOW (gimple_switch_label (stmt, k));
2425 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
2427 take_default = true;
2428 break;
2430 high = low;
2431 if (CASE_HIGH (gimple_switch_label (stmt, k)))
2432 high = CASE_HIGH (gimple_switch_label (stmt, k));
2435 *min_idx = i;
2436 *max_idx = j;
2437 return !take_default;
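/* For instance, with case labels 3, 4 and 5, querying
   [MIN, MAX] == [3, 5] returns the index range of those three labels
   and true: the label values are contiguous, so the default label is
   not needed.  With labels 3 and 5 only, the gap at 4 makes the loop
   above set TAKE_DEFAULT, and false is returned.  */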
2441 /* Given a SWITCH_STMT, return the case label that encompasses the
2442 known possible values for the switch operand. RANGE_OF_OP is a
2443 range for the known values of the switch operand. */
2445 tree
2446 find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
2448 if (range_of_op->undefined_p ()
2449 || range_of_op->varying_p ()
2450 || range_of_op->symbolic_p ())
2451 return NULL_TREE;
2453 size_t i, j;
2454 tree op = gimple_switch_index (switch_stmt);
2455 tree type = TREE_TYPE (op);
2456 tree tmin = wide_int_to_tree (type, range_of_op->lower_bound ());
2457 tree tmax = wide_int_to_tree (type, range_of_op->upper_bound ());
2458 find_case_label_range (switch_stmt, tmin, tmax, &i, &j);
2459 if (i == j)
2461 /* Look for exactly one label that encompasses the range of
2462 the operand. */
2463 tree label = gimple_switch_label (switch_stmt, i);
2464 tree case_high
2465 = CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
2466 int_range_max label_range (CASE_LOW (label), case_high);
2467 if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2468 range_cast (label_range, range_of_op->type ());
2469 label_range.intersect (*range_of_op);
2470 if (label_range == *range_of_op)
2471 return label;
2473 else if (i > j)
2475 /* If there are no labels at all, take the default. */
2476 return gimple_switch_label (switch_stmt, 0);
2478 else
2480 /* Otherwise, there are various labels that can encompass
2481 the range of the operand. In that case, see if the range of
2482 the operand is entirely *outside* the bounds of all the
2483 (non-default) case labels. If so, take the default. */
2484 unsigned n = gimple_switch_num_labels (switch_stmt);
2485 tree min_label = gimple_switch_label (switch_stmt, 1);
2486 tree max_label = gimple_switch_label (switch_stmt, n - 1);
2487 tree case_high = CASE_HIGH (max_label);
2488 if (!case_high)
2489 case_high = CASE_LOW (max_label);
2490 int_range_max label_range (CASE_LOW (min_label), case_high);
2491 if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2492 range_cast (label_range, range_of_op->type ());
2493 label_range.intersect (*range_of_op);
2494 if (label_range.undefined_p ())
2495 return gimple_switch_label (switch_stmt, 0);
2497 return NULL_TREE;
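/* For instance, if the operand's known range is [11, 15] and the only
   label found for it is case 10 ... 20, that label's range intersected
   with [11, 15] is still [11, 15], so the label is returned.  If the
   operand's known values instead miss every case label, the default
   label is returned.  */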
2500 struct case_info
2502 tree expr;
2503 basic_block bb;
2506 /* Location information for ASSERT_EXPRs. Each instance of this
2507 structure describes an ASSERT_EXPR for an SSA name. Since a single
2508 SSA name may have more than one assertion associated with it, these
2509 locations are kept in a linked list attached to the corresponding
2510 SSA name. */
2511 struct assert_locus
2513 /* Basic block where the assertion would be inserted. */
2514 basic_block bb;
2516 /* Some assertions need to be inserted on an edge (e.g., assertions
2517 generated by COND_EXPRs). In those cases, BB will be NULL. */
2518 edge e;
2520 /* Pointer to the statement that generated this assertion. */
2521 gimple_stmt_iterator si;
2523 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
2524 enum tree_code comp_code;
2526 /* Value being compared against. */
2527 tree val;
2529 /* Expression to compare. */
2530 tree expr;
2532 /* Next node in the linked list. */
2533 assert_locus *next;
2536 /* Class to traverse the flowgraph looking for conditional jumps to
2537 insert ASSERT_EXPR range expressions. These range expressions are
2538 meant to provide information to optimizations that need to reason
2539 in terms of value ranges. They will not be expanded into RTL. */
2541 class vrp_asserts
2543 public:
2544 vrp_asserts (struct function *fn) : fun (fn) { }
2546 void insert_range_assertions ();
2548 /* Convert range assertion expressions into the implied copies and
2549 copy propagate away the copies. */
2550 void remove_range_assertions ();
2552 /* Dump all the registered assertions for all the names to FILE. */
2553 void dump (FILE *);
2555 /* Dump all the registered assertions for NAME to FILE. */
2556 void dump (FILE *file, tree name);
2558 /* Dump all the registered assertions for NAME to stderr. */
2559 void debug (tree name)
2561 dump (stderr, name);
2564 /* Dump all the registered assertions for all the names to stderr. */
2565 void debug ()
2567 dump (stderr);
2570 private:
2571 /* Set of SSA names found live during the RPO traversal of the function
2572 for still active basic-blocks. */
2573 live_names live;
2575 /* Function to work on. */
2576 struct function *fun;
2578 /* If bit I is present, it means that SSA name N_i has a list of
2579 assertions that should be inserted in the IL. */
2580 bitmap need_assert_for;
2582 /* Array of location lists saying where to insert assertions. ASSERTS_FOR[I]
2583 holds a list of ASSERT_LOCUS_T nodes that describe where
2584 ASSERT_EXPRs for SSA name N_I should be inserted. */
2585 assert_locus **asserts_for;
2587 /* Finish the ASSERTS found for E and register them at GSI. */
2588 void finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
2589 vec<assert_info> &asserts);
2591 /* Determine whether the outgoing edges of BB should receive an
2592 ASSERT_EXPR for each of the operands of BB's LAST statement. The
2593 last statement of BB must be a SWITCH_EXPR.
2595 If any of the sub-graphs rooted at BB have an interesting use of
2596 the predicate operands, an assert location node is added to the
2597 list of assertions for the corresponding operands. */
2598 void find_switch_asserts (basic_block bb, gswitch *last);
2600 /* Do an RPO walk over the function computing SSA name liveness
2601 on-the-fly and deciding on assert expressions to insert. */
2602 void find_assert_locations ();
2604 /* Traverse all the statements in block BB looking for statements that
2605 may generate useful assertions for the SSA names in their operands.
2606 See the method implementation commentary for more information. */
2607 void find_assert_locations_in_bb (basic_block bb);
2609 /* Determine whether the outgoing edges of BB should receive an
2610 ASSERT_EXPR for each of the operands of BB's LAST statement.
2611 The last statement of BB must be a COND_EXPR.
2613 If any of the sub-graphs rooted at BB have an interesting use of
2614 the predicate operands, an assert location node is added to the
2615 list of assertions for the corresponding operands. */
2616 void find_conditional_asserts (basic_block bb, gcond *last);
2618 /* Process all the insertions registered for every name N_i registered
2619 in NEED_ASSERT_FOR. The list of assertions to be inserted are
2620 found in ASSERTS_FOR[i]. */
2621 void process_assert_insertions ();
2623 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2624 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2625 E->DEST, then register this location as a possible insertion point
2626 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2628 BB, E and SI provide the exact insertion point for the new
2629 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2630 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2631 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2632 must not be NULL. */
2633 void register_new_assert_for (tree name, tree expr,
2634 enum tree_code comp_code,
2635 tree val, basic_block bb,
2636 edge e, gimple_stmt_iterator si);
2638 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2639 create a new SSA name N and return the assertion assignment
2640 'N = ASSERT_EXPR <V, V OP W>'. */
2641 gimple *build_assert_expr_for (tree cond, tree v);
2643 /* Create an ASSERT_EXPR for NAME and insert it in the location
2644 indicated by LOC. Return true if we made any edge insertions. */
2645 bool process_assert_insertions_for (tree name, assert_locus *loc);
2647 /* Qsort callback for sorting assert locations. */
2648 template <bool stable> static int compare_assert_loc (const void *,
2649 const void *);
2651 /* Return true if STMT is a predicate expression involving
2652 floating-point values. */
2653 bool fp_predicate (gimple *stmt)
2655 GIMPLE_CHECK (stmt, GIMPLE_COND);
2656 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2659 bool all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt,
2660 basic_block cond_bb);
2662 static int compare_case_labels (const void *, const void *);
2665 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2666 create a new SSA name N and return the assertion assignment
2667 'N = ASSERT_EXPR <V, V OP W>'. */
2669 gimple *
2670 vrp_asserts::build_assert_expr_for (tree cond, tree v)
2672 tree a;
2673 gassign *assertion;
2675 gcc_assert (TREE_CODE (v) == SSA_NAME
2676 && COMPARISON_CLASS_P (cond));
2678 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2679 assertion = gimple_build_assign (NULL_TREE, a);
2681 /* The new ASSERT_EXPR creates a new SSA name that replaces the
2682 operand of the ASSERT_EXPR. Create it so that the new name and
2683 the old one are registered in the replacement table and we can
2684 fix the SSA web after adding all the ASSERT_EXPRs. */
2685 tree new_def = create_new_def_for (v, assertion, NULL);
2686 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2687 given we have to be able to fully propagate those out to re-create
2688 valid SSA when removing the asserts. */
2689 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2690 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2692 return assertion;
2695 /* Dump all the registered assertions for NAME to FILE. */
2697 void
2698 vrp_asserts::dump (FILE *file, tree name)
2700 assert_locus *loc;
2702 fprintf (file, "Assertions to be inserted for ");
2703 print_generic_expr (file, name);
2704 fprintf (file, "\n");
2706 loc = asserts_for[SSA_NAME_VERSION (name)];
2707 while (loc)
2709 fprintf (file, "\t");
2710 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2711 fprintf (file, "\n\tBB #%d", loc->bb->index);
2712 if (loc->e)
2714 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2715 loc->e->dest->index);
2716 dump_edge_info (file, loc->e, dump_flags, 0);
2718 fprintf (file, "\n\tPREDICATE: ");
2719 print_generic_expr (file, loc->expr);
2720 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2721 print_generic_expr (file, loc->val);
2722 fprintf (file, "\n\n");
2723 loc = loc->next;
2726 fprintf (file, "\n");
2729 /* Dump all the registered assertions for all the names to FILE. */
2731 void
2732 vrp_asserts::dump (FILE *file)
2734 unsigned i;
2735 bitmap_iterator bi;
2737 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2738 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2739 dump (file, ssa_name (i));
2740 fprintf (file, "\n");
2743 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2744 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2745 E->DEST, then register this location as a possible insertion point
2746 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2748 BB, E and SI provide the exact insertion point for the new
2749 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2750 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2751 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2752 must not be NULL. */
2754 void
2755 vrp_asserts::register_new_assert_for (tree name, tree expr,
2756 enum tree_code comp_code,
2757 tree val,
2758 basic_block bb,
2759 edge e,
2760 gimple_stmt_iterator si)
2762 assert_locus *n, *loc, *last_loc;
2763 basic_block dest_bb;
2765 gcc_checking_assert (bb == NULL || e == NULL);
2767 if (e == NULL)
2768 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2769 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2771 /* Never build an assert comparing against an integer constant with
2772 TREE_OVERFLOW set. This confuses our undefined overflow warning
2773 machinery. */
2774 if (TREE_OVERFLOW_P (val))
2775 val = drop_tree_overflow (val);
2777 /* The new assertion A will be inserted at BB or E. We need to
2778 determine if the new location is dominated by a previously
2779 registered location for A. If we are doing an edge insertion,
2780 assume that A will be inserted at E->DEST. Note that this is not
2781 necessarily true.
2783 If E is a critical edge, it will be split. But even if E is
2784 split, the new block will dominate the same set of blocks that
2785 E->DEST dominates.
2787 The reverse, however, is not true: blocks dominated by E->DEST
2788 will not be dominated by the new block created to split E. So,
2789 if the insertion location is on a critical edge, we will not use
2790 the new location to move another assertion previously registered
2791 at a block dominated by E->DEST. */
2792 dest_bb = (bb) ? bb : e->dest;
2794 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2795 VAL at a block dominating DEST_BB, then we don't need to insert a new
2796 one. Similarly, if the same assertion already exists at a block
2797 dominated by DEST_BB and the new location is not on a critical
2798 edge, then update the existing location for the assertion (i.e.,
2799 move the assertion up in the dominance tree).
2801 Note, this is implemented as a simple linked list because there
2802 should not be more than a handful of assertions registered per
2803 name. If this becomes a performance problem, a table hashed by
2804 COMP_CODE and VAL could be implemented. */
2805 loc = asserts_for[SSA_NAME_VERSION (name)];
2806 last_loc = loc;
2807 while (loc)
2809 if (loc->comp_code == comp_code
2810 && (loc->val == val
2811 || operand_equal_p (loc->val, val, 0))
2812 && (loc->expr == expr
2813 || operand_equal_p (loc->expr, expr, 0)))
2815 /* If E is not a critical edge and DEST_BB
2816 dominates the existing location for the assertion, move
2817 the assertion up in the dominance tree by updating its
2818 location information. */
2819 if ((e == NULL || !EDGE_CRITICAL_P (e))
2820 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2822 loc->bb = dest_bb;
2823 loc->e = e;
2824 loc->si = si;
2825 return;
2829 /* Update the last node of the list and move to the next one. */
2830 last_loc = loc;
2831 loc = loc->next;
2834 /* If we didn't find an assertion already registered for
2835 NAME COMP_CODE VAL, add a new one at the end of the list of
2836 assertions associated with NAME. */
2837 n = XNEW (struct assert_locus);
2838 n->bb = dest_bb;
2839 n->e = e;
2840 n->si = si;
2841 n->comp_code = comp_code;
2842 n->val = val;
2843 n->expr = expr;
2844 n->next = NULL;
2846 if (last_loc)
2847 last_loc->next = n;
2848 else
2849 asserts_for[SSA_NAME_VERSION (name)] = n;
2851 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2854 /* Finish the ASSERTS found for E and register them at GSI. */
2856 void
2857 vrp_asserts::finish_register_edge_assert_for (edge e,
2858 gimple_stmt_iterator gsi,
2859 vec<assert_info> &asserts)
2861 for (unsigned i = 0; i < asserts.length (); ++i)
2862 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
2863 reachable from E. */
2864 if (live.live_on_edge_p (asserts[i].name, e))
2865 register_new_assert_for (asserts[i].name, asserts[i].expr,
2866 asserts[i].comp_code, asserts[i].val,
2867 NULL, e, gsi);
2870 /* Determine whether the outgoing edges of BB should receive an
2871 ASSERT_EXPR for each of the operands of BB's LAST statement.
2872 The last statement of BB must be a COND_EXPR.
2874 If any of the sub-graphs rooted at BB have an interesting use of
2875 the predicate operands, an assert location node is added to the
2876 list of assertions for the corresponding operands. */
2878 void
2879 vrp_asserts::find_conditional_asserts (basic_block bb, gcond *last)
2881 gimple_stmt_iterator bsi;
2882 tree op;
2883 edge_iterator ei;
2884 edge e;
2885 ssa_op_iter iter;
2887 bsi = gsi_for_stmt (last);
2889 /* Look for uses of the operands in each of the sub-graphs
2890 rooted at BB. We need to check each of the outgoing edges
2891 separately, so that we know what kind of ASSERT_EXPR to
2892 insert. */
2893 FOR_EACH_EDGE (e, ei, bb->succs)
2895 if (e->dest == bb)
2896 continue;
2898 /* Register the necessary assertions for each operand in the
2899 conditional predicate. */
2900 auto_vec<assert_info, 8> asserts;
2901 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
2902 register_edge_assert_for (op, e,
2903 gimple_cond_code (last),
2904 gimple_cond_lhs (last),
2905 gimple_cond_rhs (last), asserts);
2906 finish_register_edge_assert_for (e, bsi, asserts);
2910 /* Compare two case labels sorting first by the destination bb index
2911 and then by the case value. */
2913 int
2914 vrp_asserts::compare_case_labels (const void *p1, const void *p2)
2916 const struct case_info *ci1 = (const struct case_info *) p1;
2917 const struct case_info *ci2 = (const struct case_info *) p2;
2918 int idx1 = ci1->bb->index;
2919 int idx2 = ci2->bb->index;
2921 if (idx1 < idx2)
2922 return -1;
2923 else if (idx1 == idx2)
2925 /* Make sure the default label is first in a group. */
2926 if (!CASE_LOW (ci1->expr))
2927 return -1;
2928 else if (!CASE_LOW (ci2->expr))
2929 return 1;
2930 else
2931 return tree_int_cst_compare (CASE_LOW (ci1->expr),
2932 CASE_LOW (ci2->expr));
2934 else
2935 return 1;
2938 /* Determine whether the outgoing edges of BB should receive an
2939 ASSERT_EXPR for each of the operands of BB's LAST statement.
2940 The last statement of BB must be a SWITCH_EXPR.
2942 If any of the sub-graphs rooted at BB have an interesting use of
2943 the predicate operands, an assert location node is added to the
2944 list of assertions for the corresponding operands. */
2946 void
2947 vrp_asserts::find_switch_asserts (basic_block bb, gswitch *last)
2949 gimple_stmt_iterator bsi;
2950 tree op;
2951 edge e;
2952 struct case_info *ci;
2953 size_t n = gimple_switch_num_labels (last);
2954 #if GCC_VERSION >= 4000
2955 unsigned int idx;
2956 #else
2957 /* Work around GCC 3.4 bug (PR 37086). */
2958 volatile unsigned int idx;
2959 #endif
2961 bsi = gsi_for_stmt (last);
2962 op = gimple_switch_index (last);
2963 if (TREE_CODE (op) != SSA_NAME)
2964 return;
2966 /* Build a vector of case labels sorted by destination label. */
2967 ci = XNEWVEC (struct case_info, n);
2968 for (idx = 0; idx < n; ++idx)
2970 ci[idx].expr = gimple_switch_label (last, idx);
2971 ci[idx].bb = label_to_block (fun, CASE_LABEL (ci[idx].expr));
2973 edge default_edge = find_edge (bb, ci[0].bb);
2974 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
2976 for (idx = 0; idx < n; ++idx)
2978 tree min, max;
2979 tree cl = ci[idx].expr;
2980 basic_block cbb = ci[idx].bb;
2982 min = CASE_LOW (cl);
2983 max = CASE_HIGH (cl);
2985 /* If there are multiple case labels with the same destination
2986 we need to combine them into a single value range for the edge. */
2987 if (idx + 1 < n && cbb == ci[idx + 1].bb)
2989 /* Skip labels until the last of the group. */
2990 do {
2991 ++idx;
2992 } while (idx < n && cbb == ci[idx].bb);
2993 --idx;
2995 /* Pick up the maximum of the case label range. */
2996 if (CASE_HIGH (ci[idx].expr))
2997 max = CASE_HIGH (ci[idx].expr);
2998 else
2999 max = CASE_LOW (ci[idx].expr);
3002 /* Can't extract a useful assertion out of a range that includes the
3003 default label. */
3004 if (min == NULL_TREE)
3005 continue;
3007 /* Find the edge to register the assert expr on. */
3008 e = find_edge (bb, cbb);
3010 /* Register the necessary assertions for the operand in the
3011 SWITCH_EXPR. */
3012 auto_vec<assert_info, 8> asserts;
3013 register_edge_assert_for (op, e,
3014 max ? GE_EXPR : EQ_EXPR,
3015 op, fold_convert (TREE_TYPE (op), min),
3016 asserts);
3017 if (max)
3018 register_edge_assert_for (op, e, LE_EXPR, op,
3019 fold_convert (TREE_TYPE (op), max),
3020 asserts);
3021 finish_register_edge_assert_for (e, bsi, asserts);
3024 XDELETEVEC (ci);
3026 if (!live.live_on_edge_p (op, default_edge))
3027 return;
3029 /* Now register, along the default edge, assertions that correspond
3030 to the anti-range of each label. */
3031 int insertion_limit = param_max_vrp_switch_assertions;
3032 if (insertion_limit == 0)
3033 return;
3035 /* We can't do this if the default case shares a label with another case. */
3036 tree default_cl = gimple_switch_default_label (last);
3037 for (idx = 1; idx < n; idx++)
3039 tree min, max;
3040 tree cl = gimple_switch_label (last, idx);
3041 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3042 continue;
3044 min = CASE_LOW (cl);
3045 max = CASE_HIGH (cl);
3047 /* Combine contiguous case ranges to reduce the number of assertions
3048 to insert. */
3049 for (idx = idx + 1; idx < n; idx++)
3051 tree next_min, next_max;
3052 tree next_cl = gimple_switch_label (last, idx);
3053 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3054 break;
3056 next_min = CASE_LOW (next_cl);
3057 next_max = CASE_HIGH (next_cl);
3059 wide_int difference = (wi::to_wide (next_min)
3060 - wi::to_wide (max ? max : min));
3061 if (wi::eq_p (difference, 1))
3062 max = next_max ? next_max : next_min;
3063 else
3064 break;
3066 idx--;
3068 if (max == NULL_TREE)
3070 /* Register the assertion OP != MIN. */
3071 auto_vec<assert_info, 8> asserts;
3072 min = fold_convert (TREE_TYPE (op), min);
3073 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3074 asserts);
3075 finish_register_edge_assert_for (default_edge, bsi, asserts);
3077 else
3079 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3080 which will give OP the anti-range ~[MIN,MAX]. */
3081 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3082 min = fold_convert (TREE_TYPE (uop), min);
3083 max = fold_convert (TREE_TYPE (uop), max);
3085 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3086 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3087 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3088 NULL, default_edge, bsi);
3091 if (--insertion_limit == 0)
3092 break;
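/* As a worked example (labels and names illustrative): for

     switch (i_3) { case 3: ... case 4: ... case 5: ... }

   the loop above combines the three contiguous case values and
   registers on the default edge the single assertion

     (unsigned) i_3 - 3 > 2

   which gives i_3 the anti-range ~[3, 5] there.  */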
3096 /* Traverse all the statements in block BB looking for statements that
3097 may generate useful assertions for the SSA names in their operands.
3098 If a statement produces a useful assertion A for name N_i, then the
3099 list of assertions already generated for N_i is scanned to
3100 determine if A is actually needed.
3102 If N_i already had the assertion A at a location dominating the
3103 current location, then nothing needs to be done. Otherwise, the
3104 new location for A is recorded instead.
3106 1- For every statement S in BB, all the variables used by S are
3107 added to bitmap FOUND_IN_SUBGRAPH.
3109 2- If statement S uses an operand N in a way that exposes a known
3110 value range for N, then if N was not already generated by an
3111 ASSERT_EXPR, create a new assert location for N. For instance,
3112 if N is a pointer and the statement dereferences it, we can
3113 assume that N is not NULL.
3115 3- COND_EXPRs are a special case of #2. We can derive range
3116 information from the predicate but need to insert different
3117 ASSERT_EXPRs for each of the sub-graphs rooted at the
3118 conditional block. If the last statement of BB is a conditional
3119 expression of the form 'X op Y', then
3121 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3123 b) If the conditional is the only entry point to the sub-graph
3124 corresponding to the THEN_CLAUSE, recurse into it. On
3125 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3126 an ASSERT_EXPR is added for the corresponding variable.
3128 c) Repeat step (b) on the ELSE_CLAUSE.
3130 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3132 For instance,
3134 if (a == 9)
3135 b = a;
3136 else
3137 b = c + 1;
3139 In this case, an assertion on the THEN clause is useful to
3140 determine that 'a' is always 9 on that edge. However, an assertion
3141 on the ELSE clause would be unnecessary.
3143 4- If BB does not end in a conditional expression, then we recurse
3144 into BB's dominator children.
3146 At the end of the recursive traversal, every SSA name will have a
3147 list of locations where ASSERT_EXPRs should be added. When a new
3148 location for name N is found, it is registered by calling
3149 register_new_assert_for. That function keeps track of all the
3150 registered assertions to prevent adding unnecessary assertions.
3151 For instance, if a pointer P_4 is dereferenced more than once in a
3152 dominator tree, only the location dominating all the dereferences of
3153 P_4 will receive an ASSERT_EXPR. */
3155 void
3156 vrp_asserts::find_assert_locations_in_bb (basic_block bb)
3158 gimple *last;
3160 last = last_stmt (bb);
3162 /* If BB's last statement is a conditional statement involving integer
3163 operands, determine if we need to add ASSERT_EXPRs. */
3164 if (last
3165 && gimple_code (last) == GIMPLE_COND
3166 && !fp_predicate (last)
3167 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3168 find_conditional_asserts (bb, as_a <gcond *> (last));
3170 /* If BB's last statement is a switch statement involving integer
3171 operands, determine if we need to add ASSERT_EXPRs. */
3172 if (last
3173 && gimple_code (last) == GIMPLE_SWITCH
3174 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3175 find_switch_asserts (bb, as_a <gswitch *> (last));
3177 /* Traverse all the statements in BB marking used names and looking
3178 for statements that may infer assertions for their used operands. */
3179 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3180 gsi_prev (&si))
3182 gimple *stmt;
3183 tree op;
3184 ssa_op_iter i;
3186 stmt = gsi_stmt (si);
3188 if (is_gimple_debug (stmt))
3189 continue;
3191 /* See if we can derive an assertion for any of STMT's operands. */
3192 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3194 tree value;
3195 enum tree_code comp_code;
3197 /* If op is not live beyond this stmt, do not bother to insert
3198 asserts for it. */
3199 if (!live.live_on_block_p (op, bb))
3200 continue;
3202 /* If OP is used in such a way that we can infer a value
3203 range for it, and we don't find a previous assertion for
3204 it, create a new assertion location node for OP. */
3205 if (infer_value_range (stmt, op, &comp_code, &value))
3207 /* If we are able to infer a nonzero value range for OP,
3208 then walk backwards through the use-def chain to see if OP
3209 was set via a typecast.
3211 If so, then we can also infer a nonzero value range
3212 for the operand of the NOP_EXPR. */
3213 if (comp_code == NE_EXPR && integer_zerop (value))
3215 tree t = op;
3216 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3218 while (is_gimple_assign (def_stmt)
3219 && CONVERT_EXPR_CODE_P
3220 (gimple_assign_rhs_code (def_stmt))
3221 && TREE_CODE
3222 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3223 && POINTER_TYPE_P
3224 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3226 t = gimple_assign_rhs1 (def_stmt);
3227 def_stmt = SSA_NAME_DEF_STMT (t);
3229 /* Note we want to register the assert for the
3230 operand of the NOP_EXPR after SI, not after the
3231 conversion. */
3232 if (live.live_on_block_p (t, bb))
3233 register_new_assert_for (t, t, comp_code, value,
3234 bb, NULL, si);
3238 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3242 /* Update live. */
3243 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3244 live.set (op, bb);
3245 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3246 live.clear (op, bb);
3249 /* Traverse all PHI nodes in BB, updating live. */
3250 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3251 gsi_next (&si))
3253 use_operand_p arg_p;
3254 ssa_op_iter i;
3255 gphi *phi = si.phi ();
3256 tree res = gimple_phi_result (phi);
3258 if (virtual_operand_p (res))
3259 continue;
3261 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3263 tree arg = USE_FROM_PTR (arg_p);
3264 if (TREE_CODE (arg) == SSA_NAME)
3265 live.set (arg, bb);
3268 live.clear (res, bb);
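/* For instance, when this walk reaches a statement *p_4 = 1 and p_4
   is live beyond it, infer_value_range reports p_4 != 0 and an assert
   location for that fact is registered at the dereference; if p_4 was
   set by a pointer conversion from q_6, a location for q_6 != 0 is
   registered as well.  */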
3272 /* Do an RPO walk over the function computing SSA name liveness
3273 on-the-fly and deciding on assert expressions to insert. */
3275 void
3276 vrp_asserts::find_assert_locations (void)
3278 int *rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3279 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3280 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (fun));
3281 int rpo_cnt, i;
3283 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3284 for (i = 0; i < rpo_cnt; ++i)
3285 bb_rpo[rpo[i]] = i;
3287 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3288 the order in which we compute liveness and insert asserts, we would
3289 otherwise fail to insert asserts into the loop latch. */
3290 for (auto loop : loops_list (cfun, 0))
3292 i = loop->latch->index;
3293 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3294 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3295 !gsi_end_p (gsi); gsi_next (&gsi))
3297 gphi *phi = gsi.phi ();
3298 if (virtual_operand_p (gimple_phi_result (phi)))
3299 continue;
3300 tree arg = gimple_phi_arg_def (phi, j);
3301 if (TREE_CODE (arg) == SSA_NAME)
3302 live.set (arg, loop->latch);
3306 for (i = rpo_cnt - 1; i >= 0; --i)
3308 basic_block bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
3309 edge e;
3310 edge_iterator ei;
3312 /* Process BB and update the live information with uses in
3313 this block. */
3314 find_assert_locations_in_bb (bb);
3316 /* Merge liveness into the predecessor blocks and free it. */
3317 if (!live.block_has_live_names_p (bb))
3319 int pred_rpo = i;
3320 FOR_EACH_EDGE (e, ei, bb->preds)
3322 int pred = e->src->index;
3323 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3324 continue;
3326 live.merge (e->src, bb);
3328 if (bb_rpo[pred] < pred_rpo)
3329 pred_rpo = bb_rpo[pred];
3332 /* Record the RPO number of the last visited block that needs
3333 live information from this block. */
3334 last_rpo[rpo[i]] = pred_rpo;
3336 else
3337 live.clear_block (bb);
3339 /* We can free the live bitmaps of all successors if all their
3340 predecessors have already been visited. */
3341 FOR_EACH_EDGE (e, ei, bb->succs)
3342 if (last_rpo[e->dest->index] == i)
3343 live.clear_block (e->dest);
3346 XDELETEVEC (rpo);
3347 XDELETEVEC (bb_rpo);
3348 XDELETEVEC (last_rpo);
3351 /* Create an ASSERT_EXPR for NAME and insert it in the location
3352 indicated by LOC. Return true if we made any edge insertions. */
3354 bool
3355 vrp_asserts::process_assert_insertions_for (tree name, assert_locus *loc)
3357 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3358 gimple *stmt;
3359 tree cond;
3360 gimple *assert_stmt;
3361 edge_iterator ei;
3362 edge e;
3364 /* If we have X <=> X, do not insert an assert expr for it. */
3365 if (loc->expr == loc->val)
3366 return false;
3368 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3369 assert_stmt = build_assert_expr_for (cond, name);
3370 if (loc->e)
3372 /* We have been asked to insert the assertion on an edge. This
3373 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3374 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3375 || (gimple_code (gsi_stmt (loc->si))
3376 == GIMPLE_SWITCH));
3378 gsi_insert_on_edge (loc->e, assert_stmt);
3379 return true;
3382 /* If the stmt iterator points at the end then this is an insertion
3383 at the beginning of a block. */
3384 if (gsi_end_p (loc->si))
3386 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3387 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3388 return false;
3391 /* Otherwise, we can insert right after LOC->SI, provided the
3392 statement is not the last statement in the block. */
3393 stmt = gsi_stmt (loc->si);
3394 if (!stmt_ends_bb_p (stmt))
3396 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3397 return false;
3400 /* If STMT must be the last statement in BB, we can only insert new
3401 assertions on the non-abnormal edge out of BB. Note that since
3402 STMT is not control flow, there can be only one non-abnormal/EH edge
3403 out of BB. */
3404 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3405 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3407 gsi_insert_on_edge (e, assert_stmt);
3408 return true;
3411 gcc_unreachable ();
3414 /* Qsort helper for sorting assert locations. If STABLE is true, don't
3415 use iterative_hash_expr because it can be unstable for -fcompare-debug;
3416 on the other hand, some pointers might then be NULL. */
3418 template <bool stable>
3419 int
3420 vrp_asserts::compare_assert_loc (const void *pa, const void *pb)
3422 assert_locus * const a = *(assert_locus * const *)pa;
3423 assert_locus * const b = *(assert_locus * const *)pb;
3425 /* If STABLE, some asserts might already be optimized away; sort
3426 them last. */
3427 if (stable)
3429 if (a == NULL)
3430 return b != NULL;
3431 else if (b == NULL)
3432 return -1;
3435 if (a->e == NULL && b->e != NULL)
3436 return 1;
3437 else if (a->e != NULL && b->e == NULL)
3438 return -1;
3440 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3441 no need to test both a->e and b->e. */
3443 /* Sort by destination index. */
3444 if (a->e == NULL)
3445 ;
3446 else if (a->e->dest->index > b->e->dest->index)
3447 return 1;
3448 else if (a->e->dest->index < b->e->dest->index)
3449 return -1;
3451 /* Sort by comp_code. */
3452 if (a->comp_code > b->comp_code)
3453 return 1;
3454 else if (a->comp_code < b->comp_code)
3455 return -1;
3457 hashval_t ha, hb;
3459 /* E.g. if a->val is an ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3460 uses the DECL_UID of the VAR_DECL, so sorting might differ between
3461 -g and -g0. When removing redundant assert exprs and commonizing
3462 them to successors, this does not matter, but the final sort
3463 needs to be stable. */
3464 if (stable)
3466 ha = 0;
3467 hb = 0;
3469 else
3471 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3472 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3475 /* Break the tie using hashing and source/bb index. */
3476 if (ha == hb)
3477 return (a->e != NULL
3478 ? a->e->src->index - b->e->src->index
3479 : a->bb->index - b->bb->index);
3480 return ha > hb ? 1 : -1;
3483 /* Process all the insertions registered for every name N_i registered
3484 in NEED_ASSERT_FOR. The list of assertions to be inserted are
3485 found in ASSERTS_FOR[i]. */
3487 void
3488 vrp_asserts::process_assert_insertions ()
3490 unsigned i;
3491 bitmap_iterator bi;
3492 bool update_edges_p = false;
3493 int num_asserts = 0;
3495 if (dump_file && (dump_flags & TDF_DETAILS))
3496 dump (dump_file);
3498 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3500 assert_locus *loc = asserts_for[i];
3501 gcc_assert (loc);
3503 auto_vec<assert_locus *, 16> asserts;
3504 for (; loc; loc = loc->next)
3505 asserts.safe_push (loc);
3506 asserts.qsort (compare_assert_loc<false>);
3508 /* Push down common asserts to successors and remove redundant ones. */
3509 unsigned ecnt = 0;
3510 assert_locus *common = NULL;
3511 unsigned commonj = 0;
3512 for (unsigned j = 0; j < asserts.length (); ++j)
3514 loc = asserts[j];
3515 if (! loc->e)
3516 common = NULL;
3517 else if (! common
3518 || loc->e->dest != common->e->dest
3519 || loc->comp_code != common->comp_code
3520 || ! operand_equal_p (loc->val, common->val, 0)
3521 || ! operand_equal_p (loc->expr, common->expr, 0))
3523 commonj = j;
3524 common = loc;
3525 ecnt = 1;
3527 else if (loc->e == asserts[j-1]->e)
3529 /* Remove duplicate asserts. */
3530 if (commonj == j - 1)
3532 commonj = j;
3533 common = loc;
3535 free (asserts[j-1]);
3536 asserts[j-1] = NULL;
3538 else
3540 ecnt++;
3541 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
3543 /* We have the same assertion on all incoming edges of a BB.
3544 Insert it at the beginning of that block. */
3545 loc->bb = loc->e->dest;
3546 loc->e = NULL;
3547 loc->si = gsi_none ();
3548 common = NULL;
3549 /* Clear the asserts that were commoned. */
3550 for (; commonj != j; ++commonj)
3551 if (asserts[commonj])
3553 free (asserts[commonj]);
3554 asserts[commonj] = NULL;
3560 /* The asserts vector sorting above might be unstable for
3561 -fcompare-debug; sort again to ensure a stable sort. */
3562 asserts.qsort (compare_assert_loc<true>);
3563 for (unsigned j = 0; j < asserts.length (); ++j)
3565 loc = asserts[j];
3566 if (! loc)
3567 break;
3568 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
3569 num_asserts++;
3570 free (loc);
3574 if (update_edges_p)
3575 gsi_commit_edge_inserts ();
3577 statistics_counter_event (fun, "Number of ASSERT_EXPR expressions inserted",
3578 num_asserts);
3581 /* Traverse the flowgraph looking for conditional jumps to insert range
3582 expressions. These range expressions are meant to provide information
3583 to optimizations that need to reason in terms of value ranges. They
3584 will not be expanded into RTL. For instance, given:
3586 x = ...
3587 y = ...
3588 if (x < y)
3589 y = x - 2;
3590 else
3591 x = y + 3;
3593 this pass will transform the code into:
3595 x = ...
3596 y = ...
3597 if (x < y)
3599 x = ASSERT_EXPR <x, x < y>
3600 y = x - 2
3602 else
3604 y = ASSERT_EXPR <y, x >= y>
3605 x = y + 3
3608 The idea is that once copy and constant propagation have run, other
3609 optimizations will be able to determine what ranges of values 'x'
3610 can take in different paths of the code, simply by checking the reaching
3611 definition of 'x'. */
3613 void
3614 vrp_asserts::insert_range_assertions (void)
3616 need_assert_for = BITMAP_ALLOC (NULL);
3617 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
3619 calculate_dominance_info (CDI_DOMINATORS);
3621 find_assert_locations ();
3622 if (!bitmap_empty_p (need_assert_for))
3624 process_assert_insertions ();
3625 update_ssa (TODO_update_ssa_no_phi);
3628 if (dump_file && (dump_flags & TDF_DETAILS))
3630 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
3631 dump_function_to_file (current_function_decl, dump_file, dump_flags);
3634 free (asserts_for);
3635 BITMAP_FREE (need_assert_for);
3638 /* Return true if all immediate uses of VAR are either in STMT, or
3639 feed (possibly through a chain of single immediate uses) a
3640 GIMPLE_COND in basic block COND_BB. */
3642 bool
3643 vrp_asserts::all_imm_uses_in_stmt_or_feed_cond (tree var,
3644 gimple *stmt,
3645 basic_block cond_bb)
3647 use_operand_p use_p, use2_p;
3648 imm_use_iterator iter;
3650 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
3651 if (USE_STMT (use_p) != stmt)
3653 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
3654 if (is_gimple_debug (use_stmt))
3655 continue;
3656 while (is_gimple_assign (use_stmt)
3657 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
3658 && single_imm_use (gimple_assign_lhs (use_stmt),
3659 &use2_p, &use_stmt2))
3660 use_stmt = use_stmt2;
3661 if (gimple_code (use_stmt) != GIMPLE_COND
3662 || gimple_bb (use_stmt) != cond_bb)
3663 return false;
3665 return true;
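/* For instance, given

     _1 = x_7 & 31;
     if (_1 != 0)
       __builtin_unreachable ();
     x_8 = ASSERT_EXPR <x_7, ...>;

   every use of x_7 is either in the ASSERT_EXPR statement or feeds the
   GIMPLE_COND through the single use of _1, so the predicate holds and
   remove_range_assertions may copy the range info computed for x_8
   back to x_7.  */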
3668 /* Convert range assertion expressions into the implied copies and
3669 copy propagate away the copies. Doing the trivial copy propagation
3670 here avoids the need to run the full copy propagation pass after
3671 VRP.
3673 FIXME, this will eventually lead to copy propagation removing the
3674 names that had useful range information attached to them. For
3675 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
3676 then N_i will have the range [4, +INF].
3678 However, by converting the assertion into the implied copy
3679 operation N_i = N_j, we will then copy-propagate N_j into the uses
3680 of N_i and lose the range information.
3682 The problem with keeping ASSERT_EXPRs around is that passes after
3683 VRP need to handle them appropriately.
3685 Another approach would be to make the range information a first
3686 class property of the SSA_NAME so that it can be queried from
3687 any pass. This is made somewhat more complex by the need for
3688 multiple ranges to be associated with one SSA_NAME. */
3690 void
3691 vrp_asserts::remove_range_assertions ()
3693 basic_block bb;
3694 gimple_stmt_iterator si;
3695 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
3696 a basic block preceded by a GIMPLE_COND branching to it and
3697 __builtin_trap, -1 if not yet checked, 0 otherwise. */
3698 int is_unreachable;
3700 /* Note that the BSI iterator bump happens at the bottom of the
3701 loop and no bump is necessary if we're removing the statement
3702 referenced by the current BSI. */
3703 FOR_EACH_BB_FN (bb, fun)
3704 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
3706 gimple *stmt = gsi_stmt (si);
3708 if (is_gimple_assign (stmt)
3709 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
3711 tree lhs = gimple_assign_lhs (stmt);
3712 tree rhs = gimple_assign_rhs1 (stmt);
3713 tree var;
3715 var = ASSERT_EXPR_VAR (rhs);
3717 if (TREE_CODE (var) == SSA_NAME
3718 && !POINTER_TYPE_P (TREE_TYPE (lhs))
3719 && SSA_NAME_RANGE_INFO (lhs))
3721 if (is_unreachable == -1)
3723 is_unreachable = 0;
3724 if (single_pred_p (bb)
3725 && assert_unreachable_fallthru_edge_p
3726 (single_pred_edge (bb)))
3727 is_unreachable = 1;
3729 /* Handle
3730 if (x_7 >= 10 && x_7 < 20)
3731 __builtin_unreachable ();
3732 x_8 = ASSERT_EXPR <x_7, ...>;
3733 if the only uses of x_7 are in the ASSERT_EXPR and
3734 in the condition. In that case, we can copy the
3735 range info from x_8 computed in this pass also
3736 for x_7. */
3737 if (is_unreachable
3738 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
3739 single_pred (bb)))
3741 /* We could use duplicate_ssa_name_range_info here
3742 instead of peeking inside SSA_NAME_RANGE_INFO,
3743 but that function asserts that the
3744 destination has no global range. This is
3745 slated for removal anyhow. */
3746 value_range r (TREE_TYPE (lhs),
3747 SSA_NAME_RANGE_INFO (lhs)->get_min (),
3748 SSA_NAME_RANGE_INFO (lhs)->get_max (),
3749 SSA_NAME_RANGE_TYPE (lhs));
3750 set_range_info (var, r);
3751 maybe_set_nonzero_bits (single_pred_edge (bb), var);
3755 /* Propagate the RHS into every use of the LHS. For SSA names
3756 also propagate abnormals as it merely restores the original
3757 IL in this case (a replace_uses_by would assert).  */
3758 if (TREE_CODE (var) == SSA_NAME)
3760 imm_use_iterator iter;
3761 use_operand_p use_p;
3762 gimple *use_stmt;
3763 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3764 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3765 SET_USE (use_p, var);
3767 else
3768 replace_uses_by (lhs, var);
3770 /* And finally, remove the copy; it is not needed.  */
3771 gsi_remove (&si, true);
3772 release_defs (stmt);
3774 else
3776 if (!is_gimple_debug (gsi_stmt (si)))
3777 is_unreachable = 0;
3778 gsi_next (&si);
3783 class vrp_prop : public ssa_propagation_engine
3785 public:
3786 vrp_prop (vr_values *v)
3787 : ssa_propagation_engine (),
3788 m_vr_values (v) { }
3790 void initialize (struct function *);
3791 void finalize ();
3793 private:
3794 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) final override;
3795 enum ssa_prop_result visit_phi (gphi *) final override;
3797 struct function *fun;
3798 vr_values *m_vr_values;
3801 /* Initialization required by ssa_propagate engine. */
3803 void
3804 vrp_prop::initialize (struct function *fn)
3806 basic_block bb;
3807 fun = fn;
3809 FOR_EACH_BB_FN (bb, fun)
3811 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3812 gsi_next (&si))
3814 gphi *phi = si.phi ();
3815 if (!stmt_interesting_for_vrp (phi))
3817 tree lhs = PHI_RESULT (phi);
3818 m_vr_values->set_def_to_varying (lhs);
3819 prop_set_simulate_again (phi, false);
3821 else
3822 prop_set_simulate_again (phi, true);
3825 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
3826 gsi_next (&si))
3828 gimple *stmt = gsi_stmt (si);
3830 /* If the statement is a control insn, then we must
3831 simulate it at least once.  Failing to do so means
3832 that its outgoing edges will never get added.  */
3833 if (stmt_ends_bb_p (stmt))
3834 prop_set_simulate_again (stmt, true);
3835 else if (!stmt_interesting_for_vrp (stmt))
3837 m_vr_values->set_defs_to_varying (stmt);
3838 prop_set_simulate_again (stmt, false);
3840 else
3841 prop_set_simulate_again (stmt, true);
3846 /* Evaluate statement STMT. If the statement produces a useful range,
3847 return SSA_PROP_INTERESTING and record the SSA name with the
3848 interesting range into *OUTPUT_P.
3850 If STMT is a conditional branch and we can determine its truth
3851 value, the taken edge is recorded in *TAKEN_EDGE_P.
3853 If STMT produces a varying value, return SSA_PROP_VARYING. */
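/* For instance (a hypothetical example), visiting

     x_2 = y_1 & 255;

   where y_1 has unsigned int type yields the range [0, 255] for x_2,
   so the visit returns SSA_PROP_INTERESTING with *OUTPUT_P set to
   x_2; a call such as x_3 = foo () yields no useful range and
   returns SSA_PROP_VARYING.  */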
3855 enum ssa_prop_result
3856 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
3858 tree lhs = gimple_get_lhs (stmt);
3859 value_range_equiv vr;
3860 m_vr_values->extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
3862 if (*output_p)
3864 if (m_vr_values->update_value_range (*output_p, &vr))
3866 if (dump_file && (dump_flags & TDF_DETAILS))
3868 fprintf (dump_file, "Found new range for ");
3869 print_generic_expr (dump_file, *output_p);
3870 fprintf (dump_file, ": ");
3871 dump_value_range (dump_file, &vr);
3872 fprintf (dump_file, "\n");
3875 if (vr.varying_p ())
3876 return SSA_PROP_VARYING;
3878 return SSA_PROP_INTERESTING;
3880 return SSA_PROP_NOT_INTERESTING;
3883 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
3884 switch (gimple_call_internal_fn (stmt))
3886 case IFN_ADD_OVERFLOW:
3887 case IFN_SUB_OVERFLOW:
3888 case IFN_MUL_OVERFLOW:
3889 case IFN_ATOMIC_COMPARE_EXCHANGE:
3890 /* These internal calls return _Complex integer type,
3891 which VRP does not track, but the immediate uses
3892 thereof might be interesting. */
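/* A sketch of the shape being handled (SSA names hypothetical):

     _5 = .ADD_OVERFLOW (a_2, b_3);   <-- _Complex int, untracked
     sum_6 = REALPART_EXPR <_5>;      <-- result value
     ovf_7 = IMAGPART_EXPR <_5>;      <-- overflow flag, in [0, 1]

   Only such REALPART_EXPR/IMAGPART_EXPR uses are inspected below.  */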
3893 if (lhs && TREE_CODE (lhs) == SSA_NAME)
3895 imm_use_iterator iter;
3896 use_operand_p use_p;
3897 enum ssa_prop_result res = SSA_PROP_VARYING;
3899 m_vr_values->set_def_to_varying (lhs);
3901 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3903 gimple *use_stmt = USE_STMT (use_p);
3904 if (!is_gimple_assign (use_stmt))
3905 continue;
3906 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
3907 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
3908 continue;
3909 tree rhs1 = gimple_assign_rhs1 (use_stmt);
3910 tree use_lhs = gimple_assign_lhs (use_stmt);
3911 if (TREE_CODE (rhs1) != rhs_code
3912 || TREE_OPERAND (rhs1, 0) != lhs
3913 || TREE_CODE (use_lhs) != SSA_NAME
3914 || !stmt_interesting_for_vrp (use_stmt)
3915 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
3916 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
3917 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
3918 continue;
3920 /* If there is a change in the value range for any of the
3921 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
3922 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
3923 or IMAGPART_EXPR immediate uses, but none of them have
3924 a change in their value ranges, return
3925 SSA_PROP_NOT_INTERESTING. If there are no
3926 {REAL,IMAG}PART_EXPR uses at all,
3927 return SSA_PROP_VARYING. */
3928 value_range_equiv new_vr;
3929 m_vr_values->extract_range_basic (&new_vr, use_stmt);
3930 const value_range_equiv *old_vr
3931 = m_vr_values->get_value_range (use_lhs);
3932 if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
3933 res = SSA_PROP_INTERESTING;
3934 else
3935 res = SSA_PROP_NOT_INTERESTING;
3936 new_vr.equiv_clear ();
3937 if (res == SSA_PROP_INTERESTING)
3939 *output_p = lhs;
3940 return res;
3944 return res;
3946 break;
3947 default:
3948 break;
3951 /* All other statements produce nothing of interest for VRP, so mark
3952 their outputs varying and prevent further simulation. */
3953 m_vr_values->set_defs_to_varying (stmt);
3955 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
3958 /* Visit all arguments for PHI node PHI that flow through executable
3959 edges. If a valid value range can be derived from all the incoming
3960 value ranges, set a new range for the LHS of PHI. */
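/* E.g. (hypothetical ranges), for

     x_4 = PHI <x_2(3), x_3(4)>

   with x_2 in [0, 10] on the edge from block 3 and x_3 in [20, 30]
   on the edge from block 4, both edges executable, the union [0, 30]
   becomes the candidate range for x_4.  */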
3962 enum ssa_prop_result
3963 vrp_prop::visit_phi (gphi *phi)
3965 tree lhs = PHI_RESULT (phi);
3966 value_range_equiv vr_result;
3967 m_vr_values->extract_range_from_phi_node (phi, &vr_result);
3968 if (m_vr_values->update_value_range (lhs, &vr_result))
3970 if (dump_file && (dump_flags & TDF_DETAILS))
3972 fprintf (dump_file, "Found new range for ");
3973 print_generic_expr (dump_file, lhs);
3974 fprintf (dump_file, ": ");
3975 dump_value_range (dump_file, &vr_result);
3976 fprintf (dump_file, "\n");
3979 if (vr_result.varying_p ())
3980 return SSA_PROP_VARYING;
3982 return SSA_PROP_INTERESTING;
3985 /* Nothing changed, don't add outgoing edges. */
3986 return SSA_PROP_NOT_INTERESTING;
3989 /* Traverse all the blocks folding conditionals with known ranges. */
3991 void
3992 vrp_prop::finalize ()
3994 size_t i;
3996 /* We have completed propagating through the lattice. */
3997 m_vr_values->set_lattice_propagation_complete ();
3999 if (dump_file)
4001 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
4002 m_vr_values->dump (dump_file);
4003 fprintf (dump_file, "\n");
4006 /* Set value range info on non-pointer SSA_NAMEs, and non-null flags on pointers whose range excludes zero.  */
4007 for (i = 0; i < num_ssa_names; i++)
4009 tree name = ssa_name (i);
4010 if (!name)
4011 continue;
4013 const value_range_equiv *vr = m_vr_values->get_value_range (name);
4014 if (!name || vr->varying_p () || !vr->constant_p ())
4015 continue;
4017 if (POINTER_TYPE_P (TREE_TYPE (name))
4018 && range_includes_zero_p (vr) == 0)
4019 set_ptr_nonnull (name);
4020 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
4021 set_range_info (name, *vr);
4025 class vrp_folder : public substitute_and_fold_engine
4027 public:
4028 vrp_folder (vr_values *v)
4029 : substitute_and_fold_engine (/* Fold all stmts. */ true),
4030 m_vr_values (v), simplifier (v)
4032 void simplify_casted_conds (function *fun);
4034 private:
4035 tree value_of_expr (tree name, gimple *stmt) override
4037 return m_vr_values->value_of_expr (name, stmt);
4039 bool fold_stmt (gimple_stmt_iterator *) final override;
4040 bool fold_predicate_in (gimple_stmt_iterator *);
4042 vr_values *m_vr_values;
4043 simplify_using_ranges simplifier;
4046 /* If the statement pointed to by SI has a predicate whose value can be
4047 computed using the value range information computed by VRP, compute
4048 its value and return true. Otherwise, return false. */
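/* A minimal sketch (the range is hypothetical): if x_3 is known to
   be in [11, 20], then the predicate in

     if (x_3 > 10)

   evaluates to true and the GIMPLE_COND is rewritten via
   gimple_cond_make_true, while the assignment form

     b_4 = x_3 > 10;

   is folded to the constant 1 converted to the type of b_4.  */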
4050 bool
4051 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
4053 bool assignment_p = false;
4054 tree val;
4055 gimple *stmt = gsi_stmt (*si);
4057 if (is_gimple_assign (stmt)
4058 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
4060 assignment_p = true;
4061 val = simplifier.vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
4062 gimple_assign_rhs1 (stmt),
4063 gimple_assign_rhs2 (stmt),
4064 stmt);
4066 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
4067 val = simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
4068 gimple_cond_lhs (cond_stmt),
4069 gimple_cond_rhs (cond_stmt),
4070 stmt);
4071 else
4072 return false;
4074 if (val)
4076 if (assignment_p)
4077 val = fold_convert (TREE_TYPE (gimple_assign_lhs (stmt)), val);
4079 if (dump_file)
4081 fprintf (dump_file, "Folding predicate ");
4082 print_gimple_expr (dump_file, stmt, 0);
4083 fprintf (dump_file, " to ");
4084 print_generic_expr (dump_file, val);
4085 fprintf (dump_file, "\n");
4088 if (is_gimple_assign (stmt))
4089 gimple_assign_set_rhs_from_tree (si, val);
4090 else
4092 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
4093 gcond *cond_stmt = as_a <gcond *> (stmt);
4094 if (integer_zerop (val))
4095 gimple_cond_make_false (cond_stmt);
4096 else if (integer_onep (val))
4097 gimple_cond_make_true (cond_stmt);
4098 else
4099 gcc_unreachable ();
4102 return true;
4105 return false;
4108 /* Callback for substitute_and_fold folding the stmt at *SI. */
4110 bool
4111 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
4113 if (fold_predicate_in (si))
4114 return true;
4116 return simplifier.simplify (si);
4119 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
4120 was set by a type conversion can often be rewritten to use the RHS
4121 of the type conversion. Do this optimization for all conditionals
4122 in FUN. */
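/* For instance (hypothetical GIMPLE), given

     _1 = (int) c_2;
     if (_1 == 5)

   the comparison can often be rewritten to use the conversion's RHS
   directly:

     if (c_2 == 5)

   provided the constant is representable in the type of c_2.  */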
4124 void
4125 vrp_folder::simplify_casted_conds (function *fun)
4127 basic_block bb;
4128 FOR_EACH_BB_FN (bb, fun)
4130 gimple *last = last_stmt (bb);
4131 if (last && gimple_code (last) == GIMPLE_COND)
4133 if (simplifier.simplify_casted_cond (as_a <gcond *> (last)))
4135 if (dump_file && (dump_flags & TDF_DETAILS))
4137 fprintf (dump_file, "Folded into: ");
4138 print_gimple_stmt (dump_file, last, 0, TDF_SLIM);
4139 fprintf (dump_file, "\n");
4146 /* Main entry point to VRP (Value Range Propagation). This pass is
4147 loosely based on J. R. C. Patterson, ``Accurate Static Branch
4148 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
4149 Programming Language Design and Implementation, pp. 67-78, 1995.
4150 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
4152 This is essentially an SSA-CCP pass modified to deal with ranges
4153 instead of constants.
4155 While propagating ranges, we may find that two or more SSA names
4156 have equivalent, though distinct ranges. For instance,
4158 1 x_9 = p_3->a;
4159 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
4160 3 if (p_4 == q_2)
4161 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
4162 5 endif
4163 6 if (q_2)
4165 In the code above, pointer p_5 has range [q_2, q_2], but from the
4166 code we can also determine that p_5 cannot be NULL and, if q_2 had
4167 a non-varying range, p_5's range should also be compatible with it.
4169 These equivalences are created by two expressions: ASSERT_EXPR and
4170 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
4171 result of another assertion, then we can use the fact that p_5 and
4172 p_4 are equivalent when evaluating p_5's range.
4174 Together with value ranges, we also propagate these equivalences
4175 between names so that we can take advantage of information from
4176 multiple ranges when doing final replacement. Note that this
4177 equivalency relation is transitive but not symmetric.
4179 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
4180 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
4181 in contexts where that assertion does not hold (e.g., in line 6).
4183 TODO, the main difference between this pass and Patterson's is that
4184 we do not propagate edge probabilities. We only compute whether
4185 edges can be taken or not. That is, instead of having a spectrum
4186 of jump probabilities between 0 and 1, we only deal with 0, 1 and
4187 DON'T KNOW. In the future, it may be worthwhile to propagate
4188 probabilities to aid branch prediction. */
4190 static unsigned int
4191 execute_vrp (struct function *fun, bool warn_array_bounds_p)
4193 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
4194 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
4195 scev_initialize ();
4197 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
4198 Inserting assertions may split edges which will invalidate
4199 EDGE_DFS_BACK. */
4200 vrp_asserts assert_engine (fun);
4201 assert_engine.insert_range_assertions ();
4203 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
4204 mark_dfs_back_edges ();
4206 vr_values vrp_vr_values;
4208 class vrp_prop vrp_prop (&vrp_vr_values);
4209 vrp_prop.initialize (fun);
4210 vrp_prop.ssa_propagate ();
4212 /* Instantiate the folder here, so that edge cleanups happen at the
4213 end of this function. */
4214 vrp_folder folder (&vrp_vr_values);
4215 vrp_prop.finalize ();
4217 /* If we're checking array refs, we want to merge information on
4218 the executability of each edge between vrp_folder and the
4219 check_array_bounds_dom_walker: each can clear the
4220 EDGE_EXECUTABLE flag on edges, in different ways.
4222 Hence, if we're going to call check_all_array_refs, set
4223 the flag on every edge now, rather than in
4224 check_array_bounds_dom_walker's ctor; vrp_folder may clear
4225 it from some edges. */
4226 if (warn_array_bounds && warn_array_bounds_p)
4227 set_all_edges_as_executable (fun);
4229 folder.substitute_and_fold ();
4231 if (warn_array_bounds && warn_array_bounds_p)
4233 array_bounds_checker array_checker (fun, &vrp_vr_values);
4234 array_checker.check ();
4237 folder.simplify_casted_conds (fun);
4239 free_numbers_of_iterations_estimates (fun);
4241 assert_engine.remove_range_assertions ();
4243 scev_finalize ();
4244 loop_optimizer_finalize ();
4245 return 0;
4248 // This is a ranger-based folder which continues to use the dominator
4249 // walk to access the substitute and fold machinery. Ranges are calculated
4250 // on demand.
4252 class rvrp_folder : public substitute_and_fold_engine
4254 public:
4256 rvrp_folder (gimple_ranger *r) : substitute_and_fold_engine (),
4257 m_simplifier (r, r->non_executable_edge_flag)
4259 m_ranger = r;
4260 m_pta = new pointer_equiv_analyzer (m_ranger);
4263 ~rvrp_folder ()
4265 delete m_pta;
4268 tree value_of_expr (tree name, gimple *s = NULL) override
4270 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
4271 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4272 return NULL;
4273 tree ret = m_ranger->value_of_expr (name, s);
4274 if (!ret && supported_pointer_equiv_p (name))
4275 ret = m_pta->get_equiv (name);
4276 return ret;
4279 tree value_on_edge (edge e, tree name) override
4281 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
4282 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4283 return NULL;
4284 tree ret = m_ranger->value_on_edge (e, name);
4285 if (!ret && supported_pointer_equiv_p (name))
4286 ret = m_pta->get_equiv (name);
4287 return ret;
4290 tree value_of_stmt (gimple *s, tree name = NULL) override
4292 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
4293 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4294 return NULL;
4295 return m_ranger->value_of_stmt (s, name);
4298 void pre_fold_bb (basic_block bb) override
4300 m_pta->enter (bb);
4301 for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
4302 gsi_next (&gsi))
4303 m_ranger->register_inferred_ranges (gsi.phi ());
4306 void post_fold_bb (basic_block bb) override
4308 m_pta->leave (bb);
4311 void pre_fold_stmt (gimple *stmt) override
4313 m_pta->visit_stmt (stmt);
4316 bool fold_stmt (gimple_stmt_iterator *gsi) override
4318 bool ret = m_simplifier.simplify (gsi);
4319 if (!ret)
4320 ret = m_ranger->fold_stmt (gsi, follow_single_use_edges);
4321 m_ranger->register_inferred_ranges (gsi_stmt (*gsi));
4322 return ret;
4325 private:
4326 DISABLE_COPY_AND_ASSIGN (rvrp_folder);
4327 gimple_ranger *m_ranger;
4328 simplify_using_ranges m_simplifier;
4329 pointer_equiv_analyzer *m_pta;
4332 /* Main entry point for a VRP pass using just ranger. This can be called
4333 from anywhere to perform a VRP pass, including from EVRP. */
4335 unsigned int
4336 execute_ranger_vrp (struct function *fun, bool warn_array_bounds_p)
4338 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
4339 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
4340 scev_initialize ();
4341 calculate_dominance_info (CDI_DOMINATORS);
4343 set_all_edges_as_executable (fun);
4344 gimple_ranger *ranger = enable_ranger (fun, false);
4345 rvrp_folder folder (ranger);
4346 folder.substitute_and_fold ();
4347 if (dump_file && (dump_flags & TDF_DETAILS))
4348 ranger->dump (dump_file);
4350 if (warn_array_bounds && warn_array_bounds_p)
4352 // Set all edges as executable, except those ranger says aren't.
4353 int non_exec_flag = ranger->non_executable_edge_flag;
4354 basic_block bb;
4355 FOR_ALL_BB_FN (bb, fun)
4357 edge_iterator ei;
4358 edge e;
4359 FOR_EACH_EDGE (e, ei, bb->succs)
4360 if (e->flags & non_exec_flag)
4361 e->flags &= ~EDGE_EXECUTABLE;
4362 else
4363 e->flags |= EDGE_EXECUTABLE;
4365 scev_reset ();
4366 array_bounds_checker array_checker (fun, ranger);
4367 array_checker.check ();
4370 disable_ranger (fun);
4371 scev_finalize ();
4372 loop_optimizer_finalize ();
4373 return 0;
4376 namespace {
4378 const pass_data pass_data_vrp =
4380 GIMPLE_PASS, /* type */
4381 "vrp", /* name */
4382 OPTGROUP_NONE, /* optinfo_flags */
4383 TV_TREE_VRP, /* tv_id */
4384 PROP_ssa, /* properties_required */
4385 0, /* properties_provided */
4386 0, /* properties_destroyed */
4387 0, /* todo_flags_start */
4388 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
4391 static int vrp_pass_num = 0;
4392 class pass_vrp : public gimple_opt_pass
4394 public:
4395 pass_vrp (gcc::context *ctxt)
4396 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false),
4397 my_pass (++vrp_pass_num)
4400 /* opt_pass methods: */
4401 opt_pass * clone () { return new pass_vrp (m_ctxt); }
4402 void set_pass_param (unsigned int n, bool param)
4404 gcc_assert (n == 0);
4405 warn_array_bounds_p = param;
4407 virtual bool gate (function *) { return flag_tree_vrp != 0; }
4408 virtual unsigned int execute (function *fun)
4410 if ((my_pass == 1 && param_vrp1_mode == VRP_MODE_RANGER)
4411 || (my_pass == 2 && param_vrp2_mode == VRP_MODE_RANGER))
4412 return execute_ranger_vrp (fun, warn_array_bounds_p);
4413 return execute_vrp (fun, warn_array_bounds_p);
4416 private:
4417 bool warn_array_bounds_p;
4418 int my_pass;
4419 }; // class pass_vrp
4421 } // anon namespace
4423 gimple_opt_pass *
4424 make_pass_vrp (gcc::context *ctxt)
4426 return new pass_vrp (ctxt);