1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2022 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "basic-block.h"
25 #include "bitmap.h"
26 #include "sbitmap.h"
27 #include "options.h"
28 #include "dominance.h"
29 #include "function.h"
30 #include "cfg.h"
31 #include "tree.h"
32 #include "gimple.h"
33 #include "tree-pass.h"
34 #include "ssa.h"
35 #include "gimple-pretty-print.h"
36 #include "fold-const.h"
37 #include "cfganal.h"
38 #include "gimple-iterator.h"
39 #include "tree-cfg.h"
40 #include "tree-ssa-loop-manip.h"
41 #include "tree-ssa-loop-niter.h"
42 #include "tree-into-ssa.h"
43 #include "cfgloop.h"
44 #include "tree-scalar-evolution.h"
45 #include "tree-ssa-propagate.h"
46 #include "domwalk.h"
47 #include "vr-values.h"
48 #include "gimple-array-bounds.h"
49 #include "gimple-range.h"
50 #include "gimple-range-path.h"
51 #include "value-pointer-equiv.h"
52 #include "gimple-fold.h"
54 /* Set of SSA names found live during the RPO traversal of the function
55 for still active basic-blocks. */
56 class live_names
58 public:
59 live_names ();
60 ~live_names ();
61 void set (tree, basic_block);
62 void clear (tree, basic_block);
63 void merge (basic_block dest, basic_block src);
64 bool live_on_block_p (tree, basic_block);
65 bool live_on_edge_p (tree, edge);
66 bool block_has_live_names_p (basic_block);
67 void clear_block (basic_block);
69 private:
70 sbitmap *live;
71 unsigned num_blocks;
72 void init_bitmap_if_needed (basic_block);
75 void
76 live_names::init_bitmap_if_needed (basic_block bb)
78 unsigned i = bb->index;
79 if (!live[i])
81 live[i] = sbitmap_alloc (num_ssa_names);
82 bitmap_clear (live[i]);
86 bool
87 live_names::block_has_live_names_p (basic_block bb)
89 unsigned i = bb->index;
90 return live[i] && !bitmap_empty_p (live[i]);
93 void
94 live_names::clear_block (basic_block bb)
96 unsigned i = bb->index;
97 if (live[i])
99 sbitmap_free (live[i]);
100 live[i] = NULL;
104 void
105 live_names::merge (basic_block dest, basic_block src)
107 init_bitmap_if_needed (dest);
108 init_bitmap_if_needed (src);
109 bitmap_ior (live[dest->index], live[dest->index], live[src->index]);
112 void
113 live_names::set (tree name, basic_block bb)
115 init_bitmap_if_needed (bb);
116 bitmap_set_bit (live[bb->index], SSA_NAME_VERSION (name));
119 void
120 live_names::clear (tree name, basic_block bb)
122 unsigned i = bb->index;
123 if (live[i])
124 bitmap_clear_bit (live[i], SSA_NAME_VERSION (name));
127 live_names::live_names ()
129 num_blocks = last_basic_block_for_fn (cfun);
130 live = XCNEWVEC (sbitmap, num_blocks);
133 live_names::~live_names ()
135 for (unsigned i = 0; i < num_blocks; ++i)
136 if (live[i])
137 sbitmap_free (live[i]);
138 XDELETEVEC (live);
141 bool
142 live_names::live_on_block_p (tree name, basic_block bb)
144 return (live[bb->index]
145 && bitmap_bit_p (live[bb->index], SSA_NAME_VERSION (name)));
148 /* Return true if the SSA name NAME is live on the edge E. */
150 bool
151 live_names::live_on_edge_p (tree name, edge e)
153 return live_on_block_p (name, e->dest);
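/* Usage sketch (illustrative): after live.set (name, bb) during the RPO
   walk, live.live_on_block_p (name, bb) returns true, as does
   live.live_on_edge_p (name, e) for any edge E with E->dest == bb, until
   live.clear (name, bb) or live.clear_block (bb) drops the name again.  */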
157 /* VR_TYPE describes a range with minimum value *MIN and maximum
158 value *MAX. Restrict the range to the set of values that have
159 no bits set outside NONZERO_BITS. Update *MIN and *MAX and
160 return the new range type.
162 SGN gives the sign of the values described by the range. */
164 enum value_range_kind
165 intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
166 wide_int *min, wide_int *max,
167 const wide_int &nonzero_bits,
168 signop sgn)
170 if (vr_type == VR_ANTI_RANGE)
172 /* The VR_ANTI_RANGE is equivalent to the union of the ranges
173 A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS
174 to create an inclusive upper bound for A and an inclusive lower
175 bound for B. */
176 wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
177 wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);
179 /* If the calculation of A_MAX wrapped, A is effectively empty
180 and A_MAX is the highest value that satisfies NONZERO_BITS.
181 Likewise if the calculation of B_MIN wrapped, B is effectively
182 empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */
183 bool a_empty = wi::ge_p (a_max, *min, sgn);
184 bool b_empty = wi::le_p (b_min, *max, sgn);
186 /* If both A and B are empty, there are no valid values. */
187 if (a_empty && b_empty)
188 return VR_UNDEFINED;
190 /* If exactly one of A or B is empty, return a VR_RANGE for the
191 other one. */
192 if (a_empty || b_empty)
194 *min = b_min;
195 *max = a_max;
196 gcc_checking_assert (wi::le_p (*min, *max, sgn));
197 return VR_RANGE;
200 /* Update the VR_ANTI_RANGE bounds. */
201 *min = a_max + 1;
202 *max = b_min - 1;
203 gcc_checking_assert (wi::le_p (*min, *max, sgn));
205 /* Now check whether the excluded range includes any values that
206 satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */
207 if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
209 unsigned int precision = min->get_precision ();
210 *min = wi::min_value (precision, sgn);
211 *max = wi::max_value (precision, sgn);
212 vr_type = VR_RANGE;
215 if (vr_type == VR_RANGE || vr_type == VR_VARYING)
217 *max = wi::round_down_for_mask (*max, nonzero_bits);
219 /* Check that the range contains at least one valid value. */
220 if (wi::gt_p (*min, *max, sgn))
221 return VR_UNDEFINED;
223 *min = wi::round_up_for_mask (*min, nonzero_bits);
224 gcc_checking_assert (wi::le_p (*min, *max, sgn));
226 return vr_type;
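/* Worked example (illustrative): with 4-bit unsigned values, a VR_RANGE
   [3, 14] intersected with NONZERO_BITS 0b1100 becomes [4, 12]: the lower
   bound is rounded up to the next value whose set bits all lie within the
   mask (4) and the upper bound is rounded down likewise (12).  If the
   original minimum already exceeds the rounded-down maximum, no value can
   satisfy the mask and VR_UNDEFINED is returned.  */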
229 /* Return true if max and min of VR are INTEGER_CST. It need not be
230 a singleton. */
232 bool
233 range_int_cst_p (const value_range *vr)
235 return (vr->kind () == VR_RANGE && range_has_numeric_bounds_p (vr));
238 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
239 otherwise. We only handle additive operations and set NEG to true if the
240 symbol is negated and INV to the invariant part, if any. */
242 tree
243 get_single_symbol (tree t, bool *neg, tree *inv)
245 bool neg_;
246 tree inv_;
248 *inv = NULL_TREE;
249 *neg = false;
251 if (TREE_CODE (t) == PLUS_EXPR
252 || TREE_CODE (t) == POINTER_PLUS_EXPR
253 || TREE_CODE (t) == MINUS_EXPR)
255 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
257 neg_ = (TREE_CODE (t) == MINUS_EXPR);
258 inv_ = TREE_OPERAND (t, 0);
259 t = TREE_OPERAND (t, 1);
261 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
263 neg_ = false;
264 inv_ = TREE_OPERAND (t, 1);
265 t = TREE_OPERAND (t, 0);
267 else
268 return NULL_TREE;
270 else
272 neg_ = false;
273 inv_ = NULL_TREE;
276 if (TREE_CODE (t) == NEGATE_EXPR)
278 t = TREE_OPERAND (t, 0);
279 neg_ = !neg_;
282 if (TREE_CODE (t) != SSA_NAME)
283 return NULL_TREE;
285 if (inv_ && TREE_OVERFLOW_P (inv_))
286 inv_ = drop_tree_overflow (inv_);
288 *neg = neg_;
289 *inv = inv_;
290 return t;
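/* Examples (illustrative): for T = x_5 + 16 this returns x_5 with *INV = 16
   and *NEG = false; for T = 16 - x_5 it returns x_5 with *INV = 16 and
   *NEG = true; for T = a_1 + b_2, where neither operand is an invariant,
   it returns NULL_TREE.  */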
293 /* The reverse operation: build a symbolic expression with TYPE
294 from symbol SYM, negated according to NEG, and invariant INV. */
296 static tree
297 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
299 const bool pointer_p = POINTER_TYPE_P (type);
300 tree t = sym;
302 if (neg)
303 t = build1 (NEGATE_EXPR, type, t);
305 if (integer_zerop (inv))
306 return t;
308 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
311 /* Return
312 1 if VAL < VAL2
313 0 if !(VAL < VAL2)
314 -2 if those are incomparable. */
316 operand_less_p (tree val, tree val2)
318 /* LT is folded faster than GE and others. Inline the common case. */
319 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
320 return tree_int_cst_lt (val, val2);
321 else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
322 return val == val2 ? 0 : -2;
323 else
325 int cmp = compare_values (val, val2);
326 if (cmp == -1)
327 return 1;
328 else if (cmp == 0 || cmp == 1)
329 return 0;
330 else
331 return -2;
335 /* Compare two values VAL1 and VAL2. Return
337 -2 if VAL1 and VAL2 cannot be compared at compile-time,
338 -1 if VAL1 < VAL2,
339 0 if VAL1 == VAL2,
340 +1 if VAL1 > VAL2, and
341 +2 if VAL1 != VAL2
343 This is similar to tree_int_cst_compare but supports pointer values
344 and values that cannot be compared at compile time.
346 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
347 true if the return value is only valid if we assume that signed
348 overflow is undefined. */
351 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
353 if (val1 == val2)
354 return 0;
356 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
357 both integers. */
358 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
359 == POINTER_TYPE_P (TREE_TYPE (val2)));
361 /* Convert the two values into the same type. This is needed because
362 sizetype causes sign extension even for unsigned types. */
363 if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
364 val2 = fold_convert (TREE_TYPE (val1), val2);
366 const bool overflow_undefined
367 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
368 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
369 tree inv1, inv2;
370 bool neg1, neg2;
371 tree sym1 = get_single_symbol (val1, &neg1, &inv1);
372 tree sym2 = get_single_symbol (val2, &neg2, &inv2);
374 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
375 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
376 if (sym1 && sym2)
378 /* Both values must use the same name with the same sign. */
379 if (sym1 != sym2 || neg1 != neg2)
380 return -2;
382 /* [-]NAME + CST == [-]NAME + CST. */
383 if (inv1 == inv2)
384 return 0;
386 /* If overflow is defined we cannot simplify more. */
387 if (!overflow_undefined)
388 return -2;
390 if (strict_overflow_p != NULL
391 /* Symbolic range building sets the no-warning bit to declare
392 that overflow doesn't happen. */
393 && (!inv1 || !warning_suppressed_p (val1, OPT_Woverflow))
394 && (!inv2 || !warning_suppressed_p (val2, OPT_Woverflow)))
395 *strict_overflow_p = true;
397 if (!inv1)
398 inv1 = build_int_cst (TREE_TYPE (val1), 0);
399 if (!inv2)
400 inv2 = build_int_cst (TREE_TYPE (val2), 0);
402 return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
403 TYPE_SIGN (TREE_TYPE (val1)));
406 const bool cst1 = is_gimple_min_invariant (val1);
407 const bool cst2 = is_gimple_min_invariant (val2);
409 /* If one is of the form '[-]NAME + CST' and the other is constant, then
410 it might be possible to say something depending on the constants. */
411 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
413 if (!overflow_undefined)
414 return -2;
416 if (strict_overflow_p != NULL
417 /* Symbolic range building sets the no-warning bit to declare
418 that overflow doesn't happen. */
419 && (!sym1 || !warning_suppressed_p (val1, OPT_Woverflow))
420 && (!sym2 || !warning_suppressed_p (val2, OPT_Woverflow)))
421 *strict_overflow_p = true;
423 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
424 tree cst = cst1 ? val1 : val2;
425 tree inv = cst1 ? inv2 : inv1;
427 /* Compute the difference between the constants. If it overflows or
428 underflows, this means that we can trivially compare the NAME with
429 it and, consequently, the two values with each other. */
430 wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
431 if (wi::cmp (0, wi::to_wide (inv), sgn)
432 != wi::cmp (diff, wi::to_wide (cst), sgn))
434 const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
435 return cst1 ? res : -res;
438 return -2;
441 /* We cannot say anything more for non-constants. */
442 if (!cst1 || !cst2)
443 return -2;
445 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
447 /* We cannot compare overflowed values. */
448 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
449 return -2;
451 if (TREE_CODE (val1) == INTEGER_CST
452 && TREE_CODE (val2) == INTEGER_CST)
453 return tree_int_cst_compare (val1, val2);
455 if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
457 if (known_eq (wi::to_poly_widest (val1),
458 wi::to_poly_widest (val2)))
459 return 0;
460 if (known_lt (wi::to_poly_widest (val1),
461 wi::to_poly_widest (val2)))
462 return -1;
463 if (known_gt (wi::to_poly_widest (val1),
464 wi::to_poly_widest (val2)))
465 return 1;
468 return -2;
470 else
472 if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
474 /* We cannot compare overflowed values. */
475 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
476 return -2;
478 return tree_int_cst_compare (val1, val2);
481 /* First see if VAL1 and VAL2 are not the same. */
482 if (operand_equal_p (val1, val2, 0))
483 return 0;
485 fold_defer_overflow_warnings ();
487 /* If VAL1 is a lower address than VAL2, return -1. */
488 tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
489 if (t && integer_onep (t))
491 fold_undefer_and_ignore_overflow_warnings ();
492 return -1;
495 /* If VAL1 is a higher address than VAL2, return +1. */
496 t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
497 if (t && integer_onep (t))
499 fold_undefer_and_ignore_overflow_warnings ();
500 return 1;
503 /* If VAL1 is different than VAL2, return +2. */
504 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
505 fold_undefer_and_ignore_overflow_warnings ();
506 if (t && integer_onep (t))
507 return 2;
509 return -2;
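/* Example (illustrative): comparing n_7 + 3 against n_7 + 10 returns -1 when
   signed overflow is undefined for the type, because both values use the same
   symbol with the same sign and only the invariants 3 and 10 need to be
   compared; *STRICT_OVERFLOW_P is set unless the no-warning bit declares that
   the additions cannot overflow.  With wrapping overflow the result is -2.  */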
513 /* Compare values like compare_values_warnv. */
516 compare_values (tree val1, tree val2)
518 bool sop;
519 return compare_values_warnv (val1, val2, &sop);
522 /* If BOUND will include a symbolic bound, adjust it accordingly,
523 otherwise leave it as is.
525 CODE is the original operation that combined the bounds (PLUS_EXPR
526 or MINUS_EXPR).
528 TYPE is the type of the original operation.
530 SYM_OPn is the symbolic part of OPn, if any.
532 NEG_OPn is TRUE if the OPn was negated. */
534 static void
535 adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
536 tree sym_op0, tree sym_op1,
537 bool neg_op0, bool neg_op1)
539 bool minus_p = (code == MINUS_EXPR);
540 /* If the result bound is constant, we're done; otherwise, build the
541 symbolic lower bound. */
542 if (sym_op0 == sym_op1)
544 else if (sym_op0)
545 bound = build_symbolic_expr (type, sym_op0,
546 neg_op0, bound);
547 else if (sym_op1)
549 /* We may not negate if that might introduce
550 undefined overflow. */
551 if (!minus_p
552 || neg_op1
553 || TYPE_OVERFLOW_WRAPS (type))
554 bound = build_symbolic_expr (type, sym_op1,
555 neg_op1 ^ minus_p, bound);
556 else
557 bound = NULL_TREE;
561 /* Combine OP0 and OP1, which are two parts of a bound, into one wide
562 int bound according to CODE. CODE is the operation combining the
563 bound (either a PLUS_EXPR or a MINUS_EXPR).
565 TYPE is the type of the combine operation.
567 WI is the wide int to store the result.
569 OVF is -1 if an underflow occurred, +1 if an overflow occurred, or 0
570 if neither occurred. */
572 static void
573 combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
574 tree type, tree op0, tree op1)
576 bool minus_p = (code == MINUS_EXPR);
577 const signop sgn = TYPE_SIGN (type);
578 const unsigned int prec = TYPE_PRECISION (type);
580 /* Combine the bounds, if any. */
581 if (op0 && op1)
583 if (minus_p)
584 wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
585 else
586 wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
588 else if (op0)
589 wi = wi::to_wide (op0);
590 else if (op1)
592 if (minus_p)
593 wi = wi::neg (wi::to_wide (op1), &ovf);
594 else
595 wi = wi::to_wide (op1);
597 else
598 wi = wi::shwi (0, prec);
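/* Example (illustrative): for an unsigned char operation, combining OP0 = 200
   and OP1 = 100 with PLUS_EXPR stores the truncated sum 44 in WI and sets
   OVF to wi::OVF_OVERFLOW; the caller (set_value_range_with_overflow below)
   then decides whether to wrap or saturate.  */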
601 /* Given a range in [WMIN, WMAX], adjust it for possible overflow and
602 store the resulting range kind and bounds in KIND, MIN and MAX.
604 TYPE is the type of the range.
606 MIN_OVF and MAX_OVF indicate what type of overflow, if any,
607 occurred while originally calculating WMIN or WMAX. -1 indicates
608 underflow. +1 indicates overflow. 0 indicates neither. */
610 static void
611 set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
612 tree type,
613 const wide_int &wmin, const wide_int &wmax,
614 wi::overflow_type min_ovf,
615 wi::overflow_type max_ovf)
617 const signop sgn = TYPE_SIGN (type);
618 const unsigned int prec = TYPE_PRECISION (type);
620 /* For one-bit precision, if max < min then the swapped
621 range covers all values. */
622 if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
624 kind = VR_VARYING;
625 return;
628 if (TYPE_OVERFLOW_WRAPS (type))
630 /* If overflow wraps, truncate the values and adjust the
631 range kind and bounds appropriately. */
632 wide_int tmin = wide_int::from (wmin, prec, sgn);
633 wide_int tmax = wide_int::from (wmax, prec, sgn);
634 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
636 /* If the limits are swapped, we wrapped around and cover
637 the entire range. */
638 if (wi::gt_p (tmin, tmax, sgn))
639 kind = VR_VARYING;
640 else
642 kind = VR_RANGE;
643 /* No overflow or both overflow or underflow. The
644 range kind stays VR_RANGE. */
645 min = wide_int_to_tree (type, tmin);
646 max = wide_int_to_tree (type, tmax);
648 return;
650 else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
651 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
653 /* Min underflow or max overflow. The range kind
654 changes to VR_ANTI_RANGE. */
655 bool covers = false;
656 wide_int tem = tmin;
657 tmin = tmax + 1;
658 if (wi::cmp (tmin, tmax, sgn) < 0)
659 covers = true;
660 tmax = tem - 1;
661 if (wi::cmp (tmax, tem, sgn) > 0)
662 covers = true;
663 /* If the anti-range would cover nothing, drop to varying.
664 Likewise if the anti-range bounds are outside of the
665 type's values. */
666 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
668 kind = VR_VARYING;
669 return;
671 kind = VR_ANTI_RANGE;
672 min = wide_int_to_tree (type, tmin);
673 max = wide_int_to_tree (type, tmax);
674 return;
676 else
678 /* Other underflow and/or overflow, drop to VR_VARYING. */
679 kind = VR_VARYING;
680 return;
683 else
685 /* If overflow does not wrap, saturate to the type's min/max
686 value. */
687 wide_int type_min = wi::min_value (prec, sgn);
688 wide_int type_max = wi::max_value (prec, sgn);
689 kind = VR_RANGE;
690 if (min_ovf == wi::OVF_UNDERFLOW)
691 min = wide_int_to_tree (type, type_min);
692 else if (min_ovf == wi::OVF_OVERFLOW)
693 min = wide_int_to_tree (type, type_max);
694 else
695 min = wide_int_to_tree (type, wmin);
697 if (max_ovf == wi::OVF_UNDERFLOW)
698 max = wide_int_to_tree (type, type_min);
699 else if (max_ovf == wi::OVF_OVERFLOW)
700 max = wide_int_to_tree (type, type_max);
701 else
702 max = wide_int_to_tree (type, wmax);
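/* Worked example (illustrative): for unsigned char, which wraps, WMIN = 250
   with no overflow and a WMAX whose computation overflowed (250 + 10,
   truncated to 4) yield the anti-range ~[5, 249], i.e. [0, 4] union
   [250, 255].  For a type where overflow does not wrap, an overflowed bound
   saturates to the type's maximum instead.  */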
706 /* Fold two value ranges of a POINTER_PLUS_EXPR into VR. */
708 static void
709 extract_range_from_pointer_plus_expr (value_range *vr,
710 enum tree_code code,
711 tree expr_type,
712 const value_range *vr0,
713 const value_range *vr1)
715 gcc_checking_assert (POINTER_TYPE_P (expr_type)
716 && code == POINTER_PLUS_EXPR);
717 /* For pointer types, we are really only interested in asserting
718 whether the expression evaluates to non-NULL.
719 With -fno-delete-null-pointer-checks we need to be more
720 conservative. As some object might reside at address 0,
721 then some offset could be added to it and the same offset
722 subtracted again and the result would be NULL.
723 E.g.
724 static int a[12]; where &a[0] is NULL and
725 ptr = &a[6];
726 ptr -= 6;
727 ptr will be NULL here, even when there is POINTER_PLUS_EXPR
728 where the first range doesn't include zero and the second one
729 doesn't either. As the second operand is sizetype (unsigned),
730 consider all ranges where the MSB could be set as possible
731 subtractions where the result might be NULL. */
732 if ((!range_includes_zero_p (vr0)
733 || !range_includes_zero_p (vr1))
734 && !TYPE_OVERFLOW_WRAPS (expr_type)
735 && (flag_delete_null_pointer_checks
736 || (range_int_cst_p (vr1)
737 && !tree_int_cst_sign_bit (vr1->max ()))))
738 vr->set_nonzero (expr_type);
739 else if (vr0->zero_p () && vr1->zero_p ())
740 vr->set_zero (expr_type);
741 else
742 vr->set_varying (expr_type);
745 /* Extract range information from a PLUS/MINUS_EXPR and store the
746 result in *VR. */
748 static void
749 extract_range_from_plus_minus_expr (value_range *vr,
750 enum tree_code code,
751 tree expr_type,
752 const value_range *vr0_,
753 const value_range *vr1_)
755 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
757 value_range vr0 = *vr0_, vr1 = *vr1_;
758 value_range vrtem0, vrtem1;
760 /* Now canonicalize anti-ranges to ranges when they are not symbolic
761 and express ~[] op X as ([]' op X) U ([]'' op X). */
762 if (vr0.kind () == VR_ANTI_RANGE
763 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
765 extract_range_from_plus_minus_expr (vr, code, expr_type, &vrtem0, vr1_);
766 if (!vrtem1.undefined_p ())
768 value_range vrres;
769 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
770 &vrtem1, vr1_);
771 vr->union_ (vrres);
773 return;
775 /* Likewise for X op ~[]. */
776 if (vr1.kind () == VR_ANTI_RANGE
777 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
779 extract_range_from_plus_minus_expr (vr, code, expr_type, vr0_, &vrtem0);
780 if (!vrtem1.undefined_p ())
782 value_range vrres;
783 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
784 vr0_, &vrtem1);
785 vr->union_ (vrres);
787 return;
790 value_range_kind kind;
791 value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
792 tree vr0_min = vr0.min (), vr0_max = vr0.max ();
793 tree vr1_min = vr1.min (), vr1_max = vr1.max ();
794 tree min = NULL_TREE, max = NULL_TREE;
796 /* This will normalize things such that calculating
797 [0,0] - VR_VARYING is not dropped to varying, but is
798 calculated as [MIN+1, MAX]. */
799 if (vr0.varying_p ())
801 vr0_kind = VR_RANGE;
802 vr0_min = vrp_val_min (expr_type);
803 vr0_max = vrp_val_max (expr_type);
805 if (vr1.varying_p ())
807 vr1_kind = VR_RANGE;
808 vr1_min = vrp_val_min (expr_type);
809 vr1_max = vrp_val_max (expr_type);
812 const bool minus_p = (code == MINUS_EXPR);
813 tree min_op0 = vr0_min;
814 tree min_op1 = minus_p ? vr1_max : vr1_min;
815 tree max_op0 = vr0_max;
816 tree max_op1 = minus_p ? vr1_min : vr1_max;
817 tree sym_min_op0 = NULL_TREE;
818 tree sym_min_op1 = NULL_TREE;
819 tree sym_max_op0 = NULL_TREE;
820 tree sym_max_op1 = NULL_TREE;
821 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
823 neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
825 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
826 single-symbolic ranges, try to compute the precise resulting range,
827 but only if we know that this resulting range will also be constant
828 or single-symbolic. */
829 if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
830 && (TREE_CODE (min_op0) == INTEGER_CST
831 || (sym_min_op0
832 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
833 && (TREE_CODE (min_op1) == INTEGER_CST
834 || (sym_min_op1
835 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
836 && (!(sym_min_op0 && sym_min_op1)
837 || (sym_min_op0 == sym_min_op1
838 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
839 && (TREE_CODE (max_op0) == INTEGER_CST
840 || (sym_max_op0
841 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
842 && (TREE_CODE (max_op1) == INTEGER_CST
843 || (sym_max_op1
844 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
845 && (!(sym_max_op0 && sym_max_op1)
846 || (sym_max_op0 == sym_max_op1
847 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
849 wide_int wmin, wmax;
850 wi::overflow_type min_ovf = wi::OVF_NONE;
851 wi::overflow_type max_ovf = wi::OVF_NONE;
853 /* Build the bounds. */
854 combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
855 combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
857 /* If the resulting range will be symbolic, we need to eliminate any
858 explicit or implicit overflow introduced in the above computation
859 because compare_values could make an incorrect use of it. That's
860 why we require one of the ranges to be a singleton. */
861 if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1)
862 && ((bool)min_ovf || (bool)max_ovf
863 || (min_op0 != max_op0 && min_op1 != max_op1)))
865 vr->set_varying (expr_type);
866 return;
869 /* Adjust the range for possible overflow. */
870 set_value_range_with_overflow (kind, min, max, expr_type,
871 wmin, wmax, min_ovf, max_ovf);
872 if (kind == VR_VARYING)
874 vr->set_varying (expr_type);
875 return;
878 /* Build the symbolic bounds if needed. */
879 adjust_symbolic_bound (min, code, expr_type,
880 sym_min_op0, sym_min_op1,
881 neg_min_op0, neg_min_op1);
882 adjust_symbolic_bound (max, code, expr_type,
883 sym_max_op0, sym_max_op1,
884 neg_max_op0, neg_max_op1);
886 else
888 /* For other cases, for example if we have a PLUS_EXPR with two
889 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
890 to compute a precise range for such a case.
891 ??? Even general mixed-range-kind operations can be expressed,
892 for example by transforming ~[3, 5] + [1, 2] into range-only
893 operations and a union primitive:
894 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
895 [-INF+1, 4] U [6, +INF(OVF)]
896 though usually the union is not exactly representable with
897 a single range or anti-range as the above is
898 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
899 but one could use a scheme similar to equivalences for this. */
900 vr->set_varying (expr_type);
901 return;
904 /* If either MIN or MAX overflowed, then set the resulting range to
905 VARYING. */
906 if (min == NULL_TREE
907 || TREE_OVERFLOW_P (min)
908 || max == NULL_TREE
909 || TREE_OVERFLOW_P (max))
911 vr->set_varying (expr_type);
912 return;
915 int cmp = compare_values (min, max);
916 if (cmp == -2 || cmp == 1)
918 /* If the new range has its limits swapped around (MIN > MAX),
919 then the operation caused one of them to wrap around, mark
920 the new range VARYING. */
921 vr->set_varying (expr_type);
923 else
924 vr->set (min, max, kind);
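/* Examples (illustrative): [1, 5] + [10, 20] yields [11, 25], and
   [1, 5] - [10, 20] yields [-19, -5] (min = 1 - 20, max = 5 - 10).  With a
   symbolic singleton operand, [x_1, x_1] + [3, 3] yields the symbolic range
   [x_1 + 3, x_1 + 3].  */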
927 /* If the types passed are supported, return TRUE, otherwise set VR to
928 VARYING and return FALSE. */
930 static bool
931 supported_types_p (value_range *vr,
932 tree type0,
933 tree type1 = NULL)
935 if (!value_range_equiv::supports_p (type0)
936 || (type1 && !value_range_equiv::supports_p (type1)))
938 vr->set_varying (type0);
939 return false;
941 return true;
944 /* If any of the ranges passed are defined, return TRUE, otherwise set
945 VR to UNDEFINED and return FALSE. */
947 static bool
948 defined_ranges_p (value_range *vr,
949 const value_range *vr0, const value_range *vr1 = NULL)
951 if (vr0->undefined_p () && (!vr1 || vr1->undefined_p ()))
953 vr->set_undefined ();
954 return false;
956 return true;
959 static value_range
960 drop_undefines_to_varying (const value_range *vr, tree expr_type)
962 if (vr->undefined_p ())
963 return value_range (expr_type);
964 else
965 return *vr;
968 /* If any operand is symbolic, perform a binary operation on them and
969 return TRUE, otherwise return FALSE. */
971 static bool
972 range_fold_binary_symbolics_p (value_range *vr,
973 tree_code code,
974 tree expr_type,
975 const value_range *vr0_,
976 const value_range *vr1_)
978 if (vr0_->symbolic_p () || vr1_->symbolic_p ())
980 value_range vr0 = drop_undefines_to_varying (vr0_, expr_type);
981 value_range vr1 = drop_undefines_to_varying (vr1_, expr_type);
982 if ((code == PLUS_EXPR || code == MINUS_EXPR))
984 extract_range_from_plus_minus_expr (vr, code, expr_type,
985 &vr0, &vr1);
986 return true;
988 if (POINTER_TYPE_P (expr_type) && code == POINTER_PLUS_EXPR)
990 extract_range_from_pointer_plus_expr (vr, code, expr_type,
991 &vr0, &vr1);
992 return true;
994 range_op_handler op (code, expr_type);
995 if (!op)
996 vr->set_varying (expr_type);
997 vr0.normalize_symbolics ();
998 vr1.normalize_symbolics ();
999 return op.fold_range (*vr, expr_type, vr0, vr1);
1001 return false;
1004 /* If operand is symbolic, perform a unary operation on it and return
1005 TRUE, otherwise return FALSE. */
1007 static bool
1008 range_fold_unary_symbolics_p (value_range *vr,
1009 tree_code code,
1010 tree expr_type,
1011 const value_range *vr0)
1013 if (vr0->symbolic_p ())
1015 if (code == NEGATE_EXPR)
1017 /* -X is simply 0 - X. */
1018 value_range zero;
1019 zero.set_zero (vr0->type ());
1020 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &zero, vr0);
1021 return true;
1023 if (code == BIT_NOT_EXPR)
1025 /* ~X is simply -1 - X. */
1026 value_range minusone;
1027 tree t = build_int_cst (vr0->type (), -1);
1028 minusone.set (t, t);
1029 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &minusone, vr0);
1030 return true;
1032 range_op_handler op (code, expr_type);
1033 if (!op)
1034 vr->set_varying (expr_type);
1035 value_range vr0_cst (*vr0);
1036 vr0_cst.normalize_symbolics ();
1037 return op.fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
1039 return false;
1042 /* Perform a binary operation on a pair of ranges. */
1044 void
1045 range_fold_binary_expr (value_range *vr,
1046 enum tree_code code,
1047 tree expr_type,
1048 const value_range *vr0_,
1049 const value_range *vr1_)
1051 if (!supported_types_p (vr, expr_type)
1052 || !defined_ranges_p (vr, vr0_, vr1_))
1053 return;
1054 range_op_handler op (code, expr_type);
1055 if (!op)
1057 vr->set_varying (expr_type);
1058 return;
1061 if (range_fold_binary_symbolics_p (vr, code, expr_type, vr0_, vr1_))
1062 return;
1064 value_range vr0 (*vr0_);
1065 value_range vr1 (*vr1_);
1066 if (vr0.undefined_p ())
1067 vr0.set_varying (expr_type);
1068 if (vr1.undefined_p ())
1069 vr1.set_varying (expr_type);
1070 vr0.normalize_addresses ();
1071 vr1.normalize_addresses ();
1072 if (!op.fold_range (*vr, expr_type, vr0, vr1))
1073 vr->set_varying (expr_type);
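/* Minimal usage sketch (illustrative, assuming the legacy value_range API
   used throughout this file):

     value_range a (build_int_cst (integer_type_node, 1),
		    build_int_cst (integer_type_node, 5));
     value_range b (build_int_cst (integer_type_node, 10),
		    build_int_cst (integer_type_node, 20));
     value_range res;
     range_fold_binary_expr (&res, PLUS_EXPR, integer_type_node, &a, &b);
     // res is now [11, 25]

   An unsupported type or an unhandled CODE leaves RES as VR_VARYING.  */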
1076 /* Perform a unary operation on a range. */
1078 void
1079 range_fold_unary_expr (value_range *vr,
1080 enum tree_code code, tree expr_type,
1081 const value_range *vr0,
1082 tree vr0_type)
1084 if (!supported_types_p (vr, expr_type, vr0_type)
1085 || !defined_ranges_p (vr, vr0))
1086 return;
1087 range_op_handler op (code, expr_type);
1088 if (!op)
1090 vr->set_varying (expr_type);
1091 return;
1094 if (range_fold_unary_symbolics_p (vr, code, expr_type, vr0))
1095 return;
1097 value_range vr0_cst (*vr0);
1098 vr0_cst.normalize_addresses ();
1099 if (!op.fold_range (*vr, expr_type, vr0_cst, value_range (expr_type)))
1100 vr->set_varying (expr_type);
1103 /* If the range of values taken by OP can be inferred after STMT executes,
1104 return the comparison code (COMP_CODE_P) and value (VAL_P) that
1105 describes the inferred range. Return true if a range could be
1106 inferred. */
1108 bool
1109 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
1111 *val_p = NULL_TREE;
1112 *comp_code_p = ERROR_MARK;
1114 /* Do not attempt to infer anything in names that flow through
1115 abnormal edges. */
1116 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
1117 return false;
1119 /* If STMT is the last statement of a basic block with no normal
1120 successors, there is no point inferring anything about any of its
1121 operands. We would not be able to find a proper insertion point
1122 for the assertion, anyway. */
1123 if (stmt_ends_bb_p (stmt))
1125 edge_iterator ei;
1126 edge e;
1128 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1129 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
1130 break;
1131 if (e == NULL)
1132 return false;
1135 if (infer_nonnull_range (stmt, op))
1137 *val_p = build_int_cst (TREE_TYPE (op), 0);
1138 *comp_code_p = NE_EXPR;
1139 return true;
1142 return false;
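/* Example (illustrative): for a statement that dereferences a pointer, such
   as "*p_8 = 1;", infer_nonnull_range holds for p_8 (given
   -fdelete-null-pointer-checks), so this returns true with *COMP_CODE_P set
   to NE_EXPR and *VAL_P set to the zero constant, i.e. the inferred range is
   p_8 != 0 after the statement executes.  */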
1145 /* Dump assert_info structure. */
1147 void
1148 dump_assert_info (FILE *file, const assert_info &assert)
1150 fprintf (file, "Assert for: ");
1151 print_generic_expr (file, assert.name);
1152 fprintf (file, "\n\tPREDICATE: expr=[");
1153 print_generic_expr (file, assert.expr);
1154 fprintf (file, "] %s ", get_tree_code_name (assert.comp_code));
1155 fprintf (file, "val=[");
1156 print_generic_expr (file, assert.val);
1157 fprintf (file, "]\n\n");
1160 DEBUG_FUNCTION void
1161 debug (const assert_info &assert)
1163 dump_assert_info (stderr, assert);
1166 /* Dump a vector of assert_info's. */
1168 void
1169 dump_asserts_info (FILE *file, const vec<assert_info> &asserts)
1171 for (unsigned i = 0; i < asserts.length (); ++i)
1173 dump_assert_info (file, asserts[i]);
1174 fprintf (file, "\n");
1178 DEBUG_FUNCTION void
1179 debug (const vec<assert_info> &asserts)
1181 dump_asserts_info (stderr, asserts);
1184 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
1186 static void
1187 add_assert_info (vec<assert_info> &asserts,
1188 tree name, tree expr, enum tree_code comp_code, tree val)
1190 assert_info info;
1191 info.comp_code = comp_code;
1192 info.name = name;
1193 if (TREE_OVERFLOW_P (val))
1194 val = drop_tree_overflow (val);
1195 info.val = val;
1196 info.expr = expr;
1197 asserts.safe_push (info);
1198 if (dump_enabled_p ())
1199 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
1200 "Adding assert for %T from %T %s %T\n",
1201 name, expr, op_symbol_code (comp_code), val);
1204 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
1205 Extract a suitable test code and value and store them into *CODE_P and
1206 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
1208 If no extraction was possible, return FALSE, otherwise return TRUE.
1210 If INVERT is true, then we invert the result stored into *CODE_P. */
1212 static bool
1213 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
1214 tree cond_op0, tree cond_op1,
1215 bool invert, enum tree_code *code_p,
1216 tree *val_p)
1218 enum tree_code comp_code;
1219 tree val;
1221 /* We have a comparison of the form NAME COMP VAL
1222 or VAL COMP NAME. */
1223 if (name == cond_op1)
1225 /* If the predicate is of the form VAL COMP NAME, flip
1226 COMP around because we need to register NAME as the
1227 first operand in the predicate. */
1228 comp_code = swap_tree_comparison (cond_code);
1229 val = cond_op0;
1231 else if (name == cond_op0)
1233 /* The comparison is of the form NAME COMP VAL, so the
1234 comparison code remains unchanged. */
1235 comp_code = cond_code;
1236 val = cond_op1;
1238 else
1239 gcc_unreachable ();
1241 /* Invert the comparison code as necessary. */
1242 if (invert)
1243 comp_code = invert_tree_comparison (comp_code, 0);
1245 /* VRP only handles integral and pointer types. */
1246 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
1247 && ! POINTER_TYPE_P (TREE_TYPE (val)))
1248 return false;
1250 /* Do not register always-false predicates.
1251 FIXME: this works around a limitation in fold() when dealing with
1252 enumerations. Given 'enum { N1, N2 } x;', fold will not
1253 fold 'if (x > N2)' to 'if (0)'. */
1254 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
1255 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
1257 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
1258 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
1260 if (comp_code == GT_EXPR
1261 && (!max
1262 || compare_values (val, max) == 0))
1263 return false;
1265 if (comp_code == LT_EXPR
1266 && (!min
1267 || compare_values (val, min) == 0))
1268 return false;
1270 *code_p = comp_code;
1271 *val_p = val;
1272 return true;
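/* Example (illustrative): for the condition 10 < x_3 with NAME = x_3, the
   comparison is flipped around, so *CODE_P = GT_EXPR and *VAL_P = 10; with
   INVERT set (for the opposite edge) the result is instead x_3 <= 10.  */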
1275 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
1276 (otherwise return VAL). VAL and MASK must be zero-extended for
1277 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
1278 (to transform signed values into unsigned) and at the end xor
1279 SGNBIT back. */
1281 wide_int
1282 masked_increment (const wide_int &val_in, const wide_int &mask,
1283 const wide_int &sgnbit, unsigned int prec)
1285 wide_int bit = wi::one (prec), res;
1286 unsigned int i;
1288 wide_int val = val_in ^ sgnbit;
1289 for (i = 0; i < prec; i++, bit += bit)
1291 res = mask;
1292 if ((res & bit) == 0)
1293 continue;
1294 res = bit - 1;
1295 res = wi::bit_and_not (val + bit, res);
1296 res &= mask;
1297 if (wi::gtu_p (res, val))
1298 return res ^ sgnbit;
1300 return val ^ sgnbit;
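/* Worked example (illustrative): for VAL = 5 (0b0101), MASK = 0b1100 and
   SGNBIT = 0, the smallest RES > 5 with (RES & MASK) == RES is 8 (0b1000),
   which is what the loop above returns.  */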
1303 /* Helper for overflow_comparison_p
1305 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
1306 OP1's defining statement to see if it ultimately has the form
1307 OP0 CODE (OP0 PLUS INTEGER_CST)
1309 If so, return TRUE indicating this is an overflow test and store into
1310 *NEW_CST an updated constant that can be used in a narrowed range test.
1312 REVERSED indicates if the comparison was originally:
1314 OP1 CODE' OP0.
1316 This affects how we build the updated constant. */
1318 static bool
1319 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
1320 bool follow_assert_exprs, bool reversed, tree *new_cst)
1322 /* See if this is a relational operation between two SSA_NAMES with
1323 unsigned, overflow wrapping values. If so, check it more deeply. */
1324 if ((code == LT_EXPR || code == LE_EXPR
1325 || code == GE_EXPR || code == GT_EXPR)
1326 && TREE_CODE (op0) == SSA_NAME
1327 && TREE_CODE (op1) == SSA_NAME
1328 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
1329 && TYPE_UNSIGNED (TREE_TYPE (op0))
1330 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
1332 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
1334 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
1335 if (follow_assert_exprs)
1337 while (gimple_assign_single_p (op1_def)
1338 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
1340 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
1341 if (TREE_CODE (op1) != SSA_NAME)
1342 break;
1343 op1_def = SSA_NAME_DEF_STMT (op1);
1347 /* Now look at the defining statement of OP1 to see if it adds
1348 or subtracts a nonzero constant from another operand. */
1349 if (op1_def
1350 && is_gimple_assign (op1_def)
1351 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
1352 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
1353 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
1355 tree target = gimple_assign_rhs1 (op1_def);
1357 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
1358 for one where TARGET appears on the RHS. */
1359 if (follow_assert_exprs)
1361 /* Now see if that "other operand" is op0, following the chain
1362 of ASSERT_EXPRs if necessary. */
1363 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
1364 while (op0 != target
1365 && gimple_assign_single_p (op0_def)
1366 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
1368 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
1369 if (TREE_CODE (op0) != SSA_NAME)
1370 break;
1371 op0_def = SSA_NAME_DEF_STMT (op0);
1375 /* If we did not find our target SSA_NAME, then this is not
1376 an overflow test. */
1377 if (op0 != target)
1378 return false;
1380 tree type = TREE_TYPE (op0);
1381 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
1382 tree inc = gimple_assign_rhs2 (op1_def);
1383 if (reversed)
1384 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
1385 else
1386 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
1387 return true;
1390 return false;
1393 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
1394 OP1's defining statement to see if it ultimately has the form
1395 OP0 CODE (OP0 PLUS INTEGER_CST)
1397 If so, return TRUE indicating this is an overflow test and store into
1398 *NEW_CST an updated constant that can be used in a narrowed range test.
1400 These statements are left as-is in the IL to facilitate discovery of
1401 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
1402 the alternate range representation is often useful within VRP. */
1404 bool
1405 overflow_comparison_p (tree_code code, tree name, tree val,
1406 bool use_equiv_p, tree *new_cst)
1408 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
1409 return true;
1410 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
1411 use_equiv_p, true, new_cst);
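/* Example (illustrative): for unsigned x_2 and y_3 = x_2 + 1, the test
   x_2 < y_3 fails only when the addition wraps, i.e. when x_2 is the type's
   maximum value.  It is therefore recognized as an overflow test and
   *NEW_CST is set to maximum - 1, allowing the narrowed range test
   x_2 <= maximum - 1.  */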
1415 /* Try to register an edge assertion for SSA name NAME on edge E for
1416 the condition COND contributing to the conditional jump pointed to by BSI.
1417 Invert the condition COND if INVERT is true. */
1419 static void
1420 register_edge_assert_for_2 (tree name, edge e,
1421 enum tree_code cond_code,
1422 tree cond_op0, tree cond_op1, bool invert,
1423 vec<assert_info> &asserts)
1425 tree val;
1426 enum tree_code comp_code;
1428 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
1429 cond_op0,
1430 cond_op1,
1431 invert, &comp_code, &val))
1432 return;
1434 /* Queue the assert. */
1435 tree x;
1436 if (overflow_comparison_p (comp_code, name, val, false, &x))
1438 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
1439 ? GT_EXPR : LE_EXPR);
1440 add_assert_info (asserts, name, name, new_code, x);
1442 add_assert_info (asserts, name, name, comp_code, val);
1444 /* In the case of NAME <= CST and NAME being defined as
1445 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
1446 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
1447 This catches range and anti-range tests. */
1448 if ((comp_code == LE_EXPR
1449 || comp_code == GT_EXPR)
1450 && TREE_CODE (val) == INTEGER_CST
1451 && TYPE_UNSIGNED (TREE_TYPE (val)))
1453 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1454 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
1456 /* Extract CST2 from the (optional) addition. */
1457 if (is_gimple_assign (def_stmt)
1458 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
1460 name2 = gimple_assign_rhs1 (def_stmt);
1461 cst2 = gimple_assign_rhs2 (def_stmt);
1462 if (TREE_CODE (name2) == SSA_NAME
1463 && TREE_CODE (cst2) == INTEGER_CST)
1464 def_stmt = SSA_NAME_DEF_STMT (name2);
1467 /* Extract NAME2 from the (optional) sign-changing cast. */
1468 if (gassign *ass = dyn_cast <gassign *> (def_stmt))
1470 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (ass))
1471 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (ass)))
1472 && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (ass)))
1473 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (ass)))))
1474 name3 = gimple_assign_rhs1 (ass);
1477 /* If name3 is used later, create an ASSERT_EXPR for it. */
1478 if (name3 != NULL_TREE
1479 && TREE_CODE (name3) == SSA_NAME
1480 && (cst2 == NULL_TREE
1481 || TREE_CODE (cst2) == INTEGER_CST)
1482 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
1484 tree tmp;
1486 /* Build an expression for the range test. */
1487 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
1488 if (cst2 != NULL_TREE)
1489 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1490 add_assert_info (asserts, name3, tmp, comp_code, val);
1493 /* If name2 is used later, create an ASSERT_EXPR for it. */
1494 if (name2 != NULL_TREE
1495 && TREE_CODE (name2) == SSA_NAME
1496 && TREE_CODE (cst2) == INTEGER_CST
1497 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
1499 tree tmp;
1501 /* Build an expression for the range test. */
1502 tmp = name2;
1503 if (TREE_TYPE (name) != TREE_TYPE (name2))
1504 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
1505 if (cst2 != NULL_TREE)
1506 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1507 add_assert_info (asserts, name2, tmp, comp_code, val);
1511 /* In the case of post-in/decrement tests like if (i++) ... and uses
1512 of the in/decremented value on the edge, the extra name we want to
1513 assert for is not on the def chain of the name compared. Instead
1514 it is in the set of use stmts.
1515 Similar cases happen for conversions that were simplified through
1516 fold_{sign_changed,widened}_comparison. */
1517 if ((comp_code == NE_EXPR
1518 || comp_code == EQ_EXPR)
1519 && TREE_CODE (val) == INTEGER_CST)
1521 imm_use_iterator ui;
1522 gimple *use_stmt;
1523 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
1525 if (!is_gimple_assign (use_stmt))
1526 continue;
1528 /* Cut off to use-stmts that are dominating the predecessor. */
1529 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
1530 continue;
1532 tree name2 = gimple_assign_lhs (use_stmt);
1533 if (TREE_CODE (name2) != SSA_NAME)
1534 continue;
1536 enum tree_code code = gimple_assign_rhs_code (use_stmt);
1537 tree cst;
1538 if (code == PLUS_EXPR
1539 || code == MINUS_EXPR)
1541 cst = gimple_assign_rhs2 (use_stmt);
1542 if (TREE_CODE (cst) != INTEGER_CST)
1543 continue;
1544 cst = int_const_binop (code, val, cst);
1546 else if (CONVERT_EXPR_CODE_P (code))
1548 /* For truncating conversions we cannot record
1549 an inequality. */
1550 if (comp_code == NE_EXPR
1551 && (TYPE_PRECISION (TREE_TYPE (name2))
1552 < TYPE_PRECISION (TREE_TYPE (name))))
1553 continue;
1554 cst = fold_convert (TREE_TYPE (name2), val);
1556 else
1557 continue;
1559 if (TREE_OVERFLOW_P (cst))
1560 cst = drop_tree_overflow (cst);
1561 add_assert_info (asserts, name2, name2, comp_code, cst);
1565 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
1566 && TREE_CODE (val) == INTEGER_CST)
1568 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1569 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
1570 tree val2 = NULL_TREE;
1571 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
1572 wide_int mask = wi::zero (prec);
1573 unsigned int nprec = prec;
1574 enum tree_code rhs_code = ERROR_MARK;
1576 if (is_gimple_assign (def_stmt))
1577 rhs_code = gimple_assign_rhs_code (def_stmt);
1579 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
1580 assert that A != CST1 -+ CST2. */
1581 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
1582 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
1584 tree op0 = gimple_assign_rhs1 (def_stmt);
1585 tree op1 = gimple_assign_rhs2 (def_stmt);
1586 if (TREE_CODE (op0) == SSA_NAME
1587 && TREE_CODE (op1) == INTEGER_CST)
1589 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
1590 ? MINUS_EXPR : PLUS_EXPR);
1591 op1 = int_const_binop (reverse_op, val, op1);
1592 if (TREE_OVERFLOW (op1))
1593 op1 = drop_tree_overflow (op1);
1594 add_assert_info (asserts, op0, op0, comp_code, op1);
1598 /* Add asserts for NAME cmp CST and NAME being defined
1599 as NAME = (int) NAME2. */
1600 if (!TYPE_UNSIGNED (TREE_TYPE (val))
1601 && (comp_code == LE_EXPR || comp_code == LT_EXPR
1602 || comp_code == GT_EXPR || comp_code == GE_EXPR)
1603 && gimple_assign_cast_p (def_stmt))
1605 name2 = gimple_assign_rhs1 (def_stmt);
1606 if (CONVERT_EXPR_CODE_P (rhs_code)
1607 && TREE_CODE (name2) == SSA_NAME
1608 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1609 && TYPE_UNSIGNED (TREE_TYPE (name2))
1610 && prec == TYPE_PRECISION (TREE_TYPE (name2))
1611 && (comp_code == LE_EXPR || comp_code == GT_EXPR
1612 || !tree_int_cst_equal (val,
1613 TYPE_MIN_VALUE (TREE_TYPE (val)))))
1615 tree tmp, cst;
1616 enum tree_code new_comp_code = comp_code;
1618 cst = fold_convert (TREE_TYPE (name2),
1619 TYPE_MIN_VALUE (TREE_TYPE (val)));
1620 /* Build an expression for the range test. */
1621 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
1622 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
1623 fold_convert (TREE_TYPE (name2), val));
1624 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
1626 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
1627 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
1628 build_int_cst (TREE_TYPE (name2), 1));
1630 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
1634 /* Add asserts for NAME cmp CST and NAME being defined as
1635 NAME = NAME2 >> CST2.
1637 Extract CST2 from the right shift. */
1638 if (rhs_code == RSHIFT_EXPR)
1640 name2 = gimple_assign_rhs1 (def_stmt);
1641 cst2 = gimple_assign_rhs2 (def_stmt);
1642 if (TREE_CODE (name2) == SSA_NAME
1643 && tree_fits_uhwi_p (cst2)
1644 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1645 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
1646 && type_has_mode_precision_p (TREE_TYPE (val)))
1648 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
1649 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
1652 if (val2 != NULL_TREE
1653 && TREE_CODE (val2) == INTEGER_CST
1654 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
1655 TREE_TYPE (val),
1656 val2, cst2), val))
1658 enum tree_code new_comp_code = comp_code;
1659 tree tmp, new_val;
1661 tmp = name2;
1662 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
1664 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
1666 tree type = build_nonstandard_integer_type (prec, 1);
1667 tmp = build1 (NOP_EXPR, type, name2);
1668 val2 = fold_convert (type, val2);
1670 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
1671 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
1672 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
1674 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
1676 wide_int minval
1677 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
1678 new_val = val2;
1679 if (minval == wi::to_wide (new_val))
1680 new_val = NULL_TREE;
1682 else
1684 wide_int maxval
1685 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
1686 mask |= wi::to_wide (val2);
1687 if (wi::eq_p (mask, maxval))
1688 new_val = NULL_TREE;
1689 else
1690 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
1693 if (new_val)
1694 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
1697 /* If we have a conversion that doesn't change the value of the source
1698 simply register the same assert for it. */
1699 if (CONVERT_EXPR_CODE_P (rhs_code))
1701 value_range vr;
1702 tree rhs1 = gimple_assign_rhs1 (def_stmt);
1703 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1704 && TREE_CODE (rhs1) == SSA_NAME
1705 /* Make sure the relation preserves the upper/lower boundary of
1706 the range conservatively. */
1707 && (comp_code == NE_EXPR
1708 || comp_code == EQ_EXPR
1709 || (TYPE_SIGN (TREE_TYPE (name))
1710 == TYPE_SIGN (TREE_TYPE (rhs1)))
1711 || ((comp_code == LE_EXPR
1712 || comp_code == LT_EXPR)
1713 && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
1714 || ((comp_code == GE_EXPR
1715 || comp_code == GT_EXPR)
1716 && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
1717 /* And the conversion does not alter the value we compare
1718 against and all values in rhs1 can be represented in
1719 the converted to type. */
1720 && int_fits_type_p (val, TREE_TYPE (rhs1))
1721 && ((TYPE_PRECISION (TREE_TYPE (name))
1722 > TYPE_PRECISION (TREE_TYPE (rhs1)))
1723 || ((get_range_query (cfun)->range_of_expr (vr, rhs1)
1724 && vr.kind () == VR_RANGE)
1725 && wi::fits_to_tree_p
1726 (widest_int::from (vr.lower_bound (),
1727 TYPE_SIGN (TREE_TYPE (rhs1))),
1728 TREE_TYPE (name))
1729 && wi::fits_to_tree_p
1730 (widest_int::from (vr.upper_bound (),
1731 TYPE_SIGN (TREE_TYPE (rhs1))),
1732 TREE_TYPE (name)))))
1733 add_assert_info (asserts, rhs1, rhs1,
1734 comp_code, fold_convert (TREE_TYPE (rhs1), val));
1737 /* Add asserts for NAME cmp CST and NAME being defined as
1738 NAME = NAME2 & CST2.
1740 Extract CST2 from the and.
1742 Also handle
1743 NAME = (unsigned) NAME2;
1744 casts where NAME's type is unsigned and has smaller precision
1745 than NAME2's type as if it was NAME = NAME2 & MASK. */
1746 names[0] = NULL_TREE;
1747 names[1] = NULL_TREE;
1748 cst2 = NULL_TREE;
1749 if (rhs_code == BIT_AND_EXPR
1750 || (CONVERT_EXPR_CODE_P (rhs_code)
1751 && INTEGRAL_TYPE_P (TREE_TYPE (val))
1752 && TYPE_UNSIGNED (TREE_TYPE (val))
1753 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
1754 > prec))
1756 name2 = gimple_assign_rhs1 (def_stmt);
1757 if (rhs_code == BIT_AND_EXPR)
1758 cst2 = gimple_assign_rhs2 (def_stmt);
1759 else
1761 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
1762 nprec = TYPE_PRECISION (TREE_TYPE (name2));
1764 if (TREE_CODE (name2) == SSA_NAME
1765 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1766 && TREE_CODE (cst2) == INTEGER_CST
1767 && !integer_zerop (cst2)
1768 && (nprec > 1
1769 || TYPE_UNSIGNED (TREE_TYPE (val))))
1771 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
1772 if (gimple_assign_cast_p (def_stmt2))
1774 names[1] = gimple_assign_rhs1 (def_stmt2);
1775 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
1776 || TREE_CODE (names[1]) != SSA_NAME
1777 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
1778 || (TYPE_PRECISION (TREE_TYPE (name2))
1779 != TYPE_PRECISION (TREE_TYPE (names[1]))))
1780 names[1] = NULL_TREE;
1782 names[0] = name2;
1785 if (names[0] || names[1])
1787 wide_int minv, maxv, valv, cst2v;
1788 wide_int tem, sgnbit;
1789 bool valid_p = false, valn, cst2n;
1790 enum tree_code ccode = comp_code;
1792 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
1793 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
1794 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
1795 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
1796 /* If CST2 doesn't have most significant bit set,
1797 but VAL is negative, we have comparison like
1798 if ((x & 0x123) > -4) (always true). Just give up. */
1799 if (!cst2n && valn)
1800 ccode = ERROR_MARK;
1801 if (cst2n)
1802 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
1803 else
1804 sgnbit = wi::zero (nprec);
1805 minv = valv & cst2v;
1806 switch (ccode)
1808 case EQ_EXPR:
1809 /* Minimum unsigned value for equality is VAL & CST2
1810 (should be equal to VAL, otherwise we probably should
1811 have folded the comparison into false) and
1812 maximum unsigned value is VAL | ~CST2. */
1813 maxv = valv | ~cst2v;
1814 valid_p = true;
1815 break;
1817 case NE_EXPR:
1818 tem = valv | ~cst2v;
1819 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
1820 if (valv == 0)
1822 cst2n = false;
1823 sgnbit = wi::zero (nprec);
1824 goto gt_expr;
1826 /* If (VAL | ~CST2) is all ones, handle it as
1827 (X & CST2) < VAL. */
1828 if (tem == -1)
1830 cst2n = false;
1831 valn = false;
1832 sgnbit = wi::zero (nprec);
1833 goto lt_expr;
1835 if (!cst2n && wi::neg_p (cst2v))
1836 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
1837 if (sgnbit != 0)
1839 if (valv == sgnbit)
1841 cst2n = true;
1842 valn = true;
1843 goto gt_expr;
1845 if (tem == wi::mask (nprec - 1, false, nprec))
1847 cst2n = true;
1848 goto lt_expr;
1850 if (!cst2n)
1851 sgnbit = wi::zero (nprec);
1853 break;
1855 case GE_EXPR:
1856 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
1857 is VAL and maximum unsigned value is ~0. For signed
1858 comparison, if CST2 doesn't have most significant bit
1859 set, handle it similarly. If CST2 has MSB set,
1860 the minimum is the same, and maximum is ~0U/2. */
1861 if (minv != valv)
1863 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
1864 VAL. */
1865 minv = masked_increment (valv, cst2v, sgnbit, nprec);
1866 if (minv == valv)
1867 break;
1869 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
1870 valid_p = true;
1871 break;
1873 case GT_EXPR:
1874 gt_expr:
1875 /* Find out smallest MINV where MINV > VAL
1876 && (MINV & CST2) == MINV, if any. If VAL is signed and
1877 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
1878 minv = masked_increment (valv, cst2v, sgnbit, nprec);
1879 if (minv == valv)
1880 break;
1881 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
1882 valid_p = true;
1883 break;
1885 case LE_EXPR:
1886 /* Minimum unsigned value for <= is 0 and maximum
1887 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
1888 Otherwise, find smallest VAL2 where VAL2 > VAL
1889 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
1890 as maximum.
1891 For signed comparison, if CST2 doesn't have most
1892 significant bit set, handle it similarly. If CST2 has
1893 MSB set, the maximum is the same and minimum is INT_MIN. */
1894 if (minv == valv)
1895 maxv = valv;
1896 else
1898 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
1899 if (maxv == valv)
1900 break;
1901 maxv -= 1;
1903 maxv |= ~cst2v;
1904 minv = sgnbit;
1905 valid_p = true;
1906 break;
1908 case LT_EXPR:
1909 lt_expr:
1910 /* Minimum unsigned value for < is 0 and maximum
1911 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
1912 Otherwise, find smallest VAL2 where VAL2 > VAL
1913 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
1914 as maximum.
1915 For signed comparison, if CST2 doesn't have most
1916 significant bit set, handle it similarly. If CST2 has
1917 MSB set, the maximum is the same and minimum is INT_MIN. */
1918 if (minv == valv)
1920 if (valv == sgnbit)
1921 break;
1922 maxv = valv;
1924 else
1926 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
1927 if (maxv == valv)
1928 break;
1930 maxv -= 1;
1931 maxv |= ~cst2v;
1932 minv = sgnbit;
1933 valid_p = true;
1934 break;
1936 default:
1937 break;
1939 if (valid_p
1940 && (maxv - minv) != -1)
1942 tree tmp, new_val, type;
1943 int i;
1945 for (i = 0; i < 2; i++)
1946 if (names[i])
1948 wide_int maxv2 = maxv;
1949 tmp = names[i];
1950 type = TREE_TYPE (names[i]);
1951 if (!TYPE_UNSIGNED (type))
1953 type = build_nonstandard_integer_type (nprec, 1);
1954 tmp = build1 (NOP_EXPR, type, names[i]);
1956 if (minv != 0)
1958 tmp = build2 (PLUS_EXPR, type, tmp,
1959 wide_int_to_tree (type, -minv));
1960 maxv2 = maxv - minv;
1962 new_val = wide_int_to_tree (type, maxv2);
1963 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
1970 /* OP is an operand of a truth value expression which is known to have
1971 a particular value. Register any asserts for OP and for any
1972 operands in OP's defining statement.
1974 If CODE is EQ_EXPR, then we want to register OP is zero (false),
1975 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
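/* For illustration (a hypothetical GIMPLE fragment, not from the
   original sources): given
     _1 = a_2 > 5;
     _3 = _1 & b_4;
     if (_3 != 0) ...
   calling this function for _1 and b_4 with CODE == NE_EXPR on the
   true edge records the asserts _1 != 0 and b_4 != 0, and, because
   _1 is defined by a comparison, also the range assert a_2 > 5 via
   register_edge_assert_for_2.  */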
1977 static void
1978 register_edge_assert_for_1 (tree op, enum tree_code code,
1979 edge e, vec<assert_info> &asserts)
1981 gimple *op_def;
1982 tree val;
1983 enum tree_code rhs_code;
1985 /* We only care about SSA_NAMEs. */
1986 if (TREE_CODE (op) != SSA_NAME)
1987 return;
1989 /* We know that OP will have a zero or nonzero value. */
1990 val = build_int_cst (TREE_TYPE (op), 0);
1991 add_assert_info (asserts, op, op, code, val);
1993 /* Now look at how OP is set. If it's set from a comparison,
1994 a truth operation or some bit operations, then we may be able
1995 to register information about the operands of that assignment. */
1996 op_def = SSA_NAME_DEF_STMT (op);
1997 if (gimple_code (op_def) != GIMPLE_ASSIGN)
1998 return;
2000 rhs_code = gimple_assign_rhs_code (op_def);
2002 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
2004 bool invert = (code == EQ_EXPR ? true : false);
2005 tree op0 = gimple_assign_rhs1 (op_def);
2006 tree op1 = gimple_assign_rhs2 (op_def);
2008 if (TREE_CODE (op0) == SSA_NAME)
2009 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
2010 if (TREE_CODE (op1) == SSA_NAME)
2011 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
2013 else if ((code == NE_EXPR
2014 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
2015 || (code == EQ_EXPR
2016 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
2018 /* Recurse on each operand. */
2019 tree op0 = gimple_assign_rhs1 (op_def);
2020 tree op1 = gimple_assign_rhs2 (op_def);
2021 if (TREE_CODE (op0) == SSA_NAME
2022 && has_single_use (op0))
2023 register_edge_assert_for_1 (op0, code, e, asserts);
2024 if (TREE_CODE (op1) == SSA_NAME
2025 && has_single_use (op1))
2026 register_edge_assert_for_1 (op1, code, e, asserts);
2028 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
2029 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
2031 /* Recurse, flipping CODE. */
2032 code = invert_tree_comparison (code, false);
2033 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2035 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
2037 /* Recurse through the copy. */
2038 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2040 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
2042 /* Recurse through the type conversion, unless it is a narrowing
2043 conversion or conversion from non-integral type. */
2044 tree rhs = gimple_assign_rhs1 (op_def);
2045 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2046 && (TYPE_PRECISION (TREE_TYPE (rhs))
2047 <= TYPE_PRECISION (TREE_TYPE (op))))
2048 register_edge_assert_for_1 (rhs, code, e, asserts);
2052 /* Check if comparison
2053 NAME COND_OP INTEGER_CST
2054 has a form of
2055 (X & 11...100..0) COND_OP XX...X00...0
2056 Such comparison can yield assertions like
2057 X >= XX...X00...0
2058 X <= XX...X11...1
2059 in case of COND_OP being EQ_EXPR or
2060 X < XX...X00...0
2061 X > XX...X11...1
2062 in case of NE_EXPR. */
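/* A worked example (illustrative only, not part of the original
   comment), assuming an 8-bit unsigned operand:
     (x & 0xf8) == 0x48  yields  x >= 0x48 and x <= 0x4f
   since 0x4f == 0x48 | ~0xf8 in 8 bits, while
     (x & 0xf8) != 0     yields  x > 0x07
   because the other alternative, x < 0, is impossible for an
   unsigned operand.  */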
2064 static bool
2065 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
2066 tree *new_name, tree *low, enum tree_code *low_code,
2067 tree *high, enum tree_code *high_code)
2069 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2071 if (!is_gimple_assign (def_stmt)
2072 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
2073 return false;
2075 tree t = gimple_assign_rhs1 (def_stmt);
2076 tree maskt = gimple_assign_rhs2 (def_stmt);
2077 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
2078 return false;
2080 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
2081 wide_int inv_mask = ~mask;
2082 /* A BIT_AND with a zero or all-ones mask should have been folded away by now, so don't bother optimizing. */
2083 if (mask == 0 || inv_mask == 0)
2084 return false;
2086 /* Assume VALT is INTEGER_CST. */
2087 wi::tree_to_wide_ref val = wi::to_wide (valt);
2089 if ((inv_mask & (inv_mask + 1)) != 0
2090 || (val & mask) != val)
2091 return false;
2093 bool is_range = cond_code == EQ_EXPR;
2095 tree type = TREE_TYPE (t);
2096 wide_int min = wi::min_value (type),
2097 max = wi::max_value (type);
2099 if (is_range)
2101 *low_code = val == min ? ERROR_MARK : GE_EXPR;
2102 *high_code = val == max ? ERROR_MARK : LE_EXPR;
2104 else
2106 /* We can still generate an assertion if one of the alternatives
2107 is known to always be false. */
2108 if (val == min)
2110 *low_code = (enum tree_code) 0;
2111 *high_code = GT_EXPR;
2113 else if ((val | inv_mask) == max)
2115 *low_code = LT_EXPR;
2116 *high_code = (enum tree_code) 0;
2118 else
2119 return false;
2122 *new_name = t;
2123 *low = wide_int_to_tree (type, val);
2124 *high = wide_int_to_tree (type, val | inv_mask);
2126 return true;
2129 /* Try to register an edge assertion for SSA name NAME on edge E for
2130 the condition COND contributing to the conditional jump pointed to by
2131 SI. */
2133 void
2134 register_edge_assert_for (tree name, edge e,
2135 enum tree_code cond_code, tree cond_op0,
2136 tree cond_op1, vec<assert_info> &asserts)
2138 tree val;
2139 enum tree_code comp_code;
2140 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
2142 /* Do not attempt to infer anything in names that flow through
2143 abnormal edges. */
2144 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
2145 return;
2147 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2148 cond_op0, cond_op1,
2149 is_else_edge,
2150 &comp_code, &val))
2151 return;
2153 /* Register ASSERT_EXPRs for name. */
2154 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
2155 cond_op1, is_else_edge, asserts);
2158 /* If COND is effectively an equality test of an SSA_NAME against
2159 the value zero or one, then we may be able to assert values
2160 for SSA_NAMEs which flow into COND. */
2162 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
2163 statement of NAME we can assert both operands of the BIT_AND_EXPR
2164 have nonzero value. */
2165 if ((comp_code == EQ_EXPR && integer_onep (val))
2166 || (comp_code == NE_EXPR && integer_zerop (val)))
2168 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2170 if (is_gimple_assign (def_stmt)
2171 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
2173 tree op0 = gimple_assign_rhs1 (def_stmt);
2174 tree op1 = gimple_assign_rhs2 (def_stmt);
2175 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
2176 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
2178 else if (is_gimple_assign (def_stmt)
2179 && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2180 == tcc_comparison))
2181 register_edge_assert_for_1 (name, NE_EXPR, e, asserts);
2184 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
2185 statement of NAME we can assert both operands of the BIT_IOR_EXPR
2186 have zero value. */
2187 if ((comp_code == EQ_EXPR && integer_zerop (val))
2188 || (comp_code == NE_EXPR
2189 && integer_onep (val)
2190 && TYPE_PRECISION (TREE_TYPE (name)) == 1))
2192 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2194 /* For BIT_IOR_EXPR, both operands necessarily have zero value
2195 only if NAME == 0, or if the type precision is one. */
2196 if (is_gimple_assign (def_stmt)
2197 && gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
2199 tree op0 = gimple_assign_rhs1 (def_stmt);
2200 tree op1 = gimple_assign_rhs2 (def_stmt);
2201 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
2202 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
2204 else if (is_gimple_assign (def_stmt)
2205 && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2206 == tcc_comparison))
2207 register_edge_assert_for_1 (name, EQ_EXPR, e, asserts);
2210 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
2211 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2212 && TREE_CODE (val) == INTEGER_CST)
2214 enum tree_code low_code, high_code;
2215 tree low, high;
2216 if (is_masked_range_test (name, val, comp_code, &name, &low,
2217 &low_code, &high, &high_code))
2219 if (low_code != ERROR_MARK)
2220 register_edge_assert_for_2 (name, e, low_code, name,
2221 low, /*invert*/false, asserts);
2222 if (high_code != ERROR_MARK)
2223 register_edge_assert_for_2 (name, e, high_code, name,
2224 high, /*invert*/false, asserts);
2229 /* Handle
2230 _4 = x_3 & 31;
2231 if (_4 != 0)
2232 goto <bb 6>;
2233 else
2234 goto <bb 7>;
2235 <bb 6>:
2236 __builtin_unreachable ();
2237 <bb 7>:
2238 x_5 = ASSERT_EXPR <x_3, ...>;
2239 If x_3 has no other immediate uses (checked by the caller) and
2240 VAR is the x_3 from the ASSERT_EXPR, we can clear the low 5 bits
2241 from the non-zero bitmask. */
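/* A small numeric sketch (not from the original sources): if the
   known non-zero bits of x_3 were 0xff before the branch, then on
   the edge where (x_3 & 31) == 0 holds they become
   0xff & ~31 == 0xe0.  */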
2243 void
2244 maybe_set_nonzero_bits (edge e, tree var)
2246 basic_block cond_bb = e->src;
2247 gimple *stmt = last_stmt (cond_bb);
2248 tree cst;
2250 if (stmt == NULL
2251 || gimple_code (stmt) != GIMPLE_COND
2252 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
2253 ? EQ_EXPR : NE_EXPR)
2254 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
2255 || !integer_zerop (gimple_cond_rhs (stmt)))
2256 return;
2258 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2259 if (!is_gimple_assign (stmt)
2260 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
2261 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
2262 return;
2263 if (gimple_assign_rhs1 (stmt) != var)
2265 gimple *stmt2;
2267 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
2268 return;
2269 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
2270 if (!gimple_assign_cast_p (stmt2)
2271 || gimple_assign_rhs1 (stmt2) != var
2272 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
2273 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
2274 != TYPE_PRECISION (TREE_TYPE (var))))
2275 return;
2277 cst = gimple_assign_rhs2 (stmt);
2278 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
2279 wi::to_wide (cst)));
2282 /* Return true if STMT is interesting for VRP. */
2284 bool
2285 stmt_interesting_for_vrp (gimple *stmt)
2287 if (gimple_code (stmt) == GIMPLE_PHI)
2289 tree res = gimple_phi_result (stmt);
2290 return (!virtual_operand_p (res)
2291 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
2292 || POINTER_TYPE_P (TREE_TYPE (res))));
2294 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
2296 tree lhs = gimple_get_lhs (stmt);
2298 /* In general, assignments with virtual operands are not useful
2299 for deriving ranges, with the obvious exception of calls to
2300 builtin functions. */
2301 if (lhs && TREE_CODE (lhs) == SSA_NAME
2302 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
2303 || POINTER_TYPE_P (TREE_TYPE (lhs)))
2304 && (is_gimple_call (stmt)
2305 || !gimple_vuse (stmt)))
2306 return true;
2307 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
2308 switch (gimple_call_internal_fn (stmt))
2310 case IFN_ADD_OVERFLOW:
2311 case IFN_SUB_OVERFLOW:
2312 case IFN_MUL_OVERFLOW:
2313 case IFN_ATOMIC_COMPARE_EXCHANGE:
2314 /* These internal calls return _Complex integer type,
2315 but are interesting to VRP nevertheless. */
2316 if (lhs && TREE_CODE (lhs) == SSA_NAME)
2317 return true;
2318 break;
2319 default:
2320 break;
2323 else if (gimple_code (stmt) == GIMPLE_COND
2324 || gimple_code (stmt) == GIMPLE_SWITCH)
2325 return true;
2327 return false;
2330 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
2331 that includes the value VAL. The search is restricted to the range
2332 [START_IDX, n - 1] where n is the size of VEC.
2334 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
2335 returned.
2337 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
2338 it is placed in IDX and false is returned.
2340 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
2341 returned. */
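/* An illustrative example (not part of the original comment): for a
   switch whose sorted labels after the default are
     case 1:  case 5 ... 8:  case 10:
   at indices 1, 2 and 3, searching for VAL 6 returns true with
   *IDX 2, searching for 9 returns false with *IDX 3, and searching
   for 42 returns false with *IDX 4 (== n).  */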
2343 bool
2344 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
2346 size_t n = gimple_switch_num_labels (stmt);
2347 size_t low, high;
2349 /* Find case label for minimum of the value range or the next one.
2350 At each iteration we are searching in [low, high - 1]. */
2352 for (low = start_idx, high = n; high != low; )
2354 tree t;
2355 int cmp;
2356 /* Note that i != high, so we never ask for n. */
2357 size_t i = (high + low) / 2;
2358 t = gimple_switch_label (stmt, i);
2360 /* Cache the result of comparing CASE_LOW and val. */
2361 cmp = tree_int_cst_compare (CASE_LOW (t), val);
2363 if (cmp == 0)
2365 /* Ranges cannot be empty. */
2366 *idx = i;
2367 return true;
2369 else if (cmp > 0)
2370 high = i;
2371 else
2373 low = i + 1;
2374 if (CASE_HIGH (t) != NULL
2375 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
2377 *idx = i;
2378 return true;
2383 *idx = high;
2384 return false;
2387 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
2388 for values between MIN and MAX. The first index is placed in MIN_IDX. The
2389 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
2390 then MAX_IDX < MIN_IDX.
2391 Returns true if the default label is not needed. */
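/* For example (illustrative only): if the operand is known to lie in
   [0, 3] and the switch has labels 0, 1, 2 and 3, the default label
   is not needed and true is returned; with labels 0, 2 and 3 the
   value 1 still reaches the default label, so false is returned.  */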
2393 bool
2394 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
2395 size_t *max_idx)
2397 size_t i, j;
2398 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
2399 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
2401 if (i == j
2402 && min_take_default
2403 && max_take_default)
2405 /* Only the default case label reached.
2406 Return an empty range. */
2407 *min_idx = 1;
2408 *max_idx = 0;
2409 return false;
2411 else
2413 bool take_default = min_take_default || max_take_default;
2414 tree low, high;
2415 size_t k;
2417 if (max_take_default)
2418 j--;
2420 /* If the case label range is contiguous, we do not need
2421 the default case label. Verify that. */
2422 high = CASE_LOW (gimple_switch_label (stmt, i));
2423 if (CASE_HIGH (gimple_switch_label (stmt, i)))
2424 high = CASE_HIGH (gimple_switch_label (stmt, i));
2425 for (k = i + 1; k <= j; ++k)
2427 low = CASE_LOW (gimple_switch_label (stmt, k));
2428 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
2430 take_default = true;
2431 break;
2433 high = low;
2434 if (CASE_HIGH (gimple_switch_label (stmt, k)))
2435 high = CASE_HIGH (gimple_switch_label (stmt, k));
2438 *min_idx = i;
2439 *max_idx = j;
2440 return !take_default;
2444 /* Given a SWITCH_STMT, return the case label that encompasses the
2445 known possible values for the switch operand. RANGE_OF_OP is a
2446 range for the known values of the switch operand. */
2448 tree
2449 find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
2451 if (range_of_op->undefined_p ()
2452 || range_of_op->varying_p ()
2453 || range_of_op->symbolic_p ())
2454 return NULL_TREE;
2456 size_t i, j;
2457 tree op = gimple_switch_index (switch_stmt);
2458 tree type = TREE_TYPE (op);
2459 tree tmin = wide_int_to_tree (type, range_of_op->lower_bound ());
2460 tree tmax = wide_int_to_tree (type, range_of_op->upper_bound ());
2461 find_case_label_range (switch_stmt, tmin, tmax, &i, &j);
2462 if (i == j)
2464 /* Look for exactly one label that encompasses the range of
2465 the operand. */
2466 tree label = gimple_switch_label (switch_stmt, i);
2467 tree case_high
2468 = CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
2469 int_range_max label_range (CASE_LOW (label), case_high);
2470 if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2471 range_cast (label_range, range_of_op->type ());
2472 label_range.intersect (*range_of_op);
2473 if (label_range == *range_of_op)
2474 return label;
2476 else if (i > j)
2478 /* If there are no labels at all, take the default. */
2479 return gimple_switch_label (switch_stmt, 0);
2481 else
2483 /* Otherwise, there are various labels that can encompass
2484 the range of the operand. In that case, see if the range of
2485 the operand is entirely *outside* the bounds of all the
2486 (non-default) case labels. If so, take the default. */
2487 unsigned n = gimple_switch_num_labels (switch_stmt);
2488 tree min_label = gimple_switch_label (switch_stmt, 1);
2489 tree max_label = gimple_switch_label (switch_stmt, n - 1);
2490 tree case_high = CASE_HIGH (max_label);
2491 if (!case_high)
2492 case_high = CASE_LOW (max_label);
2493 int_range_max label_range (CASE_LOW (min_label), case_high);
2494 if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2495 range_cast (label_range, range_of_op->type ());
2496 label_range.intersect (*range_of_op);
2497 if (label_range.undefined_p ())
2498 return gimple_switch_label (switch_stmt, 0);
2500 return NULL_TREE;
2503 struct case_info
2505 tree expr;
2506 basic_block bb;
2509 /* Location information for ASSERT_EXPRs. Each instance of this
2510 structure describes an ASSERT_EXPR for an SSA name. Since a single
2511 SSA name may have more than one assertion associated with it, these
2512 locations are kept in a linked list attached to the corresponding
2513 SSA name. */
2514 struct assert_locus
2516 /* Basic block where the assertion would be inserted. */
2517 basic_block bb;
2519 /* Some assertions need to be inserted on an edge (e.g., assertions
2520 generated by COND_EXPRs). In those cases, BB will be NULL. */
2521 edge e;
2523 /* Pointer to the statement that generated this assertion. */
2524 gimple_stmt_iterator si;
2526 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
2527 enum tree_code comp_code;
2529 /* Value being compared against. */
2530 tree val;
2532 /* Expression to compare. */
2533 tree expr;
2535 /* Next node in the linked list. */
2536 assert_locus *next;
2539 /* Class to traverse the flowgraph looking for conditional jumps to
2540 insert ASSERT_EXPR range expressions. These range expressions are
2541 meant to provide information to optimizations that need to reason
2542 in terms of value ranges. They will not be expanded into RTL. */
2544 class vrp_asserts
2546 public:
2547 vrp_asserts (struct function *fn) : fun (fn) { }
2549 void insert_range_assertions ();
2551 /* Convert range assertion expressions into the implied copies and
2552 copy propagate away the copies. */
2553 void remove_range_assertions ();
2555 /* Dump all the registered assertions for all the names to FILE. */
2556 void dump (FILE *);
2558 /* Dump all the registered assertions for NAME to FILE. */
2559 void dump (FILE *file, tree name);
2561 /* Dump all the registered assertions for NAME to stderr. */
2562 void debug (tree name)
2564 dump (stderr, name);
2567 /* Dump all the registered assertions for all the names to stderr. */
2568 void debug ()
2570 dump (stderr);
2573 private:
2574 /* Set of SSA names found live during the RPO traversal of the function
2575 for still active basic-blocks. */
2576 live_names live;
2578 /* Function to work on. */
2579 struct function *fun;
2581 /* If bit I is present, it means that SSA name N_i has a list of
2582 assertions that should be inserted in the IL. */
2583 bitmap need_assert_for;
2585 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
2586 holds a list of ASSERT_LOCUS_T nodes that describe where
2587 ASSERT_EXPRs for SSA name N_I should be inserted. */
2588 assert_locus **asserts_for;
2590 /* Finish found ASSERTS for E and register them at GSI. */
2591 void finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
2592 vec<assert_info> &asserts);
2594 /* Determine whether the outgoing edges of BB should receive an
2595 ASSERT_EXPR for each of the operands of BB's LAST statement. The
2596 last statement of BB must be a SWITCH_EXPR.
2598 If any of the sub-graphs rooted at BB have an interesting use of
2599 the predicate operands, an assert location node is added to the
2600 list of assertions for the corresponding operands. */
2601 void find_switch_asserts (basic_block bb, gswitch *last);
2603 /* Do an RPO walk over the function computing SSA name liveness
2604 on-the-fly and deciding on assert expressions to insert. */
2605 void find_assert_locations ();
2607 /* Traverse all the statements in block BB looking for statements that
2608 may generate useful assertions for the SSA names in their operands.
2609 See the method implementation commentary for more information. */
2610 void find_assert_locations_in_bb (basic_block bb);
2612 /* Determine whether the outgoing edges of BB should receive an
2613 ASSERT_EXPR for each of the operands of BB's LAST statement.
2614 The last statement of BB must be a COND_EXPR.
2616 If any of the sub-graphs rooted at BB have an interesting use of
2617 the predicate operands, an assert location node is added to the
2618 list of assertions for the corresponding operands. */
2619 void find_conditional_asserts (basic_block bb, gcond *last);
2621 /* Process all the insertions registered for every name N_i registered
2622 in NEED_ASSERT_FOR. The list of assertions to be inserted are
2623 found in ASSERTS_FOR[i]. */
2624 void process_assert_insertions ();
2626 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2627 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2628 E->DEST, then register this location as a possible insertion point
2629 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2631 BB, E and SI provide the exact insertion point for the new
2632 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2633 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2634 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2635 must not be NULL. */
2636 void register_new_assert_for (tree name, tree expr,
2637 enum tree_code comp_code,
2638 tree val, basic_block bb,
2639 edge e, gimple_stmt_iterator si);
2641 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2642 create a new SSA name N and return the assertion assignment
2643 'N = ASSERT_EXPR <V, V OP W>'. */
2644 gimple *build_assert_expr_for (tree cond, tree v);
2646 /* Create an ASSERT_EXPR for NAME and insert it in the location
2647 indicated by LOC. Return true if we made any edge insertions. */
2648 bool process_assert_insertions_for (tree name, assert_locus *loc);
2650 /* Qsort callback for sorting assert locations. */
2651 template <bool stable> static int compare_assert_loc (const void *,
2652 const void *);
2654 /* Return false if EXPR is a predicate expression involving floating
2655 point values. */
2656 bool fp_predicate (gimple *stmt)
2658 GIMPLE_CHECK (stmt, GIMPLE_COND);
2659 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2662 bool all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt,
2663 basic_block cond_bb);
2665 static int compare_case_labels (const void *, const void *);
2668 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2669 create a new SSA name N and return the assertion assignment
2670 'N = ASSERT_EXPR <V, V OP W>'. */
2672 gimple *
2673 vrp_asserts::build_assert_expr_for (tree cond, tree v)
2675 tree a;
2676 gassign *assertion;
2678 gcc_assert (TREE_CODE (v) == SSA_NAME
2679 && COMPARISON_CLASS_P (cond));
2681 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2682 assertion = gimple_build_assign (NULL_TREE, a);
2684 /* The new ASSERT_EXPR creates a new SSA name that replaces the
2685 operand of the ASSERT_EXPR. Create it so the new name and the old one
2686 are registered in the replacement table so that we can fix the SSA web
2687 after adding all the ASSERT_EXPRs. */
2688 tree new_def = create_new_def_for (v, assertion, NULL);
2689 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2690 given we have to be able to fully propagate those out to re-create
2691 valid SSA when removing the asserts. */
2692 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2693 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2695 return assertion;
2698 /* Dump all the registered assertions for NAME to FILE. */
2700 void
2701 vrp_asserts::dump (FILE *file, tree name)
2703 assert_locus *loc;
2705 fprintf (file, "Assertions to be inserted for ");
2706 print_generic_expr (file, name);
2707 fprintf (file, "\n");
2709 loc = asserts_for[SSA_NAME_VERSION (name)];
2710 while (loc)
2712 fprintf (file, "\t");
2713 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2714 fprintf (file, "\n\tBB #%d", loc->bb->index);
2715 if (loc->e)
2717 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2718 loc->e->dest->index);
2719 dump_edge_info (file, loc->e, dump_flags, 0);
2721 fprintf (file, "\n\tPREDICATE: ");
2722 print_generic_expr (file, loc->expr);
2723 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2724 print_generic_expr (file, loc->val);
2725 fprintf (file, "\n\n");
2726 loc = loc->next;
2729 fprintf (file, "\n");
2732 /* Dump all the registered assertions for all the names to FILE. */
2734 void
2735 vrp_asserts::dump (FILE *file)
2737 unsigned i;
2738 bitmap_iterator bi;
2740 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2741 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2742 dump (file, ssa_name (i));
2743 fprintf (file, "\n");
2746 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2747 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2748 E->DEST, then register this location as a possible insertion point
2749 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2751 BB, E and SI provide the exact insertion point for the new
2752 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2753 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2754 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2755 must not be NULL. */
2757 void
2758 vrp_asserts::register_new_assert_for (tree name, tree expr,
2759 enum tree_code comp_code,
2760 tree val,
2761 basic_block bb,
2762 edge e,
2763 gimple_stmt_iterator si)
2765 assert_locus *n, *loc, *last_loc;
2766 basic_block dest_bb;
2768 gcc_checking_assert (bb == NULL || e == NULL);
2770 if (e == NULL)
2771 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2772 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2774 /* Never build an assert comparing against an integer constant with
2775 TREE_OVERFLOW set. This confuses our undefined overflow warning
2776 machinery. */
2777 if (TREE_OVERFLOW_P (val))
2778 val = drop_tree_overflow (val);
2780 /* The new assertion A will be inserted at BB or E. We need to
2781 determine if the new location is dominated by a previously
2782 registered location for A. If we are doing an edge insertion,
2783 assume that A will be inserted at E->DEST. Note that this is not
2784 necessarily true.
2786 If E is a critical edge, it will be split. But even if E is
2787 split, the new block will dominate the same set of blocks that
2788 E->DEST dominates.
2790 The reverse, however, is not true: blocks dominated by E->DEST
2791 will not be dominated by the new block created to split E. So,
2792 if the insertion location is on a critical edge, we will not use
2793 the new location to move another assertion previously registered
2794 at a block dominated by E->DEST. */
2795 dest_bb = (bb) ? bb : e->dest;
2797 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2798 VAL at a block dominating DEST_BB, then we don't need to insert a new
2799 one. Similarly, if the same assertion already exists at a block
2800 dominated by DEST_BB and the new location is not on a critical
2801 edge, then update the existing location for the assertion (i.e.,
2802 move the assertion up in the dominance tree).
2804 Note, this is implemented as a simple linked list because there
2805 should not be more than a handful of assertions registered per
2806 name. If this becomes a performance problem, a table hashed by
2807 COMP_CODE and VAL could be implemented. */
2808 loc = asserts_for[SSA_NAME_VERSION (name)];
2809 last_loc = loc;
2810 while (loc)
2812 if (loc->comp_code == comp_code
2813 && (loc->val == val
2814 || operand_equal_p (loc->val, val, 0))
2815 && (loc->expr == expr
2816 || operand_equal_p (loc->expr, expr, 0)))
2818 /* If E is not a critical edge and DEST_BB
2819 dominates the existing location for the assertion, move
2820 the assertion up in the dominance tree by updating its
2821 location information. */
2822 if ((e == NULL || !EDGE_CRITICAL_P (e))
2823 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2825 loc->bb = dest_bb;
2826 loc->e = e;
2827 loc->si = si;
2828 return;
2832 /* Update the last node of the list and move to the next one. */
2833 last_loc = loc;
2834 loc = loc->next;
2837 /* If we didn't find an assertion already registered for
2838 NAME COMP_CODE VAL, add a new one at the end of the list of
2839 assertions associated with NAME. */
2840 n = XNEW (struct assert_locus);
2841 n->bb = dest_bb;
2842 n->e = e;
2843 n->si = si;
2844 n->comp_code = comp_code;
2845 n->val = val;
2846 n->expr = expr;
2847 n->next = NULL;
2849 if (last_loc)
2850 last_loc->next = n;
2851 else
2852 asserts_for[SSA_NAME_VERSION (name)] = n;
2854 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2857 /* Finish found ASSERTS for E and register them at GSI. */
2859 void
2860 vrp_asserts::finish_register_edge_assert_for (edge e,
2861 gimple_stmt_iterator gsi,
2862 vec<assert_info> &asserts)
2864 for (unsigned i = 0; i < asserts.length (); ++i)
2865 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
2866 reachable from E. */
2867 if (live.live_on_edge_p (asserts[i].name, e))
2868 register_new_assert_for (asserts[i].name, asserts[i].expr,
2869 asserts[i].comp_code, asserts[i].val,
2870 NULL, e, gsi);
2873 /* Determine whether the outgoing edges of BB should receive an
2874 ASSERT_EXPR for each of the operands of BB's LAST statement.
2875 The last statement of BB must be a COND_EXPR.
2877 If any of the sub-graphs rooted at BB have an interesting use of
2878 the predicate operands, an assert location node is added to the
2879 list of assertions for the corresponding operands. */
2881 void
2882 vrp_asserts::find_conditional_asserts (basic_block bb, gcond *last)
2884 gimple_stmt_iterator bsi;
2885 tree op;
2886 edge_iterator ei;
2887 edge e;
2888 ssa_op_iter iter;
2890 bsi = gsi_for_stmt (last);
2892 /* Look for uses of the operands in each of the sub-graphs
2893 rooted at BB. We need to check each of the outgoing edges
2894 separately, so that we know what kind of ASSERT_EXPR to
2895 insert. */
2896 FOR_EACH_EDGE (e, ei, bb->succs)
2898 if (e->dest == bb)
2899 continue;
2901 /* Register the necessary assertions for each operand in the
2902 conditional predicate. */
2903 auto_vec<assert_info, 8> asserts;
2904 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
2905 register_edge_assert_for (op, e,
2906 gimple_cond_code (last),
2907 gimple_cond_lhs (last),
2908 gimple_cond_rhs (last), asserts);
2909 finish_register_edge_assert_for (e, bsi, asserts);
2913 /* Compare two case labels sorting first by the destination bb index
2914 and then by the case value. */
2916 int
2917 vrp_asserts::compare_case_labels (const void *p1, const void *p2)
2919 const struct case_info *ci1 = (const struct case_info *) p1;
2920 const struct case_info *ci2 = (const struct case_info *) p2;
2921 int idx1 = ci1->bb->index;
2922 int idx2 = ci2->bb->index;
2924 if (idx1 < idx2)
2925 return -1;
2926 else if (idx1 == idx2)
2928 /* Make sure the default label is first in a group. */
2929 if (!CASE_LOW (ci1->expr))
2930 return -1;
2931 else if (!CASE_LOW (ci2->expr))
2932 return 1;
2933 else
2934 return tree_int_cst_compare (CASE_LOW (ci1->expr),
2935 CASE_LOW (ci2->expr));
2937 else
2938 return 1;
2941 /* Determine whether the outgoing edges of BB should receive an
2942 ASSERT_EXPR for each of the operands of BB's LAST statement.
2943 The last statement of BB must be a SWITCH_EXPR.
2945 If any of the sub-graphs rooted at BB have an interesting use of
2946 the predicate operands, an assert location node is added to the
2947 list of assertions for the corresponding operands. */
2949 void
2950 vrp_asserts::find_switch_asserts (basic_block bb, gswitch *last)
2952 gimple_stmt_iterator bsi;
2953 tree op;
2954 edge e;
2955 struct case_info *ci;
2956 size_t n = gimple_switch_num_labels (last);
2957 #if GCC_VERSION >= 4000
2958 unsigned int idx;
2959 #else
2960 /* Work around GCC 3.4 bug (PR 37086). */
2961 volatile unsigned int idx;
2962 #endif
2964 bsi = gsi_for_stmt (last);
2965 op = gimple_switch_index (last);
2966 if (TREE_CODE (op) != SSA_NAME)
2967 return;
2969 /* Build a vector of case labels sorted by destination label. */
2970 ci = XNEWVEC (struct case_info, n);
2971 for (idx = 0; idx < n; ++idx)
2973 ci[idx].expr = gimple_switch_label (last, idx);
2974 ci[idx].bb = label_to_block (fun, CASE_LABEL (ci[idx].expr));
2976 edge default_edge = find_edge (bb, ci[0].bb);
2977 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
2979 for (idx = 0; idx < n; ++idx)
2981 tree min, max;
2982 tree cl = ci[idx].expr;
2983 basic_block cbb = ci[idx].bb;
2985 min = CASE_LOW (cl);
2986 max = CASE_HIGH (cl);
2988 /* If there are multiple case labels with the same destination
2989 we need to combine them to a single value range for the edge. */
2990 if (idx + 1 < n && cbb == ci[idx + 1].bb)
2992 /* Skip labels until the last of the group. */
2993 do {
2994 ++idx;
2995 } while (idx < n && cbb == ci[idx].bb);
2996 --idx;
2998 /* Pick up the maximum of the case label range. */
2999 if (CASE_HIGH (ci[idx].expr))
3000 max = CASE_HIGH (ci[idx].expr);
3001 else
3002 max = CASE_LOW (ci[idx].expr);
3005 /* Can't extract a useful assertion out of a range that includes the
3006 default label. */
3007 if (min == NULL_TREE)
3008 continue;
3010 /* Find the edge to register the assert expr on. */
3011 e = find_edge (bb, cbb);
3013 /* Register the necessary assertions for the operand in the
3014 SWITCH_EXPR. */
3015 auto_vec<assert_info, 8> asserts;
3016 register_edge_assert_for (op, e,
3017 max ? GE_EXPR : EQ_EXPR,
3018 op, fold_convert (TREE_TYPE (op), min),
3019 asserts);
3020 if (max)
3021 register_edge_assert_for (op, e, LE_EXPR, op,
3022 fold_convert (TREE_TYPE (op), max),
3023 asserts);
3024 finish_register_edge_assert_for (e, bsi, asserts);
3027 XDELETEVEC (ci);
3029 if (!live.live_on_edge_p (op, default_edge))
3030 return;
3032 /* Now register, along the default edge, assertions that correspond
3033 to the anti-range of each label. */
3034 int insertion_limit = param_max_vrp_switch_assertions;
3035 if (insertion_limit == 0)
3036 return;
3038 /* We can't do this if the default case shares a label with another case. */
3039 tree default_cl = gimple_switch_default_label (last);
3040 for (idx = 1; idx < n; idx++)
3042 tree min, max;
3043 tree cl = gimple_switch_label (last, idx);
3044 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3045 continue;
3047 min = CASE_LOW (cl);
3048 max = CASE_HIGH (cl);
3050 /* Combine contiguous case ranges to reduce the number of assertions
3051 to insert. */
3052 for (idx = idx + 1; idx < n; idx++)
3054 tree next_min, next_max;
3055 tree next_cl = gimple_switch_label (last, idx);
3056 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3057 break;
3059 next_min = CASE_LOW (next_cl);
3060 next_max = CASE_HIGH (next_cl);
3062 wide_int difference = (wi::to_wide (next_min)
3063 - wi::to_wide (max ? max : min));
3064 if (wi::eq_p (difference, 1))
3065 max = next_max ? next_max : next_min;
3066 else
3067 break;
3069 idx--;
3071 if (max == NULL_TREE)
3073 /* Register the assertion OP != MIN. */
3074 auto_vec<assert_info, 8> asserts;
3075 min = fold_convert (TREE_TYPE (op), min);
3076 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3077 asserts);
3078 finish_register_edge_assert_for (default_edge, bsi, asserts);
3080 else
3082 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3083 which will give OP the anti-range ~[MIN,MAX]. */
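/* For instance (a hypothetical case, not from the original sources):
   for a label "case 3 ... 7" this registers
     (unsigned) op - 3 > 4
   on the default edge, which excludes exactly the values 3 through 7
   from OP's range there.  */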
3084 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3085 min = fold_convert (TREE_TYPE (uop), min);
3086 max = fold_convert (TREE_TYPE (uop), max);
3088 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3089 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3090 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3091 NULL, default_edge, bsi);
3094 if (--insertion_limit == 0)
3095 break;
3099 /* Traverse all the statements in block BB looking for statements that
3100 may generate useful assertions for the SSA names in their operands.
3101 If a statement produces a useful assertion A for name N_i, then the
3102 list of assertions already generated for N_i is scanned to
3103 determine if A is actually needed.
3105 If N_i already had the assertion A at a location dominating the
3106 current location, then nothing needs to be done. Otherwise, the
3107 new location for A is recorded instead.
3109 1- For every statement S in BB, all the variables used by S are
3110 added to bitmap FOUND_IN_SUBGRAPH.
3112 2- If statement S uses an operand N in a way that exposes a known
3113 value range for N, then if N was not already generated by an
3114 ASSERT_EXPR, create a new assert location for N. For instance,
3115 if N is a pointer and the statement dereferences it, we can
3116 assume that N is not NULL.
3118 3- COND_EXPRs are a special case of #2. We can derive range
3119 information from the predicate but need to insert different
3120 ASSERT_EXPRs for each of the sub-graphs rooted at the
3121 conditional block. If the last statement of BB is a conditional
3122 expression of the form 'X op Y', then
3124 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3126 b) If the conditional is the only entry point to the sub-graph
3127 corresponding to the THEN_CLAUSE, recurse into it. On
3128 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3129 an ASSERT_EXPR is added for the corresponding variable.
3131 c) Repeat step (b) on the ELSE_CLAUSE.
3133 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3135 For instance,
3137 if (a == 9)
3138 b = a;
3139 else
3140 b = c + 1;
3142 In this case, an assertion on the THEN clause is useful to
3143 determine that 'a' is always 9 on that edge. However, an assertion
3144 on the ELSE clause would be unnecessary.
3146 4- If BB does not end in a conditional expression, then we recurse
3147 into BB's dominator children.
3149 At the end of the recursive traversal, every SSA name will have a
3150 list of locations where ASSERT_EXPRs should be added. When a new
3151 location for name N is found, it is registered by calling
3152 register_new_assert_for. That function keeps track of all the
3153 registered assertions to prevent adding unnecessary assertions.
3154 For instance, if a pointer P_4 is dereferenced more than once in a
3155 dominator tree, only the location dominating all the dereferences of
3156 P_4 will receive an ASSERT_EXPR. */
3158 void
3159 vrp_asserts::find_assert_locations_in_bb (basic_block bb)
3161 gimple *last;
3163 last = last_stmt (bb);
3165 /* If BB's last statement is a conditional statement involving integer
3166 operands, determine if we need to add ASSERT_EXPRs. */
3167 if (last
3168 && gimple_code (last) == GIMPLE_COND
3169 && !fp_predicate (last)
3170 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3171 find_conditional_asserts (bb, as_a <gcond *> (last));
3173 /* If BB's last statement is a switch statement involving integer
3174 operands, determine if we need to add ASSERT_EXPRs. */
3175 if (last
3176 && gimple_code (last) == GIMPLE_SWITCH
3177 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3178 find_switch_asserts (bb, as_a <gswitch *> (last));
3180 /* Traverse all the statements in BB marking used names and looking
3181 for statements that may infer assertions for their used operands. */
3182 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3183 gsi_prev (&si))
3185 gimple *stmt;
3186 tree op;
3187 ssa_op_iter i;
3189 stmt = gsi_stmt (si);
3191 if (is_gimple_debug (stmt))
3192 continue;
3194 /* See if we can derive an assertion for any of STMT's operands. */
3195 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3197 tree value;
3198 enum tree_code comp_code;
3200 /* If op is not live beyond this stmt, do not bother to insert
3201 asserts for it. */
3202 if (!live.live_on_block_p (op, bb))
3203 continue;
3205 /* If OP is used in such a way that we can infer a value
3206 range for it, and we don't find a previous assertion for
3207 it, create a new assertion location node for OP. */
3208 if (infer_value_range (stmt, op, &comp_code, &value))
3210 /* If we are able to infer a nonzero value range for OP,
3211 then walk backwards through the use-def chain to see if OP
3212 was set via a typecast.
3214 If so, then we can also infer a nonzero value range
3215 for the operand of the NOP_EXPR. */
3216 if (comp_code == NE_EXPR && integer_zerop (value))
3218 tree t = op;
3219 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3221 while (is_gimple_assign (def_stmt)
3222 && CONVERT_EXPR_CODE_P
3223 (gimple_assign_rhs_code (def_stmt))
3224 && TREE_CODE
3225 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3226 && POINTER_TYPE_P
3227 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3229 t = gimple_assign_rhs1 (def_stmt);
3230 def_stmt = SSA_NAME_DEF_STMT (t);
3232 /* Note we want to register the assert for the
3233 operand of the NOP_EXPR after SI, not after the
3234 conversion. */
3235 if (live.live_on_block_p (t, bb))
3236 register_new_assert_for (t, t, comp_code, value,
3237 bb, NULL, si);
3241 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3245 /* Update live. */
3246 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3247 live.set (op, bb);
3248 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3249 live.clear (op, bb);
3252 /* Traverse all PHI nodes in BB, updating live. */
3253 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3254 gsi_next (&si))
3256 use_operand_p arg_p;
3257 ssa_op_iter i;
3258 gphi *phi = si.phi ();
3259 tree res = gimple_phi_result (phi);
3261 if (virtual_operand_p (res))
3262 continue;
3264 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3266 tree arg = USE_FROM_PTR (arg_p);
3267 if (TREE_CODE (arg) == SSA_NAME)
3268 live.set (arg, bb);
3271 live.clear (res, bb);
3275 /* Do an RPO walk over the function computing SSA name liveness
3276 on-the-fly and deciding on assert expressions to insert. */
3278 void
3279 vrp_asserts::find_assert_locations (void)
3281 int *rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3282 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3283 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (fun));
3284 int rpo_cnt, i;
3286 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3287 for (i = 0; i < rpo_cnt; ++i)
3288 bb_rpo[rpo[i]] = i;
3290 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3291 the order we compute liveness and insert asserts we otherwise
3292 fail to insert asserts into the loop latch. */
3293 for (auto loop : loops_list (cfun, 0))
3295 i = loop->latch->index;
3296 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3297 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3298 !gsi_end_p (gsi); gsi_next (&gsi))
3300 gphi *phi = gsi.phi ();
3301 if (virtual_operand_p (gimple_phi_result (phi)))
3302 continue;
3303 tree arg = gimple_phi_arg_def (phi, j);
3304 if (TREE_CODE (arg) == SSA_NAME)
3305 live.set (arg, loop->latch);
3309 for (i = rpo_cnt - 1; i >= 0; --i)
3311 basic_block bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
3312 edge e;
3313 edge_iterator ei;
3315 /* Process BB and update the live information with uses in
3316 this block. */
3317 find_assert_locations_in_bb (bb);
3319 /* Merge liveness into the predecessor blocks and free it. */
3320 if (!live.block_has_live_names_p (bb))
3322 int pred_rpo = i;
3323 FOR_EACH_EDGE (e, ei, bb->preds)
3325 int pred = e->src->index;
3326 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3327 continue;
3329 live.merge (e->src, bb);
3331 if (bb_rpo[pred] < pred_rpo)
3332 pred_rpo = bb_rpo[pred];
3335 /* Record the RPO number of the last visited block that needs
3336 live information from this block. */
3337 last_rpo[rpo[i]] = pred_rpo;
3339 else
3340 live.clear_block (bb);
3342 /* We can free all the successors' live bitmaps if all their
3343 predecessors have been visited already. */
3344 FOR_EACH_EDGE (e, ei, bb->succs)
3345 if (last_rpo[e->dest->index] == i)
3346 live.clear_block (e->dest);
3349 XDELETEVEC (rpo);
3350 XDELETEVEC (bb_rpo);
3351 XDELETEVEC (last_rpo);
3354 /* Create an ASSERT_EXPR for NAME and insert it in the location
3355 indicated by LOC. Return true if we made any edge insertions. */
3357 bool
3358 vrp_asserts::process_assert_insertions_for (tree name, assert_locus *loc)
3360 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3361 gimple *stmt;
3362 tree cond;
3363 gimple *assert_stmt;
3364 edge_iterator ei;
3365 edge e;
3367 /* If we have X <=> X do not insert an assert expr for that. */
3368 if (loc->expr == loc->val)
3369 return false;
3371 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3372 assert_stmt = build_assert_expr_for (cond, name);
3373 if (loc->e)
3375 /* We have been asked to insert the assertion on an edge. This
3376 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3377 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3378 || (gimple_code (gsi_stmt (loc->si))
3379 == GIMPLE_SWITCH));
3381 gsi_insert_on_edge (loc->e, assert_stmt);
3382 return true;
3385 /* If the stmt iterator points at the end then this is an insertion
3386 at the beginning of a block. */
3387 if (gsi_end_p (loc->si))
3389 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3390 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3391 return false;
3394 /* Otherwise, we can insert right after LOC->SI iff the
3395 statement is not required to be the last statement in the block. */
3396 stmt = gsi_stmt (loc->si);
3397 if (!stmt_ends_bb_p (stmt))
3399 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3400 return false;
3403 /* If STMT must be the last statement in BB, we can only insert new
3404 assertions on the non-abnormal edge out of BB. Note that since
3405 STMT is not control flow, there may only be one non-abnormal/eh edge
3406 out of BB. */
3407 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3408 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3410 gsi_insert_on_edge (e, assert_stmt);
3411 return true;
3414 gcc_unreachable ();
3417 /* Qsort helper for sorting assert locations. If stable is true, don't
3418 use iterative_hash_expr because it can be unstable for -fcompare-debug,
3419 on the other hand, some pointers might be NULL. */
3421 template <bool stable>
3422 int
3423 vrp_asserts::compare_assert_loc (const void *pa, const void *pb)
3425 assert_locus * const a = *(assert_locus * const *)pa;
3426 assert_locus * const b = *(assert_locus * const *)pb;
3428 /* If stable, some asserts might be optimized away already, sort
3429 them last. */
3430 if (stable)
3432 if (a == NULL)
3433 return b != NULL;
3434 else if (b == NULL)
3435 return -1;
3438 if (a->e == NULL && b->e != NULL)
3439 return 1;
3440 else if (a->e != NULL && b->e == NULL)
3441 return -1;
3443 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3444 no need to test both a->e and b->e. */
3446 /* Sort after destination index. */
3447 if (a->e == NULL)
3449 else if (a->e->dest->index > b->e->dest->index)
3450 return 1;
3451 else if (a->e->dest->index < b->e->dest->index)
3452 return -1;
3454 /* Sort after comp_code. */
3455 if (a->comp_code > b->comp_code)
3456 return 1;
3457 else if (a->comp_code < b->comp_code)
3458 return -1;
3460 hashval_t ha, hb;
3462 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3463 uses DECL_UID of the VAR_DECL, so sorting might differ between
3464 -g and -g0. When doing the removal of redundant assert exprs
3465 and commonization to successors, this does not matter, but
3466 the final sort needs to be stable. */
3467 if (stable)
3469 ha = 0;
3470 hb = 0;
3472 else
3474 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3475 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3478 /* Break the tie using hashing and source/bb index. */
3479 if (ha == hb)
3480 return (a->e != NULL
3481 ? a->e->src->index - b->e->src->index
3482 : a->bb->index - b->bb->index);
3483 return ha > hb ? 1 : -1;
3486 /* Process all the insertions registered for every name N_i registered
3487 in NEED_ASSERT_FOR. The list of assertions to be inserted are
3488 found in ASSERTS_FOR[i]. */
3490 void
3491 vrp_asserts::process_assert_insertions ()
3493 unsigned i;
3494 bitmap_iterator bi;
3495 bool update_edges_p = false;
3496 int num_asserts = 0;
3498 if (dump_file && (dump_flags & TDF_DETAILS))
3499 dump (dump_file);
3501 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3503 assert_locus *loc = asserts_for[i];
3504 gcc_assert (loc);
3506 auto_vec<assert_locus *, 16> asserts;
3507 for (; loc; loc = loc->next)
3508 asserts.safe_push (loc);
3509 asserts.qsort (compare_assert_loc<false>);
3511 /* Push down common asserts to successors and remove redundant ones. */
3512 unsigned ecnt = 0;
3513 assert_locus *common = NULL;
3514 unsigned commonj = 0;
3515 for (unsigned j = 0; j < asserts.length (); ++j)
3517 loc = asserts[j];
3518 if (! loc->e)
3519 common = NULL;
3520 else if (! common
3521 || loc->e->dest != common->e->dest
3522 || loc->comp_code != common->comp_code
3523 || ! operand_equal_p (loc->val, common->val, 0)
3524 || ! operand_equal_p (loc->expr, common->expr, 0))
3526 commonj = j;
3527 common = loc;
3528 ecnt = 1;
3530 else if (loc->e == asserts[j-1]->e)
3532 /* Remove duplicate asserts. */
3533 if (commonj == j - 1)
3535 commonj = j;
3536 common = loc;
3538 free (asserts[j-1]);
3539 asserts[j-1] = NULL;
3541 else
3543 ecnt++;
3544 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
3546 /* We have the same assertion on all incoming edges of a BB.
3547 Insert it at the beginning of that block. */
3548 loc->bb = loc->e->dest;
3549 loc->e = NULL;
3550 loc->si = gsi_none ();
3551 common = NULL;
3552 /* Clear asserts commoned. */
3553 for (; commonj != j; ++commonj)
3554 if (asserts[commonj])
3556 free (asserts[commonj]);
3557 asserts[commonj] = NULL;
3563 /* The asserts vector sorting above might be unstable for
3564 -fcompare-debug, sort again to ensure a stable sort. */
3565 asserts.qsort (compare_assert_loc<true>);
3566 for (unsigned j = 0; j < asserts.length (); ++j)
3568 loc = asserts[j];
3569 if (! loc)
3570 break;
3571 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
3572 num_asserts++;
3573 free (loc);
3577 if (update_edges_p)
3578 gsi_commit_edge_inserts ();
3580 statistics_counter_event (fun, "Number of ASSERT_EXPR expressions inserted",
3581 num_asserts);
3584 /* Traverse the flowgraph looking for conditional jumps to insert range
3585 expressions. These range expressions are meant to provide information
3586 to optimizations that need to reason in terms of value ranges. They
3587 will not be expanded into RTL. For instance, given:
3589 x = ...
3590 y = ...
3591 if (x < y)
3592 y = x - 2;
3593 else
3594 x = y + 3;
3596 this pass will transform the code into:
3598 x = ...
3599 y = ...
3600 if (x < y)
3602 x = ASSERT_EXPR <x, x < y>
3603 y = x - 2
3605 else
3607 y = ASSERT_EXPR <y, x >= y>
3608 x = y + 3
3611 The idea is that once copy and constant propagation have run, other
3612 optimizations will be able to determine what ranges of values can 'x'
3613 take in different paths of the code, simply by checking the reaching
3614 definition of 'x'. */
3616 void
3617 vrp_asserts::insert_range_assertions (void)
3619 need_assert_for = BITMAP_ALLOC (NULL);
3620 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
3622 calculate_dominance_info (CDI_DOMINATORS);
3624 find_assert_locations ();
3625 if (!bitmap_empty_p (need_assert_for))
3627 process_assert_insertions ();
3628 update_ssa (TODO_update_ssa_no_phi);
3631 if (dump_file && (dump_flags & TDF_DETAILS))
3633 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
3634 dump_function_to_file (current_function_decl, dump_file, dump_flags);
3637 free (asserts_for);
3638 BITMAP_FREE (need_assert_for);
3641 /* Return true if all imm uses of VAR are either in STMT, or
3642 feed (optionally through a chain of single imm uses) GIMPLE_COND
3643 in basic block COND_BB. */
3645 bool
3646 vrp_asserts::all_imm_uses_in_stmt_or_feed_cond (tree var,
3647 gimple *stmt,
3648 basic_block cond_bb)
3650 use_operand_p use_p, use2_p;
3651 imm_use_iterator iter;
3653 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
3654 if (USE_STMT (use_p) != stmt)
3656 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
3657 if (is_gimple_debug (use_stmt))
3658 continue;
3659 while (is_gimple_assign (use_stmt)
3660 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
3661 && single_imm_use (gimple_assign_lhs (use_stmt),
3662 &use2_p, &use_stmt2))
3663 use_stmt = use_stmt2;
3664 if (gimple_code (use_stmt) != GIMPLE_COND
3665 || gimple_bb (use_stmt) != cond_bb)
3666 return false;
3668 return true;
3671 /* Convert range assertion expressions into the implied copies and
3672 copy propagate away the copies. Doing the trivial copy propagation
3673 here avoids the need to run the full copy propagation pass after
3674 VRP.
3676 FIXME, this will eventually lead to copy propagation removing the
3677 names that had useful range information attached to them. For
3678 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
3679 then N_i will have the range [3, +INF].
3681 However, by converting the assertion into the implied copy
3682 operation N_i = N_j, we will then copy-propagate N_j into the uses
3683 of N_i and lose the range information.
3685 The problem with keeping ASSERT_EXPRs around is that passes after
3686 VRP need to handle them appropriately.
3688 Another approach would be to make the range information a first
3689 class property of the SSA_NAME so that it can be queried from
3690 any pass. This is made somewhat more complex by the need for
3691 multiple ranges to be associated with one SSA_NAME. */
3693 void
3694 vrp_asserts::remove_range_assertions ()
3696 basic_block bb;
3697 gimple_stmt_iterator si;
3698 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
3699 a basic block preceded by GIMPLE_COND branching to it and
3700 __builtin_trap, -1 if not yet checked, 0 otherwise. */
3701 int is_unreachable;
3703 /* Note that the BSI iterator bump happens at the bottom of the
3704 loop and no bump is necessary if we're removing the statement
3705 referenced by the current BSI. */
3706 FOR_EACH_BB_FN (bb, fun)
3707 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
3709 gimple *stmt = gsi_stmt (si);
3711 if (is_gimple_assign (stmt)
3712 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
3714 tree lhs = gimple_assign_lhs (stmt);
3715 tree rhs = gimple_assign_rhs1 (stmt);
3716 tree var;
3718 var = ASSERT_EXPR_VAR (rhs);
3720 if (TREE_CODE (var) == SSA_NAME
3721 && !POINTER_TYPE_P (TREE_TYPE (lhs))
3722 && SSA_NAME_RANGE_INFO (lhs))
3724 if (is_unreachable == -1)
3726 is_unreachable = 0;
3727 if (single_pred_p (bb)
3728 && assert_unreachable_fallthru_edge_p
3729 (single_pred_edge (bb)))
3730 is_unreachable = 1;
3732 /* Handle
3733 if (x_7 >= 10 && x_7 < 20)
3734 __builtin_unreachable ();
3735 x_8 = ASSERT_EXPR <x_7, ...>;
3736 if the only uses of x_7 are in the ASSERT_EXPR and
3737 in the condition. In that case, we can copy the
3738 range info from x_8 computed in this pass also
3739 for x_7. */
3740 if (is_unreachable
3741 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
3742 single_pred (bb)))
3744 if (SSA_NAME_RANGE_INFO (var))
3746 /* ?? This is a minor wart exposing the
3747 internals of SSA_NAME_RANGE_INFO in order
3748 to maintain existing behavior. This is
3749 because duplicate_ssa_name_range_info below
3750 needs a NULL destination range. This is
3751 all slated for removal... */
3752 ggc_free (SSA_NAME_RANGE_INFO (var));
3753 SSA_NAME_RANGE_INFO (var) = NULL;
3755 duplicate_ssa_name_range_info (var, lhs);
3756 maybe_set_nonzero_bits (single_pred_edge (bb), var);
3760 /* Propagate the RHS into every use of the LHS. For SSA names
3761 also propagate abnormals as it merely restores the original
3762 	     IL in this case (a replace_uses_by would assert).  */
3763 if (TREE_CODE (var) == SSA_NAME)
3765 imm_use_iterator iter;
3766 use_operand_p use_p;
3767 gimple *use_stmt;
3768 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3769 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3770 SET_USE (use_p, var);
3772 else
3773 replace_uses_by (lhs, var);
3775 /* And finally, remove the copy, it is not needed. */
3776 gsi_remove (&si, true);
3777 release_defs (stmt);
3779 else
3781 if (!is_gimple_debug (gsi_stmt (si)))
3782 is_unreachable = 0;
3783 gsi_next (&si);
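/* Illustrative sketch, not part of this file: a source-level form of the
   __builtin_unreachable idiom that remove_range_assertions handles above.
   The function and variable names are hypothetical.  Once the ASSERT_EXPR
   for x is removed, the range it carried (here [10, 19]) may be copied
   back to the original SSA name so later passes can still use it.  */

int
clamp_example (int x)
{
  if (x < 10 || x >= 20)
    __builtin_unreachable ();	/* tells VRP that x is in [10, 19] */
  return x * 2;			/* may be optimized using that range */
}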
3788 class vrp_prop : public ssa_propagation_engine
3790 public:
3791 vrp_prop (vr_values *v)
3792 : ssa_propagation_engine (),
3793 m_vr_values (v) { }
3795 void initialize (struct function *);
3796 void finalize ();
3798 private:
3799 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) final override;
3800 enum ssa_prop_result visit_phi (gphi *) final override;
3802 struct function *fun;
3803 vr_values *m_vr_values;
3806 /* Initialization required by ssa_propagate engine. */
3808 void
3809 vrp_prop::initialize (struct function *fn)
3811 basic_block bb;
3812 fun = fn;
3814 FOR_EACH_BB_FN (bb, fun)
3816 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3817 gsi_next (&si))
3819 gphi *phi = si.phi ();
3820 if (!stmt_interesting_for_vrp (phi))
3822 tree lhs = PHI_RESULT (phi);
3823 m_vr_values->set_def_to_varying (lhs);
3824 prop_set_simulate_again (phi, false);
3826 else
3827 prop_set_simulate_again (phi, true);
3830 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
3831 gsi_next (&si))
3833 gimple *stmt = gsi_stmt (si);
3835 	  /* If the statement is a control insn, it must be simulated
3836 	     at least once; failing to do so means that its outgoing
3837 	     edges will never get added.  */
3838 if (stmt_ends_bb_p (stmt))
3839 prop_set_simulate_again (stmt, true);
3840 else if (!stmt_interesting_for_vrp (stmt))
3842 m_vr_values->set_defs_to_varying (stmt);
3843 prop_set_simulate_again (stmt, false);
3845 else
3846 prop_set_simulate_again (stmt, true);
3851 /* Evaluate statement STMT. If the statement produces a useful range,
3852 return SSA_PROP_INTERESTING and record the SSA name with the
3853 interesting range into *OUTPUT_P.
3855 If STMT is a conditional branch and we can determine its truth
3856 value, the taken edge is recorded in *TAKEN_EDGE_P.
3858 If STMT produces a varying value, return SSA_PROP_VARYING. */
3860 enum ssa_prop_result
3861 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
3863 tree lhs = gimple_get_lhs (stmt);
3864 value_range_equiv vr;
3865 m_vr_values->extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
3867 if (*output_p)
3869 if (m_vr_values->update_value_range (*output_p, &vr))
3871 if (dump_file && (dump_flags & TDF_DETAILS))
3873 fprintf (dump_file, "Found new range for ");
3874 print_generic_expr (dump_file, *output_p);
3875 fprintf (dump_file, ": ");
3876 dump_value_range (dump_file, &vr);
3877 fprintf (dump_file, "\n");
3880 if (vr.varying_p ())
3881 return SSA_PROP_VARYING;
3883 return SSA_PROP_INTERESTING;
3885 return SSA_PROP_NOT_INTERESTING;
3888 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
3889 switch (gimple_call_internal_fn (stmt))
3891 case IFN_ADD_OVERFLOW:
3892 case IFN_SUB_OVERFLOW:
3893 case IFN_MUL_OVERFLOW:
3894 case IFN_ATOMIC_COMPARE_EXCHANGE:
3895 	/* These internal calls return a _Complex integer type,
3896 which VRP does not track, but the immediate uses
3897 thereof might be interesting. */
3898 if (lhs && TREE_CODE (lhs) == SSA_NAME)
3900 imm_use_iterator iter;
3901 use_operand_p use_p;
3902 enum ssa_prop_result res = SSA_PROP_VARYING;
3904 m_vr_values->set_def_to_varying (lhs);
3906 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3908 gimple *use_stmt = USE_STMT (use_p);
3909 if (!is_gimple_assign (use_stmt))
3910 continue;
3911 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
3912 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
3913 continue;
3914 tree rhs1 = gimple_assign_rhs1 (use_stmt);
3915 tree use_lhs = gimple_assign_lhs (use_stmt);
3916 if (TREE_CODE (rhs1) != rhs_code
3917 || TREE_OPERAND (rhs1, 0) != lhs
3918 || TREE_CODE (use_lhs) != SSA_NAME
3919 || !stmt_interesting_for_vrp (use_stmt)
3920 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
3921 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
3922 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
3923 continue;
3925 /* If there is a change in the value range for any of the
3926 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
3927 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
3928 or IMAGPART_EXPR immediate uses, but none of them have
3929 a change in their value ranges, return
3930 SSA_PROP_NOT_INTERESTING. If there are no
3931 {REAL,IMAG}PART_EXPR uses at all,
3932 return SSA_PROP_VARYING. */
3933 value_range_equiv new_vr;
3934 m_vr_values->extract_range_basic (&new_vr, use_stmt);
3935 const value_range_equiv *old_vr
3936 = m_vr_values->get_value_range (use_lhs);
3937 if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
3938 res = SSA_PROP_INTERESTING;
3939 else
3940 res = SSA_PROP_NOT_INTERESTING;
3941 new_vr.equiv_clear ();
3942 if (res == SSA_PROP_INTERESTING)
3944 *output_p = lhs;
3945 return res;
3949 return res;
3951 break;
3952 default:
3953 break;
3956 /* All other statements produce nothing of interest for VRP, so mark
3957 their outputs varying and prevent further simulation. */
3958 m_vr_values->set_defs_to_varying (stmt);
3960 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
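/* Illustrative sketch, not part of this file: the kind of source that
   produces the IFN_*_OVERFLOW calls special-cased in visit_stmt above.
   __builtin_add_overflow is typically lowered to IFN_ADD_OVERFLOW, whose
   _Complex int result is consumed through REALPART_EXPR (the sum) and
   IMAGPART_EXPR (the overflow flag); those immediate uses are what the
   code above re-examines.  The helper name is hypothetical.  */

bool
add_checked (int a, int b, int *res)
{
  return __builtin_add_overflow (a, b, res);	/* flag use: IMAGPART_EXPR */
}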
3963 /* Visit all arguments for PHI node PHI that flow through executable
3964 edges. If a valid value range can be derived from all the incoming
3965 value ranges, set a new range for the LHS of PHI. */
3967 enum ssa_prop_result
3968 vrp_prop::visit_phi (gphi *phi)
3970 tree lhs = PHI_RESULT (phi);
3971 value_range_equiv vr_result;
3972 m_vr_values->extract_range_from_phi_node (phi, &vr_result);
3973 if (m_vr_values->update_value_range (lhs, &vr_result))
3975 if (dump_file && (dump_flags & TDF_DETAILS))
3977 fprintf (dump_file, "Found new range for ");
3978 print_generic_expr (dump_file, lhs);
3979 fprintf (dump_file, ": ");
3980 dump_value_range (dump_file, &vr_result);
3981 fprintf (dump_file, "\n");
3984 if (vr_result.varying_p ())
3985 return SSA_PROP_VARYING;
3987 return SSA_PROP_INTERESTING;
3990 /* Nothing changed, don't add outgoing edges. */
3991 return SSA_PROP_NOT_INTERESTING;
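/* Illustrative sketch, not part of this file: a PHI whose incoming ranges
   merge into a useful result.  The function name is hypothetical.  At the
   join point the PHI for r merges the constant 1 (range [1, 1]) with x,
   which is known to be in [2, 10] on the other arm, so visit_phi can
   record the range [1, 10] for the PHI result.  */

int
phi_example (int x)
{
  int r;
  if (x < 2 || x > 10)
    r = 1;
  else
    r = x;	/* on this arm x is in [2, 10] */
  return r;
}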
3994 /* Traverse all the blocks folding conditionals with known ranges. */
3996 void
3997 vrp_prop::finalize ()
3999 size_t i;
4001 /* We have completed propagating through the lattice. */
4002 m_vr_values->set_lattice_propagation_complete ();
4004 if (dump_file)
4006 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
4007 m_vr_values->dump (dump_file);
4008 fprintf (dump_file, "\n");
4011 	/* Record non-null pointers and set value ranges on non-pointer
	   SSA_NAMEs.  */
4012 for (i = 0; i < num_ssa_names; i++)
4014 tree name = ssa_name (i);
4015 if (!name)
4016 continue;
4018 const value_range_equiv *vr = m_vr_values->get_value_range (name);
4019 if (!name || vr->varying_p () || !vr->constant_p ())
4020 continue;
4022 if (POINTER_TYPE_P (TREE_TYPE (name))
4023 && range_includes_zero_p (vr) == 0)
4024 set_ptr_nonnull (name);
4025 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
4026 set_range_info (name, *vr);
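/* Illustrative sketch, not part of this file: the kind of fact finalize
   exports for later passes.  The function name is hypothetical.  After the
   guard, p's range excludes zero, so the loop above would mark p non-null
   via set_ptr_nonnull; an integral name with a constant range would
   instead be recorded with set_range_info.  */

int
nonnull_example (int *p)
{
  if (p == 0)
    __builtin_unreachable ();	/* p is never null past this point */
  return *p;
}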
4030 class vrp_folder : public substitute_and_fold_engine
4032 public:
4033 vrp_folder (vr_values *v)
4034 : substitute_and_fold_engine (/* Fold all stmts. */ true),
4035 m_vr_values (v), simplifier (v)
4037 void simplify_casted_conds (function *fun);
4039 private:
4040 tree value_of_expr (tree name, gimple *stmt) override
4042 return m_vr_values->value_of_expr (name, stmt);
4044 bool fold_stmt (gimple_stmt_iterator *) final override;
4045 bool fold_predicate_in (gimple_stmt_iterator *);
4047 vr_values *m_vr_values;
4048 simplify_using_ranges simplifier;
4051 /* If the statement pointed by SI has a predicate whose value can be
4052 computed using the value range information computed by VRP, compute
4053 its value and return true. Otherwise, return false. */
4055 bool
4056 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
4058 bool assignment_p = false;
4059 tree val;
4060 gimple *stmt = gsi_stmt (*si);
4062 if (is_gimple_assign (stmt)
4063 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
4065 assignment_p = true;
4066 val = simplifier.vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
4067 gimple_assign_rhs1 (stmt),
4068 gimple_assign_rhs2 (stmt),
4069 stmt);
4071 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
4072 val = simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
4073 gimple_cond_lhs (cond_stmt),
4074 gimple_cond_rhs (cond_stmt),
4075 stmt);
4076 else
4077 return false;
4079 if (val)
4081 if (assignment_p)
4082 val = fold_convert (TREE_TYPE (gimple_assign_lhs (stmt)), val);
4084 if (dump_file)
4086 fprintf (dump_file, "Folding predicate ");
4087 print_gimple_expr (dump_file, stmt, 0);
4088 fprintf (dump_file, " to ");
4089 print_generic_expr (dump_file, val);
4090 fprintf (dump_file, "\n");
4093 if (is_gimple_assign (stmt))
4094 gimple_assign_set_rhs_from_tree (si, val);
4095 else
4097 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
4098 gcond *cond_stmt = as_a <gcond *> (stmt);
4099 if (integer_zerop (val))
4100 gimple_cond_make_false (cond_stmt);
4101 else if (integer_onep (val))
4102 gimple_cond_make_true (cond_stmt);
4103 else
4104 gcc_unreachable ();
4107 return true;
4110 return false;
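/* Illustrative sketch, not part of this file: a predicate that
   fold_predicate_in can evaluate from range information.  The function
   name is hypothetical.  Inside the guarded block x is known to be in
   [0, 9], so the comparison x < 10 folds to 1.  */

int
predicate_example (int x)
{
  if (x >= 0 && x <= 9)
    return x < 10;	/* folded to: return 1; */
  return 0;
}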
4113 /* Callback for substitute_and_fold folding the stmt at *SI. */
4115 bool
4116 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
4118 if (fold_predicate_in (si))
4119 return true;
4121 return simplifier.simplify (si);
4124 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
4125 was set by a type conversion can often be rewritten to use the RHS
4126 of the type conversion. Do this optimization for all conditionals
4127 in FUN. */
4129 void
4130 vrp_folder::simplify_casted_conds (function *fun)
4132 basic_block bb;
4133 FOR_EACH_BB_FN (bb, fun)
4135 gimple *last = last_stmt (bb);
4136 if (last && gimple_code (last) == GIMPLE_COND)
4138 if (simplifier.simplify_casted_cond (as_a <gcond *> (last)))
4140 if (dump_file && (dump_flags & TDF_DETAILS))
4142 fprintf (dump_file, "Folded into: ");
4143 print_gimple_stmt (dump_file, last, 0, TDF_SLIM);
4144 fprintf (dump_file, "\n");
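/* Illustrative sketch, not part of this file: the casted-condition
   pattern simplify_casted_conds targets.  Names are hypothetical.  The
   comparison of the widened value i can often be rewritten to compare
   the narrower operand c directly, since the conversion preserves the
   value.  */

void
casted_cond_example (unsigned char c)
{
  int i = c;			/* i_1 = (int) c_2;  */
  if (i == 5)			/* candidate for: if (c_2 == 5) */
    __builtin_abort ();
}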
4151 /* Main entry point to VRP (Value Range Propagation). This pass is
4152 loosely based on J. R. C. Patterson, ``Accurate Static Branch
4153 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
4154 Programming Language Design and Implementation, pp. 67-78, 1995.
4155 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
4157 This is essentially an SSA-CCP pass modified to deal with ranges
4158 instead of constants.
4160 	 While propagating ranges, we may find that two or more SSA names
4161 have equivalent, though distinct ranges. For instance,
4163 1 x_9 = p_3->a;
4164 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
4165 3 if (p_4 == q_2)
4166 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
4167 5 endif
4168 6 if (q_2)
4170 In the code above, pointer p_5 has range [q_2, q_2], but from the
4171 code we can also determine that p_5 cannot be NULL and, if q_2 had
4172 a non-varying range, p_5's range should also be compatible with it.
4174 These equivalences are created by two expressions: ASSERT_EXPR and
4175 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
4176 result of another assertion, then we can use the fact that p_5 and
4177 p_4 are equivalent when evaluating p_5's range.
4179 Together with value ranges, we also propagate these equivalences
4180 between names so that we can take advantage of information from
4181 multiple ranges when doing final replacement. Note that this
4182 equivalency relation is transitive but not symmetric.
4184 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
4185 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
4186 in contexts where that assertion does not hold (e.g., in line 6).
4188 TODO, the main difference between this pass and Patterson's is that
4189 we do not propagate edge probabilities. We only compute whether
4190 edges can be taken or not. That is, instead of having a spectrum
4191 of jump probabilities between 0 and 1, we only deal with 0, 1 and
4192 DON'T KNOW. In the future, it may be worthwhile to propagate
4193 probabilities to aid branch prediction. */
4195 static unsigned int
4196 execute_vrp (struct function *fun, bool warn_array_bounds_p)
4198 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
4199 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
4200 scev_initialize ();
4202 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
4203 Inserting assertions may split edges which will invalidate
4204 EDGE_DFS_BACK. */
4205 vrp_asserts assert_engine (fun);
4206 assert_engine.insert_range_assertions ();
4208 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
4209 mark_dfs_back_edges ();
4211 vr_values vrp_vr_values;
4213 class vrp_prop vrp_prop (&vrp_vr_values);
4214 vrp_prop.initialize (fun);
4215 vrp_prop.ssa_propagate ();
4217 /* Instantiate the folder here, so that edge cleanups happen at the
4218 end of this function. */
4219 vrp_folder folder (&vrp_vr_values);
4220 vrp_prop.finalize ();
4222 /* If we're checking array refs, we want to merge information on
4223 the executability of each edge between vrp_folder and the
4224 check_array_bounds_dom_walker: each can clear the
4225 EDGE_EXECUTABLE flag on edges, in different ways.
4227 Hence, if we're going to call check_all_array_refs, set
4228 the flag on every edge now, rather than in
4229 check_array_bounds_dom_walker's ctor; vrp_folder may clear
4230 it from some edges. */
4231 if (warn_array_bounds && warn_array_bounds_p)
4232 set_all_edges_as_executable (fun);
4234 folder.substitute_and_fold ();
4236 if (warn_array_bounds && warn_array_bounds_p)
4238 array_bounds_checker array_checker (fun, &vrp_vr_values);
4239 array_checker.check ();
4242 folder.simplify_casted_conds (fun);
4244 free_numbers_of_iterations_estimates (fun);
4246 assert_engine.remove_range_assertions ();
4248 scev_finalize ();
4249 loop_optimizer_finalize ();
4250 return 0;
4253 // This is a ranger-based folder which continues to use the dominator
4254 // walk to access the substitute and fold machinery. Ranges are calculated
4255 // on demand.
4257 class rvrp_folder : public substitute_and_fold_engine
4259 public:
4261 rvrp_folder (gimple_ranger *r) : substitute_and_fold_engine (),
4262 m_simplifier (r, r->non_executable_edge_flag)
4264 m_ranger = r;
4265 m_pta = new pointer_equiv_analyzer (m_ranger);
4268 ~rvrp_folder ()
4270 delete m_pta;
4273 tree value_of_expr (tree name, gimple *s = NULL) override
4275     // Short-circuit subst_and_fold callbacks for abnormal ssa_names.
4276 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4277 return NULL;
4278 tree ret = m_ranger->value_of_expr (name, s);
4279 if (!ret && supported_pointer_equiv_p (name))
4280 ret = m_pta->get_equiv (name);
4281 return ret;
4284 tree value_on_edge (edge e, tree name) override
4286     // Short-circuit subst_and_fold callbacks for abnormal ssa_names.
4287 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4288 return NULL;
4289 tree ret = m_ranger->value_on_edge (e, name);
4290 if (!ret && supported_pointer_equiv_p (name))
4291 ret = m_pta->get_equiv (name);
4292 return ret;
4295 tree value_of_stmt (gimple *s, tree name = NULL) override
4297     // Short-circuit subst_and_fold callbacks for abnormal ssa_names.
4298 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4299 return NULL;
4300 return m_ranger->value_of_stmt (s, name);
4303 void pre_fold_bb (basic_block bb) override
4305 m_pta->enter (bb);
4306 for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
4307 gsi_next (&gsi))
4308 m_ranger->register_inferred_ranges (gsi.phi ());
4311 void post_fold_bb (basic_block bb) override
4313 m_pta->leave (bb);
4316 void pre_fold_stmt (gimple *stmt) override
4318 m_pta->visit_stmt (stmt);
4321 bool fold_stmt (gimple_stmt_iterator *gsi) override
4323 bool ret = m_simplifier.simplify (gsi);
4324 if (!ret)
4325 ret = m_ranger->fold_stmt (gsi, follow_single_use_edges);
4326 m_ranger->register_inferred_ranges (gsi_stmt (*gsi));
4327 return ret;
4330 private:
4331 DISABLE_COPY_AND_ASSIGN (rvrp_folder);
4332 gimple_ranger *m_ranger;
4333 simplify_using_ranges m_simplifier;
4334 pointer_equiv_analyzer *m_pta;
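/* Illustrative sketch, not part of this file: the pointer-equivalence
   fallback used by value_of_expr/value_on_edge above.  Names are
   hypothetical.  Ranger has no constant value for q, but the pointer
   equivalence analyzer records that q equals p, which can let the folder
   substitute p for uses of q.  */

int
pointer_equiv_example (int *p)
{
  int *q = p;	/* q_1 = p_2(D); recorded as an equivalence */
  return *q;	/* the use of q_1 may be rewritten to use p_2 */
}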
4337 /* Main entry point for a VRP pass using just ranger. This can be called
4338 from anywhere to perform a VRP pass, including from EVRP. */
4340 unsigned int
4341 execute_ranger_vrp (struct function *fun, bool warn_array_bounds_p)
4343 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
4344 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
4345 scev_initialize ();
4346 calculate_dominance_info (CDI_DOMINATORS);
4348 set_all_edges_as_executable (fun);
4349 gimple_ranger *ranger = enable_ranger (fun, false);
4350 rvrp_folder folder (ranger);
4351 folder.substitute_and_fold ();
4352 if (dump_file && (dump_flags & TDF_DETAILS))
4353 ranger->dump (dump_file);
4355 if (warn_array_bounds && warn_array_bounds_p)
4357 // Set all edges as executable, except those ranger says aren't.
4358 int non_exec_flag = ranger->non_executable_edge_flag;
4359 basic_block bb;
4360 FOR_ALL_BB_FN (bb, fun)
4362 edge_iterator ei;
4363 edge e;
4364 FOR_EACH_EDGE (e, ei, bb->succs)
4365 if (e->flags & non_exec_flag)
4366 e->flags &= ~EDGE_EXECUTABLE;
4367 else
4368 e->flags |= EDGE_EXECUTABLE;
4370 scev_reset ();
4371 array_bounds_checker array_checker (fun, ranger);
4372 array_checker.check ();
4375 disable_ranger (fun);
4376 scev_finalize ();
4377 loop_optimizer_finalize ();
4378 return 0;
4381 namespace {
4383 const pass_data pass_data_vrp =
4385 GIMPLE_PASS, /* type */
4386 "vrp", /* name */
4387 OPTGROUP_NONE, /* optinfo_flags */
4388 TV_TREE_VRP, /* tv_id */
4389 PROP_ssa, /* properties_required */
4390 0, /* properties_provided */
4391 0, /* properties_destroyed */
4392 0, /* todo_flags_start */
4393 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
4396 const pass_data pass_data_early_vrp =
4398 GIMPLE_PASS, /* type */
4399 "evrp", /* name */
4400 OPTGROUP_NONE, /* optinfo_flags */
4401 TV_TREE_EARLY_VRP, /* tv_id */
4402 PROP_ssa, /* properties_required */
4403 0, /* properties_provided */
4404 0, /* properties_destroyed */
4405 0, /* todo_flags_start */
4406 ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
4409 static int vrp_pass_num = 0;
4410 class pass_vrp : public gimple_opt_pass
4412 public:
4413 pass_vrp (gcc::context *ctxt, const pass_data &data_)
4414 : gimple_opt_pass (data_, ctxt), data (data_), warn_array_bounds_p (false),
4415 my_pass (vrp_pass_num++)
4418 /* opt_pass methods: */
4419 opt_pass * clone () final override { return new pass_vrp (m_ctxt, data); }
4420 void set_pass_param (unsigned int n, bool param) final override
4422 gcc_assert (n == 0);
4423 warn_array_bounds_p = param;
4425 bool gate (function *) final override { return flag_tree_vrp != 0; }
4426 unsigned int execute (function *fun) final override
4428 // Early VRP pass.
4429 if (my_pass == 0)
4430 return execute_ranger_vrp (fun, /*warn_array_bounds_p=*/false);
4432 if ((my_pass == 1 && param_vrp1_mode == VRP_MODE_RANGER)
4433 || (my_pass == 2 && param_vrp2_mode == VRP_MODE_RANGER))
4434 return execute_ranger_vrp (fun, warn_array_bounds_p);
4435 return execute_vrp (fun, warn_array_bounds_p);
4438 private:
4439 const pass_data &data;
4440 bool warn_array_bounds_p;
4441 int my_pass;
4442 }; // class pass_vrp
4444 } // anon namespace
4446 gimple_opt_pass *
4447 make_pass_vrp (gcc::context *ctxt)
4449 return new pass_vrp (ctxt, pass_data_vrp);
4452 gimple_opt_pass *
4453 make_pass_early_vrp (gcc::context *ctxt)
4455 return new pass_vrp (ctxt, pass_data_early_vrp);