Daily bump.
[official-gcc.git] / gcc / tree-vrp.c
blob79a29bf0efb7f261a248633fe2c753627ef04939
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2017 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimple-walk.h"
44 #include "tree-cfg.h"
45 #include "tree-ssa-loop-manip.h"
46 #include "tree-ssa-loop-niter.h"
47 #include "tree-ssa-loop.h"
48 #include "tree-into-ssa.h"
49 #include "tree-ssa.h"
50 #include "intl.h"
51 #include "cfgloop.h"
52 #include "tree-scalar-evolution.h"
53 #include "tree-ssa-propagate.h"
54 #include "tree-chrec.h"
55 #include "tree-ssa-threadupdate.h"
56 #include "tree-ssa-scopedtables.h"
57 #include "tree-ssa-threadedge.h"
58 #include "omp-general.h"
59 #include "target.h"
60 #include "case-cfn-macros.h"
61 #include "params.h"
62 #include "alloc-pool.h"
63 #include "domwalk.h"
64 #include "tree-cfgcleanup.h"
66 #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
68 /* Allocation pools for tree-vrp allocations. */
69 static object_allocator<value_range> vrp_value_range_pool ("Tree VRP value ranges");
70 static bitmap_obstack vrp_equiv_obstack;
72 /* Set of SSA names found live during the RPO traversal of the function
73 for still active basic-blocks. */
74 static sbitmap *live;
76 /* Return true if the SSA name NAME is live on the edge E. */
78 static bool
79 live_on_edge (edge e, tree name)
81 return (live[e->dest->index]
82 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
85 /* Local functions. */
86 static int compare_values (tree val1, tree val2);
87 static int compare_values_warnv (tree val1, tree val2, bool *);
88 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
89 tree, tree, bool, bool *,
90 bool *);
92 struct assert_info
94 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
95 enum tree_code comp_code;
97 /* Name to register the assert for. */
98 tree name;
100 /* Value being compared against. */
101 tree val;
103 /* Expression to compare. */
104 tree expr;
107 /* Location information for ASSERT_EXPRs. Each instance of this
108 structure describes an ASSERT_EXPR for an SSA name. Since a single
109 SSA name may have more than one assertion associated with it, these
110 locations are kept in a linked list attached to the corresponding
111 SSA name. */
112 struct assert_locus
114 /* Basic block where the assertion would be inserted. */
115 basic_block bb;
117 /* Some assertions need to be inserted on an edge (e.g., assertions
118 generated by COND_EXPRs). In those cases, BB will be NULL. */
119 edge e;
121 /* Pointer to the statement that generated this assertion. */
122 gimple_stmt_iterator si;
124 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
125 enum tree_code comp_code;
127 /* Value being compared against. */
128 tree val;
130 /* Expression to compare. */
131 tree expr;
133 /* Next node in the linked list. */
134 assert_locus *next;
137 /* If bit I is present, it means that SSA name N_i has a list of
138 assertions that should be inserted in the IL. */
139 static bitmap need_assert_for;
141 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
142 holds a list of ASSERT_LOCUS_T nodes that describe where
143 ASSERT_EXPRs for SSA name N_I should be inserted. */
144 static assert_locus **asserts_for;
146 /* Value range array. After propagation, VR_VALUE[I] holds the range
147 of values that SSA name N_I may take. */
148 static unsigned num_vr_values;
149 static value_range **vr_value;
150 static bool values_propagated;
152 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
153 number of executable edges we saw the last time we visited the
154 node. */
155 static int *vr_phi_edge_counts;
157 struct switch_update {
158 gswitch *stmt;
159 tree vec;
162 static vec<edge> to_remove_edges;
163 static vec<switch_update> to_update_switch_stmts;
166 /* Return the maximum value for TYPE. */
168 static inline tree
169 vrp_val_max (const_tree type)
171 if (!INTEGRAL_TYPE_P (type))
172 return NULL_TREE;
174 return TYPE_MAX_VALUE (type);
177 /* Return the minimum value for TYPE. */
179 static inline tree
180 vrp_val_min (const_tree type)
182 if (!INTEGRAL_TYPE_P (type))
183 return NULL_TREE;
185 return TYPE_MIN_VALUE (type);
188 /* Return whether VAL is equal to the maximum value of its type.
189 We can't do a simple equality comparison with TYPE_MAX_VALUE because
190 C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
191 is not == to the integer constant with the same value in the type. */
193 static inline bool
194 vrp_val_is_max (const_tree val)
196 tree type_max = vrp_val_max (TREE_TYPE (val));
197 return (val == type_max
198 || (type_max != NULL_TREE
199 && operand_equal_p (val, type_max, 0)));
202 /* Return whether VAL is equal to the minimum value of its type. */
204 static inline bool
205 vrp_val_is_min (const_tree val)
207 tree type_min = vrp_val_min (TREE_TYPE (val));
208 return (val == type_min
209 || (type_min != NULL_TREE
210 && operand_equal_p (val, type_min, 0)));
214 /* Set value range VR to VR_UNDEFINED. */
216 static inline void
217 set_value_range_to_undefined (value_range *vr)
219 vr->type = VR_UNDEFINED;
220 vr->min = vr->max = NULL_TREE;
221 if (vr->equiv)
222 bitmap_clear (vr->equiv);
226 /* Set value range VR to VR_VARYING. */
228 static inline void
229 set_value_range_to_varying (value_range *vr)
231 vr->type = VR_VARYING;
232 vr->min = vr->max = NULL_TREE;
233 if (vr->equiv)
234 bitmap_clear (vr->equiv);
238 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
240 static void
241 set_value_range (value_range *vr, enum value_range_type t, tree min,
242 tree max, bitmap equiv)
244 /* Check the validity of the range. */
245 if (flag_checking
246 && (t == VR_RANGE || t == VR_ANTI_RANGE))
248 int cmp;
250 gcc_assert (min && max);
252 gcc_assert (!TREE_OVERFLOW_P (min) && !TREE_OVERFLOW_P (max));
254 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
255 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
257 cmp = compare_values (min, max);
258 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
261 if (flag_checking
262 && (t == VR_UNDEFINED || t == VR_VARYING))
264 gcc_assert (min == NULL_TREE && max == NULL_TREE);
265 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
268 vr->type = t;
269 vr->min = min;
270 vr->max = max;
272 /* Since updating the equivalence set involves deep copying the
273 bitmaps, only do it if absolutely necessary. */
274 if (vr->equiv == NULL
275 && equiv != NULL)
276 vr->equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
278 if (equiv != vr->equiv)
280 if (equiv && !bitmap_empty_p (equiv))
281 bitmap_copy (vr->equiv, equiv);
282 else
283 bitmap_clear (vr->equiv);
288 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
289 This means adjusting T, MIN and MAX representing the case of a
290 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
291 as anti-rage ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
292 In corner cases where MAX+1 or MIN-1 wraps this will fall back
293 to varying.
294 This routine exists to ease canonicalization in the case where we
295 extract ranges from var + CST op limit. */
297 static void
298 set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
299 tree min, tree max, bitmap equiv)
301 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
302 if (t == VR_UNDEFINED)
304 set_value_range_to_undefined (vr);
305 return;
307 else if (t == VR_VARYING)
309 set_value_range_to_varying (vr);
310 return;
313 /* Nothing to canonicalize for symbolic ranges. */
314 if (TREE_CODE (min) != INTEGER_CST
315 || TREE_CODE (max) != INTEGER_CST)
317 set_value_range (vr, t, min, max, equiv);
318 return;
321 /* Wrong order for min and max, to swap them and the VR type we need
322 to adjust them. */
323 if (tree_int_cst_lt (max, min))
325 tree one, tmp;
327 /* For one bit precision if max < min, then the swapped
328 range covers all values, so for VR_RANGE it is varying and
329 for VR_ANTI_RANGE empty range, so drop to varying as well. */
330 if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
332 set_value_range_to_varying (vr);
333 return;
336 one = build_int_cst (TREE_TYPE (min), 1);
337 tmp = int_const_binop (PLUS_EXPR, max, one);
338 max = int_const_binop (MINUS_EXPR, min, one);
339 min = tmp;
341 /* There's one corner case, if we had [C+1, C] before we now have
342 that again. But this represents an empty value range, so drop
343 to varying in this case. */
344 if (tree_int_cst_lt (max, min))
346 set_value_range_to_varying (vr);
347 return;
350 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
353 /* Anti-ranges that can be represented as ranges should be so. */
354 if (t == VR_ANTI_RANGE)
356 bool is_min = vrp_val_is_min (min);
357 bool is_max = vrp_val_is_max (max);
359 if (is_min && is_max)
361 /* We cannot deal with empty ranges, drop to varying.
362 ??? This could be VR_UNDEFINED instead. */
363 set_value_range_to_varying (vr);
364 return;
366 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
367 && (is_min || is_max))
369 /* Non-empty boolean ranges can always be represented
370 as a singleton range. */
371 if (is_min)
372 min = max = vrp_val_max (TREE_TYPE (min));
373 else
374 min = max = vrp_val_min (TREE_TYPE (min));
375 t = VR_RANGE;
377 else if (is_min
378 /* As a special exception preserve non-null ranges. */
379 && !(TYPE_UNSIGNED (TREE_TYPE (min))
380 && integer_zerop (max)))
382 tree one = build_int_cst (TREE_TYPE (max), 1);
383 min = int_const_binop (PLUS_EXPR, max, one);
384 max = vrp_val_max (TREE_TYPE (max));
385 t = VR_RANGE;
387 else if (is_max)
389 tree one = build_int_cst (TREE_TYPE (min), 1);
390 max = int_const_binop (MINUS_EXPR, min, one);
391 min = vrp_val_min (TREE_TYPE (min));
392 t = VR_RANGE;
396 /* Do not drop [-INF(OVF), +INF(OVF)] to varying. (OVF) has to be sticky
397 to make sure VRP iteration terminates, otherwise we can get into
398 oscillations. */
400 set_value_range (vr, t, min, max, equiv);
403 /* Copy value range FROM into value range TO. */
405 static inline void
406 copy_value_range (value_range *to, value_range *from)
408 set_value_range (to, from->type, from->min, from->max, from->equiv);
411 /* Set value range VR to a single value. This function is only called
412 with values we get from statements, and exists to clear the
413 TREE_OVERFLOW flag. */
415 static inline void
416 set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
418 gcc_assert (is_gimple_min_invariant (val));
419 if (TREE_OVERFLOW_P (val))
420 val = drop_tree_overflow (val);
421 set_value_range (vr, VR_RANGE, val, val, equiv);
424 /* Set value range VR to a non-negative range of type TYPE. */
426 static inline void
427 set_value_range_to_nonnegative (value_range *vr, tree type)
429 tree zero = build_int_cst (type, 0);
430 set_value_range (vr, VR_RANGE, zero, vrp_val_max (type), vr->equiv);
433 /* Set value range VR to a non-NULL range of type TYPE. */
435 static inline void
436 set_value_range_to_nonnull (value_range *vr, tree type)
438 tree zero = build_int_cst (type, 0);
439 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
443 /* Set value range VR to a NULL range of type TYPE. */
445 static inline void
446 set_value_range_to_null (value_range *vr, tree type)
448 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
452 /* Set value range VR to a range of a truthvalue of type TYPE. */
454 static inline void
455 set_value_range_to_truthvalue (value_range *vr, tree type)
457 if (TYPE_PRECISION (type) == 1)
458 set_value_range_to_varying (vr);
459 else
460 set_value_range (vr, VR_RANGE,
461 build_int_cst (type, 0), build_int_cst (type, 1),
462 vr->equiv);
466 /* If abs (min) < abs (max), set VR to [-max, max], if
467 abs (min) >= abs (max), set VR to [-min, min]. */
469 static void
470 abs_extent_range (value_range *vr, tree min, tree max)
472 int cmp;
474 gcc_assert (TREE_CODE (min) == INTEGER_CST);
475 gcc_assert (TREE_CODE (max) == INTEGER_CST);
476 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
477 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
478 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
479 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
480 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
482 set_value_range_to_varying (vr);
483 return;
485 cmp = compare_values (min, max);
486 if (cmp == -1)
487 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
488 else if (cmp == 0 || cmp == 1)
490 max = min;
491 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
493 else
495 set_value_range_to_varying (vr);
496 return;
498 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
502 /* Return value range information for VAR.
504 If we have no values ranges recorded (ie, VRP is not running), then
505 return NULL. Otherwise create an empty range if none existed for VAR. */
507 static value_range *
508 get_value_range (const_tree var)
510 static const value_range vr_const_varying
511 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
512 value_range *vr;
513 tree sym;
514 unsigned ver = SSA_NAME_VERSION (var);
516 /* If we have no recorded ranges, then return NULL. */
517 if (! vr_value)
518 return NULL;
520 /* If we query the range for a new SSA name return an unmodifiable VARYING.
521 We should get here at most from the substitute-and-fold stage which
522 will never try to change values. */
523 if (ver >= num_vr_values)
524 return CONST_CAST (value_range *, &vr_const_varying);
526 vr = vr_value[ver];
527 if (vr)
528 return vr;
530 /* After propagation finished do not allocate new value-ranges. */
531 if (values_propagated)
532 return CONST_CAST (value_range *, &vr_const_varying);
534 /* Create a default value range. */
535 vr_value[ver] = vr = vrp_value_range_pool.allocate ();
536 memset (vr, 0, sizeof (*vr));
538 /* Defer allocating the equivalence set. */
539 vr->equiv = NULL;
541 /* If VAR is a default definition of a parameter, the variable can
542 take any value in VAR's type. */
543 if (SSA_NAME_IS_DEFAULT_DEF (var))
545 sym = SSA_NAME_VAR (var);
546 if (TREE_CODE (sym) == PARM_DECL)
548 /* Try to use the "nonnull" attribute to create ~[0, 0]
549 anti-ranges for pointers. Note that this is only valid with
550 default definitions of PARM_DECLs. */
551 if (POINTER_TYPE_P (TREE_TYPE (sym))
552 && (nonnull_arg_p (sym)
553 || get_ptr_nonnull (var)))
554 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
555 else if (INTEGRAL_TYPE_P (TREE_TYPE (sym)))
557 wide_int min, max;
558 value_range_type rtype = get_range_info (var, &min, &max);
559 if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
560 set_value_range (vr, rtype,
561 wide_int_to_tree (TREE_TYPE (var), min),
562 wide_int_to_tree (TREE_TYPE (var), max),
563 NULL);
564 else
565 set_value_range_to_varying (vr);
567 else
568 set_value_range_to_varying (vr);
570 else if (TREE_CODE (sym) == RESULT_DECL
571 && DECL_BY_REFERENCE (sym))
572 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
575 return vr;
578 /* Set value-ranges of all SSA names defined by STMT to varying. */
580 static void
581 set_defs_to_varying (gimple *stmt)
583 ssa_op_iter i;
584 tree def;
585 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
587 value_range *vr = get_value_range (def);
588 /* Avoid writing to vr_const_varying get_value_range may return. */
589 if (vr->type != VR_VARYING)
590 set_value_range_to_varying (vr);
595 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
597 static inline bool
598 vrp_operand_equal_p (const_tree val1, const_tree val2)
600 if (val1 == val2)
601 return true;
602 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
603 return false;
604 return true;
607 /* Return true, if the bitmaps B1 and B2 are equal. */
609 static inline bool
610 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
612 return (b1 == b2
613 || ((!b1 || bitmap_empty_p (b1))
614 && (!b2 || bitmap_empty_p (b2)))
615 || (b1 && b2
616 && bitmap_equal_p (b1, b2)));
619 /* Update the value range and equivalence set for variable VAR to
620 NEW_VR. Return true if NEW_VR is different from VAR's previous
621 value.
623 NOTE: This function assumes that NEW_VR is a temporary value range
624 object created for the sole purpose of updating VAR's range. The
625 storage used by the equivalence set from NEW_VR will be freed by
626 this function. Do not call update_value_range when NEW_VR
627 is the range object associated with another SSA name. */
629 static inline bool
630 update_value_range (const_tree var, value_range *new_vr)
632 value_range *old_vr;
633 bool is_new;
635 /* If there is a value-range on the SSA name from earlier analysis
636 factor that in. */
637 if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
639 wide_int min, max;
640 value_range_type rtype = get_range_info (var, &min, &max);
641 if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
643 tree nr_min, nr_max;
644 nr_min = wide_int_to_tree (TREE_TYPE (var), min);
645 nr_max = wide_int_to_tree (TREE_TYPE (var), max);
646 value_range nr = VR_INITIALIZER;
647 set_and_canonicalize_value_range (&nr, rtype, nr_min, nr_max, NULL);
648 vrp_intersect_ranges (new_vr, &nr);
652 /* Update the value range, if necessary. */
653 old_vr = get_value_range (var);
654 is_new = old_vr->type != new_vr->type
655 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
656 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
657 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
659 if (is_new)
661 /* Do not allow transitions up the lattice. The following
662 is slightly more awkward than just new_vr->type < old_vr->type
663 because VR_RANGE and VR_ANTI_RANGE need to be considered
664 the same. We may not have is_new when transitioning to
665 UNDEFINED. If old_vr->type is VARYING, we shouldn't be
666 called. */
667 if (new_vr->type == VR_UNDEFINED)
669 BITMAP_FREE (new_vr->equiv);
670 set_value_range_to_varying (old_vr);
671 set_value_range_to_varying (new_vr);
672 return true;
674 else
675 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
676 new_vr->equiv);
679 BITMAP_FREE (new_vr->equiv);
681 return is_new;
685 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
686 point where equivalence processing can be turned on/off. */
688 static void
689 add_equivalence (bitmap *equiv, const_tree var)
691 unsigned ver = SSA_NAME_VERSION (var);
692 value_range *vr = get_value_range (var);
694 if (*equiv == NULL)
695 *equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
696 bitmap_set_bit (*equiv, ver);
697 if (vr && vr->equiv)
698 bitmap_ior_into (*equiv, vr->equiv);
702 /* Return true if VR is ~[0, 0]. */
704 static inline bool
705 range_is_nonnull (value_range *vr)
707 return vr->type == VR_ANTI_RANGE
708 && integer_zerop (vr->min)
709 && integer_zerop (vr->max);
713 /* Return true if VR is [0, 0]. */
715 static inline bool
716 range_is_null (value_range *vr)
718 return vr->type == VR_RANGE
719 && integer_zerop (vr->min)
720 && integer_zerop (vr->max);
723 /* Return true if max and min of VR are INTEGER_CST. It's not necessary
724 a singleton. */
726 static inline bool
727 range_int_cst_p (value_range *vr)
729 return (vr->type == VR_RANGE
730 && TREE_CODE (vr->max) == INTEGER_CST
731 && TREE_CODE (vr->min) == INTEGER_CST);
734 /* Return true if VR is a INTEGER_CST singleton. */
736 static inline bool
737 range_int_cst_singleton_p (value_range *vr)
739 return (range_int_cst_p (vr)
740 && tree_int_cst_equal (vr->min, vr->max));
743 /* Return true if value range VR involves at least one symbol. */
745 static inline bool
746 symbolic_range_p (value_range *vr)
748 return (!is_gimple_min_invariant (vr->min)
749 || !is_gimple_min_invariant (vr->max));
752 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
753 otherwise. We only handle additive operations and set NEG to true if the
754 symbol is negated and INV to the invariant part, if any. */
756 static tree
757 get_single_symbol (tree t, bool *neg, tree *inv)
759 bool neg_;
760 tree inv_;
762 *inv = NULL_TREE;
763 *neg = false;
765 if (TREE_CODE (t) == PLUS_EXPR
766 || TREE_CODE (t) == POINTER_PLUS_EXPR
767 || TREE_CODE (t) == MINUS_EXPR)
769 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
771 neg_ = (TREE_CODE (t) == MINUS_EXPR);
772 inv_ = TREE_OPERAND (t, 0);
773 t = TREE_OPERAND (t, 1);
775 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
777 neg_ = false;
778 inv_ = TREE_OPERAND (t, 1);
779 t = TREE_OPERAND (t, 0);
781 else
782 return NULL_TREE;
784 else
786 neg_ = false;
787 inv_ = NULL_TREE;
790 if (TREE_CODE (t) == NEGATE_EXPR)
792 t = TREE_OPERAND (t, 0);
793 neg_ = !neg_;
796 if (TREE_CODE (t) != SSA_NAME)
797 return NULL_TREE;
799 if (inv_ && TREE_OVERFLOW_P (inv_))
800 inv_ = drop_tree_overflow (inv_);
802 *neg = neg_;
803 *inv = inv_;
804 return t;
807 /* The reverse operation: build a symbolic expression with TYPE
808 from symbol SYM, negated according to NEG, and invariant INV. */
810 static tree
811 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
813 const bool pointer_p = POINTER_TYPE_P (type);
814 tree t = sym;
816 if (neg)
817 t = build1 (NEGATE_EXPR, type, t);
819 if (integer_zerop (inv))
820 return t;
822 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
825 /* Return true if value range VR involves exactly one symbol SYM. */
827 static bool
828 symbolic_range_based_on_p (value_range *vr, const_tree sym)
830 bool neg, min_has_symbol, max_has_symbol;
831 tree inv;
833 if (is_gimple_min_invariant (vr->min))
834 min_has_symbol = false;
835 else if (get_single_symbol (vr->min, &neg, &inv) == sym)
836 min_has_symbol = true;
837 else
838 return false;
840 if (is_gimple_min_invariant (vr->max))
841 max_has_symbol = false;
842 else if (get_single_symbol (vr->max, &neg, &inv) == sym)
843 max_has_symbol = true;
844 else
845 return false;
847 return (min_has_symbol || max_has_symbol);
850 /* Return true if the result of assignment STMT is know to be non-zero. */
852 static bool
853 gimple_assign_nonzero_p (gimple *stmt)
855 enum tree_code code = gimple_assign_rhs_code (stmt);
856 bool strict_overflow_p;
857 switch (get_gimple_rhs_class (code))
859 case GIMPLE_UNARY_RHS:
860 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
861 gimple_expr_type (stmt),
862 gimple_assign_rhs1 (stmt),
863 &strict_overflow_p);
864 case GIMPLE_BINARY_RHS:
865 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
866 gimple_expr_type (stmt),
867 gimple_assign_rhs1 (stmt),
868 gimple_assign_rhs2 (stmt),
869 &strict_overflow_p);
870 case GIMPLE_TERNARY_RHS:
871 return false;
872 case GIMPLE_SINGLE_RHS:
873 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
874 &strict_overflow_p);
875 case GIMPLE_INVALID_RHS:
876 gcc_unreachable ();
877 default:
878 gcc_unreachable ();
882 /* Return true if STMT is known to compute a non-zero value. */
884 static bool
885 gimple_stmt_nonzero_p (gimple *stmt)
887 switch (gimple_code (stmt))
889 case GIMPLE_ASSIGN:
890 return gimple_assign_nonzero_p (stmt);
891 case GIMPLE_CALL:
893 tree fndecl = gimple_call_fndecl (stmt);
894 if (!fndecl) return false;
895 if (flag_delete_null_pointer_checks && !flag_check_new
896 && DECL_IS_OPERATOR_NEW (fndecl)
897 && !TREE_NOTHROW (fndecl))
898 return true;
899 /* References are always non-NULL. */
900 if (flag_delete_null_pointer_checks
901 && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE)
902 return true;
903 if (flag_delete_null_pointer_checks &&
904 lookup_attribute ("returns_nonnull",
905 TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
906 return true;
908 gcall *call_stmt = as_a<gcall *> (stmt);
909 unsigned rf = gimple_call_return_flags (call_stmt);
910 if (rf & ERF_RETURNS_ARG)
912 unsigned argnum = rf & ERF_RETURN_ARG_MASK;
913 if (argnum < gimple_call_num_args (call_stmt))
915 tree arg = gimple_call_arg (call_stmt, argnum);
916 if (SSA_VAR_P (arg)
917 && infer_nonnull_range_by_attribute (stmt, arg))
918 return true;
921 return gimple_alloca_call_p (stmt);
923 default:
924 gcc_unreachable ();
928 /* Like tree_expr_nonzero_p, but this function uses value ranges
929 obtained so far. */
931 static bool
932 vrp_stmt_computes_nonzero (gimple *stmt)
934 if (gimple_stmt_nonzero_p (stmt))
935 return true;
937 /* If we have an expression of the form &X->a, then the expression
938 is nonnull if X is nonnull. */
939 if (is_gimple_assign (stmt)
940 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
942 tree expr = gimple_assign_rhs1 (stmt);
943 tree base = get_base_address (TREE_OPERAND (expr, 0));
945 if (base != NULL_TREE
946 && TREE_CODE (base) == MEM_REF
947 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
949 value_range *vr = get_value_range (TREE_OPERAND (base, 0));
950 if (range_is_nonnull (vr))
951 return true;
955 return false;
958 /* Returns true if EXPR is a valid value (as expected by compare_values) --
959 a gimple invariant, or SSA_NAME +- CST. */
961 static bool
962 valid_value_p (tree expr)
964 if (TREE_CODE (expr) == SSA_NAME)
965 return true;
967 if (TREE_CODE (expr) == PLUS_EXPR
968 || TREE_CODE (expr) == MINUS_EXPR)
969 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
970 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
972 return is_gimple_min_invariant (expr);
975 /* Return
976 1 if VAL < VAL2
977 0 if !(VAL < VAL2)
978 -2 if those are incomparable. */
979 static inline int
980 operand_less_p (tree val, tree val2)
982 /* LT is folded faster than GE and others. Inline the common case. */
983 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
984 return tree_int_cst_lt (val, val2);
985 else
987 tree tcmp;
989 fold_defer_overflow_warnings ();
991 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
993 fold_undefer_and_ignore_overflow_warnings ();
995 if (!tcmp
996 || TREE_CODE (tcmp) != INTEGER_CST)
997 return -2;
999 if (!integer_zerop (tcmp))
1000 return 1;
1003 return 0;
1006 /* Compare two values VAL1 and VAL2. Return
1008 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1009 -1 if VAL1 < VAL2,
1010 0 if VAL1 == VAL2,
1011 +1 if VAL1 > VAL2, and
1012 +2 if VAL1 != VAL2
1014 This is similar to tree_int_cst_compare but supports pointer values
1015 and values that cannot be compared at compile time.
1017 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1018 true if the return value is only valid if we assume that signed
1019 overflow is undefined. */
1021 static int
1022 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1024 if (val1 == val2)
1025 return 0;
1027 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1028 both integers. */
1029 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1030 == POINTER_TYPE_P (TREE_TYPE (val2)));
1032 /* Convert the two values into the same type. This is needed because
1033 sizetype causes sign extension even for unsigned types. */
1034 val2 = fold_convert (TREE_TYPE (val1), val2);
1035 STRIP_USELESS_TYPE_CONVERSION (val2);
1037 const bool overflow_undefined
1038 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
1039 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
1040 tree inv1, inv2;
1041 bool neg1, neg2;
1042 tree sym1 = get_single_symbol (val1, &neg1, &inv1);
1043 tree sym2 = get_single_symbol (val2, &neg2, &inv2);
1045 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
1046 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
1047 if (sym1 && sym2)
1049 /* Both values must use the same name with the same sign. */
1050 if (sym1 != sym2 || neg1 != neg2)
1051 return -2;
1053 /* [-]NAME + CST == [-]NAME + CST. */
1054 if (inv1 == inv2)
1055 return 0;
1057 /* If overflow is defined we cannot simplify more. */
1058 if (!overflow_undefined)
1059 return -2;
1061 if (strict_overflow_p != NULL
1062 /* Symbolic range building sets TREE_NO_WARNING to declare
1063 that overflow doesn't happen. */
1064 && (!inv1 || !TREE_NO_WARNING (val1))
1065 && (!inv2 || !TREE_NO_WARNING (val2)))
1066 *strict_overflow_p = true;
1068 if (!inv1)
1069 inv1 = build_int_cst (TREE_TYPE (val1), 0);
1070 if (!inv2)
1071 inv2 = build_int_cst (TREE_TYPE (val2), 0);
1073 return wi::cmp (inv1, inv2, TYPE_SIGN (TREE_TYPE (val1)));
1076 const bool cst1 = is_gimple_min_invariant (val1);
1077 const bool cst2 = is_gimple_min_invariant (val2);
1079 /* If one is of the form '[-]NAME + CST' and the other is constant, then
1080 it might be possible to say something depending on the constants. */
1081 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
1083 if (!overflow_undefined)
1084 return -2;
1086 if (strict_overflow_p != NULL
1087 /* Symbolic range building sets TREE_NO_WARNING to declare
1088 that overflow doesn't happen. */
1089 && (!sym1 || !TREE_NO_WARNING (val1))
1090 && (!sym2 || !TREE_NO_WARNING (val2)))
1091 *strict_overflow_p = true;
1093 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
1094 tree cst = cst1 ? val1 : val2;
1095 tree inv = cst1 ? inv2 : inv1;
1097 /* Compute the difference between the constants. If it overflows or
1098 underflows, this means that we can trivially compare the NAME with
1099 it and, consequently, the two values with each other. */
1100 wide_int diff = wi::sub (cst, inv);
1101 if (wi::cmp (0, inv, sgn) != wi::cmp (diff, cst, sgn))
1103 const int res = wi::cmp (cst, inv, sgn);
1104 return cst1 ? res : -res;
1107 return -2;
1110 /* We cannot say anything more for non-constants. */
1111 if (!cst1 || !cst2)
1112 return -2;
1114 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1116 /* We cannot compare overflowed values. */
1117 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1118 return -2;
1120 return tree_int_cst_compare (val1, val2);
1122 else
1124 tree t;
1126 /* First see if VAL1 and VAL2 are not the same. */
1127 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1128 return 0;
1130 /* If VAL1 is a lower address than VAL2, return -1. */
1131 if (operand_less_p (val1, val2) == 1)
1132 return -1;
1134 /* If VAL1 is a higher address than VAL2, return +1. */
1135 if (operand_less_p (val2, val1) == 1)
1136 return 1;
1138 /* If VAL1 is different than VAL2, return +2.
1139 For integer constants we either have already returned -1 or 1
1140 or they are equivalent. We still might succeed in proving
1141 something about non-trivial operands. */
1142 if (TREE_CODE (val1) != INTEGER_CST
1143 || TREE_CODE (val2) != INTEGER_CST)
1145 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1146 if (t && integer_onep (t))
1147 return 2;
1150 return -2;
1154 /* Compare values like compare_values_warnv. */
1156 static int
1157 compare_values (tree val1, tree val2)
1159 bool sop;
1160 return compare_values_warnv (val1, val2, &sop);
1164 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1165 0 if VAL is not inside [MIN, MAX],
1166 -2 if we cannot tell either way.
1168 Benchmark compile/20001226-1.c compilation time after changing this
1169 function. */
1171 static inline int
1172 value_inside_range (tree val, tree min, tree max)
1174 int cmp1, cmp2;
1176 cmp1 = operand_less_p (val, min);
1177 if (cmp1 == -2)
1178 return -2;
1179 if (cmp1 == 1)
1180 return 0;
1182 cmp2 = operand_less_p (max, val);
1183 if (cmp2 == -2)
1184 return -2;
1186 return !cmp2;
1190 /* Return true if value ranges VR0 and VR1 have a non-empty
1191 intersection.
1193 Benchmark compile/20001226-1.c compilation time after changing this
1194 function.
1197 static inline bool
1198 value_ranges_intersect_p (value_range *vr0, value_range *vr1)
1200 /* The value ranges do not intersect if the maximum of the first range is
1201 less than the minimum of the second range or vice versa.
1202 When those relations are unknown, we can't do any better. */
1203 if (operand_less_p (vr0->max, vr1->min) != 0)
1204 return false;
1205 if (operand_less_p (vr1->max, vr0->min) != 0)
1206 return false;
1207 return true;
1211 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1212 include the value zero, -2 if we cannot tell. */
1214 static inline int
1215 range_includes_zero_p (tree min, tree max)
1217 tree zero = build_int_cst (TREE_TYPE (min), 0);
1218 return value_inside_range (zero, min, max);
1221 /* Return true if *VR is know to only contain nonnegative values. */
1223 static inline bool
1224 value_range_nonnegative_p (value_range *vr)
1226 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1227 which would return a useful value should be encoded as a
1228 VR_RANGE. */
1229 if (vr->type == VR_RANGE)
1231 int result = compare_values (vr->min, integer_zero_node);
1232 return (result == 0 || result == 1);
1235 return false;
1238 /* If *VR has a value rante that is a single constant value return that,
1239 otherwise return NULL_TREE. */
1241 static tree
1242 value_range_constant_singleton (value_range *vr)
1244 if (vr->type == VR_RANGE
1245 && vrp_operand_equal_p (vr->min, vr->max)
1246 && is_gimple_min_invariant (vr->min))
1247 return vr->min;
1249 return NULL_TREE;
1252 /* If OP has a value range with a single constant value return that,
1253 otherwise return NULL_TREE. This returns OP itself if OP is a
1254 constant. */
1256 static tree
1257 op_with_constant_singleton_value_range (tree op)
1259 if (is_gimple_min_invariant (op))
1260 return op;
1262 if (TREE_CODE (op) != SSA_NAME)
1263 return NULL_TREE;
1265 return value_range_constant_singleton (get_value_range (op));
1268 /* Return true if op is in a boolean [0, 1] value-range. */
1270 static bool
1271 op_with_boolean_value_range_p (tree op)
1273 value_range *vr;
1275 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1276 return true;
1278 if (integer_zerop (op)
1279 || integer_onep (op))
1280 return true;
1282 if (TREE_CODE (op) != SSA_NAME)
1283 return false;
1285 vr = get_value_range (op);
1286 return (vr->type == VR_RANGE
1287 && integer_zerop (vr->min)
1288 && integer_onep (vr->max));
1291 /* Extract value range information for VAR when (OP COND_CODE LIMIT) is
1292 true and store it in *VR_P. */
1294 static void
1295 extract_range_for_var_from_comparison_expr (tree var, enum tree_code cond_code,
1296 tree op, tree limit,
1297 value_range *vr_p)
1299 tree min, max, type;
1300 value_range *limit_vr;
1301 type = TREE_TYPE (var);
/* LIMIT must be a different tree node than VAR itself.  */
1302 gcc_assert (limit != var);
1304 /* For pointer arithmetic, we only keep track of pointer equality
1305 and inequality. */
1306 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1308 set_value_range_to_varying (vr_p);
1309 return;
1312 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1313 try to use LIMIT's range to avoid creating symbolic ranges
1314 unnecessarily. */
1315 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1317 /* LIMIT's range is only interesting if it has any useful information. */
1318 if (! limit_vr
1319 || limit_vr->type == VR_UNDEFINED
1320 || limit_vr->type == VR_VARYING
1321 || (symbolic_range_p (limit_vr)
1322 && ! (limit_vr->type == VR_RANGE
1323 && (limit_vr->min == limit_vr->max
1324 || operand_equal_p (limit_vr->min, limit_vr->max, 0)))))
1325 limit_vr = NULL;
1327 /* Initially, the new range has the same set of equivalences of
1328 VAR's range. This will be revised before returning the final
1329 value. Since assertions may be chained via mutually exclusive
1330 predicates, we will need to trim the set of equivalences before
1331 we are done. */
1332 gcc_assert (vr_p->equiv == NULL);
1333 add_equivalence (&vr_p->equiv, var);
1335 /* Extract a new range based on the asserted comparison for VAR and
1336 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1337 will only use it for equality comparisons (EQ_EXPR). For any
1338 other kind of assertion, we cannot derive a range from LIMIT's
1339 anti-range that can be used to describe the new range. For
1340 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1341 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1342 no single range for x_2 that could describe LE_EXPR, so we might
1343 as well build the range [b_4, +INF] for it.
1344 One special case we handle is extracting a range from a
1345 range test encoded as (unsigned)var + CST <= limit. */
1346 if (TREE_CODE (op) == NOP_EXPR
1347 || TREE_CODE (op) == PLUS_EXPR)
1349 if (TREE_CODE (op) == PLUS_EXPR)
/* For VAR + CST <= LIMIT the implied range of VAR is [-CST, LIMIT - CST].  */
1351 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (op, 1)),
1352 TREE_OPERAND (op, 1));
1353 max = int_const_binop (PLUS_EXPR, limit, min);
1354 op = TREE_OPERAND (op, 0);
1356 else
1358 min = build_int_cst (TREE_TYPE (var), 0);
1359 max = limit;
1362 /* Make sure to not set TREE_OVERFLOW on the final type
1363 conversion. We are willingly interpreting large positive
1364 unsigned values as negative signed values here. */
1365 min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
1366 max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
1368 /* We can transform a max, min range to an anti-range or
1369 vice-versa. Use set_and_canonicalize_value_range which does
1370 this for us. */
1371 if (cond_code == LE_EXPR)
1372 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1373 min, max, vr_p->equiv);
1374 else if (cond_code == GT_EXPR)
1375 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1376 min, max, vr_p->equiv);
1377 else
1378 gcc_unreachable ();
/* VAR == LIMIT: VAR's range becomes LIMIT's known range, or the
   singleton [LIMIT, LIMIT] when LIMIT's range is not usable.  */
1380 else if (cond_code == EQ_EXPR)
1382 enum value_range_type range_type;
1384 if (limit_vr)
1386 range_type = limit_vr->type;
1387 min = limit_vr->min;
1388 max = limit_vr->max;
1390 else
1392 range_type = VR_RANGE;
1393 min = limit;
1394 max = limit;
1397 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1399 /* When asserting the equality VAR == LIMIT and LIMIT is another
1400 SSA name, the new range will also inherit the equivalence set
1401 from LIMIT. */
1402 if (TREE_CODE (limit) == SSA_NAME)
1403 add_equivalence (&vr_p->equiv, limit);
1405 else if (cond_code == NE_EXPR)
1407 /* As described above, when LIMIT's range is an anti-range and
1408 this assertion is an inequality (NE_EXPR), then we cannot
1409 derive anything from the anti-range. For instance, if
1410 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1411 not imply that VAR's range is [0, 0]. So, in the case of
1412 anti-ranges, we just assert the inequality using LIMIT and
1413 not its anti-range.
1415 If LIMIT_VR is a range, we can only use it to build a new
1416 anti-range if LIMIT_VR is a single-valued range. For
1417 instance, if LIMIT_VR is [0, 1], the predicate
1418 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1419 Rather, it means that for value 0 VAR should be ~[0, 0]
1420 and for value 1, VAR should be ~[1, 1]. We cannot
1421 represent these ranges.
1423 The only situation in which we can build a valid
1424 anti-range is when LIMIT_VR is a single-valued range
1425 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1426 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1427 if (limit_vr
1428 && limit_vr->type == VR_RANGE
1429 && compare_values (limit_vr->min, limit_vr->max) == 0)
1431 min = limit_vr->min;
1432 max = limit_vr->max;
1434 else
1436 /* In any other case, we cannot use LIMIT's range to build a
1437 valid anti-range. */
1438 min = max = limit;
1441 /* If MIN and MAX cover the whole range for their type, then
1442 just use the original LIMIT. */
1443 if (INTEGRAL_TYPE_P (type)
1444 && vrp_val_is_min (min)
1445 && vrp_val_is_max (max))
1446 min = max = limit;
1448 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1449 min, max, vr_p->equiv);
/* VAR <= LIMIT or VAR < LIMIT: the range is [TYPE_MIN, upper bound].  */
1451 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1453 min = TYPE_MIN_VALUE (type);
1455 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1456 max = limit;
1457 else
1459 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1460 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1461 LT_EXPR. */
1462 max = limit_vr->max;
1465 /* If the maximum value forces us to be out of bounds, simply punt.
1466 It would be pointless to try and do anything more since this
1467 all should be optimized away above us. */
1468 if (cond_code == LT_EXPR
1469 && compare_values (max, min) == 0)
1470 set_value_range_to_varying (vr_p);
1471 else
1473 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1474 if (cond_code == LT_EXPR)
/* A signed 1-bit type cannot represent the constant 1, so
   subtract one by adding -1 instead.  */
1476 if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1477 && !TYPE_UNSIGNED (TREE_TYPE (max)))
1478 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1479 build_int_cst (TREE_TYPE (max), -1));
1480 else
1481 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1482 build_int_cst (TREE_TYPE (max), 1));
1483 /* Signal to compare_values_warnv this expr doesn't overflow. */
1484 if (EXPR_P (max))
1485 TREE_NO_WARNING (max) = 1;
1488 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
/* VAR >= LIMIT or VAR > LIMIT: the range is [lower bound, TYPE_MAX].  */
1491 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1493 max = TYPE_MAX_VALUE (type);
1495 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1496 min = limit;
1497 else
1499 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1500 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1501 GT_EXPR. */
1502 min = limit_vr->min;
1505 /* If the minimum value forces us to be out of bounds, simply punt.
1506 It would be pointless to try and do anything more since this
1507 all should be optimized away above us. */
1508 if (cond_code == GT_EXPR
1509 && compare_values (min, max) == 0)
1510 set_value_range_to_varying (vr_p);
1511 else
1513 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1514 if (cond_code == GT_EXPR)
/* A signed 1-bit type cannot represent the constant 1, so
   add one by subtracting -1 instead.  */
1516 if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1517 && !TYPE_UNSIGNED (TREE_TYPE (min)))
1518 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1519 build_int_cst (TREE_TYPE (min), -1));
1520 else
1521 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1522 build_int_cst (TREE_TYPE (min), 1));
1523 /* Signal to compare_values_warnv this expr doesn't overflow. */
1524 if (EXPR_P (min))
1525 TREE_NO_WARNING (min) = 1;
1528 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1531 else
1532 gcc_unreachable ();
1534 /* Finally intersect the new range with what we already know about var. */
1535 vrp_intersect_ranges (vr_p, get_value_range (var));
1538 /* Extract value range information from an ASSERT_EXPR EXPR and store
1539 it in *VR_P. */
1541 static void
1542 extract_range_from_assert (value_range *vr_p, tree expr)
1544 tree var = ASSERT_EXPR_VAR (expr);
1545 tree cond = ASSERT_EXPR_COND (expr);
1546 tree limit, op;
1547 enum tree_code cond_code;
1548 gcc_assert (COMPARISON_CLASS_P (cond));
1550 /* Find VAR in the ASSERT_EXPR conditional. */
1551 if (var == TREE_OPERAND (cond, 0)
1552 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1553 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1555 /* If the predicate is of the form VAR COMP LIMIT, then we just
1556 take LIMIT from the RHS and use the same comparison code. */
1557 cond_code = TREE_CODE (cond);
1558 limit = TREE_OPERAND (cond, 1);
1559 op = TREE_OPERAND (cond, 0);
1561 else
1563 /* If the predicate is of the form LIMIT COMP VAR, then we need
1564 to flip around the comparison code to create the proper range
1565 for VAR. */
1566 cond_code = swap_tree_comparison (TREE_CODE (cond));
1567 limit = TREE_OPERAND (cond, 0);
1568 op = TREE_OPERAND (cond, 1);
1570 extract_range_for_var_from_comparison_expr (var, cond_code, op,
1571 limit, vr_p);
1574 /* Extract range information from SSA name VAR and store it in VR. If
1575 VAR has an interesting range, use it. Otherwise, create the
1576 range [VAR, VAR] and return it. This is useful in situations where
1577 we may have conditionals testing values of VARYING names. For
1578 instance,
1580 x_3 = y_5;
1581 if (x_3 > y_5)
1584 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1585 always false. */
1587 static void
1588 extract_range_from_ssa_name (value_range *vr, tree var)
1590 value_range *var_vr = get_value_range (var);
1592 if (var_vr->type != VR_VARYING)
1593 copy_value_range (vr, var_vr);
1594 else
1595 set_value_range (vr, VR_RANGE, var, var, NULL);
1597 add_equivalence (&vr->equiv, var);
1601 /* Wrapper around int_const_binop. If the operation overflows and
1602 overflow is undefined, then adjust the result to be
1603 -INF or +INF depending on CODE, VAL1 and VAL2. Sets *OVERFLOW_P
1604 to whether the operation overflowed. For division by zero
1605 the result is indeterminate but *OVERFLOW_P is set. */
1607 static wide_int
1608 vrp_int_const_binop (enum tree_code code, tree val1, tree val2,
1609 bool *overflow_p)
1611 bool overflow = false;
1612 signop sign = TYPE_SIGN (TREE_TYPE (val1));
1613 wide_int res;
1615 switch (code)
1617 case RSHIFT_EXPR:
1618 case LSHIFT_EXPR:
1620 wide_int wval2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1)));
1621 if (wi::neg_p (wval2))
1623 wval2 = -wval2;
1624 if (code == RSHIFT_EXPR)
1625 code = LSHIFT_EXPR;
1626 else
1627 code = RSHIFT_EXPR;
1630 if (code == RSHIFT_EXPR)
1631 /* It's unclear from the C standard whether shifts can overflow.
1632 The following code ignores overflow; perhaps a C standard
1633 interpretation ruling is needed. */
1634 res = wi::rshift (val1, wval2, sign);
1635 else
1636 res = wi::lshift (val1, wval2);
1637 break;
1640 case MULT_EXPR:
1641 res = wi::mul (val1, val2, sign, &overflow);
1642 break;
1644 case TRUNC_DIV_EXPR:
1645 case EXACT_DIV_EXPR:
1646 if (val2 == 0)
1648 *overflow_p = true;
1649 return res;
1651 else
1652 res = wi::div_trunc (val1, val2, sign, &overflow);
1653 break;
1655 case FLOOR_DIV_EXPR:
1656 if (val2 == 0)
1658 *overflow_p = true;
1659 return res;
1661 res = wi::div_floor (val1, val2, sign, &overflow);
1662 break;
1664 case CEIL_DIV_EXPR:
1665 if (val2 == 0)
1667 *overflow_p = true;
1668 return res;
1670 res = wi::div_ceil (val1, val2, sign, &overflow);
1671 break;
1673 case ROUND_DIV_EXPR:
1674 if (val2 == 0)
1676 *overflow_p = 0;
1677 return res;
1679 res = wi::div_round (val1, val2, sign, &overflow);
1680 break;
1682 default:
1683 gcc_unreachable ();
1686 *overflow_p = overflow;
1688 if (overflow
1689 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1691 /* If the operation overflowed return -INF or +INF depending
1692 on the operation and the combination of signs of the operands. */
1693 int sgn1 = tree_int_cst_sgn (val1);
1694 int sgn2 = tree_int_cst_sgn (val2);
1696 /* Notice that we only need to handle the restricted set of
1697 operations handled by extract_range_from_binary_expr.
1698 Among them, only multiplication, addition and subtraction
1699 can yield overflow without overflown operands because we
1700 are working with integral types only... except in the
1701 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1702 for division too. */
1704 /* For multiplication, the sign of the overflow is given
1705 by the comparison of the signs of the operands. */
1706 if ((code == MULT_EXPR && sgn1 == sgn2)
1707 /* For addition, the operands must be of the same sign
1708 to yield an overflow. Its sign is therefore that
1709 of one of the operands, for example the first. */
1710 || (code == PLUS_EXPR && sgn1 >= 0)
1711 /* For subtraction, operands must be of
1712 different signs to yield an overflow. Its sign is
1713 therefore that of the first operand or the opposite of
1714 that of the second operand. A first operand of 0 counts
1715 as positive here, for the corner case 0 - (-INF), which
1716 overflows, but must yield +INF. */
1717 || (code == MINUS_EXPR && sgn1 >= 0)
1718 /* For division, the only case is -INF / -1 = +INF. */
1719 || code == TRUNC_DIV_EXPR
1720 || code == FLOOR_DIV_EXPR
1721 || code == CEIL_DIV_EXPR
1722 || code == EXACT_DIV_EXPR
1723 || code == ROUND_DIV_EXPR)
1724 return wi::max_value (TYPE_PRECISION (TREE_TYPE (val1)),
1725 TYPE_SIGN (TREE_TYPE (val1)));
1726 else
1727 return wi::min_value (TYPE_PRECISION (TREE_TYPE (val1)),
1728 TYPE_SIGN (TREE_TYPE (val1)));
1731 return res;
1735 /* For range VR compute two wide_int bitmasks. In *MAY_BE_NONZERO
1736 bitmask if some bit is unset, it means for all numbers in the range
1737 the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO
1738 bitmask if some bit is set, it means for all numbers in the range
1739 the bit is 1, otherwise it might be 0 or 1. */
1741 static bool
1742 zero_nonzero_bits_from_vr (const tree expr_type,
1743 value_range *vr,
1744 wide_int *may_be_nonzero,
1745 wide_int *must_be_nonzero)
1747 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
1748 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
1749 if (!range_int_cst_p (vr))
1750 return false;
1752 if (range_int_cst_singleton_p (vr))
1754 *may_be_nonzero = vr->min;
1755 *must_be_nonzero = *may_be_nonzero;
1757 else if (tree_int_cst_sgn (vr->min) >= 0
1758 || tree_int_cst_sgn (vr->max) < 0)
1760 wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
1761 *may_be_nonzero = wi::bit_or (vr->min, vr->max);
1762 *must_be_nonzero = wi::bit_and (vr->min, vr->max);
1763 if (xor_mask != 0)
1765 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
1766 may_be_nonzero->get_precision ());
1767 *may_be_nonzero = *may_be_nonzero | mask;
1768 *must_be_nonzero = must_be_nonzero->and_not (mask);
1772 return true;
1775 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
1776 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
1777 false otherwise. If *AR can be represented with a single range
1778 *VR1 will be VR_UNDEFINED. */
1780 static bool
1781 ranges_from_anti_range (value_range *ar,
1782 value_range *vr0, value_range *vr1)
1784 tree type = TREE_TYPE (ar->min);
1786 vr0->type = VR_UNDEFINED;
1787 vr1->type = VR_UNDEFINED;
1789 if (ar->type != VR_ANTI_RANGE
1790 || TREE_CODE (ar->min) != INTEGER_CST
1791 || TREE_CODE (ar->max) != INTEGER_CST
1792 || !vrp_val_min (type)
1793 || !vrp_val_max (type))
1794 return false;
1796 if (!vrp_val_is_min (ar->min))
1798 vr0->type = VR_RANGE;
1799 vr0->min = vrp_val_min (type);
1800 vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1));
1802 if (!vrp_val_is_max (ar->max))
1804 vr1->type = VR_RANGE;
1805 vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1));
1806 vr1->max = vrp_val_max (type);
1808 if (vr0->type == VR_UNDEFINED)
1810 *vr0 = *vr1;
1811 vr1->type = VR_UNDEFINED;
1814 return vr0->type != VR_UNDEFINED;
1817 /* Helper to extract a value-range *VR for a multiplicative operation
1818 *VR0 CODE *VR1. */
1820 static void
1821 extract_range_from_multiplicative_op_1 (value_range *vr,
1822 enum tree_code code,
1823 value_range *vr0, value_range *vr1)
1825 enum value_range_type rtype;
1826 wide_int val, min, max;
1827 bool sop;
1828 tree type;
1830 /* Multiplications, divisions and shifts are a bit tricky to handle,
1831 depending on the mix of signs we have in the two ranges, we
1832 need to operate on different values to get the minimum and
1833 maximum values for the new range. One approach is to figure
1834 out all the variations of range combinations and do the
1835 operations.
1837 However, this involves several calls to compare_values and it
1838 is pretty convoluted. It's simpler to do the 4 operations
1839 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP
1840 MAX1) and then figure the smallest and largest values to form
1841 the new range. */
1842 gcc_assert (code == MULT_EXPR
1843 || code == TRUNC_DIV_EXPR
1844 || code == FLOOR_DIV_EXPR
1845 || code == CEIL_DIV_EXPR
1846 || code == EXACT_DIV_EXPR
1847 || code == ROUND_DIV_EXPR
1848 || code == RSHIFT_EXPR
1849 || code == LSHIFT_EXPR);
1850 gcc_assert ((vr0->type == VR_RANGE
1851 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
1852 && vr0->type == vr1->type);
1854 rtype = vr0->type;
1855 type = TREE_TYPE (vr0->min);
1856 signop sgn = TYPE_SIGN (type);
1858 /* Compute the 4 cross operations and their minimum and maximum value. */
1859 sop = false;
1860 val = vrp_int_const_binop (code, vr0->min, vr1->min, &sop);
1861 if (! sop)
1862 min = max = val;
/* When VR1 is a singleton, VR0->MIN op VR1->MAX equals the product
   just computed, so that cross operation is skipped.  */
1864 if (vr1->max == vr1->min)
1866 else if (! sop)
1868 val = vrp_int_const_binop (code, vr0->min, vr1->max, &sop);
1869 if (! sop)
1871 if (wi::lt_p (val, min, sgn))
1872 min = val;
1873 else if (wi::gt_p (val, max, sgn))
1874 max = val;
/* Likewise skip VR0->MAX op VR1->MIN when VR0 is a singleton.  */
1878 if (vr0->max == vr0->min)
1880 else if (! sop)
1882 val = vrp_int_const_binop (code, vr0->max, vr1->min, &sop);
1883 if (! sop)
1885 if (wi::lt_p (val, min, sgn))
1886 min = val;
1887 else if (wi::gt_p (val, max, sgn))
1888 max = val;
/* And skip VR0->MAX op VR1->MAX when either operand is a singleton,
   since it duplicates one of the earlier cross operations.  */
1892 if (vr0->min == vr0->max || vr1->min == vr1->max)
1894 else if (! sop)
1896 val = vrp_int_const_binop (code, vr0->max, vr1->max, &sop);
1897 if (! sop)
1899 if (wi::lt_p (val, min, sgn))
1900 min = val;
1901 else if (wi::gt_p (val, max, sgn))
1902 max = val;
1906 /* If either operation overflowed, drop to VARYING. */
1907 if (sop)
1909 set_value_range_to_varying (vr);
1910 return;
1913 /* If the new range has its limits swapped around (MIN > MAX),
1914 then the operation caused one of them to wrap around, mark
1915 the new range VARYING. */
1916 if (wi::gt_p (min, max, sgn))
1918 set_value_range_to_varying (vr);
1919 return;
1922 /* We punt for [-INF, +INF].
1923 We learn nothing when we have INF on both sides.
1924 Note that we do accept [-INF, -INF] and [+INF, +INF]. */
1925 if (wi::eq_p (min, wi::min_value (TYPE_PRECISION (type), sgn))
1926 && wi::eq_p (max, wi::max_value (TYPE_PRECISION (type), sgn)))
1928 set_value_range_to_varying (vr);
1929 return;
1932 set_value_range (vr, rtype,
1933 wide_int_to_tree (type, min),
1934 wide_int_to_tree (type, max), NULL);
1937 /* Extract range information from a binary operation CODE based on
1938 the ranges of each of its operands *VR0 and *VR1 with resulting
1939 type EXPR_TYPE. The resulting range is stored in *VR. */
1941 static void
1942 extract_range_from_binary_expr_1 (value_range *vr,
1943 enum tree_code code, tree expr_type,
1944 value_range *vr0_, value_range *vr1_)
1946 value_range vr0 = *vr0_, vr1 = *vr1_;
1947 value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
1948 enum value_range_type type;
1949 tree min = NULL_TREE, max = NULL_TREE;
1950 int cmp;
1952 if (!INTEGRAL_TYPE_P (expr_type)
1953 && !POINTER_TYPE_P (expr_type))
1955 set_value_range_to_varying (vr);
1956 return;
1959 /* Not all binary expressions can be applied to ranges in a
1960 meaningful way. Handle only arithmetic operations. */
1961 if (code != PLUS_EXPR
1962 && code != MINUS_EXPR
1963 && code != POINTER_PLUS_EXPR
1964 && code != MULT_EXPR
1965 && code != TRUNC_DIV_EXPR
1966 && code != FLOOR_DIV_EXPR
1967 && code != CEIL_DIV_EXPR
1968 && code != EXACT_DIV_EXPR
1969 && code != ROUND_DIV_EXPR
1970 && code != TRUNC_MOD_EXPR
1971 && code != RSHIFT_EXPR
1972 && code != LSHIFT_EXPR
1973 && code != MIN_EXPR
1974 && code != MAX_EXPR
1975 && code != BIT_AND_EXPR
1976 && code != BIT_IOR_EXPR
1977 && code != BIT_XOR_EXPR)
1979 set_value_range_to_varying (vr);
1980 return;
1983 /* If both ranges are UNDEFINED, so is the result. */
1984 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
1986 set_value_range_to_undefined (vr);
1987 return;
1989 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
1990 code. At some point we may want to special-case operations that
1991 have UNDEFINED result for all or some value-ranges of the not UNDEFINED
1992 operand. */
1993 else if (vr0.type == VR_UNDEFINED)
1994 set_value_range_to_varying (&vr0);
1995 else if (vr1.type == VR_UNDEFINED)
1996 set_value_range_to_varying (&vr1);
1998 /* We get imprecise results from ranges_from_anti_range when
1999 code is EXACT_DIV_EXPR. We could mask out bits in the resulting
2000 range, but then we also need to hack up vrp_meet. It's just
2001 easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR. */
2002 if (code == EXACT_DIV_EXPR
2003 && vr0.type == VR_ANTI_RANGE
2004 && vr0.min == vr0.max
2005 && integer_zerop (vr0.min))
2007 set_value_range_to_nonnull (vr, expr_type);
2008 return;
2011 /* Now canonicalize anti-ranges to ranges when they are not symbolic
2012 and express ~[] op X as ([]' op X) U ([]'' op X). */
2013 if (vr0.type == VR_ANTI_RANGE
2014 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2016 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
2017 if (vrtem1.type != VR_UNDEFINED)
2019 value_range vrres = VR_INITIALIZER;
2020 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2021 &vrtem1, vr1_);
2022 vrp_meet (vr, &vrres);
2024 return;
2026 /* Likewise for X op ~[]. */
2027 if (vr1.type == VR_ANTI_RANGE
2028 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
2030 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
2031 if (vrtem1.type != VR_UNDEFINED)
2033 value_range vrres = VR_INITIALIZER;
2034 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2035 vr0_, &vrtem1);
2036 vrp_meet (vr, &vrres);
2038 return;
2041 /* The type of the resulting value range defaults to VR0.TYPE. */
2042 type = vr0.type;
2044 /* Refuse to operate on VARYING ranges, ranges of different kinds
2045 and symbolic ranges. As an exception, we allow BIT_{AND,IOR}
2046 because we may be able to derive a useful range even if one of
2047 the operands is VR_VARYING or symbolic range. Similarly for
2048 divisions, MIN/MAX and PLUS/MINUS.
2050 TODO, we may be able to derive anti-ranges in some cases. */
2051 if (code != BIT_AND_EXPR
2052 && code != BIT_IOR_EXPR
2053 && code != TRUNC_DIV_EXPR
2054 && code != FLOOR_DIV_EXPR
2055 && code != CEIL_DIV_EXPR
2056 && code != EXACT_DIV_EXPR
2057 && code != ROUND_DIV_EXPR
2058 && code != TRUNC_MOD_EXPR
2059 && code != MIN_EXPR
2060 && code != MAX_EXPR
2061 && code != PLUS_EXPR
2062 && code != MINUS_EXPR
2063 && code != RSHIFT_EXPR
2064 && (vr0.type == VR_VARYING
2065 || vr1.type == VR_VARYING
2066 || vr0.type != vr1.type
2067 || symbolic_range_p (&vr0)
2068 || symbolic_range_p (&vr1)))
2070 set_value_range_to_varying (vr);
2071 return;
2074 /* Now evaluate the expression to determine the new range. */
2075 if (POINTER_TYPE_P (expr_type))
2077 if (code == MIN_EXPR || code == MAX_EXPR)
2079 /* For MIN/MAX expressions with pointers, we only care about
2080 nullness, if both are non null, then the result is nonnull.
2081 If both are null, then the result is null. Otherwise they
2082 are varying. */
2083 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2084 set_value_range_to_nonnull (vr, expr_type);
2085 else if (range_is_null (&vr0) && range_is_null (&vr1))
2086 set_value_range_to_null (vr, expr_type);
2087 else
2088 set_value_range_to_varying (vr);
2090 else if (code == POINTER_PLUS_EXPR)
2092 /* For pointer types, we are really only interested in asserting
2093 whether the expression evaluates to non-NULL. */
2094 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2095 set_value_range_to_nonnull (vr, expr_type);
2096 else if (range_is_null (&vr0) && range_is_null (&vr1))
2097 set_value_range_to_null (vr, expr_type);
2098 else
2099 set_value_range_to_varying (vr);
2101 else if (code == BIT_AND_EXPR)
2103 /* For pointer types, we are really only interested in asserting
2104 whether the expression evaluates to non-NULL. */
2105 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2106 set_value_range_to_nonnull (vr, expr_type);
2107 else if (range_is_null (&vr0) || range_is_null (&vr1))
2108 set_value_range_to_null (vr, expr_type);
2109 else
2110 set_value_range_to_varying (vr);
2112 else
2113 set_value_range_to_varying (vr);
2115 return;
2118 /* For integer ranges, apply the operation to each end of the
2119 range and see what we end up with. */
2120 if (code == PLUS_EXPR || code == MINUS_EXPR)
2122 const bool minus_p = (code == MINUS_EXPR);
2123 tree min_op0 = vr0.min;
2124 tree min_op1 = minus_p ? vr1.max : vr1.min;
2125 tree max_op0 = vr0.max;
2126 tree max_op1 = minus_p ? vr1.min : vr1.max;
2127 tree sym_min_op0 = NULL_TREE;
2128 tree sym_min_op1 = NULL_TREE;
2129 tree sym_max_op0 = NULL_TREE;
2130 tree sym_max_op1 = NULL_TREE;
2131 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
2133 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
2134 single-symbolic ranges, try to compute the precise resulting range,
2135 but only if we know that this resulting range will also be constant
2136 or single-symbolic. */
2137 if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
2138 && (TREE_CODE (min_op0) == INTEGER_CST
2139 || (sym_min_op0
2140 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
2141 && (TREE_CODE (min_op1) == INTEGER_CST
2142 || (sym_min_op1
2143 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
2144 && (!(sym_min_op0 && sym_min_op1)
2145 || (sym_min_op0 == sym_min_op1
2146 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
2147 && (TREE_CODE (max_op0) == INTEGER_CST
2148 || (sym_max_op0
2149 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
2150 && (TREE_CODE (max_op1) == INTEGER_CST
2151 || (sym_max_op1
2152 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
2153 && (!(sym_max_op0 && sym_max_op1)
2154 || (sym_max_op0 == sym_max_op1
2155 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
2157 const signop sgn = TYPE_SIGN (expr_type);
2158 const unsigned int prec = TYPE_PRECISION (expr_type);
2159 wide_int type_min, type_max, wmin, wmax;
2160 int min_ovf = 0;
2161 int max_ovf = 0;
2163 /* Get the lower and upper bounds of the type. */
2164 if (TYPE_OVERFLOW_WRAPS (expr_type))
2166 type_min = wi::min_value (prec, sgn);
2167 type_max = wi::max_value (prec, sgn);
2169 else
2171 type_min = vrp_val_min (expr_type);
2172 type_max = vrp_val_max (expr_type);
2175 /* Combine the lower bounds, if any. */
2176 if (min_op0 && min_op1)
2178 if (minus_p)
2180 wmin = wi::sub (min_op0, min_op1);
2182 /* Check for overflow. */
2183 if (wi::cmp (0, min_op1, sgn)
2184 != wi::cmp (wmin, min_op0, sgn))
2185 min_ovf = wi::cmp (min_op0, min_op1, sgn);
2187 else
2189 wmin = wi::add (min_op0, min_op1);
2191 /* Check for overflow. */
2192 if (wi::cmp (min_op1, 0, sgn)
2193 != wi::cmp (wmin, min_op0, sgn))
2194 min_ovf = wi::cmp (min_op0, wmin, sgn);
2197 else if (min_op0)
2198 wmin = min_op0;
2199 else if (min_op1)
2201 if (minus_p)
2203 wmin = wi::neg (min_op1);
2205 /* Check for overflow. */
2206 if (sgn == SIGNED && wi::neg_p (min_op1) && wi::neg_p (wmin))
2207 min_ovf = 1;
2208 else if (sgn == UNSIGNED && wi::ne_p (min_op1, 0))
2209 min_ovf = -1;
2211 else
2212 wmin = min_op1;
2214 else
2215 wmin = wi::shwi (0, prec);
2217 /* Combine the upper bounds, if any. */
2218 if (max_op0 && max_op1)
2220 if (minus_p)
2222 wmax = wi::sub (max_op0, max_op1);
2224 /* Check for overflow. */
2225 if (wi::cmp (0, max_op1, sgn)
2226 != wi::cmp (wmax, max_op0, sgn))
2227 max_ovf = wi::cmp (max_op0, max_op1, sgn);
2229 else
2231 wmax = wi::add (max_op0, max_op1);
2233 if (wi::cmp (max_op1, 0, sgn)
2234 != wi::cmp (wmax, max_op0, sgn))
2235 max_ovf = wi::cmp (max_op0, wmax, sgn);
2238 else if (max_op0)
2239 wmax = max_op0;
2240 else if (max_op1)
2242 if (minus_p)
2244 wmax = wi::neg (max_op1);
2246 /* Check for overflow. */
2247 if (sgn == SIGNED && wi::neg_p (max_op1) && wi::neg_p (wmax))
2248 max_ovf = 1;
2249 else if (sgn == UNSIGNED && wi::ne_p (max_op1, 0))
2250 max_ovf = -1;
2252 else
2253 wmax = max_op1;
2255 else
2256 wmax = wi::shwi (0, prec);
2258 /* Check for type overflow. */
2259 if (min_ovf == 0)
2261 if (wi::cmp (wmin, type_min, sgn) == -1)
2262 min_ovf = -1;
2263 else if (wi::cmp (wmin, type_max, sgn) == 1)
2264 min_ovf = 1;
2266 if (max_ovf == 0)
2268 if (wi::cmp (wmax, type_min, sgn) == -1)
2269 max_ovf = -1;
2270 else if (wi::cmp (wmax, type_max, sgn) == 1)
2271 max_ovf = 1;
2274 /* If we have overflow for the constant part and the resulting
2275 range will be symbolic, drop to VR_VARYING. */
2276 if ((min_ovf && sym_min_op0 != sym_min_op1)
2277 || (max_ovf && sym_max_op0 != sym_max_op1))
2279 set_value_range_to_varying (vr);
2280 return;
2283 if (TYPE_OVERFLOW_WRAPS (expr_type))
2285 /* If overflow wraps, truncate the values and adjust the
2286 range kind and bounds appropriately. */
2287 wide_int tmin = wide_int::from (wmin, prec, sgn);
2288 wide_int tmax = wide_int::from (wmax, prec, sgn);
2289 if (min_ovf == max_ovf)
2291 /* No overflow or both overflow or underflow. The
2292 range kind stays VR_RANGE. */
2293 min = wide_int_to_tree (expr_type, tmin);
2294 max = wide_int_to_tree (expr_type, tmax);
2296 else if ((min_ovf == -1 && max_ovf == 0)
2297 || (max_ovf == 1 && min_ovf == 0))
2299 /* Min underflow or max overflow. The range kind
2300 changes to VR_ANTI_RANGE. */
2301 bool covers = false;
2302 wide_int tem = tmin;
2303 type = VR_ANTI_RANGE;
2304 tmin = tmax + 1;
2305 if (wi::cmp (tmin, tmax, sgn) < 0)
2306 covers = true;
2307 tmax = tem - 1;
2308 if (wi::cmp (tmax, tem, sgn) > 0)
2309 covers = true;
2310 /* If the anti-range would cover nothing, drop to varying.
2311 Likewise if the anti-range bounds are outside of the
2312 types values. */
2313 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
2315 set_value_range_to_varying (vr);
2316 return;
2318 min = wide_int_to_tree (expr_type, tmin);
2319 max = wide_int_to_tree (expr_type, tmax);
2321 else
2323 /* Other underflow and/or overflow, drop to VR_VARYING. */
2324 set_value_range_to_varying (vr);
2325 return;
2328 else
2330 /* If overflow does not wrap, saturate to the types min/max
2331 value. */
2332 if (min_ovf == -1)
2333 min = wide_int_to_tree (expr_type, type_min);
2334 else if (min_ovf == 1)
2335 min = wide_int_to_tree (expr_type, type_max);
2336 else
2337 min = wide_int_to_tree (expr_type, wmin);
2339 if (max_ovf == -1)
2340 max = wide_int_to_tree (expr_type, type_min);
2341 else if (max_ovf == 1)
2342 max = wide_int_to_tree (expr_type, type_max);
2343 else
2344 max = wide_int_to_tree (expr_type, wmax);
2347 /* If the result lower bound is constant, we're done;
2348 otherwise, build the symbolic lower bound. */
2349 if (sym_min_op0 == sym_min_op1)
2351 else if (sym_min_op0)
2352 min = build_symbolic_expr (expr_type, sym_min_op0,
2353 neg_min_op0, min);
2354 else if (sym_min_op1)
2356 /* We may not negate if that might introduce
2357 undefined overflow. */
2358 if (! minus_p
2359 || neg_min_op1
2360 || TYPE_OVERFLOW_WRAPS (expr_type))
2361 min = build_symbolic_expr (expr_type, sym_min_op1,
2362 neg_min_op1 ^ minus_p, min);
2363 else
2364 min = NULL_TREE;
2367 /* Likewise for the upper bound. */
2368 if (sym_max_op0 == sym_max_op1)
2370 else if (sym_max_op0)
2371 max = build_symbolic_expr (expr_type, sym_max_op0,
2372 neg_max_op0, max);
2373 else if (sym_max_op1)
2375 /* We may not negate if that might introduce
2376 undefined overflow. */
2377 if (! minus_p
2378 || neg_max_op1
2379 || TYPE_OVERFLOW_WRAPS (expr_type))
2380 max = build_symbolic_expr (expr_type, sym_max_op1,
2381 neg_max_op1 ^ minus_p, max);
2382 else
2383 max = NULL_TREE;
2386 else
2388 /* For other cases, for example if we have a PLUS_EXPR with two
2389 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
2390 to compute a precise range for such a case.
2391 ??? General even mixed range kind operations can be expressed
2392 by for example transforming ~[3, 5] + [1, 2] to range-only
2393 operations and a union primitive:
2394 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
2395 [-INF+1, 4] U [6, +INF(OVF)]
2396 though usually the union is not exactly representable with
2397 a single range or anti-range as the above is
2398 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
2399 but one could use a scheme similar to equivalences for this. */
2400 set_value_range_to_varying (vr);
2401 return;
2404 else if (code == MIN_EXPR
2405 || code == MAX_EXPR)
2407 if (vr0.type == VR_RANGE
2408 && !symbolic_range_p (&vr0))
2410 type = VR_RANGE;
2411 if (vr1.type == VR_RANGE
2412 && !symbolic_range_p (&vr1))
2414 /* For operations that make the resulting range directly
2415 proportional to the original ranges, apply the operation to
2416 the same end of each range. */
2417 min = int_const_binop (code, vr0.min, vr1.min);
2418 max = int_const_binop (code, vr0.max, vr1.max);
2420 else if (code == MIN_EXPR)
2422 min = vrp_val_min (expr_type);
2423 max = vr0.max;
2425 else if (code == MAX_EXPR)
2427 min = vr0.min;
2428 max = vrp_val_max (expr_type);
2431 else if (vr1.type == VR_RANGE
2432 && !symbolic_range_p (&vr1))
2434 type = VR_RANGE;
2435 if (code == MIN_EXPR)
2437 min = vrp_val_min (expr_type);
2438 max = vr1.max;
2440 else if (code == MAX_EXPR)
2442 min = vr1.min;
2443 max = vrp_val_max (expr_type);
2446 else
2448 set_value_range_to_varying (vr);
2449 return;
2452 else if (code == MULT_EXPR)
2454 /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
2455 drop to varying. This test requires 2*prec bits if both
2456 operands are signed and 2*prec + 2 bits if either is not. */
2458 signop sign = TYPE_SIGN (expr_type);
2459 unsigned int prec = TYPE_PRECISION (expr_type);
2461 if (range_int_cst_p (&vr0)
2462 && range_int_cst_p (&vr1)
2463 && TYPE_OVERFLOW_WRAPS (expr_type))
2465 typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
2466 typedef generic_wide_int
2467 <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
2468 vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
2469 vrp_int size = sizem1 + 1;
2471 /* Extend the values using the sign of the result to PREC2.
2472 From here on out, everything is just signed math no matter
2473 what the input types were. */
2474 vrp_int min0 = vrp_int_cst (vr0.min);
2475 vrp_int max0 = vrp_int_cst (vr0.max);
2476 vrp_int min1 = vrp_int_cst (vr1.min);
2477 vrp_int max1 = vrp_int_cst (vr1.max);
2478 /* Canonicalize the intervals. */
2479 if (sign == UNSIGNED)
2481 if (wi::ltu_p (size, min0 + max0))
2483 min0 -= size;
2484 max0 -= size;
2487 if (wi::ltu_p (size, min1 + max1))
2489 min1 -= size;
2490 max1 -= size;
2494 vrp_int prod0 = min0 * min1;
2495 vrp_int prod1 = min0 * max1;
2496 vrp_int prod2 = max0 * min1;
2497 vrp_int prod3 = max0 * max1;
2499 /* Sort the 4 products so that min is in prod0 and max is in
2500 prod3. */
2501 /* min0min1 > max0max1 */
2502 if (prod0 > prod3)
2503 std::swap (prod0, prod3);
2505 /* min0max1 > max0min1 */
2506 if (prod1 > prod2)
2507 std::swap (prod1, prod2);
2509 if (prod0 > prod1)
2510 std::swap (prod0, prod1);
2512 if (prod2 > prod3)
2513 std::swap (prod2, prod3);
2515 /* diff = max - min. */
2516 prod2 = prod3 - prod0;
2517 if (wi::geu_p (prod2, sizem1))
2519 /* the range covers all values. */
2520 set_value_range_to_varying (vr);
2521 return;
2524 /* The following should handle the wrapping and selecting
2525 VR_ANTI_RANGE for us. */
2526 min = wide_int_to_tree (expr_type, prod0);
2527 max = wide_int_to_tree (expr_type, prod3);
2528 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
2529 return;
2532 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2533 drop to VR_VARYING. It would take more effort to compute a
2534 precise range for such a case. For example, if we have
2535 op0 == 65536 and op1 == 65536 with their ranges both being
2536 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2537 we cannot claim that the product is in ~[0,0]. Note that we
2538 are guaranteed to have vr0.type == vr1.type at this
2539 point. */
2540 if (vr0.type == VR_ANTI_RANGE
2541 && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2543 set_value_range_to_varying (vr);
2544 return;
2547 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2548 return;
2550 else if (code == RSHIFT_EXPR
2551 || code == LSHIFT_EXPR)
2553 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2554 then drop to VR_VARYING. Outside of this range we get undefined
2555 behavior from the shift operation. We cannot even trust
2556 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2557 shifts, and the operation at the tree level may be widened. */
2558 if (range_int_cst_p (&vr1)
2559 && compare_tree_int (vr1.min, 0) >= 0
2560 && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
2562 if (code == RSHIFT_EXPR)
2564 /* Even if vr0 is VARYING or otherwise not usable, we can derive
2565 useful ranges just from the shift count. E.g.
2566 x >> 63 for signed 64-bit x is always [-1, 0]. */
2567 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2569 vr0.type = type = VR_RANGE;
2570 vr0.min = vrp_val_min (expr_type);
2571 vr0.max = vrp_val_max (expr_type);
2573 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2574 return;
2576 /* We can map lshifts by constants to MULT_EXPR handling. */
2577 else if (code == LSHIFT_EXPR
2578 && range_int_cst_singleton_p (&vr1))
2580 bool saved_flag_wrapv;
2581 value_range vr1p = VR_INITIALIZER;
2582 vr1p.type = VR_RANGE;
2583 vr1p.min = (wide_int_to_tree
2584 (expr_type,
2585 wi::set_bit_in_zero (tree_to_shwi (vr1.min),
2586 TYPE_PRECISION (expr_type))));
2587 vr1p.max = vr1p.min;
2588 /* We have to use a wrapping multiply though as signed overflow
2589 on lshifts is implementation defined in C89. */
2590 saved_flag_wrapv = flag_wrapv;
2591 flag_wrapv = 1;
2592 extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
2593 &vr0, &vr1p);
2594 flag_wrapv = saved_flag_wrapv;
2595 return;
2597 else if (code == LSHIFT_EXPR
2598 && range_int_cst_p (&vr0))
2600 int prec = TYPE_PRECISION (expr_type);
2601 int overflow_pos = prec;
2602 int bound_shift;
2603 wide_int low_bound, high_bound;
2604 bool uns = TYPE_UNSIGNED (expr_type);
2605 bool in_bounds = false;
2607 if (!uns)
2608 overflow_pos -= 1;
2610 bound_shift = overflow_pos - tree_to_shwi (vr1.max);
2611 /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2612 overflow. However, for that to happen, vr1.max needs to be
2613 zero, which means vr1 is a singleton range of zero, which
2614 means it should be handled by the previous LSHIFT_EXPR
2615 if-clause. */
2616 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2617 wide_int complement = ~(bound - 1);
2619 if (uns)
2621 low_bound = bound;
2622 high_bound = complement;
2623 if (wi::ltu_p (vr0.max, low_bound))
2625 /* [5, 6] << [1, 2] == [10, 24]. */
2626 /* We're shifting out only zeroes, the value increases
2627 monotonically. */
2628 in_bounds = true;
2630 else if (wi::ltu_p (high_bound, vr0.min))
2632 /* [0xffffff00, 0xffffffff] << [1, 2]
2633 == [0xfffffc00, 0xfffffffe]. */
2634 /* We're shifting out only ones, the value decreases
2635 monotonically. */
2636 in_bounds = true;
2639 else
2641 /* [-1, 1] << [1, 2] == [-4, 4]. */
2642 low_bound = complement;
2643 high_bound = bound;
2644 if (wi::lts_p (vr0.max, high_bound)
2645 && wi::lts_p (low_bound, vr0.min))
2647 /* For non-negative numbers, we're shifting out only
2648 zeroes, the value increases monotonically.
2649 For negative numbers, we're shifting out only ones, the
2650 value decreases monotonically. */
2651 in_bounds = true;
2655 if (in_bounds)
2657 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2658 return;
2662 set_value_range_to_varying (vr);
2663 return;
2665 else if (code == TRUNC_DIV_EXPR
2666 || code == FLOOR_DIV_EXPR
2667 || code == CEIL_DIV_EXPR
2668 || code == EXACT_DIV_EXPR
2669 || code == ROUND_DIV_EXPR)
2671 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2673 /* For division, if op1 has VR_RANGE but op0 does not, something
2674 can be deduced just from that range. Say [min, max] / [4, max]
2675 gives [min / 4, max / 4] range. */
2676 if (vr1.type == VR_RANGE
2677 && !symbolic_range_p (&vr1)
2678 && range_includes_zero_p (vr1.min, vr1.max) == 0)
2680 vr0.type = type = VR_RANGE;
2681 vr0.min = vrp_val_min (expr_type);
2682 vr0.max = vrp_val_max (expr_type);
2684 else
2686 set_value_range_to_varying (vr);
2687 return;
2691 /* For divisions, if flag_non_call_exceptions is true, we must
2692 not eliminate a division by zero. */
2693 if (cfun->can_throw_non_call_exceptions
2694 && (vr1.type != VR_RANGE
2695 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2697 set_value_range_to_varying (vr);
2698 return;
2701 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2702 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2703 include 0. */
2704 if (vr0.type == VR_RANGE
2705 && (vr1.type != VR_RANGE
2706 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2708 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2709 int cmp;
2711 min = NULL_TREE;
2712 max = NULL_TREE;
2713 if (TYPE_UNSIGNED (expr_type)
2714 || value_range_nonnegative_p (&vr1))
2716 /* For unsigned division or when divisor is known
2717 to be non-negative, the range has to cover
2718 all numbers from 0 to max for positive max
2719 and all numbers from min to 0 for negative min. */
2720 cmp = compare_values (vr0.max, zero);
2721 if (cmp == -1)
2723 /* When vr0.max < 0, vr1.min != 0 and value
2724 ranges for dividend and divisor are available. */
2725 if (vr1.type == VR_RANGE
2726 && !symbolic_range_p (&vr0)
2727 && !symbolic_range_p (&vr1)
2728 && compare_values (vr1.min, zero) != 0)
2729 max = int_const_binop (code, vr0.max, vr1.min);
2730 else
2731 max = zero;
2733 else if (cmp == 0 || cmp == 1)
2734 max = vr0.max;
2735 else
2736 type = VR_VARYING;
2737 cmp = compare_values (vr0.min, zero);
2738 if (cmp == 1)
2740 /* For unsigned division when value ranges for dividend
2741 and divisor are available. */
2742 if (vr1.type == VR_RANGE
2743 && !symbolic_range_p (&vr0)
2744 && !symbolic_range_p (&vr1)
2745 && compare_values (vr1.max, zero) != 0)
2746 min = int_const_binop (code, vr0.min, vr1.max);
2747 else
2748 min = zero;
2750 else if (cmp == 0 || cmp == -1)
2751 min = vr0.min;
2752 else
2753 type = VR_VARYING;
2755 else
2757 /* Otherwise the range is -max .. max or min .. -min
2758 depending on which bound is bigger in absolute value,
2759 as the division can change the sign. */
2760 abs_extent_range (vr, vr0.min, vr0.max);
2761 return;
2763 if (type == VR_VARYING)
2765 set_value_range_to_varying (vr);
2766 return;
2769 else if (!symbolic_range_p (&vr0) && !symbolic_range_p (&vr1))
2771 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2772 return;
2775 else if (code == TRUNC_MOD_EXPR)
2777 if (range_is_null (&vr1))
2779 set_value_range_to_undefined (vr);
2780 return;
2782 /* ABS (A % B) < ABS (B) and either
2783 0 <= A % B <= A or A <= A % B <= 0. */
2784 type = VR_RANGE;
2785 signop sgn = TYPE_SIGN (expr_type);
2786 unsigned int prec = TYPE_PRECISION (expr_type);
2787 wide_int wmin, wmax, tmp;
2788 wide_int zero = wi::zero (prec);
2789 wide_int one = wi::one (prec);
2790 if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
2792 wmax = wi::sub (vr1.max, one);
2793 if (sgn == SIGNED)
2795 tmp = wi::sub (wi::minus_one (prec), vr1.min);
2796 wmax = wi::smax (wmax, tmp);
2799 else
2801 wmax = wi::max_value (prec, sgn);
2802 /* X % INT_MIN may be INT_MAX. */
2803 if (sgn == UNSIGNED)
2804 wmax = wmax - one;
2807 if (sgn == UNSIGNED)
2808 wmin = zero;
2809 else
2811 wmin = -wmax;
2812 if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
2814 tmp = vr0.min;
2815 if (wi::gts_p (tmp, zero))
2816 tmp = zero;
2817 wmin = wi::smax (wmin, tmp);
2821 if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
2823 tmp = vr0.max;
2824 if (sgn == SIGNED && wi::neg_p (tmp))
2825 tmp = zero;
2826 wmax = wi::min (wmax, tmp, sgn);
2829 min = wide_int_to_tree (expr_type, wmin);
2830 max = wide_int_to_tree (expr_type, wmax);
2832 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
2834 bool int_cst_range0, int_cst_range1;
2835 wide_int may_be_nonzero0, may_be_nonzero1;
2836 wide_int must_be_nonzero0, must_be_nonzero1;
2838 int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
2839 &may_be_nonzero0,
2840 &must_be_nonzero0);
2841 int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
2842 &may_be_nonzero1,
2843 &must_be_nonzero1);
2845 if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR)
2847 value_range *vr0p = NULL, *vr1p = NULL;
2848 if (range_int_cst_singleton_p (&vr1))
2850 vr0p = &vr0;
2851 vr1p = &vr1;
2853 else if (range_int_cst_singleton_p (&vr0))
2855 vr0p = &vr1;
2856 vr1p = &vr0;
2858 /* For op & or | attempt to optimize:
2859 [x, y] op z into [x op z, y op z]
2860 if z is a constant which (for op | its bitwise not) has n
2861 consecutive least significant bits cleared followed by m 1
2862 consecutive bits set immediately above it and either
2863 m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
2864 The least significant n bits of all the values in the range are
2865 cleared or set, the m bits above it are preserved and any bits
2866 above these are required to be the same for all values in the
2867 range. */
2868 if (vr0p && range_int_cst_p (vr0p))
2870 wide_int w = vr1p->min;
2871 int m = 0, n = 0;
2872 if (code == BIT_IOR_EXPR)
2873 w = ~w;
2874 if (wi::eq_p (w, 0))
2875 n = TYPE_PRECISION (expr_type);
2876 else
2878 n = wi::ctz (w);
2879 w = ~(w | wi::mask (n, false, w.get_precision ()));
2880 if (wi::eq_p (w, 0))
2881 m = TYPE_PRECISION (expr_type) - n;
2882 else
2883 m = wi::ctz (w) - n;
2885 wide_int mask = wi::mask (m + n, true, w.get_precision ());
2886 if (wi::eq_p (mask & vr0p->min, mask & vr0p->max))
2888 min = int_const_binop (code, vr0p->min, vr1p->min);
2889 max = int_const_binop (code, vr0p->max, vr1p->min);
2894 type = VR_RANGE;
2895 if (min && max)
2896 /* Optimized above already. */;
2897 else if (code == BIT_AND_EXPR)
2899 min = wide_int_to_tree (expr_type,
2900 must_be_nonzero0 & must_be_nonzero1);
2901 wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
2902 /* If both input ranges contain only negative values we can
2903 truncate the result range maximum to the minimum of the
2904 input range maxima. */
2905 if (int_cst_range0 && int_cst_range1
2906 && tree_int_cst_sgn (vr0.max) < 0
2907 && tree_int_cst_sgn (vr1.max) < 0)
2909 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
2910 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
2912 /* If either input range contains only non-negative values
2913 we can truncate the result range maximum to the respective
2914 maximum of the input range. */
2915 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
2916 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
2917 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
2918 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
2919 max = wide_int_to_tree (expr_type, wmax);
2920 cmp = compare_values (min, max);
2921 /* PR68217: In case of signed & sign-bit-CST should
2922 result in [-INF, 0] instead of [-INF, INF]. */
2923 if (cmp == -2 || cmp == 1)
2925 wide_int sign_bit
2926 = wi::set_bit_in_zero (TYPE_PRECISION (expr_type) - 1,
2927 TYPE_PRECISION (expr_type));
2928 if (!TYPE_UNSIGNED (expr_type)
2929 && ((value_range_constant_singleton (&vr0)
2930 && !wi::cmps (vr0.min, sign_bit))
2931 || (value_range_constant_singleton (&vr1)
2932 && !wi::cmps (vr1.min, sign_bit))))
2934 min = TYPE_MIN_VALUE (expr_type);
2935 max = build_int_cst (expr_type, 0);
2939 else if (code == BIT_IOR_EXPR)
2941 max = wide_int_to_tree (expr_type,
2942 may_be_nonzero0 | may_be_nonzero1);
2943 wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
2944 /* If the input ranges contain only positive values we can
2945 truncate the minimum of the result range to the maximum
2946 of the input range minima. */
2947 if (int_cst_range0 && int_cst_range1
2948 && tree_int_cst_sgn (vr0.min) >= 0
2949 && tree_int_cst_sgn (vr1.min) >= 0)
2951 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
2952 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
2954 /* If either input range contains only negative values
2955 we can truncate the minimum of the result range to the
2956 respective minimum range. */
2957 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
2958 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
2959 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
2960 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
2961 min = wide_int_to_tree (expr_type, wmin);
2963 else if (code == BIT_XOR_EXPR)
2965 wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
2966 | ~(may_be_nonzero0 | may_be_nonzero1));
2967 wide_int result_one_bits
2968 = (must_be_nonzero0.and_not (may_be_nonzero1)
2969 | must_be_nonzero1.and_not (may_be_nonzero0));
2970 max = wide_int_to_tree (expr_type, ~result_zero_bits);
2971 min = wide_int_to_tree (expr_type, result_one_bits);
2972 /* If the range has all positive or all negative values the
2973 result is better than VARYING. */
2974 if (tree_int_cst_sgn (min) < 0
2975 || tree_int_cst_sgn (max) >= 0)
2977 else
2978 max = min = NULL_TREE;
2981 else
2982 gcc_unreachable ();
2984 /* If either MIN or MAX overflowed, then set the resulting range to
2985 VARYING. */
2986 if (min == NULL_TREE
2987 || TREE_OVERFLOW_P (min)
2988 || max == NULL_TREE
2989 || TREE_OVERFLOW_P (max))
2991 set_value_range_to_varying (vr);
2992 return;
2995 /* We punt for [-INF, +INF].
2996 We learn nothing when we have INF on both sides.
2997 Note that we do accept [-INF, -INF] and [+INF, +INF]. */
2998 if (vrp_val_is_min (min) && vrp_val_is_max (max))
3000 set_value_range_to_varying (vr);
3001 return;
3004 cmp = compare_values (min, max);
3005 if (cmp == -2 || cmp == 1)
3007 /* If the new range has its limits swapped around (MIN > MAX),
3008 then the operation caused one of them to wrap around, mark
3009 the new range VARYING. */
3010 set_value_range_to_varying (vr);
3012 else
3013 set_value_range (vr, type, min, max, NULL);
3016 /* Extract range information from a binary expression OP0 CODE OP1 based on
3017 the ranges of each of its operands with resulting type EXPR_TYPE.
3018 The resulting range is stored in *VR. */
3020 static void
3021 extract_range_from_binary_expr (value_range *vr,
3022 enum tree_code code,
3023 tree expr_type, tree op0, tree op1)
3025 value_range vr0 = VR_INITIALIZER;
3026 value_range vr1 = VR_INITIALIZER;
3028 /* Get value ranges for each operand. For constant operands, create
3029 a new value range with the operand to simplify processing. */
3030 if (TREE_CODE (op0) == SSA_NAME)
3031 vr0 = *(get_value_range (op0));
3032 else if (is_gimple_min_invariant (op0))
3033 set_value_range_to_value (&vr0, op0, NULL);
3034 else
3035 set_value_range_to_varying (&vr0);
3037 if (TREE_CODE (op1) == SSA_NAME)
3038 vr1 = *(get_value_range (op1));
3039 else if (is_gimple_min_invariant (op1))
3040 set_value_range_to_value (&vr1, op1, NULL);
3041 else
3042 set_value_range_to_varying (&vr1);
3044 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
3046 /* Try harder for PLUS and MINUS if the range of one operand is symbolic
3047 and based on the other operand, for example if it was deduced from a
3048 symbolic comparison. When a bound of the range of the first operand
3049 is invariant, we set the corresponding bound of the new range to INF
3050 in order to avoid recursing on the range of the second operand. */
3051 if (vr->type == VR_VARYING
3052 && (code == PLUS_EXPR || code == MINUS_EXPR)
3053 && TREE_CODE (op1) == SSA_NAME
3054 && vr0.type == VR_RANGE
3055 && symbolic_range_based_on_p (&vr0, op1))
3057 const bool minus_p = (code == MINUS_EXPR);
3058 value_range n_vr1 = VR_INITIALIZER;
3060 /* Try with VR0 and [-INF, OP1]. */
3061 if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
3062 set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);
3064 /* Try with VR0 and [OP1, +INF]. */
3065 else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
3066 set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);
3068 /* Try with VR0 and [OP1, OP1]. */
3069 else
3070 set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);
3072 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
3075 if (vr->type == VR_VARYING
3076 && (code == PLUS_EXPR || code == MINUS_EXPR)
3077 && TREE_CODE (op0) == SSA_NAME
3078 && vr1.type == VR_RANGE
3079 && symbolic_range_based_on_p (&vr1, op0))
3081 const bool minus_p = (code == MINUS_EXPR);
3082 value_range n_vr0 = VR_INITIALIZER;
3084 /* Try with [-INF, OP0] and VR1. */
3085 if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
3086 set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);
3088 /* Try with [OP0, +INF] and VR1. */
3089 else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
3090 set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);
3092 /* Try with [OP0, OP0] and VR1. */
3093 else
3094 set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);
3096 extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
3099 /* If we didn't derive a range for MINUS_EXPR, and
3100 op1's range is ~[op0,op0] or vice-versa, then we
3101 can derive a non-null range. This happens often for
3102 pointer subtraction. */
3103 if (vr->type == VR_VARYING
3104 && code == MINUS_EXPR
3105 && TREE_CODE (op0) == SSA_NAME
3106 && ((vr0.type == VR_ANTI_RANGE
3107 && vr0.min == op1
3108 && vr0.min == vr0.max)
3109 || (vr1.type == VR_ANTI_RANGE
3110 && vr1.min == op0
3111 && vr1.min == vr1.max)))
3112 set_value_range_to_nonnull (vr, TREE_TYPE (op0));
3115 /* Extract range information from a unary operation CODE based on
3116 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
3117 The resulting range is stored in *VR. */
3119 void
3120 extract_range_from_unary_expr (value_range *vr,
3121 enum tree_code code, tree type,
3122 value_range *vr0_, tree op0_type)
3124 value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
3126 /* VRP only operates on integral and pointer types. */
3127 if (!(INTEGRAL_TYPE_P (op0_type)
3128 || POINTER_TYPE_P (op0_type))
3129 || !(INTEGRAL_TYPE_P (type)
3130 || POINTER_TYPE_P (type)))
3132 set_value_range_to_varying (vr);
3133 return;
3136 /* If VR0 is UNDEFINED, so is the result. */
3137 if (vr0.type == VR_UNDEFINED)
3139 set_value_range_to_undefined (vr);
3140 return;
3143 /* Handle operations that we express in terms of others. */
3144 if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
3146 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */
3147 copy_value_range (vr, &vr0);
3148 return;
3150 else if (code == NEGATE_EXPR)
3152 /* -X is simply 0 - X, so re-use existing code that also handles
3153 anti-ranges fine. */
3154 value_range zero = VR_INITIALIZER;
3155 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3156 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3157 return;
3159 else if (code == BIT_NOT_EXPR)
3161 /* ~X is simply -1 - X, so re-use existing code that also handles
3162 anti-ranges fine. */
3163 value_range minusone = VR_INITIALIZER;
3164 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3165 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3166 type, &minusone, &vr0);
3167 return;
3170 /* Now canonicalize anti-ranges to ranges when they are not symbolic
3171 and express op ~[] as (op []') U (op []''). */
3172 if (vr0.type == VR_ANTI_RANGE
3173 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
3175 extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
3176 if (vrtem1.type != VR_UNDEFINED)
3178 value_range vrres = VR_INITIALIZER;
3179 extract_range_from_unary_expr (&vrres, code, type,
3180 &vrtem1, op0_type)
/* The result is the union (vrp_meet) of the two sub-results.  */
3181 vrp_meet (vr, &vrres);
3183 return;
/* Type conversions (NOP_EXPR / CONVERT_EXPR).  */
3186 if (CONVERT_EXPR_CODE_P (code))
3188 tree inner_type = op0_type;
3189 tree outer_type = type;
3191 /* If the expression evaluates to a pointer, we are only interested in
3192 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
3193 if (POINTER_TYPE_P (type))
3195 if (range_is_nonnull (&vr0))
3196 set_value_range_to_nonnull (vr, type);
3197 else if (range_is_null (&vr0))
3198 set_value_range_to_null (vr, type);
3199 else
3200 set_value_range_to_varying (vr);
3201 return;
3204 /* If VR0 is varying and we increase the type precision, assume
3205 a full range for the following transformation. */
3206 if (vr0.type == VR_VARYING
3207 && INTEGRAL_TYPE_P (inner_type)
3208 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
3210 vr0.type = VR_RANGE;
3211 vr0.min = TYPE_MIN_VALUE (inner_type);
3212 vr0.max = TYPE_MAX_VALUE (inner_type);
3215 /* If VR0 is a constant range or anti-range and the conversion is
3216 not truncating we can convert the min and max values and
3217 canonicalize the resulting range. Otherwise we can do the
3218 conversion if the size of the range is less than what the
3219 precision of the target type can represent and the range is
3220 not an anti-range. */
3221 if ((vr0.type == VR_RANGE
3222 || vr0.type == VR_ANTI_RANGE)
3223 && TREE_CODE (vr0.min) == INTEGER_CST
3224 && TREE_CODE (vr0.max) == INTEGER_CST
3225 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
3226 || (vr0.type == VR_RANGE
3227 && integer_zerop (int_const_binop (RSHIFT_EXPR,
3228 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
3229 size_int (TYPE_PRECISION (outer_type)))))))
3231 tree new_min, new_max;
3232 new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
3233 0, false);
3234 new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
3235 0, false);
3236 set_and_canonicalize_value_range (vr, vr0.type,
3237 new_min, new_max, NULL);
3238 return;
3241 set_value_range_to_varying (vr);
3242 return;
3244 else if (code == ABS_EXPR)
3246 tree min, max;
3247 int cmp;
3249 /* Pass through vr0 in the easy cases. */
3250 if (TYPE_UNSIGNED (type)
3251 || value_range_nonnegative_p (&vr0))
3253 copy_value_range (vr, &vr0);
3254 return;
3257 /* For the remaining varying or symbolic ranges we can't do anything
3258 useful. */
3259 if (vr0.type == VR_VARYING
3260 || symbolic_range_p (&vr0))
3262 set_value_range_to_varying (vr);
3263 return;
3266 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3267 useful range. */
3268 if (!TYPE_OVERFLOW_UNDEFINED (type)
3269 && ((vr0.type == VR_RANGE
3270 && vrp_val_is_min (vr0.min))
3271 || (vr0.type == VR_ANTI_RANGE
3272 && !vrp_val_is_min (vr0.min))))
3274 set_value_range_to_varying (vr);
3275 return;
3278 /* ABS_EXPR may flip the range around, if the original range
3279 included negative values. */
3280 if (!vrp_val_is_min (vr0.min))
3281 min = fold_unary_to_constant (code, type, vr0.min);
3282 else
3283 min = TYPE_MAX_VALUE (type);
3285 if (!vrp_val_is_min (vr0.max))
3286 max = fold_unary_to_constant (code, type, vr0.max);
3287 else
3288 max = TYPE_MAX_VALUE (type);
3290 cmp = compare_values (min, max);
3292 /* If a VR_ANTI_RANGEs contains zero, then we have
3293 ~[-INF, min(MIN, MAX)]. */
3294 if (vr0.type == VR_ANTI_RANGE)
3296 if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3298 /* Take the lower of the two values. */
3299 if (cmp != 1)
3300 max = min;
3302 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3303 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3304 flag_wrapv is set and the original anti-range doesn't include
3305 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3306 if (TYPE_OVERFLOW_WRAPS (type))
3308 tree type_min_value = TYPE_MIN_VALUE (type);
3310 min = (vr0.min != type_min_value
3311 ? int_const_binop (PLUS_EXPR, type_min_value,
3312 build_int_cst (TREE_TYPE (type_min_value), 1))
3313 : type_min_value);
3315 else
3316 min = TYPE_MIN_VALUE (type);
3318 else
3320 /* All else has failed, so create the range [0, INF], even for
3321 flag_wrapv since TYPE_MIN_VALUE is in the original
3322 anti-range. */
3323 vr0.type = VR_RANGE;
3324 min = build_int_cst (type, 0);
3325 max = TYPE_MAX_VALUE (type);
3329 /* If the range contains zero then we know that the minimum value in the
3330 range will be zero. */
3331 else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3333 if (cmp == 1)
3334 max = min;
3335 min = build_int_cst (type, 0);
3337 else
3339 /* If the range was reversed, swap MIN and MAX. */
3340 if (cmp == 1)
3341 std::swap (min, max);
3344 cmp = compare_values (min, max);
3345 if (cmp == -2 || cmp == 1)
3347 /* If the new range has its limits swapped around (MIN > MAX),
3348 then the operation caused one of them to wrap around, mark
3349 the new range VARYING. */
3350 set_value_range_to_varying (vr);
3352 else
3353 set_value_range (vr, vr0.type, min, max, NULL);
3354 return;
3357 /* For unhandled operations fall back to varying. */
3358 set_value_range_to_varying (vr);
3359 return;
3363 /* Extract range information from a unary expression CODE OP0 based on
3364 the range of its operand with resulting type TYPE.
3365 The resulting range is stored in *VR. */
3367 static void
3368 extract_range_from_unary_expr (value_range *vr, enum tree_code code,
3369 tree type, tree op0)
3371 value_range vr0 = VR_INITIALIZER;
3373 /* Get value ranges for the operand. For constant operands, create
3374 a new value range with the operand to simplify processing. */
3375 if (TREE_CODE (op0) == SSA_NAME)
3376 vr0 = *(get_value_range (op0));
3377 else if (is_gimple_min_invariant (op0))
3378 set_value_range_to_value (&vr0, op0, NULL);
3379 else
3380 set_value_range_to_varying (&vr0);
3382 extract_range_from_unary_expr (vr, code, type, &vr0, TREE_TYPE (op0));
3386 /* Extract range information from a conditional expression STMT based on
3387 the ranges of each of its operands and the expression code. */
3389 static void
3390 extract_range_from_cond_expr (value_range *vr, gassign *stmt)
3392 tree op0, op1;
3393 value_range vr0 = VR_INITIALIZER;
3394 value_range vr1 = VR_INITIALIZER;
3396 /* Get value ranges for each operand. For constant operands, create
3397 a new value range with the operand to simplify processing. */
3398 op0 = gimple_assign_rhs2 (stmt);
3399 if (TREE_CODE (op0) == SSA_NAME)
3400 vr0 = *(get_value_range (op0));
3401 else if (is_gimple_min_invariant (op0))
3402 set_value_range_to_value (&vr0, op0, NULL);
3403 else
3404 set_value_range_to_varying (&vr0);
3406 op1 = gimple_assign_rhs3 (stmt);
3407 if (TREE_CODE (op1) == SSA_NAME)
3408 vr1 = *(get_value_range (op1));
3409 else if (is_gimple_min_invariant (op1))
3410 set_value_range_to_value (&vr1, op1, NULL);
3411 else
3412 set_value_range_to_varying (&vr1);
3414 /* The resulting value range is the union of the operand ranges */
3415 copy_value_range (vr, &vr0);
3416 vrp_meet (vr, &vr1);
3420 /* Extract range information from a comparison expression EXPR based
3421 on the range of its operand and the expression code. */
3423 static void
3424 extract_range_from_comparison (value_range *vr, enum tree_code code,
3425 tree type, tree op0, tree op1)
3427 bool sop;
3428 tree val;
3430 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3431 NULL);
3432 if (val)
3434 /* Since this expression was found on the RHS of an assignment,
3435 its type may be different from _Bool. Convert VAL to EXPR's
3436 type. */
3437 val = fold_convert (type, val);
3438 if (is_gimple_min_invariant (val))
3439 set_value_range_to_value (vr, val, vr->equiv);
3440 else
3441 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3443 else
3444 /* The result of a comparison is always true or false. */
3445 set_value_range_to_truthvalue (vr, type);
3448 /* Helper function for simplify_internal_call_using_ranges and
3449 extract_range_basic. Return true if OP0 SUBCODE OP1 for
3450 SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
3451 always overflow. Set *OVF to true if it is known to always
3452 overflow. */
3454 static bool
3455 check_for_binary_op_overflow (enum tree_code subcode, tree type,
3456 tree op0, tree op1, bool *ovf)
/* Build value ranges for both operands: SSA names use their recorded
   range, integer constants become singleton ranges, anything else is
   treated as varying.  */
3458 value_range vr0 = VR_INITIALIZER;
3459 value_range vr1 = VR_INITIALIZER;
3460 if (TREE_CODE (op0) == SSA_NAME)
3461 vr0 = *get_value_range (op0);
3462 else if (TREE_CODE (op0) == INTEGER_CST)
3463 set_value_range_to_value (&vr0, op0, NULL);
3464 else
3465 set_value_range_to_varying (&vr0);
3467 if (TREE_CODE (op1) == SSA_NAME)
3468 vr1 = *get_value_range (op1);
3469 else if (TREE_CODE (op1) == INTEGER_CST)
3470 set_value_range_to_value (&vr1, op1, NULL);
3471 else
3472 set_value_range_to_varying (&vr1);
/* A range that is not a constant interval, or whose bounds carry the
   TREE_OVERFLOW flag, is conservatively widened to the operand type's
   full [min, max] so the corner checks below remain sound.  */
3474 if (!range_int_cst_p (&vr0)
3475 || TREE_OVERFLOW (vr0.min)
3476 || TREE_OVERFLOW (vr0.max))
3478 vr0.min = vrp_val_min (TREE_TYPE (op0));
3479 vr0.max = vrp_val_max (TREE_TYPE (op0));
3481 if (!range_int_cst_p (&vr1)
3482 || TREE_OVERFLOW (vr1.min)
3483 || TREE_OVERFLOW (vr1.max))
3485 vr1.min = vrp_val_min (TREE_TYPE (op1));
3486 vr1.max = vrp_val_max (TREE_TYPE (op1));
/* Probe overflow at the extreme corners of the two ranges.  For
   MINUS_EXPR the extremes pair vr0.min with vr1.max (and vice versa);
   for PLUS/MULT like bounds pair up.  If the two corners disagree the
   answer is not uniform, so give up.  */
3488 *ovf = arith_overflowed_p (subcode, type, vr0.min,
3489 subcode == MINUS_EXPR ? vr1.max : vr1.min);
3490 if (arith_overflowed_p (subcode, type, vr0.max,
3491 subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
3492 return false;
/* For multiplication the extreme product can also occur at the mixed
   corners (signs may flip the ordering), so check those as well.  */
3493 if (subcode == MULT_EXPR)
3495 if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
3496 || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
3497 return false;
3499 if (*ovf)
3501 /* So far we found that there is an overflow on the boundaries.
3502 That doesn't prove that there is an overflow even for all values
3503 in between the boundaries. For that compute widest_int range
3504 of the result and see if it doesn't overlap the range of
3505 type. */
3506 widest_int wmin, wmax;
3507 widest_int w[4];
3508 int i;
3509 w[0] = wi::to_widest (vr0.min);
3510 w[1] = wi::to_widest (vr0.max);
3511 w[2] = wi::to_widest (vr1.min);
3512 w[3] = wi::to_widest (vr1.max);
/* Evaluate the operation in infinite precision at all four corner
   combinations; the index arithmetic (i & 1, 2 + (i & 2) / 2) walks
   {min,max} x {min,max}.  */
3513 for (i = 0; i < 4; i++)
3515 widest_int wt;
3516 switch (subcode)
3518 case PLUS_EXPR:
3519 wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
3520 break;
3521 case MINUS_EXPR:
3522 wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
3523 break;
3524 case MULT_EXPR:
3525 wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
3526 break;
3527 default:
3528 gcc_unreachable ();
3530 if (i == 0)
3532 wmin = wt;
3533 wmax = wt;
3535 else
3537 wmin = wi::smin (wmin, wt);
3538 wmax = wi::smax (wmax, wt);
3541 /* The result of op0 CODE op1 is known to be in range
3542 [wmin, wmax]. */
3543 widest_int wtmin = wi::to_widest (vrp_val_min (type));
3544 widest_int wtmax = wi::to_widest (vrp_val_max (type));
3545 /* If all values in [wmin, wmax] are smaller than
3546 [wtmin, wtmax] or all are larger than [wtmin, wtmax],
3547 the arithmetic operation will always overflow. */
3548 if (wmax < wtmin || wmin > wtmax)
3549 return true;
3550 return false;
3552 return true;
3555 /* Try to derive a nonnegative or nonzero range out of STMT relying
3556 primarily on generic routines in fold in conjunction with range data.
3557 Store the result in *VR */
/* STMT is either a call (bit-counting builtins, UBSAN checks, OpenACC
   dimension queries, strlen, __builtin_constant_p get dedicated
   handling) or an assignment extracting the result / overflow flag of
   an {ADD,SUB,MUL}_OVERFLOW or ATOMIC_COMPARE_EXCHANGE internal call.
   Anything unrecognized falls through to the generic fold-based
   nonnegative/nonzero checks at the end.  */
3559 static void
3560 extract_range_basic (value_range *vr, gimple *stmt)
3562 bool sop;
3563 tree type = gimple_expr_type (stmt);
3565 if (is_gimple_call (stmt))
3567 tree arg;
/* MINI/MAXI accumulate the [mini, maxi] result range for the
   bit-counting builtins; mini == -2 is a sentinel meaning "give up
   unless the argument is proven non-zero".  */
3568 int mini, maxi, zerov = 0, prec;
3569 enum tree_code subcode = ERROR_MARK;
3570 combined_fn cfn = gimple_call_combined_fn (stmt);
3572 switch (cfn)
3574 case CFN_BUILT_IN_CONSTANT_P:
3575 /* If the call is __builtin_constant_p and the argument is a
3576 function parameter resolve it to false. This avoids bogus
3577 array bound warnings.
3578 ??? We could do this as early as inlining is finished. */
3579 arg = gimple_call_arg (stmt, 0);
3580 if (TREE_CODE (arg) == SSA_NAME
3581 && SSA_NAME_IS_DEFAULT_DEF (arg)
3582 && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL
3583 && cfun->after_inlining)
3585 set_value_range_to_null (vr, type);
3586 return;
3588 break;
3589 /* Both __builtin_ffs* and __builtin_popcount return
3590 [0, prec]. */
3591 CASE_CFN_FFS:
3592 CASE_CFN_POPCOUNT:
3593 arg = gimple_call_arg (stmt, 0);
3594 prec = TYPE_PRECISION (TREE_TYPE (arg));
3595 mini = 0;
3596 maxi = prec;
3597 if (TREE_CODE (arg) == SSA_NAME)
3599 value_range *vr0 = get_value_range (arg);
3600 /* If arg is non-zero, then ffs or popcount
3601 are non-zero. */
3602 if ((vr0->type == VR_RANGE
3603 && range_includes_zero_p (vr0->min, vr0->max) == 0)
3604 || (vr0->type == VR_ANTI_RANGE
3605 && range_includes_zero_p (vr0->min, vr0->max) == 1))
3606 mini = 1;
3607 /* If some high bits are known to be zero,
3608 we can decrease the maximum. */
3609 if (vr0->type == VR_RANGE
3610 && TREE_CODE (vr0->max) == INTEGER_CST
3611 && !operand_less_p (vr0->min,
3612 build_zero_cst (TREE_TYPE (vr0->min))))
3613 maxi = tree_floor_log2 (vr0->max) + 1;
3615 goto bitop_builtin;
3616 /* __builtin_parity* returns [0, 1]. */
3617 CASE_CFN_PARITY:
3618 mini = 0;
3619 maxi = 1;
3620 goto bitop_builtin;
3621 /* __builtin_c[lt]z* return [0, prec-1], except for
3622 when the argument is 0, but that is undefined behavior.
3623 On many targets where the CLZ RTL or optab value is defined
3624 for 0 the value is prec, so include that in the range
3625 by default. */
3626 CASE_CFN_CLZ:
3627 arg = gimple_call_arg (stmt, 0);
3628 prec = TYPE_PRECISION (TREE_TYPE (arg));
3629 mini = 0;
3630 maxi = prec;
3631 if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
3632 != CODE_FOR_nothing
3633 && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3634 zerov)
3635 /* Handle only the single common value. */
3636 && zerov != prec)
3637 /* Magic value to give up, unless vr0 proves
3638 arg is non-zero. */
3639 mini = -2;
3640 if (TREE_CODE (arg) == SSA_NAME)
3642 value_range *vr0 = get_value_range (arg);
3643 /* From clz of VR_RANGE minimum we can compute
3644 result maximum. */
3645 if (vr0->type == VR_RANGE
3646 && TREE_CODE (vr0->min) == INTEGER_CST)
3648 maxi = prec - 1 - tree_floor_log2 (vr0->min);
3649 if (maxi != prec)
3650 mini = 0;
3652 else if (vr0->type == VR_ANTI_RANGE
3653 && integer_zerop (vr0->min))
3655 maxi = prec - 1;
3656 mini = 0;
3658 if (mini == -2)
3659 break;
3660 /* From clz of VR_RANGE maximum we can compute
3661 result minimum. */
3662 if (vr0->type == VR_RANGE
3663 && TREE_CODE (vr0->max) == INTEGER_CST)
3665 mini = prec - 1 - tree_floor_log2 (vr0->max);
3666 if (mini == prec)
3667 break;
3670 if (mini == -2)
3671 break;
3672 goto bitop_builtin;
3673 /* __builtin_ctz* return [0, prec-1], except for
3674 when the argument is 0, but that is undefined behavior.
3675 If there is a ctz optab for this mode and
3676 CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
3677 otherwise just assume 0 won't be seen. */
3678 CASE_CFN_CTZ:
3679 arg = gimple_call_arg (stmt, 0);
3680 prec = TYPE_PRECISION (TREE_TYPE (arg));
3681 mini = 0;
3682 maxi = prec - 1;
3683 if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
3684 != CODE_FOR_nothing
3685 && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3686 zerov))
3688 /* Handle only the two common values. */
3689 if (zerov == -1)
3690 mini = -1;
3691 else if (zerov == prec)
3692 maxi = prec;
3693 else
3694 /* Magic value to give up, unless vr0 proves
3695 arg is non-zero. */
3696 mini = -2;
3698 if (TREE_CODE (arg) == SSA_NAME)
3700 value_range *vr0 = get_value_range (arg);
3701 /* If arg is non-zero, then use [0, prec - 1]. */
3702 if ((vr0->type == VR_RANGE
3703 && integer_nonzerop (vr0->min))
3704 || (vr0->type == VR_ANTI_RANGE
3705 && integer_zerop (vr0->min)))
3707 mini = 0;
3708 maxi = prec - 1;
3710 /* If some high bits are known to be zero,
3711 we can decrease the result maximum. */
3712 if (vr0->type == VR_RANGE
3713 && TREE_CODE (vr0->max) == INTEGER_CST)
3715 maxi = tree_floor_log2 (vr0->max);
3716 /* For vr0 [0, 0] give up. */
3717 if (maxi == -1)
3718 break;
3721 if (mini == -2)
3722 break;
3723 goto bitop_builtin;
3724 /* __builtin_clrsb* returns [0, prec-1]. */
3725 CASE_CFN_CLRSB:
3726 arg = gimple_call_arg (stmt, 0);
3727 prec = TYPE_PRECISION (TREE_TYPE (arg));
3728 mini = 0;
3729 maxi = prec - 1;
3730 goto bitop_builtin;
/* Shared tail for all bit-counting builtins: install [mini, maxi].  */
3731 bitop_builtin:
3732 set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
3733 build_int_cst (type, maxi), NULL);
3734 return;
/* The UBSAN checks just record the arithmetic code and fall through
   to the common handling after the switch.  */
3735 case CFN_UBSAN_CHECK_ADD:
3736 subcode = PLUS_EXPR;
3737 break;
3738 case CFN_UBSAN_CHECK_SUB:
3739 subcode = MINUS_EXPR;
3740 break;
3741 case CFN_UBSAN_CHECK_MUL:
3742 subcode = MULT_EXPR;
3743 break;
3744 case CFN_GOACC_DIM_SIZE:
3745 case CFN_GOACC_DIM_POS:
3746 /* Optimizing these two internal functions helps the loop
3747 optimizer eliminate outer comparisons. Size is [1,N]
3748 and pos is [0,N-1]. */
3750 bool is_pos = cfn == CFN_GOACC_DIM_POS;
3751 int axis = oacc_get_ifn_dim_arg (stmt);
3752 int size = oacc_get_fn_dim_size (current_function_decl, axis);
3754 if (!size)
3755 /* If it's dynamic, the backend might know a hardware
3756 limitation. */
3757 size = targetm.goacc.dim_limit (axis);
3759 tree type = TREE_TYPE (gimple_call_lhs (stmt));
3760 set_value_range (vr, VR_RANGE,
3761 build_int_cst (type, is_pos ? 0 : 1),
3762 size ? build_int_cst (type, size - is_pos)
3763 : vrp_val_max (type), NULL);
3765 return;
/* strlen result fits in [0, PTRDIFF_MAX - 1] when the lhs has the
   same precision as ptrdiff_t.  */
3766 case CFN_BUILT_IN_STRLEN:
3767 if (tree lhs = gimple_call_lhs (stmt))
3768 if (ptrdiff_type_node
3769 && (TYPE_PRECISION (ptrdiff_type_node)
3770 == TYPE_PRECISION (TREE_TYPE (lhs))))
3772 tree type = TREE_TYPE (lhs);
3773 tree max = vrp_val_max (ptrdiff_type_node);
3774 wide_int wmax = wi::to_wide (max, TYPE_PRECISION (TREE_TYPE (max)));
3775 tree range_min = build_zero_cst (type);
3776 tree range_max = wide_int_to_tree (type, wmax - 1);
3777 set_value_range (vr, VR_RANGE, range_min, range_max, NULL);
3778 return;
3780 break;
3781 default:
3782 break;
/* Common handling for the UBSAN_CHECK_* calls recorded above.  */
3784 if (subcode != ERROR_MARK)
3786 bool saved_flag_wrapv = flag_wrapv;
3787 /* Pretend the arithmetics is wrapping. If there is
3788 any overflow, we'll complain, but will actually do
3789 wrapping operation. */
3790 flag_wrapv = 1;
3791 extract_range_from_binary_expr (vr, subcode, type,
3792 gimple_call_arg (stmt, 0),
3793 gimple_call_arg (stmt, 1));
3794 flag_wrapv = saved_flag_wrapv;
3796 /* If for both arguments vrp_valueize returned non-NULL,
3797 this should have been already folded and if not, it
3798 wasn't folded because of overflow. Avoid removing the
3799 UBSAN_CHECK_* calls in that case. */
3800 if (vr->type == VR_RANGE
3801 && (vr->min == vr->max
3802 || operand_equal_p (vr->min, vr->max, 0)))
3803 set_value_range_to_varying (vr);
3804 return;
3807 /* Handle extraction of the two results (result of arithmetics and
3808 a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW
3809 internal function. Similarly from ATOMIC_COMPARE_EXCHANGE. */
3810 else if (is_gimple_assign (stmt)
3811 && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
3812 || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
3813 && INTEGRAL_TYPE_P (type))
3815 enum tree_code code = gimple_assign_rhs_code (stmt);
3816 tree op = gimple_assign_rhs1 (stmt);
3817 if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
3819 gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
3820 if (is_gimple_call (g) && gimple_call_internal_p (g))
3822 enum tree_code subcode = ERROR_MARK;
3823 switch (gimple_call_internal_fn (g))
3825 case IFN_ADD_OVERFLOW:
3826 subcode = PLUS_EXPR;
3827 break;
3828 case IFN_SUB_OVERFLOW:
3829 subcode = MINUS_EXPR;
3830 break;
3831 case IFN_MUL_OVERFLOW:
3832 subcode = MULT_EXPR;
3833 break;
3834 case IFN_ATOMIC_COMPARE_EXCHANGE:
3835 if (code == IMAGPART_EXPR)
3837 /* This is the boolean return value whether compare and
3838 exchange changed anything or not. */
3839 set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
3840 build_int_cst (type, 1), NULL);
3841 return;
3843 break;
3844 default:
3845 break;
3847 if (subcode != ERROR_MARK)
3849 tree op0 = gimple_call_arg (g, 0);
3850 tree op1 = gimple_call_arg (g, 1);
/* IMAGPART_EXPR extracts the overflow flag, a 0/1 value (or the
   flag value itself when overflow is decidable).  */
3851 if (code == IMAGPART_EXPR)
3853 bool ovf = false;
3854 if (check_for_binary_op_overflow (subcode, type,
3855 op0, op1, &ovf))
3856 set_value_range_to_value (vr,
3857 build_int_cst (type, ovf),
3858 NULL);
3859 else if (TYPE_PRECISION (type) == 1
3860 && !TYPE_UNSIGNED (type))
3861 set_value_range_to_varying (vr);
3862 else
3863 set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
3864 build_int_cst (type, 1), NULL);
/* REALPART_EXPR extracts the (wrapping) arithmetic result.  */
3866 else if (types_compatible_p (type, TREE_TYPE (op0))
3867 && types_compatible_p (type, TREE_TYPE (op1)))
3869 bool saved_flag_wrapv = flag_wrapv;
3870 /* Pretend the arithmetics is wrapping. If there is
3871 any overflow, IMAGPART_EXPR will be set. */
3872 flag_wrapv = 1;
3873 extract_range_from_binary_expr (vr, subcode, type,
3874 op0, op1);
3875 flag_wrapv = saved_flag_wrapv;
3877 else
/* Operand types differ from the result type: convert the
   operand ranges first, then combine on the converted ranges.  */
3879 value_range vr0 = VR_INITIALIZER;
3880 value_range vr1 = VR_INITIALIZER;
3881 bool saved_flag_wrapv = flag_wrapv;
3882 /* Pretend the arithmetics is wrapping. If there is
3883 any overflow, IMAGPART_EXPR will be set. */
3884 flag_wrapv = 1;
3885 extract_range_from_unary_expr (&vr0, NOP_EXPR,
3886 type, op0);
3887 extract_range_from_unary_expr (&vr1, NOP_EXPR,
3888 type, op1);
3889 extract_range_from_binary_expr_1 (vr, subcode, type,
3890 &vr0, &vr1);
3891 flag_wrapv = saved_flag_wrapv;
3893 return;
/* Generic fallback: let fold decide nonnegativity / nonzeroness.  */
3898 if (INTEGRAL_TYPE_P (type)
3899 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3900 set_value_range_to_nonnegative (vr, type);
3901 else if (vrp_stmt_computes_nonzero (stmt))
3902 set_value_range_to_nonnull (vr, type);
3903 else
3904 set_value_range_to_varying (vr);
3908 /* Try to compute a useful range out of assignment STMT and store it
3909 in *VR. */
3911 static void
3912 extract_range_from_assignment (value_range *vr, gassign *stmt)
3914 enum tree_code code = gimple_assign_rhs_code (stmt);
3916 if (code == ASSERT_EXPR)
3917 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3918 else if (code == SSA_NAME)
3919 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3920 else if (TREE_CODE_CLASS (code) == tcc_binary)
3921 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3922 gimple_expr_type (stmt),
3923 gimple_assign_rhs1 (stmt),
3924 gimple_assign_rhs2 (stmt));
3925 else if (TREE_CODE_CLASS (code) == tcc_unary)
3926 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3927 gimple_expr_type (stmt),
3928 gimple_assign_rhs1 (stmt));
3929 else if (code == COND_EXPR)
3930 extract_range_from_cond_expr (vr, stmt);
3931 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3932 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3933 gimple_expr_type (stmt),
3934 gimple_assign_rhs1 (stmt),
3935 gimple_assign_rhs2 (stmt));
3936 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3937 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3938 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3939 else
3940 set_value_range_to_varying (vr);
3942 if (vr->type == VR_VARYING)
3943 extract_range_basic (vr, stmt);
3946 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3947 would be profitable to adjust VR using scalar evolution information
3948 for VAR. If so, update VR with the new limits. */
/* STMT is the statement defining VAR; it is used only for the wrapping
   check below.  VR is updated in place and left untouched whenever any
   of the bail-out conditions fires.  */
3950 static void
3951 adjust_range_with_scev (value_range *vr, struct loop *loop,
3952 gimple *stmt, tree var)
3954 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3955 enum ev_direction dir;
3957 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3958 better opportunities than a regular range, but I'm not sure. */
3959 if (vr->type == VR_ANTI_RANGE)
3960 return;
3962 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3964 /* Like in PR19590, scev can return a constant function. */
3965 if (is_gimple_min_invariant (chrec))
3967 set_value_range_to_value (vr, chrec, vr->equiv);
3968 return;
3971 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3972 return;
/* Replace INIT/STEP by their singleton range values when known — this
   lets symbolic initial conditions and steps collapse to constants.  */
3974 init = initial_condition_in_loop_num (chrec, loop->num);
3975 tem = op_with_constant_singleton_value_range (init);
3976 if (tem)
3977 init = tem;
3978 step = evolution_part_in_loop_num (chrec, loop->num);
3979 tem = op_with_constant_singleton_value_range (step);
3980 if (tem)
3981 step = tem;
3983 /* If STEP is symbolic, we can't know whether INIT will be the
3984 minimum or maximum value in the range. Also, unless INIT is
3985 a simple expression, compare_values and possibly other functions
3986 in tree-vrp won't be able to handle it. */
3987 if (step == NULL_TREE
3988 || !is_gimple_min_invariant (step)
3989 || !valid_value_p (init))
3990 return;
3992 dir = scev_direction (chrec);
3993 if (/* Do not adjust ranges if we do not know whether the iv increases
3994 or decreases, ... */
3995 dir == EV_DIR_UNKNOWN
3996 /* ... or if it may wrap. */
3997 || scev_probably_wraps_p (NULL_TREE, init, step, stmt,
3998 get_chrec_loop (chrec), true))
3999 return;
/* TMIN/TMAX start as the widest representable bounds for VAR's type
   and may be tightened below from the iteration-count estimate.  */
4001 type = TREE_TYPE (var);
4002 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
4003 tmin = lower_bound_in_type (type, type);
4004 else
4005 tmin = TYPE_MIN_VALUE (type);
4006 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
4007 tmax = upper_bound_in_type (type, type);
4008 else
4009 tmax = TYPE_MAX_VALUE (type);
4011 /* Try to use estimated number of iterations for the loop to constrain the
4012 final value in the evolution. */
4013 if (TREE_CODE (step) == INTEGER_CST
4014 && is_gimple_val (init)
4015 && (TREE_CODE (init) != SSA_NAME
4016 || get_value_range (init)->type == VR_RANGE))
4018 widest_int nit;
4020 /* We are only entering here for loop header PHI nodes, so using
4021 the number of latch executions is the correct thing to use. */
4022 if (max_loop_iterations (loop, &nit))
4024 value_range maxvr = VR_INITIALIZER;
4025 signop sgn = TYPE_SIGN (TREE_TYPE (step));
4026 bool overflow;
4028 widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
4029 &overflow);
4030 /* If the multiplication overflowed we can't do a meaningful
4031 adjustment. Likewise if the result doesn't fit in the type
4032 of the induction variable. For a signed type we have to
4033 check whether the result has the expected signedness which
4034 is that of the step as number of iterations is unsigned. */
4035 if (!overflow
4036 && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
4037 && (sgn == UNSIGNED
4038 || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
4040 tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
4041 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
4042 TREE_TYPE (init), init, tem);
4043 /* Likewise if the addition did. */
4044 if (maxvr.type == VR_RANGE)
4046 value_range initvr = VR_INITIALIZER;
4048 if (TREE_CODE (init) == SSA_NAME)
4049 initvr = *(get_value_range (init));
4050 else if (is_gimple_min_invariant (init))
4051 set_value_range_to_value (&initvr, init, NULL);
4052 else
4053 return;
4055 /* Check if init + nit * step overflows. Though we checked
4056 scev {init, step}_loop doesn't wrap, it is not enough
4057 because the loop may exit immediately. Overflow could
4058 happen in the plus expression in this case. */
4059 if ((dir == EV_DIR_DECREASES
4060 && compare_values (maxvr.min, initvr.min) != -1)
4061 || (dir == EV_DIR_GROWS
4062 && compare_values (maxvr.max, initvr.max) != 1))
4063 return;
4065 tmin = maxvr.min;
4066 tmax = maxvr.max;
/* Combine [tmin, tmax] and INIT with the incoming range in *VR.  */
4072 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4074 min = tmin;
4075 max = tmax;
4077 /* For VARYING or UNDEFINED ranges, just about anything we get
4078 from scalar evolutions should be better. */
4080 if (dir == EV_DIR_DECREASES)
4081 max = init;
4082 else
4083 min = init;
4085 else if (vr->type == VR_RANGE)
4087 min = vr->min;
4088 max = vr->max;
4090 if (dir == EV_DIR_DECREASES)
4092 /* INIT is the maximum value. If INIT is lower than VR->MAX
4093 but no smaller than VR->MIN, set VR->MAX to INIT. */
4094 if (compare_values (init, max) == -1)
4095 max = init;
4097 /* According to the loop information, the variable does not
4098 overflow. */
4099 if (compare_values (min, tmin) == -1)
4100 min = tmin;
4103 else
4105 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
4106 if (compare_values (init, min) == 1)
4107 min = init;
4109 if (compare_values (tmax, max) == -1)
4110 max = tmax;
4113 else
4114 return;
4116 /* If we just created an invalid range with the minimum
4117 greater than the maximum, we fail conservatively.
4118 This should happen only in unreachable
4119 parts of code, or for invalid programs. */
4120 if (compare_values (min, max) == 1)
4121 return;
4123 /* Even for valid range info, sometimes overflow flag will leak in.
4124 As GIMPLE IL should have no constants with TREE_OVERFLOW set, we
4125 drop them. */
4126 if (TREE_OVERFLOW_P (min))
4127 min = drop_tree_overflow (min);
4128 if (TREE_OVERFLOW_P (max))
4129 max = drop_tree_overflow (max);
4131 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
4135 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
4137 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
4138 all the values in the ranges.
4140 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
4142 - Return NULL_TREE if it is not always possible to determine the
4143 value of the comparison.
4145 Also set *STRICT_OVERFLOW_P to indicate whether comparision evaluation
4146 assumed signed overflow is undefined. */
/* Throughout this function a compare_values_warnv result of -2 means
   the two values could not be ordered at compile time (e.g. symbolic
   bounds); see the uses below that guard against it.  */
4149 static tree
4150 compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
4151 bool *strict_overflow_p)
4153 /* VARYING or UNDEFINED ranges cannot be compared. */
4154 if (vr0->type == VR_VARYING
4155 || vr0->type == VR_UNDEFINED
4156 || vr1->type == VR_VARYING
4157 || vr1->type == VR_UNDEFINED)
4158 return NULL_TREE;
4160 /* Anti-ranges need to be handled separately. */
4161 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
4163 /* If both are anti-ranges, then we cannot compute any
4164 comparison. */
4165 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
4166 return NULL_TREE;
4168 /* These comparisons are never statically computable. */
4169 if (comp == GT_EXPR
4170 || comp == GE_EXPR
4171 || comp == LT_EXPR
4172 || comp == LE_EXPR)
4173 return NULL_TREE;
4175 /* Equality can be computed only between a range and an
4176 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
4177 if (vr0->type == VR_RANGE)
4179 /* To simplify processing, make VR0 the anti-range. */
4180 value_range *tmp = vr0;
4181 vr0 = vr1;
4182 vr1 = tmp;
4185 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
/* The only decidable case: the anti-range excludes exactly the
   compared range, so equality is false and inequality is true.  */
4187 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
4188 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
4189 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4191 return NULL_TREE;
4194 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
4195 operands around and change the comparison code. */
4196 if (comp == GT_EXPR || comp == GE_EXPR)
4198 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
4199 std::swap (vr0, vr1);
4202 if (comp == EQ_EXPR)
4204 /* Equality may only be computed if both ranges represent
4205 exactly one value. */
4206 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
4207 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
4209 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
4210 strict_overflow_p);
4211 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
4212 strict_overflow_p);
4213 if (cmp_min == 0 && cmp_max == 0)
4214 return boolean_true_node;
4215 else if (cmp_min != -2 && cmp_max != -2)
4216 return boolean_false_node;
4218 /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */
4219 else if (compare_values_warnv (vr0->min, vr1->max,
4220 strict_overflow_p) == 1
4221 || compare_values_warnv (vr1->min, vr0->max,
4222 strict_overflow_p) == 1)
4223 return boolean_false_node;
4225 return NULL_TREE;
4227 else if (comp == NE_EXPR)
4229 int cmp1, cmp2;
4231 /* If VR0 is completely to the left or completely to the right
4232 of VR1, they are always different. Notice that we need to
4233 make sure that both comparisons yield similar results to
4234 avoid comparing values that cannot be compared at
4235 compile-time. */
4236 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4237 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4238 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
4239 return boolean_true_node;
4241 /* If VR0 and VR1 represent a single value and are identical,
4242 return false. */
4243 else if (compare_values_warnv (vr0->min, vr0->max,
4244 strict_overflow_p) == 0
4245 && compare_values_warnv (vr1->min, vr1->max,
4246 strict_overflow_p) == 0
4247 && compare_values_warnv (vr0->min, vr1->min,
4248 strict_overflow_p) == 0
4249 && compare_values_warnv (vr0->max, vr1->max,
4250 strict_overflow_p) == 0)
4251 return boolean_false_node;
4253 /* Otherwise, they may or may not be different. */
4254 else
4255 return NULL_TREE;
4257 else if (comp == LT_EXPR || comp == LE_EXPR)
4259 int tst;
4261 /* If VR0 is to the left of VR1, return true. */
4262 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4263 if ((comp == LT_EXPR && tst == -1)
4264 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4265 return boolean_true_node;
4267 /* If VR0 is to the right of VR1, return false. */
4268 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4269 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4270 || (comp == LE_EXPR && tst == 1))
4271 return boolean_false_node;
4273 /* Otherwise, we don't know. */
4274 return NULL_TREE;
/* GT/GE were canonicalized to LT/LE above, so every code is handled.  */
4277 gcc_unreachable ();
4281 /* Given a value range VR, a value VAL and a comparison code COMP, return
4282 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
4283 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
4284 always returns false. Return NULL_TREE if it is not always
4285 possible to determine the value of the comparison. Also set
4286 *STRICT_OVERFLOW_P to indicate whether comparision evaluation
4287 assumed signed overflow is undefined. */
4289 static tree
4290 compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
4291 bool *strict_overflow_p)
4293 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4294 return NULL_TREE;
4296 /* Anti-ranges need to be handled separately. */
4297 if (vr->type == VR_ANTI_RANGE)
4299 /* For anti-ranges, the only predicates that we can compute at
4300 compile time are equality and inequality. */
4301 if (comp == GT_EXPR
4302 || comp == GE_EXPR
4303 || comp == LT_EXPR
4304 || comp == LE_EXPR)
4305 return NULL_TREE;
4307 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
4308 if (value_inside_range (val, vr->min, vr->max) == 1)
4309 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4311 return NULL_TREE;
4314 if (comp == EQ_EXPR)
4316 /* EQ_EXPR may only be computed if VR represents exactly
4317 one value. */
4318 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
4320 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
4321 if (cmp == 0)
4322 return boolean_true_node;
/* NOTE(review): a result of 2 appears to mean "known unequal but
   unordered" — confirm against compare_values_warnv.  */
4323 else if (cmp == -1 || cmp == 1 || cmp == 2)
4324 return boolean_false_node;
4326 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
4327 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
4328 return boolean_false_node;
4330 return NULL_TREE;
4332 else if (comp == NE_EXPR)
4334 /* If VAL is not inside VR, then they are always different. */
4335 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
4336 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
4337 return boolean_true_node;
4339 /* If VR represents exactly one value equal to VAL, then return
4340 false. */
4341 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
4342 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
4343 return boolean_false_node;
4345 /* Otherwise, they may or may not be different. */
4346 return NULL_TREE;
4348 else if (comp == LT_EXPR || comp == LE_EXPR)
4350 int tst;
4352 /* If VR is to the left of VAL, return true. */
4353 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4354 if ((comp == LT_EXPR && tst == -1)
4355 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4356 return boolean_true_node;
4358 /* If VR is to the right of VAL, return false. */
4359 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4360 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4361 || (comp == LE_EXPR && tst == 1))
4362 return boolean_false_node;
4364 /* Otherwise, we don't know. */
4365 return NULL_TREE;
4367 else if (comp == GT_EXPR || comp == GE_EXPR)
4369 int tst;
4371 /* If VR is to the right of VAL, return true. */
4372 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4373 if ((comp == GT_EXPR && tst == 1)
4374 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
4375 return boolean_true_node;
4377 /* If VR is to the left of VAL, return false. */
4378 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4379 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
4380 || (comp == GE_EXPR && tst == -1))
4381 return boolean_false_node;
4383 /* Otherwise, we don't know. */
4384 return NULL_TREE;
/* Every comparison code relevant here was handled above.  */
4387 gcc_unreachable ();
4391 /* Debugging dumps. */
4393 void dump_value_range (FILE *, const value_range *);
4394 void debug_value_range (value_range *);
4395 void dump_all_value_ranges (FILE *);
4396 void debug_all_value_ranges (void);
4397 void dump_vr_equiv (FILE *, bitmap);
4398 void debug_vr_equiv (bitmap);
4401 /* Dump value range VR to FILE. */
4403 void
4404 dump_value_range (FILE *file, const value_range *vr)
4406 if (vr == NULL)
4407 fprintf (file, "[]");
4408 else if (vr->type == VR_UNDEFINED)
4409 fprintf (file, "UNDEFINED");
4410 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4412 tree type = TREE_TYPE (vr->min);
4414 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
4416 if (INTEGRAL_TYPE_P (type)
4417 && !TYPE_UNSIGNED (type)
4418 && vrp_val_is_min (vr->min))
4419 fprintf (file, "-INF");
4420 else
4421 print_generic_expr (file, vr->min);
4423 fprintf (file, ", ");
4425 if (INTEGRAL_TYPE_P (type)
4426 && vrp_val_is_max (vr->max))
4427 fprintf (file, "+INF");
4428 else
4429 print_generic_expr (file, vr->max);
4431 fprintf (file, "]");
4433 if (vr->equiv)
4435 bitmap_iterator bi;
4436 unsigned i, c = 0;
4438 fprintf (file, " EQUIVALENCES: { ");
4440 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4442 print_generic_expr (file, ssa_name (i));
4443 fprintf (file, " ");
4444 c++;
4447 fprintf (file, "} (%u elements)", c);
4450 else if (vr->type == VR_VARYING)
4451 fprintf (file, "VARYING");
4452 else
4453 fprintf (file, "INVALID RANGE");
4457 /* Dump value range VR to stderr. */
4459 DEBUG_FUNCTION void
4460 debug_value_range (value_range *vr)
4462 dump_value_range (stderr, vr);
4463 fprintf (stderr, "\n");
4467 /* Dump value ranges of all SSA_NAMEs to FILE. */
4469 void
4470 dump_all_value_ranges (FILE *file)
4472 size_t i;
4474 for (i = 0; i < num_vr_values; i++)
4476 if (vr_value[i])
4478 print_generic_expr (file, ssa_name (i));
4479 fprintf (file, ": ");
4480 dump_value_range (file, vr_value[i]);
4481 fprintf (file, "\n");
4485 fprintf (file, "\n");
4489 /* Dump all value ranges to stderr. */
4491 DEBUG_FUNCTION void
4492 debug_all_value_ranges (void)
4494 dump_all_value_ranges (stderr);
4498 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4499 create a new SSA name N and return the assertion assignment
4500 'N = ASSERT_EXPR <V, V OP W>'. */
4502 static gimple *
4503 build_assert_expr_for (tree cond, tree v)
4505 tree a;
4506 gassign *assertion;
4508 gcc_assert (TREE_CODE (v) == SSA_NAME
4509 && COMPARISON_CLASS_P (cond));
4511 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
4512 assertion = gimple_build_assign (NULL_TREE, a);
4514 /* The new ASSERT_EXPR, creates a new SSA name that replaces the
4515 operand of the ASSERT_EXPR. Create it so the new name and the old one
4516 are registered in the replacement table so that we can fix the SSA web
4517 after adding all the ASSERT_EXPRs. */
4518 create_new_def_for (v, assertion, NULL);
4520 return assertion;
4524 /* Return false if EXPR is a predicate expression involving floating
4525 point values. */
4527 static inline bool
4528 fp_predicate (gimple *stmt)
4530 GIMPLE_CHECK (stmt, GIMPLE_COND);
4532 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4535 /* If the range of values taken by OP can be inferred after STMT executes,
4536 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4537 describes the inferred range. Return true if a range could be
4538 inferred. */
4540 static bool
4541 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
4543 *val_p = NULL_TREE;
4544 *comp_code_p = ERROR_MARK;
4546 /* Do not attempt to infer anything in names that flow through
4547 abnormal edges. */
4548 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4549 return false;
4551 /* If STMT is the last statement of a basic block with no normal
4552 successors, there is no point inferring anything about any of its
4553 operands. We would not be able to find a proper insertion point
4554 for the assertion, anyway. */
4555 if (stmt_ends_bb_p (stmt))
4557 edge_iterator ei;
4558 edge e;
4560 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
4561 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
4562 break;
4563 if (e == NULL)
4564 return false;
4567 if (infer_nonnull_range (stmt, op))
4569 *val_p = build_int_cst (TREE_TYPE (op), 0);
4570 *comp_code_p = NE_EXPR;
4571 return true;
4574 return false;
4578 void dump_asserts_for (FILE *, tree);
4579 void debug_asserts_for (tree);
4580 void dump_all_asserts (FILE *);
4581 void debug_all_asserts (void);
4583 /* Dump all the registered assertions for NAME to FILE. */
4585 void
4586 dump_asserts_for (FILE *file, tree name)
4588 assert_locus *loc;
4590 fprintf (file, "Assertions to be inserted for ");
4591 print_generic_expr (file, name);
4592 fprintf (file, "\n");
4594 loc = asserts_for[SSA_NAME_VERSION (name)];
4595 while (loc)
4597 fprintf (file, "\t");
4598 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
4599 fprintf (file, "\n\tBB #%d", loc->bb->index);
4600 if (loc->e)
4602 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4603 loc->e->dest->index);
4604 dump_edge_info (file, loc->e, dump_flags, 0);
4606 fprintf (file, "\n\tPREDICATE: ");
4607 print_generic_expr (file, loc->expr);
4608 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
4609 print_generic_expr (file, loc->val);
4610 fprintf (file, "\n\n");
4611 loc = loc->next;
4614 fprintf (file, "\n");
4618 /* Dump all the registered assertions for NAME to stderr. */
4620 DEBUG_FUNCTION void
4621 debug_asserts_for (tree name)
4623 dump_asserts_for (stderr, name);
4627 /* Dump all the registered assertions for all the names to FILE. */
4629 void
4630 dump_all_asserts (FILE *file)
4632 unsigned i;
4633 bitmap_iterator bi;
4635 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4636 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4637 dump_asserts_for (file, ssa_name (i));
4638 fprintf (file, "\n");
4642 /* Dump all the registered assertions for all the names to stderr. */
4644 DEBUG_FUNCTION void
4645 debug_all_asserts (void)
4647 dump_all_asserts (stderr);
4650 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
4652 static void
4653 add_assert_info (vec<assert_info> &asserts,
4654 tree name, tree expr, enum tree_code comp_code, tree val)
4656 assert_info info;
4657 info.comp_code = comp_code;
4658 info.name = name;
4659 info.val = val;
4660 info.expr = expr;
4661 asserts.safe_push (info);
4664 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4665 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4666 E->DEST, then register this location as a possible insertion point
4667 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4669 BB, E and SI provide the exact insertion point for the new
4670 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4671 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4672 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4673 must not be NULL. */
4675 static void
4676 register_new_assert_for (tree name, tree expr,
4677 enum tree_code comp_code,
4678 tree val,
4679 basic_block bb,
4680 edge e,
4681 gimple_stmt_iterator si)
4683 assert_locus *n, *loc, *last_loc;
4684 basic_block dest_bb;
4686 gcc_checking_assert (bb == NULL || e == NULL);
4688 if (e == NULL)
4689 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4690 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4692 /* Never build an assert comparing against an integer constant with
4693 TREE_OVERFLOW set. This confuses our undefined overflow warning
4694 machinery. */
4695 if (TREE_OVERFLOW_P (val))
4696 val = drop_tree_overflow (val);
4698 /* The new assertion A will be inserted at BB or E. We need to
4699 determine if the new location is dominated by a previously
4700 registered location for A. If we are doing an edge insertion,
4701 assume that A will be inserted at E->DEST. Note that this is not
4702 necessarily true.
4704 If E is a critical edge, it will be split. But even if E is
4705 split, the new block will dominate the same set of blocks that
4706 E->DEST dominates.
4708 The reverse, however, is not true, blocks dominated by E->DEST
4709 will not be dominated by the new block created to split E. So,
4710 if the insertion location is on a critical edge, we will not use
4711 the new location to move another assertion previously registered
4712 at a block dominated by E->DEST. */
4713 dest_bb = (bb) ? bb : e->dest;
4715 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4716 VAL at a block dominating DEST_BB, then we don't need to insert a new
4717 one. Similarly, if the same assertion already exists at a block
4718 dominated by DEST_BB and the new location is not on a critical
4719 edge, then update the existing location for the assertion (i.e.,
4720 move the assertion up in the dominance tree).
4722 Note, this is implemented as a simple linked list because there
4723 should not be more than a handful of assertions registered per
4724 name. If this becomes a performance problem, a table hashed by
4725 COMP_CODE and VAL could be implemented. */
4726 loc = asserts_for[SSA_NAME_VERSION (name)];
4727 last_loc = loc;
4728 while (loc)
4730 if (loc->comp_code == comp_code
4731 && (loc->val == val
4732 || operand_equal_p (loc->val, val, 0))
4733 && (loc->expr == expr
4734 || operand_equal_p (loc->expr, expr, 0)))
4736 /* If E is not a critical edge and DEST_BB
4737 dominates the existing location for the assertion, move
4738 the assertion up in the dominance tree by updating its
4739 location information. */
4740 if ((e == NULL || !EDGE_CRITICAL_P (e))
4741 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4743 loc->bb = dest_bb;
4744 loc->e = e;
4745 loc->si = si;
4746 return;
4750 /* Update the last node of the list and move to the next one. */
4751 last_loc = loc;
4752 loc = loc->next;
4755 /* If we didn't find an assertion already registered for
4756 NAME COMP_CODE VAL, add a new one at the end of the list of
4757 assertions associated with NAME. */
4758 n = XNEW (struct assert_locus);
4759 n->bb = dest_bb;
4760 n->e = e;
4761 n->si = si;
4762 n->comp_code = comp_code;
4763 n->val = val;
4764 n->expr = expr;
4765 n->next = NULL;
4767 if (last_loc)
4768 last_loc->next = n;
4769 else
4770 asserts_for[SSA_NAME_VERSION (name)] = n;
4772 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4775 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4776 Extract a suitable test code and value and store them into *CODE_P and
4777 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4779 If no extraction was possible, return FALSE, otherwise return TRUE.
4781 If INVERT is true, then we invert the result stored into *CODE_P. */
4783 static bool
4784 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4785 tree cond_op0, tree cond_op1,
4786 bool invert, enum tree_code *code_p,
4787 tree *val_p)
4789 enum tree_code comp_code;
4790 tree val;
4792 /* Otherwise, we have a comparison of the form NAME COMP VAL
4793 or VAL COMP NAME. */
4794 if (name == cond_op1)
4796 /* If the predicate is of the form VAL COMP NAME, flip
4797 COMP around because we need to register NAME as the
4798 first operand in the predicate. */
4799 comp_code = swap_tree_comparison (cond_code);
4800 val = cond_op0;
4802 else if (name == cond_op0)
4804 /* The comparison is of the form NAME COMP VAL, so the
4805 comparison code remains unchanged. */
4806 comp_code = cond_code;
4807 val = cond_op1;
4809 else
4810 gcc_unreachable ();
4812 /* Invert the comparison code as necessary. */
4813 if (invert)
4814 comp_code = invert_tree_comparison (comp_code, 0);
4816 /* VRP only handles integral and pointer types. */
4817 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
4818 && ! POINTER_TYPE_P (TREE_TYPE (val)))
4819 return false;
4821 /* Do not register always-false predicates.
4822 FIXME: this works around a limitation in fold() when dealing with
4823 enumerations. Given 'enum { N1, N2 } x;', fold will not
4824 fold 'if (x > N2)' to 'if (0)'. */
4825 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4826 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4828 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4829 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4831 if (comp_code == GT_EXPR
4832 && (!max
4833 || compare_values (val, max) == 0))
4834 return false;
4836 if (comp_code == LT_EXPR
4837 && (!min
4838 || compare_values (val, min) == 0))
4839 return false;
4841 *code_p = comp_code;
4842 *val_p = val;
4843 return true;
4846 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
4847 (otherwise return VAL). VAL and MASK must be zero-extended for
4848 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
4849 (to transform signed values into unsigned) and at the end xor
4850 SGNBIT back. */
4852 static wide_int
4853 masked_increment (const wide_int &val_in, const wide_int &mask,
4854 const wide_int &sgnbit, unsigned int prec)
4856 wide_int bit = wi::one (prec), res;
4857 unsigned int i;
4859 wide_int val = val_in ^ sgnbit;
4860 for (i = 0; i < prec; i++, bit += bit)
4862 res = mask;
4863 if ((res & bit) == 0)
4864 continue;
4865 res = bit - 1;
4866 res = (val + bit).and_not (res);
4867 res &= mask;
4868 if (wi::gtu_p (res, val))
4869 return res ^ sgnbit;
4871 return val ^ sgnbit;
4874 /* Helper for overflow_comparison_p
4876 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
4877 OP1's defining statement to see if it ultimately has the form
4878 OP0 CODE (OP0 PLUS INTEGER_CST)
4880 If so, return TRUE indicating this is an overflow test and store into
4881 *NEW_CST an updated constant that can be used in a narrowed range test.
4883 REVERSED indicates if the comparison was originally:
4885 OP1 CODE' OP0.
4887 This affects how we build the updated constant. */
4889 static bool
4890 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
4891 bool follow_assert_exprs, bool reversed, tree *new_cst)
4893 /* See if this is a relational operation between two SSA_NAMES with
4894 unsigned, overflow wrapping values. If so, check it more deeply. */
4895 if ((code == LT_EXPR || code == LE_EXPR
4896 || code == GE_EXPR || code == GT_EXPR)
4897 && TREE_CODE (op0) == SSA_NAME
4898 && TREE_CODE (op1) == SSA_NAME
4899 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
4900 && TYPE_UNSIGNED (TREE_TYPE (op0))
4901 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
4903 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
4905 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
4906 if (follow_assert_exprs)
4908 while (gimple_assign_single_p (op1_def)
4909 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
4911 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
4912 if (TREE_CODE (op1) != SSA_NAME)
4913 break;
4914 op1_def = SSA_NAME_DEF_STMT (op1);
4918 /* Now look at the defining statement of OP1 to see if it adds
4919 or subtracts a nonzero constant from another operand. */
4920 if (op1_def
4921 && is_gimple_assign (op1_def)
4922 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
4923 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
4924 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
4926 tree target = gimple_assign_rhs1 (op1_def);
4928 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
4929 for one where TARGET appears on the RHS. */
4930 if (follow_assert_exprs)
4932 /* Now see if that "other operand" is op0, following the chain
4933 of ASSERT_EXPRs if necessary. */
4934 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
4935 while (op0 != target
4936 && gimple_assign_single_p (op0_def)
4937 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
4939 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
4940 if (TREE_CODE (op0) != SSA_NAME)
4941 break;
4942 op0_def = SSA_NAME_DEF_STMT (op0);
4946 /* If we did not find our target SSA_NAME, then this is not
4947 an overflow test. */
4948 if (op0 != target)
4949 return false;
4951 tree type = TREE_TYPE (op0);
4952 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
4953 tree inc = gimple_assign_rhs2 (op1_def);
4954 if (reversed)
4955 *new_cst = wide_int_to_tree (type, max + inc);
4956 else
4957 *new_cst = wide_int_to_tree (type, max - inc);
4958 return true;
4961 return false;
4964 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
4965 OP1's defining statement to see if it ultimately has the form
4966 OP0 CODE (OP0 PLUS INTEGER_CST)
4968 If so, return TRUE indicating this is an overflow test and store into
4969 *NEW_CST an updated constant that can be used in a narrowed range test.
4971 These statements are left as-is in the IL to facilitate discovery of
4972 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
4973 the alternate range representation is often useful within VRP. */
4975 static bool
4976 overflow_comparison_p (tree_code code, tree name, tree val,
4977 bool use_equiv_p, tree *new_cst)
4979 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
4980 return true;
4981 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
4982 use_equiv_p, true, new_cst);
4986 /* Try to register an edge assertion for SSA name NAME on edge E for
4987 the condition COND contributing to the conditional jump pointed to by BSI.
4988 Invert the condition COND if INVERT is true. */
4990 static void
4991 register_edge_assert_for_2 (tree name, edge e,
4992 enum tree_code cond_code,
4993 tree cond_op0, tree cond_op1, bool invert,
4994 vec<assert_info> &asserts)
4996 tree val;
4997 enum tree_code comp_code;
4999 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5000 cond_op0,
5001 cond_op1,
5002 invert, &comp_code, &val))
5003 return;
5005 /* Queue the assert. */
5006 tree x;
5007 if (overflow_comparison_p (comp_code, name, val, false, &x))
5009 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
5010 ? GT_EXPR : LE_EXPR);
5011 add_assert_info (asserts, name, name, new_code, x);
5013 add_assert_info (asserts, name, name, comp_code, val);
5015 /* In the case of NAME <= CST and NAME being defined as
5016 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
5017 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
5018 This catches range and anti-range tests. */
5019 if ((comp_code == LE_EXPR
5020 || comp_code == GT_EXPR)
5021 && TREE_CODE (val) == INTEGER_CST
5022 && TYPE_UNSIGNED (TREE_TYPE (val)))
5024 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5025 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
5027 /* Extract CST2 from the (optional) addition. */
5028 if (is_gimple_assign (def_stmt)
5029 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
5031 name2 = gimple_assign_rhs1 (def_stmt);
5032 cst2 = gimple_assign_rhs2 (def_stmt);
5033 if (TREE_CODE (name2) == SSA_NAME
5034 && TREE_CODE (cst2) == INTEGER_CST)
5035 def_stmt = SSA_NAME_DEF_STMT (name2);
5038 /* Extract NAME2 from the (optional) sign-changing cast. */
5039 if (gimple_assign_cast_p (def_stmt))
5041 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
5042 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5043 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
5044 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
5045 name3 = gimple_assign_rhs1 (def_stmt);
5048 /* If name3 is used later, create an ASSERT_EXPR for it. */
5049 if (name3 != NULL_TREE
5050 && TREE_CODE (name3) == SSA_NAME
5051 && (cst2 == NULL_TREE
5052 || TREE_CODE (cst2) == INTEGER_CST)
5053 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
5055 tree tmp;
5057 /* Build an expression for the range test. */
5058 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
5059 if (cst2 != NULL_TREE)
5060 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5062 if (dump_file)
5064 fprintf (dump_file, "Adding assert for ");
5065 print_generic_expr (dump_file, name3);
5066 fprintf (dump_file, " from ");
5067 print_generic_expr (dump_file, tmp);
5068 fprintf (dump_file, "\n");
5071 add_assert_info (asserts, name3, tmp, comp_code, val);
5074 /* If name2 is used later, create an ASSERT_EXPR for it. */
5075 if (name2 != NULL_TREE
5076 && TREE_CODE (name2) == SSA_NAME
5077 && TREE_CODE (cst2) == INTEGER_CST
5078 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
5080 tree tmp;
5082 /* Build an expression for the range test. */
5083 tmp = name2;
5084 if (TREE_TYPE (name) != TREE_TYPE (name2))
5085 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
5086 if (cst2 != NULL_TREE)
5087 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5089 if (dump_file)
5091 fprintf (dump_file, "Adding assert for ");
5092 print_generic_expr (dump_file, name2);
5093 fprintf (dump_file, " from ");
5094 print_generic_expr (dump_file, tmp);
5095 fprintf (dump_file, "\n");
5098 add_assert_info (asserts, name2, tmp, comp_code, val);
5102 /* In the case of post-in/decrement tests like if (i++) ... and uses
5103 of the in/decremented value on the edge the extra name we want to
5104 assert for is not on the def chain of the name compared. Instead
5105 it is in the set of use stmts.
5106 Similar cases happen for conversions that were simplified through
5107 fold_{sign_changed,widened}_comparison. */
5108 if ((comp_code == NE_EXPR
5109 || comp_code == EQ_EXPR)
5110 && TREE_CODE (val) == INTEGER_CST)
5112 imm_use_iterator ui;
5113 gimple *use_stmt;
5114 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
5116 if (!is_gimple_assign (use_stmt))
5117 continue;
5119 /* Cut off to use-stmts that are dominating the predecessor. */
5120 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
5121 continue;
5123 tree name2 = gimple_assign_lhs (use_stmt);
5124 if (TREE_CODE (name2) != SSA_NAME)
5125 continue;
5127 enum tree_code code = gimple_assign_rhs_code (use_stmt);
5128 tree cst;
5129 if (code == PLUS_EXPR
5130 || code == MINUS_EXPR)
5132 cst = gimple_assign_rhs2 (use_stmt);
5133 if (TREE_CODE (cst) != INTEGER_CST)
5134 continue;
5135 cst = int_const_binop (code, val, cst);
5137 else if (CONVERT_EXPR_CODE_P (code))
5139 /* For truncating conversions we cannot record
5140 an inequality. */
5141 if (comp_code == NE_EXPR
5142 && (TYPE_PRECISION (TREE_TYPE (name2))
5143 < TYPE_PRECISION (TREE_TYPE (name))))
5144 continue;
5145 cst = fold_convert (TREE_TYPE (name2), val);
5147 else
5148 continue;
5150 if (TREE_OVERFLOW_P (cst))
5151 cst = drop_tree_overflow (cst);
5152 add_assert_info (asserts, name2, name2, comp_code, cst);
5156 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
5157 && TREE_CODE (val) == INTEGER_CST)
5159 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5160 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
5161 tree val2 = NULL_TREE;
5162 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
5163 wide_int mask = wi::zero (prec);
5164 unsigned int nprec = prec;
5165 enum tree_code rhs_code = ERROR_MARK;
5167 if (is_gimple_assign (def_stmt))
5168 rhs_code = gimple_assign_rhs_code (def_stmt);
5170 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
5171 assert that A != CST1 -+ CST2. */
5172 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
5173 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
5175 tree op0 = gimple_assign_rhs1 (def_stmt);
5176 tree op1 = gimple_assign_rhs2 (def_stmt);
5177 if (TREE_CODE (op0) == SSA_NAME
5178 && TREE_CODE (op1) == INTEGER_CST)
5180 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
5181 ? MINUS_EXPR : PLUS_EXPR);
5182 op1 = int_const_binop (reverse_op, val, op1);
5183 if (TREE_OVERFLOW (op1))
5184 op1 = drop_tree_overflow (op1);
5185 add_assert_info (asserts, op0, op0, comp_code, op1);
5189 /* Add asserts for NAME cmp CST and NAME being defined
5190 as NAME = (int) NAME2. */
5191 if (!TYPE_UNSIGNED (TREE_TYPE (val))
5192 && (comp_code == LE_EXPR || comp_code == LT_EXPR
5193 || comp_code == GT_EXPR || comp_code == GE_EXPR)
5194 && gimple_assign_cast_p (def_stmt))
5196 name2 = gimple_assign_rhs1 (def_stmt);
5197 if (CONVERT_EXPR_CODE_P (rhs_code)
5198 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5199 && TYPE_UNSIGNED (TREE_TYPE (name2))
5200 && prec == TYPE_PRECISION (TREE_TYPE (name2))
5201 && (comp_code == LE_EXPR || comp_code == GT_EXPR
5202 || !tree_int_cst_equal (val,
5203 TYPE_MIN_VALUE (TREE_TYPE (val)))))
5205 tree tmp, cst;
5206 enum tree_code new_comp_code = comp_code;
5208 cst = fold_convert (TREE_TYPE (name2),
5209 TYPE_MIN_VALUE (TREE_TYPE (val)));
5210 /* Build an expression for the range test. */
5211 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
5212 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
5213 fold_convert (TREE_TYPE (name2), val));
5214 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5216 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
5217 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
5218 build_int_cst (TREE_TYPE (name2), 1));
5221 if (dump_file)
5223 fprintf (dump_file, "Adding assert for ");
5224 print_generic_expr (dump_file, name2);
5225 fprintf (dump_file, " from ");
5226 print_generic_expr (dump_file, tmp);
5227 fprintf (dump_file, "\n");
5230 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
5234 /* Add asserts for NAME cmp CST and NAME being defined as
5235 NAME = NAME2 >> CST2.
5237 Extract CST2 from the right shift. */
5238 if (rhs_code == RSHIFT_EXPR)
5240 name2 = gimple_assign_rhs1 (def_stmt);
5241 cst2 = gimple_assign_rhs2 (def_stmt);
5242 if (TREE_CODE (name2) == SSA_NAME
5243 && tree_fits_uhwi_p (cst2)
5244 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5245 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
5246 && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val))))
5248 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
5249 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
5252 if (val2 != NULL_TREE
5253 && TREE_CODE (val2) == INTEGER_CST
5254 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
5255 TREE_TYPE (val),
5256 val2, cst2), val))
5258 enum tree_code new_comp_code = comp_code;
5259 tree tmp, new_val;
5261 tmp = name2;
5262 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
5264 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5266 tree type = build_nonstandard_integer_type (prec, 1);
5267 tmp = build1 (NOP_EXPR, type, name2);
5268 val2 = fold_convert (type, val2);
5270 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
5271 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
5272 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
5274 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5276 wide_int minval
5277 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5278 new_val = val2;
5279 if (minval == new_val)
5280 new_val = NULL_TREE;
5282 else
5284 wide_int maxval
5285 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5286 mask |= val2;
5287 if (mask == maxval)
5288 new_val = NULL_TREE;
5289 else
5290 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
5293 if (new_val)
5295 if (dump_file)
5297 fprintf (dump_file, "Adding assert for ");
5298 print_generic_expr (dump_file, name2);
5299 fprintf (dump_file, " from ");
5300 print_generic_expr (dump_file, tmp);
5301 fprintf (dump_file, "\n");
5304 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
5308 /* Add asserts for NAME cmp CST and NAME being defined as
5309 NAME = NAME2 & CST2.
5311 Extract CST2 from the and.
5313 Also handle
5314 NAME = (unsigned) NAME2;
5315 casts where NAME's type is unsigned and has smaller precision
5316 than NAME2's type as if it was NAME = NAME2 & MASK. */
5317 names[0] = NULL_TREE;
5318 names[1] = NULL_TREE;
5319 cst2 = NULL_TREE;
5320 if (rhs_code == BIT_AND_EXPR
5321 || (CONVERT_EXPR_CODE_P (rhs_code)
5322 && INTEGRAL_TYPE_P (TREE_TYPE (val))
5323 && TYPE_UNSIGNED (TREE_TYPE (val))
5324 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5325 > prec))
5327 name2 = gimple_assign_rhs1 (def_stmt);
5328 if (rhs_code == BIT_AND_EXPR)
5329 cst2 = gimple_assign_rhs2 (def_stmt);
5330 else
5332 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
5333 nprec = TYPE_PRECISION (TREE_TYPE (name2));
5335 if (TREE_CODE (name2) == SSA_NAME
5336 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5337 && TREE_CODE (cst2) == INTEGER_CST
5338 && !integer_zerop (cst2)
5339 && (nprec > 1
5340 || TYPE_UNSIGNED (TREE_TYPE (val))))
5342 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
5343 if (gimple_assign_cast_p (def_stmt2))
5345 names[1] = gimple_assign_rhs1 (def_stmt2);
5346 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
5347 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
5348 || (TYPE_PRECISION (TREE_TYPE (name2))
5349 != TYPE_PRECISION (TREE_TYPE (names[1]))))
5350 names[1] = NULL_TREE;
5352 names[0] = name2;
5355 if (names[0] || names[1])
5357 wide_int minv, maxv, valv, cst2v;
5358 wide_int tem, sgnbit;
5359 bool valid_p = false, valn, cst2n;
5360 enum tree_code ccode = comp_code;
5362 valv = wide_int::from (val, nprec, UNSIGNED);
5363 cst2v = wide_int::from (cst2, nprec, UNSIGNED);
5364 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
5365 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
5366 /* If CST2 doesn't have most significant bit set,
5367 but VAL is negative, we have comparison like
5368 if ((x & 0x123) > -4) (always true). Just give up. */
5369 if (!cst2n && valn)
5370 ccode = ERROR_MARK;
5371 if (cst2n)
5372 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5373 else
5374 sgnbit = wi::zero (nprec);
5375 minv = valv & cst2v;
5376 switch (ccode)
5378 case EQ_EXPR:
5379 /* Minimum unsigned value for equality is VAL & CST2
5380 (should be equal to VAL, otherwise we probably should
5381 have folded the comparison into false) and
5382 maximum unsigned value is VAL | ~CST2. */
5383 maxv = valv | ~cst2v;
5384 valid_p = true;
5385 break;
5387 case NE_EXPR:
5388 tem = valv | ~cst2v;
5389 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
5390 if (valv == 0)
5392 cst2n = false;
5393 sgnbit = wi::zero (nprec);
5394 goto gt_expr;
5396 /* If (VAL | ~CST2) is all ones, handle it as
5397 (X & CST2) < VAL. */
5398 if (tem == -1)
5400 cst2n = false;
5401 valn = false;
5402 sgnbit = wi::zero (nprec);
5403 goto lt_expr;
5405 if (!cst2n && wi::neg_p (cst2v))
5406 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5407 if (sgnbit != 0)
5409 if (valv == sgnbit)
5411 cst2n = true;
5412 valn = true;
5413 goto gt_expr;
5415 if (tem == wi::mask (nprec - 1, false, nprec))
5417 cst2n = true;
5418 goto lt_expr;
5420 if (!cst2n)
5421 sgnbit = wi::zero (nprec);
5423 break;
5425 case GE_EXPR:
5426 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5427 is VAL and maximum unsigned value is ~0. For signed
5428 comparison, if CST2 doesn't have most significant bit
5429 set, handle it similarly. If CST2 has MSB set,
5430 the minimum is the same, and maximum is ~0U/2. */
5431 if (minv != valv)
5433 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5434 VAL. */
5435 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5436 if (minv == valv)
5437 break;
5439 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5440 valid_p = true;
5441 break;
5443 case GT_EXPR:
5444 gt_expr:
5445 /* Find out smallest MINV where MINV > VAL
5446 && (MINV & CST2) == MINV, if any. If VAL is signed and
5447 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
5448 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5449 if (minv == valv)
5450 break;
5451 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5452 valid_p = true;
5453 break;
5455 case LE_EXPR:
5456 /* Minimum unsigned value for <= is 0 and maximum
5457 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5458 Otherwise, find smallest VAL2 where VAL2 > VAL
5459 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5460 as maximum.
5461 For signed comparison, if CST2 doesn't have most
5462 significant bit set, handle it similarly. If CST2 has
5463 MSB set, the maximum is the same and minimum is INT_MIN. */
5464 if (minv == valv)
5465 maxv = valv;
5466 else
5468 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5469 if (maxv == valv)
5470 break;
5471 maxv -= 1;
5473 maxv |= ~cst2v;
5474 minv = sgnbit;
5475 valid_p = true;
5476 break;
5478 case LT_EXPR:
5479 lt_expr:
5480 /* Minimum unsigned value for < is 0 and maximum
5481 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5482 Otherwise, find smallest VAL2 where VAL2 > VAL
5483 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5484 as maximum.
5485 For signed comparison, if CST2 doesn't have most
5486 significant bit set, handle it similarly. If CST2 has
5487 MSB set, the maximum is the same and minimum is INT_MIN. */
5488 if (minv == valv)
5490 if (valv == sgnbit)
5491 break;
5492 maxv = valv;
5494 else
5496 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5497 if (maxv == valv)
5498 break;
5500 maxv -= 1;
5501 maxv |= ~cst2v;
5502 minv = sgnbit;
5503 valid_p = true;
5504 break;
5506 default:
5507 break;
5509 if (valid_p
5510 && (maxv - minv) != -1)
5512 tree tmp, new_val, type;
5513 int i;
5515 for (i = 0; i < 2; i++)
5516 if (names[i])
5518 wide_int maxv2 = maxv;
5519 tmp = names[i];
5520 type = TREE_TYPE (names[i]);
5521 if (!TYPE_UNSIGNED (type))
5523 type = build_nonstandard_integer_type (nprec, 1);
5524 tmp = build1 (NOP_EXPR, type, names[i]);
5526 if (minv != 0)
5528 tmp = build2 (PLUS_EXPR, type, tmp,
5529 wide_int_to_tree (type, -minv));
5530 maxv2 = maxv - minv;
5532 new_val = wide_int_to_tree (type, maxv2);
5534 if (dump_file)
5536 fprintf (dump_file, "Adding assert for ");
5537 print_generic_expr (dump_file, names[i]);
5538 fprintf (dump_file, " from ");
5539 print_generic_expr (dump_file, tmp);
5540 fprintf (dump_file, "\n");
5543 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
5550 /* OP is an operand of a truth value expression which is known to have
5551 a particular value. Register any asserts for OP and for any
5552 operands in OP's defining statement.
5554 If CODE is EQ_EXPR, then we want to register OP is zero (false),
5555 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
5557 static void
5558 register_edge_assert_for_1 (tree op, enum tree_code code,
5559 edge e, vec<assert_info> &asserts)
5561 gimple *op_def;
5562 tree val;
5563 enum tree_code rhs_code;
5565 /* We only care about SSA_NAMEs. */
5566 if (TREE_CODE (op) != SSA_NAME)
5567 return;
5569 /* We know that OP will have a zero or nonzero value. */
5570 val = build_int_cst (TREE_TYPE (op), 0);
5571 add_assert_info (asserts, op, op, code, val);
5573 /* Now look at how OP is set. If it's set from a comparison,
5574 a truth operation or some bit operations, then we may be able
5575 to register information about the operands of that assignment. */
5576 op_def = SSA_NAME_DEF_STMT (op);
5577 if (gimple_code (op_def) != GIMPLE_ASSIGN)
5578 return;
5580 rhs_code = gimple_assign_rhs_code (op_def);
5582 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5584 bool invert = (code == EQ_EXPR ? true : false);
5585 tree op0 = gimple_assign_rhs1 (op_def);
5586 tree op1 = gimple_assign_rhs2 (op_def);
5588 if (TREE_CODE (op0) == SSA_NAME)
5589 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
5590 if (TREE_CODE (op1) == SSA_NAME)
5591 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
5593 else if ((code == NE_EXPR
5594 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5595 || (code == EQ_EXPR
5596 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5598 /* Recurse on each operand. */
5599 tree op0 = gimple_assign_rhs1 (op_def);
5600 tree op1 = gimple_assign_rhs2 (op_def);
5601 if (TREE_CODE (op0) == SSA_NAME
5602 && has_single_use (op0))
5603 register_edge_assert_for_1 (op0, code, e, asserts);
5604 if (TREE_CODE (op1) == SSA_NAME
5605 && has_single_use (op1))
5606 register_edge_assert_for_1 (op1, code, e, asserts);
5608 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5609 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5611 /* Recurse, flipping CODE. */
5612 code = invert_tree_comparison (code, false);
5613 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
5615 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5617 /* Recurse through the copy. */
5618 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
5620 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5622 /* Recurse through the type conversion, unless it is a narrowing
5623 conversion or conversion from non-integral type. */
5624 tree rhs = gimple_assign_rhs1 (op_def);
5625 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
5626 && (TYPE_PRECISION (TREE_TYPE (rhs))
5627 <= TYPE_PRECISION (TREE_TYPE (op))))
5628 register_edge_assert_for_1 (rhs, code, e, asserts);
5632 /* Check if comparison
5633 NAME COND_OP INTEGER_CST
5634 has a form of
5635 (X & 11...100..0) COND_OP XX...X00...0
5636 Such comparison can yield assertions like
5637 X >= XX...X00...0
5638 X <= XX...X11...1
5639 in case of COND_OP being NE_EXPR or
5640 X < XX...X00...0
5641 X > XX...X11...1
5642 in case of EQ_EXPR. */
5644 static bool
5645 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
5646 tree *new_name, tree *low, enum tree_code *low_code,
5647 tree *high, enum tree_code *high_code)
5649 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
/* NAME must be defined as X & CONSTANT_MASK for the pattern to apply.  */
5651 if (!is_gimple_assign (def_stmt)
5652 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
5653 return false;
5655 tree t = gimple_assign_rhs1 (def_stmt);
5656 tree maskt = gimple_assign_rhs2 (def_stmt);
5657 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
5658 return false;
5660 wide_int mask = maskt;
5661 wide_int inv_mask = ~mask;
5662 wide_int val = valt; // Assume VALT is INTEGER_CST
/* MASK must be of the contiguous form 11...100...0, i.e. INV_MASK + 1
   must be a power of two, and VAL must have no bits set outside MASK;
   otherwise this is not a contiguous range test.  */
5664 if ((inv_mask & (inv_mask + 1)) != 0
5665 || (val & mask) != val)
5666 return false;
5668 bool is_range = cond_code == EQ_EXPR;
5670 tree type = TREE_TYPE (t);
5671 wide_int min = wi::min_value (type),
5672 max = wi::max_value (type);
5674 if (is_range)
/* EQ_EXPR: X lies inside [VAL, VAL | INV_MASK].  ERROR_MARK marks a
   bound already implied by TYPE's limits, so no assert is needed for
   it (callers test against ERROR_MARK).  */
5676 *low_code = val == min ? ERROR_MARK : GE_EXPR;
5677 *high_code = val == max ? ERROR_MARK : LE_EXPR;
5679 else
5681 /* We can still generate assertion if one of alternatives
5682 is known to always be false. */
/* (enum tree_code) 0 is ERROR_MARK, i.e. "no assertion for this
   bound".  */
5683 if (val == min)
5685 *low_code = (enum tree_code) 0;
5686 *high_code = GT_EXPR;
5688 else if ((val | inv_mask) == max)
5690 *low_code = LT_EXPR;
5691 *high_code = (enum tree_code) 0;
5693 else
5694 return false;
5697 *new_name = t;
5698 *low = wide_int_to_tree (type, val);
5699 *high = wide_int_to_tree (type, val | inv_mask);
/* When VAL is negative in TYPE's sign the two bounds come out in the
   opposite order, so swap them before returning.  */
5701 if (wi::neg_p (val, TYPE_SIGN (type)))
5702 std::swap (*low, *high);
5704 return true;
5707 /* Try to register an edge assertion for SSA name NAME on edge E for
5708 the condition COND contributing to the conditional jump pointed to by
5709 SI. */
5711 static void
5712 register_edge_assert_for (tree name, edge e,
5713 enum tree_code cond_code, tree cond_op0,
5714 tree cond_op1, vec<assert_info> &asserts)
5716 tree val;
5717 enum tree_code comp_code;
/* On the else (false) edge the condition must be interpreted
   inverted.  */
5718 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
5720 /* Do not attempt to infer anything in names that flow through
5721 abnormal edges. */
5722 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
5723 return;
/* Normalize the condition into the form NAME COMP_CODE VAL; give up
   if that is not possible.  */
5725 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5726 cond_op0, cond_op1,
5727 is_else_edge,
5728 &comp_code, &val))
5729 return;
5731 /* Register ASSERT_EXPRs for name. */
5732 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
5733 cond_op1, is_else_edge, asserts);
5736 /* If COND is effectively an equality test of an SSA_NAME against
5737 the value zero or one, then we may be able to assert values
5738 for SSA_NAMEs which flow into COND. */
5740 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
5741 statement of NAME we can assert both operands of the BIT_AND_EXPR
5742 have nonzero value. */
5743 if (((comp_code == EQ_EXPR && integer_onep (val))
5744 || (comp_code == NE_EXPR && integer_zerop (val))))
5746 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5748 if (is_gimple_assign (def_stmt)
5749 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
5751 tree op0 = gimple_assign_rhs1 (def_stmt);
5752 tree op1 = gimple_assign_rhs2 (def_stmt);
5753 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
5754 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
5758 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
5759 statement of NAME we can assert both operands of the BIT_IOR_EXPR
5760 have zero value. */
5761 if (((comp_code == EQ_EXPR && integer_zerop (val))
5762 || (comp_code == NE_EXPR && integer_onep (val))))
5764 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5766 /* For BIT_IOR_EXPR only if NAME == 0 both operands have
5767 necessarily zero value, or if type-precision is one. */
5768 if (is_gimple_assign (def_stmt)
5769 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
5770 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
5771 || comp_code == EQ_EXPR)))
5773 tree op0 = gimple_assign_rhs1 (def_stmt);
5774 tree op1 = gimple_assign_rhs2 (def_stmt);
5775 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
5776 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
5780 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
5781 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
5782 && TREE_CODE (val) == INTEGER_CST)
5784 enum tree_code low_code, high_code;
5785 tree low, high;
/* Note: on success is_masked_range_test replaces NAME (via the &name
   out-parameter) with the BIT_AND_EXPR operand X, so the assertions
   below apply to X, not the original NAME.  */
5786 if (is_masked_range_test (name, val, comp_code, &name, &low,
5787 &low_code, &high, &high_code))
5789 if (low_code != ERROR_MARK)
5790 register_edge_assert_for_2 (name, e, low_code, name,
5791 low, /*invert*/false, asserts);
5792 if (high_code != ERROR_MARK)
5793 register_edge_assert_for_2 (name, e, high_code, name,
5794 high, /*invert*/false, asserts);
5799 /* Finish found ASSERTS for E and register them at GSI. */
5801 static void
5802 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
5803 vec<assert_info> &asserts)
5805 for (unsigned i = 0; i < asserts.length (); ++i)
5806 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
5807 reachable from E. */
5808 if (live_on_edge (e, asserts[i].name))
5809 register_new_assert_for (asserts[i].name, asserts[i].expr,
5810 asserts[i].comp_code, asserts[i].val,
5811 NULL, e, gsi);
5816 /* Determine whether the outgoing edges of BB should receive an
5817 ASSERT_EXPR for each of the operands of BB's LAST statement.
5818 The last statement of BB must be a COND_EXPR.
5820 If any of the sub-graphs rooted at BB have an interesting use of
5821 the predicate operands, an assert location node is added to the
5822 list of assertions for the corresponding operands. */
5824 static void
5825 find_conditional_asserts (basic_block bb, gcond *last)
5827 gimple_stmt_iterator bsi;
5828 tree op;
5829 edge_iterator ei;
5830 edge e;
5831 ssa_op_iter iter;
5833 bsi = gsi_for_stmt (last);
5835 /* Look for uses of the operands in each of the sub-graphs
5836 rooted at BB. We need to check each of the outgoing edges
5837 separately, so that we know what kind of ASSERT_EXPR to
5838 insert. */
5839 FOR_EACH_EDGE (e, ei, bb->succs)
5841 if (e->dest == bb)
5842 continue;
5844 /* Register the necessary assertions for each operand in the
5845 conditional predicate. */
5846 auto_vec<assert_info, 8> asserts;
5847 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
5848 register_edge_assert_for (op, e,
5849 gimple_cond_code (last),
5850 gimple_cond_lhs (last),
5851 gimple_cond_rhs (last), asserts);
5852 finish_register_edge_assert_for (e, bsi, asserts);
/* Pairs a switch case label with its destination block; used by
   find_switch_asserts to sort labels by destination.  */
5856 struct case_info
5858 tree expr; /* The case label (accessed via CASE_LOW/CASE_HIGH/CASE_LABEL).  */
5859 basic_block bb; /* The block the label transfers control to.  */
5862 /* Compare two case labels sorting first by the destination bb index
5863 and then by the case value. */
5865 static int
5866 compare_case_labels (const void *p1, const void *p2)
5868 const struct case_info *ci1 = (const struct case_info *) p1;
5869 const struct case_info *ci2 = (const struct case_info *) p2;
5870 int idx1 = ci1->bb->index;
5871 int idx2 = ci2->bb->index;
5873 if (idx1 < idx2)
5874 return -1;
5875 else if (idx1 == idx2)
5877 /* Make sure the default label is first in a group. */
5878 if (!CASE_LOW (ci1->expr))
5879 return -1;
5880 else if (!CASE_LOW (ci2->expr))
5881 return 1;
5882 else
5883 return tree_int_cst_compare (CASE_LOW (ci1->expr),
5884 CASE_LOW (ci2->expr));
5886 else
5887 return 1;
5890 /* Determine whether the outgoing edges of BB should receive an
5891 ASSERT_EXPR for each of the operands of BB's LAST statement.
5892 The last statement of BB must be a SWITCH_EXPR.
5894 If any of the sub-graphs rooted at BB have an interesting use of
5895 the predicate operands, an assert location node is added to the
5896 list of assertions for the corresponding operands. */
5898 static void
5899 find_switch_asserts (basic_block bb, gswitch *last)
5901 gimple_stmt_iterator bsi;
5902 tree op;
5903 edge e;
5904 struct case_info *ci;
5905 size_t n = gimple_switch_num_labels (last);
5906 #if GCC_VERSION >= 4000
5907 unsigned int idx;
5908 #else
5909 /* Work around GCC 3.4 bug (PR 37086). */
5910 volatile unsigned int idx;
5911 #endif
5913 bsi = gsi_for_stmt (last);
/* Only an SSA_NAME switch index can carry assertions.  */
5914 op = gimple_switch_index (last);
5915 if (TREE_CODE (op) != SSA_NAME)
5916 return;
5918 /* Build a vector of case labels sorted by destination label. */
5919 ci = XNEWVEC (struct case_info, n);
5920 for (idx = 0; idx < n; ++idx)
5922 ci[idx].expr = gimple_switch_label (last, idx);
5923 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
/* CI[0] is the default label (see gimple_switch_default_label below),
   so record its edge before sorting reorders the vector.  */
5925 edge default_edge = find_edge (bb, ci[0].bb);
5926 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
5928 for (idx = 0; idx < n; ++idx)
5930 tree min, max;
5931 tree cl = ci[idx].expr;
5932 basic_block cbb = ci[idx].bb;
5934 min = CASE_LOW (cl);
5935 max = CASE_HIGH (cl);
5937 /* If there are multiple case labels with the same destination
5938 we need to combine them to a single value range for the edge. */
5939 if (idx + 1 < n && cbb == ci[idx + 1].bb)
5941 /* Skip labels until the last of the group. */
5942 do {
5943 ++idx;
5944 } while (idx < n && cbb == ci[idx].bb);
5945 --idx;
5947 /* Pick up the maximum of the case label range. */
5948 if (CASE_HIGH (ci[idx].expr))
5949 max = CASE_HIGH (ci[idx].expr);
5950 else
5951 max = CASE_LOW (ci[idx].expr);
5954 /* Can't extract a useful assertion out of a range that includes the
5955 default label. */
5956 if (min == NULL_TREE)
5957 continue;
5959 /* Find the edge to register the assert expr on. */
5960 e = find_edge (bb, cbb);
5962 /* Register the necessary assertions for the operand in the
5963 SWITCH_EXPR. */
/* A range label [MIN, MAX] yields OP >= MIN and OP <= MAX; a single
   value label (no MAX) yields OP == MIN.  */
5964 auto_vec<assert_info, 8> asserts;
5965 register_edge_assert_for (op, e,
5966 max ? GE_EXPR : EQ_EXPR,
5967 op, fold_convert (TREE_TYPE (op), min),
5968 asserts);
5969 if (max)
5970 register_edge_assert_for (op, e, LE_EXPR, op,
5971 fold_convert (TREE_TYPE (op), max),
5972 asserts);
5973 finish_register_edge_assert_for (e, bsi, asserts);
5976 XDELETEVEC (ci);
5978 if (!live_on_edge (default_edge, op))
5979 return;
5981 /* Now register along the default label assertions that correspond to the
5982 anti-range of each label. */
5983 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
5984 if (insertion_limit == 0)
5985 return;
5987 /* We can't do this if the default case shares a label with another case. */
5988 tree default_cl = gimple_switch_default_label (last);
5989 for (idx = 1; idx < n; idx++)
5991 tree min, max;
5992 tree cl = gimple_switch_label (last, idx);
5993 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
5994 continue;
5996 min = CASE_LOW (cl);
5997 max = CASE_HIGH (cl);
5999 /* Combine contiguous case ranges to reduce the number of assertions
6000 to insert. */
6001 for (idx = idx + 1; idx < n; idx++)
6003 tree next_min, next_max;
6004 tree next_cl = gimple_switch_label (last, idx);
6005 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
6006 break;
6008 next_min = CASE_LOW (next_cl);
6009 next_max = CASE_HIGH (next_cl);
/* Adjacent labels merge: the next label extends the current range iff
   its low bound is exactly one above the current high bound.  */
6011 wide_int difference = wi::sub (next_min, max ? max : min);
6012 if (wi::eq_p (difference, 1))
6013 max = next_max ? next_max : next_min;
6014 else
6015 break;
/* Step back so the outer loop revisits the label that broke the run.  */
6017 idx--;
6019 if (max == NULL_TREE)
6021 /* Register the assertion OP != MIN. */
6022 auto_vec<assert_info, 8> asserts;
6023 min = fold_convert (TREE_TYPE (op), min);
6024 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
6025 asserts);
6026 finish_register_edge_assert_for (default_edge, bsi, asserts);
6028 else
6030 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
6031 which will give OP the anti-range ~[MIN,MAX]. */
6032 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
6033 min = fold_convert (TREE_TYPE (uop), min);
6034 max = fold_convert (TREE_TYPE (uop), max);
6036 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
6037 tree rhs = int_const_binop (MINUS_EXPR, max, min);
6038 register_new_assert_for (op, lhs, GT_EXPR, rhs,
6039 NULL, default_edge, bsi);
/* Respect PARAM_MAX_VRP_SWITCH_ASSERTIONS: stop once the budget for
   default-edge assertions is used up.  */
6042 if (--insertion_limit == 0)
6043 break;
6048 /* Traverse all the statements in block BB looking for statements that
6049 may generate useful assertions for the SSA names in their operand.
6050 If a statement produces a useful assertion A for name N_i, then the
6051 list of assertions already generated for N_i is scanned to
6052 determine if A is actually needed.
6054 If N_i already had the assertion A at a location dominating the
6055 current location, then nothing needs to be done. Otherwise, the
6056 new location for A is recorded instead.
6058 1- For every statement S in BB, all the variables used by S are
6059 added to bitmap FOUND_IN_SUBGRAPH.
6061 2- If statement S uses an operand N in a way that exposes a known
6062 value range for N, then if N was not already generated by an
6063 ASSERT_EXPR, create a new assert location for N. For instance,
6064 if N is a pointer and the statement dereferences it, we can
6065 assume that N is not NULL.
6067 3- COND_EXPRs are a special case of #2. We can derive range
6068 information from the predicate but need to insert different
6069 ASSERT_EXPRs for each of the sub-graphs rooted at the
6070 conditional block. If the last statement of BB is a conditional
6071 expression of the form 'X op Y', then
6073 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
6075 b) If the conditional is the only entry point to the sub-graph
6076 corresponding to the THEN_CLAUSE, recurse into it. On
6077 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
6078 an ASSERT_EXPR is added for the corresponding variable.
6080 c) Repeat step (b) on the ELSE_CLAUSE.
6082 d) Mark X and Y in FOUND_IN_SUBGRAPH.
6084 For instance,
6086 if (a == 9)
6087 b = a;
6088 else
6089 b = c + 1;
6091 In this case, an assertion on the THEN clause is useful to
6092 determine that 'a' is always 9 on that edge. However, an assertion
6093 on the ELSE clause would be unnecessary.
6095 4- If BB does not end in a conditional expression, then we recurse
6096 into BB's dominator children.
6098 At the end of the recursive traversal, every SSA name will have a
6099 list of locations where ASSERT_EXPRs should be added. When a new
6100 location for name N is found, it is registered by calling
6101 register_new_assert_for. That function keeps track of all the
6102 registered assertions to prevent adding unnecessary assertions.
6103 For instance, if a pointer P_4 is dereferenced more than once in a
6104 dominator tree, only the location dominating all the dereference of
6105 P_4 will receive an ASSERT_EXPR. */
/* See the large comment preceding this function for the overall
   algorithm.  LIVE is the set of SSA name versions live on exit from
   BB; it is updated in place while walking the block backwards (uses
   set bits, definitions clear them).  */
6107 static void
6108 find_assert_locations_1 (basic_block bb, sbitmap live)
6110 gimple *last;
6112 last = last_stmt (bb);
6114 /* If BB's last statement is a conditional statement involving integer
6115 operands, determine if we need to add ASSERT_EXPRs. */
6116 if (last
6117 && gimple_code (last) == GIMPLE_COND
6118 && !fp_predicate (last)
6119 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
6120 find_conditional_asserts (bb, as_a <gcond *> (last));
6122 /* If BB's last statement is a switch statement involving integer
6123 operands, determine if we need to add ASSERT_EXPRs. */
6124 if (last
6125 && gimple_code (last) == GIMPLE_SWITCH
6126 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
6127 find_switch_asserts (bb, as_a <gswitch *> (last));
6129 /* Traverse all the statements in BB marking used names and looking
6130 for statements that may infer assertions for their used operands. */
/* Walk backwards so LIVE reflects liveness *after* each statement.  */
6131 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
6132 gsi_prev (&si))
6134 gimple *stmt;
6135 tree op;
6136 ssa_op_iter i;
6138 stmt = gsi_stmt (si);
6140 if (is_gimple_debug (stmt))
6141 continue;
6143 /* See if we can derive an assertion for any of STMT's operands. */
6144 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6146 tree value;
6147 enum tree_code comp_code;
6149 /* If op is not live beyond this stmt, do not bother to insert
6150 asserts for it. */
6151 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
6152 continue;
6154 /* If OP is used in such a way that we can infer a value
6155 range for it, and we don't find a previous assertion for
6156 it, create a new assertion location node for OP. */
6157 if (infer_value_range (stmt, op, &comp_code, &value))
6159 /* If we are able to infer a nonzero value range for OP,
6160 then walk backwards through the use-def chain to see if OP
6161 was set via a typecast.
6163 If so, then we can also infer a nonzero value range
6164 for the operand of the NOP_EXPR. */
6165 if (comp_code == NE_EXPR && integer_zerop (value))
6167 tree t = op;
6168 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
/* Only look through pointer-to-pointer conversions; the nonzero
   property survives those.  */
6170 while (is_gimple_assign (def_stmt)
6171 && CONVERT_EXPR_CODE_P
6172 (gimple_assign_rhs_code (def_stmt))
6173 && TREE_CODE
6174 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
6175 && POINTER_TYPE_P
6176 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
6178 t = gimple_assign_rhs1 (def_stmt);
6179 def_stmt = SSA_NAME_DEF_STMT (t);
6181 /* Note we want to register the assert for the
6182 operand of the NOP_EXPR after SI, not after the
6183 conversion. */
6184 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
6185 register_new_assert_for (t, t, comp_code, value,
6186 bb, NULL, si);
6190 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
6194 /* Update live. */
6195 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6196 bitmap_set_bit (live, SSA_NAME_VERSION (op));
6197 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
6198 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
6201 /* Traverse all PHI nodes in BB, updating live. */
6202 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
6203 gsi_next (&si))
6205 use_operand_p arg_p;
6206 ssa_op_iter i;
6207 gphi *phi = si.phi ();
6208 tree res = gimple_phi_result (phi);
6210 if (virtual_operand_p (res))
6211 continue;
/* PHI arguments are uses (set them live); the PHI result is a
   definition (clear it).  */
6213 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
6215 tree arg = USE_FROM_PTR (arg_p);
6216 if (TREE_CODE (arg) == SSA_NAME)
6217 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
6220 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
6224 /* Do an RPO walk over the function computing SSA name liveness
6225 on-the-fly and deciding on assert expressions to insert. */
6227 static void
6228 find_assert_locations (void)
6230 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6231 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6232 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
6233 int rpo_cnt, i;
/* NOTE: LIVE is a file-scope array of per-block liveness bitmaps
   (declared elsewhere in this file); it is allocated here and freed
   at the end of this function.  */
6235 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
6236 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
6237 for (i = 0; i < rpo_cnt; ++i)
6238 bb_rpo[rpo[i]] = i;
6240 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
6241 the order we compute liveness and insert asserts we otherwise
6242 fail to insert asserts into the loop latch. */
6243 loop_p loop;
6244 FOR_EACH_LOOP (loop, 0)
6246 i = loop->latch->index;
6247 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
6248 for (gphi_iterator gsi = gsi_start_phis (loop->header);
6249 !gsi_end_p (gsi); gsi_next (&gsi))
6251 gphi *phi = gsi.phi ();
6252 if (virtual_operand_p (gimple_phi_result (phi)))
6253 continue;
6254 tree arg = gimple_phi_arg_def (phi, j);
6255 if (TREE_CODE (arg) == SSA_NAME)
/* Bitmaps are allocated lazily, only for blocks that need them.  */
6257 if (live[i] == NULL)
6259 live[i] = sbitmap_alloc (num_ssa_names);
6260 bitmap_clear (live[i]);
6262 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
/* Walk in reverse RPO (i.e. roughly from exits toward the entry) so a
   block's liveness is complete before its predecessors consume it.  */
6267 for (i = rpo_cnt - 1; i >= 0; --i)
6269 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
6270 edge e;
6271 edge_iterator ei;
6273 if (!live[rpo[i]])
6275 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
6276 bitmap_clear (live[rpo[i]]);
6279 /* Process BB and update the live information with uses in
6280 this block. */
6281 find_assert_locations_1 (bb, live[rpo[i]]);
6283 /* Merge liveness into the predecessor blocks and free it. */
6284 if (!bitmap_empty_p (live[rpo[i]]))
6286 int pred_rpo = i;
6287 FOR_EACH_EDGE (e, ei, bb->preds)
6289 int pred = e->src->index;
6290 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
6291 continue;
6293 if (!live[pred])
6295 live[pred] = sbitmap_alloc (num_ssa_names);
6296 bitmap_clear (live[pred]);
6298 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
6300 if (bb_rpo[pred] < pred_rpo)
6301 pred_rpo = bb_rpo[pred];
6304 /* Record the RPO number of the last visited block that needs
6305 live information from this block. */
6306 last_rpo[rpo[i]] = pred_rpo;
6308 else
6310 sbitmap_free (live[rpo[i]]);
6311 live[rpo[i]] = NULL;
6314 /* We can free all successors live bitmaps if all their
6315 predecessors have been visited already. */
6316 FOR_EACH_EDGE (e, ei, bb->succs)
6317 if (last_rpo[e->dest->index] == i
6318 && live[e->dest->index])
6320 sbitmap_free (live[e->dest->index]);
6321 live[e->dest->index] = NULL;
6325 XDELETEVEC (rpo);
6326 XDELETEVEC (bb_rpo);
6327 XDELETEVEC (last_rpo);
/* Free any bitmaps still alive (e.g. for blocks kept until the end).  */
6328 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
6329 if (live[i])
6330 sbitmap_free (live[i]);
6331 XDELETEVEC (live);
6334 /* Create an ASSERT_EXPR for NAME and insert it in the location
6335 indicated by LOC. Return true if we made any edge insertions. */
6337 static bool
6338 process_assert_insertions_for (tree name, assert_locus *loc)
6340 /* Build the comparison expression NAME_i COMP_CODE VAL. */
6341 gimple *stmt;
6342 tree cond;
6343 gimple *assert_stmt;
6344 edge_iterator ei;
6345 edge e;
6347 /* If we have X <=> X do not insert an assert expr for that. */
6348 if (loc->expr == loc->val)
6349 return false;
6351 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
6352 assert_stmt = build_assert_expr_for (cond, name);
6353 if (loc->e)
6355 /* We have been asked to insert the assertion on an edge. This
6356 is used only by COND_EXPR and SWITCH_EXPR assertions. */
6357 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
6358 || (gimple_code (gsi_stmt (loc->si))
6359 == GIMPLE_SWITCH));
/* Edge insertions are committed later by gsi_commit_edge_inserts
   (see process_assert_insertions), hence the TRUE return.  */
6361 gsi_insert_on_edge (loc->e, assert_stmt);
6362 return true;
6365 /* If the stmt iterator points at the end then this is an insertion
6366 at the beginning of a block. */
6367 if (gsi_end_p (loc->si))
6369 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
6370 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
6371 return false;
6374 /* Otherwise, we can insert right after LOC->SI iff the
6375 statement must not be the last statement in the block. */
6376 stmt = gsi_stmt (loc->si);
6377 if (!stmt_ends_bb_p (stmt))
6379 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
6380 return false;
6383 /* If STMT must be the last statement in BB, we can only insert new
6384 assertions on the non-abnormal edge out of BB. Note that since
6385 STMT is not control flow, there may only be one non-abnormal/eh edge
6386 out of BB. */
6387 FOR_EACH_EDGE (e, ei, loc->bb->succs)
6388 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
6390 gsi_insert_on_edge (e, assert_stmt);
6391 return true;
/* Every block must have a non-abnormal, non-EH successor here.  */
6394 gcc_unreachable ();
6397 /* Qsort helper for sorting assert locations. If stable is true, don't
6398 use iterative_hash_expr because it can be unstable for -fcompare-debug,
6399 on the other side some pointers might be NULL. */
6401 template <bool stable>
6402 static int
6403 compare_assert_loc (const void *pa, const void *pb)
6405 assert_locus * const a = *(assert_locus * const *)pa;
6406 assert_locus * const b = *(assert_locus * const *)pb;
6408 /* If stable, some asserts might be optimized away already, sort
6409 them last. */
6410 if (stable)
6412 if (a == NULL)
6413 return b != NULL;
6414 else if (b == NULL)
6415 return -1;
/* Edge-based asserts (E non-NULL) sort before block-based ones.  */
6418 if (a->e == NULL && b->e != NULL)
6419 return 1;
6420 else if (a->e != NULL && b->e == NULL)
6421 return -1;
6423 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
6424 no need to test both a->e and b->e. */
6426 /* Sort after destination index. */
6427 if (a->e == NULL)
6429 else if (a->e->dest->index > b->e->dest->index)
6430 return 1;
6431 else if (a->e->dest->index < b->e->dest->index)
6432 return -1;
6434 /* Sort after comp_code. */
6435 if (a->comp_code > b->comp_code)
6436 return 1;
6437 else if (a->comp_code < b->comp_code)
6438 return -1;
6440 hashval_t ha, hb;
6442 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
6443 uses DECL_UID of the VAR_DECL, so sorting might differ between
6444 -g and -g0. When doing the removal of redundant assert exprs
6445 and commonization to successors, this does not matter, but for
6446 the final sort needs to be stable. */
6447 if (stable)
6449 ha = 0;
6450 hb = 0;
6452 else
6454 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
6455 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
6458 /* Break the tie using hashing and source/bb index. */
6459 if (ha == hb)
6460 return (a->e != NULL
6461 ? a->e->src->index - b->e->src->index
6462 : a->bb->index - b->bb->index);
6463 return ha > hb ? 1 : -1;
6466 /* Process all the insertions registered for every name N_i registered
6467 in NEED_ASSERT_FOR. The list of assertions to be inserted are
6468 found in ASSERTS_FOR[i]. */
6470 static void
6471 process_assert_insertions (void)
6473 unsigned i;
6474 bitmap_iterator bi;
6475 bool update_edges_p = false;
6476 int num_asserts = 0;
6478 if (dump_file && (dump_flags & TDF_DETAILS))
6479 dump_all_asserts (dump_file);
6481 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
6483 assert_locus *loc = asserts_for[i];
6484 gcc_assert (loc);
/* Collect the chained assert_locus records into a sortable vector.  */
6486 auto_vec<assert_locus *, 16> asserts;
6487 for (; loc; loc = loc->next)
6488 asserts.safe_push (loc);
6489 asserts.qsort (compare_assert_loc<false>);
6491 /* Push down common asserts to successors and remove redundant ones. */
/* COMMON tracks the current run of identical edge asserts; ECNT counts
   how many incoming edges of COMMON's destination carry it.  */
6492 unsigned ecnt = 0;
6493 assert_locus *common = NULL;
6494 unsigned commonj = 0;
6495 for (unsigned j = 0; j < asserts.length (); ++j)
6497 loc = asserts[j];
6498 if (! loc->e)
6499 common = NULL;
6500 else if (! common
6501 || loc->e->dest != common->e->dest
6502 || loc->comp_code != common->comp_code
6503 || ! operand_equal_p (loc->val, common->val, 0)
6504 || ! operand_equal_p (loc->expr, common->expr, 0))
6506 commonj = j;
6507 common = loc;
6508 ecnt = 1;
6510 else if (loc->e == asserts[j-1]->e)
6512 /* Remove duplicate asserts. */
6513 if (commonj == j - 1)
6515 commonj = j;
6516 common = loc;
6518 free (asserts[j-1]);
6519 asserts[j-1] = NULL;
6521 else
6523 ecnt++;
6524 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
6526 /* We have the same assertion on all incoming edges of a BB.
6527 Insert it at the beginning of that block. */
6528 loc->bb = loc->e->dest;
6529 loc->e = NULL;
6530 loc->si = gsi_none ();
6531 common = NULL;
6532 /* Clear asserts commoned. */
6533 for (; commonj != j; ++commonj)
6534 if (asserts[commonj])
6536 free (asserts[commonj]);
6537 asserts[commonj] = NULL;
6543 /* The asserts vector sorting above might be unstable for
6544 -fcompare-debug, sort again to ensure a stable sort. */
6545 asserts.qsort (compare_assert_loc<true>);
6546 for (unsigned j = 0; j < asserts.length (); ++j)
/* NULL entries (freed above) sort last under the stable comparator,
   so we can stop at the first one.  */
6548 loc = asserts[j];
6549 if (! loc)
6550 break;
6551 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
6552 num_asserts++;
6553 free (loc);
/* Commit all pending edge insertions in one pass.  */
6557 if (update_edges_p)
6558 gsi_commit_edge_inserts ();
6560 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
6561 num_asserts);
6565 /* Traverse the flowgraph looking for conditional jumps to insert range
6566 expressions. These range expressions are meant to provide information
6567 to optimizations that need to reason in terms of value ranges. They
6568 will not be expanded into RTL. For instance, given:
6570 x = ...
6571 y = ...
6572 if (x < y)
6573 y = x - 2;
6574 else
6575 x = y + 3;
6577 this pass will transform the code into:
6579 x = ...
6580 y = ...
6581 if (x < y)
6583 x = ASSERT_EXPR <x, x < y>
6584 y = x - 2
6586 else
6588 y = ASSERT_EXPR <y, x >= y>
6589 x = y + 3
6592 The idea is that once copy and constant propagation have run, other
6593 optimizations will be able to determine what ranges of values can 'x'
6594 take in different paths of the code, simply by checking the reaching
6595 definition of 'x'. */
6597 static void
6598 insert_range_assertions (void)
6600 need_assert_for = BITMAP_ALLOC (NULL);
6601 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
6603 calculate_dominance_info (CDI_DOMINATORS);
6605 find_assert_locations ();
6606 if (!bitmap_empty_p (need_assert_for))
6608 process_assert_insertions ();
6609 update_ssa (TODO_update_ssa_no_phi);
6612 if (dump_file && (dump_flags & TDF_DETAILS))
6614 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
6615 dump_function_to_file (current_function_decl, dump_file, dump_flags);
6618 free (asserts_for);
6619 BITMAP_FREE (need_assert_for);
6622 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
6623 and "struct" hacks. If VRP can determine that the
6624 array subscript is a constant, check if it is outside valid
6625 range. If the array subscript is a RANGE, warn if it is
6626 non-overlapping with valid range.
6627 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
6629 static void
6630 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
6632 value_range *vr = NULL;
6633 tree low_sub, up_sub;
6634 tree low_bound, up_bound, up_bound_p1;
6636 if (TREE_NO_WARNING (ref))
6637 return;
6639 low_sub = up_sub = TREE_OPERAND (ref, 1);
6640 up_bound = array_ref_up_bound (ref);
6642 /* Can not check flexible arrays. */
6643 if (!up_bound
6644 || TREE_CODE (up_bound) != INTEGER_CST)
6645 return;
6647 /* Accesses to trailing arrays via pointers may access storage
6648 beyond the types array bounds. */
6649 if (warn_array_bounds < 2
6650 && array_at_struct_end_p (ref))
6651 return;
6653 low_bound = array_ref_low_bound (ref);
6654 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
6655 build_int_cst (TREE_TYPE (up_bound), 1));
6657 /* Empty array. */
6658 if (tree_int_cst_equal (low_bound, up_bound_p1))
6660 warning_at (location, OPT_Warray_bounds,
6661 "array subscript is above array bounds");
6662 TREE_NO_WARNING (ref) = 1;
6665 if (TREE_CODE (low_sub) == SSA_NAME)
6667 vr = get_value_range (low_sub);
6668 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
6670 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
6671 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
6675 if (vr && vr->type == VR_ANTI_RANGE)
6677 if (TREE_CODE (up_sub) == INTEGER_CST
6678 && (ignore_off_by_one
6679 ? tree_int_cst_lt (up_bound, up_sub)
6680 : tree_int_cst_le (up_bound, up_sub))
6681 && TREE_CODE (low_sub) == INTEGER_CST
6682 && tree_int_cst_le (low_sub, low_bound))
6684 warning_at (location, OPT_Warray_bounds,
6685 "array subscript is outside array bounds");
6686 TREE_NO_WARNING (ref) = 1;
6689 else if (TREE_CODE (up_sub) == INTEGER_CST
6690 && (ignore_off_by_one
6691 ? !tree_int_cst_le (up_sub, up_bound_p1)
6692 : !tree_int_cst_le (up_sub, up_bound)))
6694 if (dump_file && (dump_flags & TDF_DETAILS))
6696 fprintf (dump_file, "Array bound warning for ");
6697 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6698 fprintf (dump_file, "\n");
6700 warning_at (location, OPT_Warray_bounds,
6701 "array subscript is above array bounds");
6702 TREE_NO_WARNING (ref) = 1;
6704 else if (TREE_CODE (low_sub) == INTEGER_CST
6705 && tree_int_cst_lt (low_sub, low_bound))
6707 if (dump_file && (dump_flags & TDF_DETAILS))
6709 fprintf (dump_file, "Array bound warning for ");
6710 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6711 fprintf (dump_file, "\n");
6713 warning_at (location, OPT_Warray_bounds,
6714 "array subscript is below array bounds");
6715 TREE_NO_WARNING (ref) = 1;
6719 /* Searches if the expr T, located at LOCATION computes
6720 address of an ARRAY_REF, and call check_array_ref on it. */
6722 static void
6723 search_for_addr_array (tree t, location_t location)
6725 /* Check each ARRAY_REFs in the reference chain. */
6728 if (TREE_CODE (t) == ARRAY_REF)
6729 check_array_ref (location, t, true /*ignore_off_by_one*/);
6731 t = TREE_OPERAND (t, 0);
6733 while (handled_component_p (t));
6735 if (TREE_CODE (t) == MEM_REF
6736 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
6737 && !TREE_NO_WARNING (t))
6739 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
6740 tree low_bound, up_bound, el_sz;
6741 offset_int idx;
6742 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
6743 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
6744 || !TYPE_DOMAIN (TREE_TYPE (tem)))
6745 return;
6747 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6748 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6749 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
6750 if (!low_bound
6751 || TREE_CODE (low_bound) != INTEGER_CST
6752 || !up_bound
6753 || TREE_CODE (up_bound) != INTEGER_CST
6754 || !el_sz
6755 || TREE_CODE (el_sz) != INTEGER_CST)
6756 return;
6758 idx = mem_ref_offset (t);
6759 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
6760 if (idx < 0)
6762 if (dump_file && (dump_flags & TDF_DETAILS))
6764 fprintf (dump_file, "Array bound warning for ");
6765 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6766 fprintf (dump_file, "\n");
6768 warning_at (location, OPT_Warray_bounds,
6769 "array subscript is below array bounds");
6770 TREE_NO_WARNING (t) = 1;
6772 else if (idx > (wi::to_offset (up_bound)
6773 - wi::to_offset (low_bound) + 1))
6775 if (dump_file && (dump_flags & TDF_DETAILS))
6777 fprintf (dump_file, "Array bound warning for ");
6778 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6779 fprintf (dump_file, "\n");
6781 warning_at (location, OPT_Warray_bounds,
6782 "array subscript is above array bounds");
6783 TREE_NO_WARNING (t) = 1;
6788 /* walk_tree() callback that checks if *TP is
6789 an ARRAY_REF inside an ADDR_EXPR (in which an array
6790 subscript one outside the valid range is allowed). Call
6791 check_array_ref for each ARRAY_REF found. The location is
6792 passed in DATA. */
6794 static tree
6795 check_array_bounds (tree *tp, int *walk_subtree, void *data)
6797 tree t = *tp;
6798 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6799 location_t location;
6801 if (EXPR_HAS_LOCATION (t))
6802 location = EXPR_LOCATION (t);
6803 else
6805 location_t *locp = (location_t *) wi->info;
6806 location = *locp;
6809 *walk_subtree = TRUE;
6811 if (TREE_CODE (t) == ARRAY_REF)
6812 check_array_ref (location, t, false /*ignore_off_by_one*/);
6814 else if (TREE_CODE (t) == ADDR_EXPR)
6816 search_for_addr_array (t, location);
6817 *walk_subtree = FALSE;
6820 return NULL_TREE;
6823 /* Walk over all statements of all reachable BBs and call check_array_bounds
6824 on them. */
6826 static void
6827 check_all_array_refs (void)
6829 basic_block bb;
6830 gimple_stmt_iterator si;
6832 FOR_EACH_BB_FN (bb, cfun)
6834 edge_iterator ei;
6835 edge e;
6836 bool executable = false;
6838 /* Skip blocks that were found to be unreachable. */
6839 FOR_EACH_EDGE (e, ei, bb->preds)
6840 executable |= !!(e->flags & EDGE_EXECUTABLE);
6841 if (!executable)
6842 continue;
6844 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6846 gimple *stmt = gsi_stmt (si);
6847 struct walk_stmt_info wi;
6848 if (!gimple_has_location (stmt)
6849 || is_gimple_debug (stmt))
6850 continue;
6852 memset (&wi, 0, sizeof (wi));
6854 location_t loc = gimple_location (stmt);
6855 wi.info = &loc;
6857 walk_gimple_op (gsi_stmt (si),
6858 check_array_bounds,
6859 &wi);
6864 /* Return true if all imm uses of VAR are either in STMT, or
6865 feed (optionally through a chain of single imm uses) GIMPLE_COND
6866 in basic block COND_BB. */
6868 static bool
6869 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
6871 use_operand_p use_p, use2_p;
6872 imm_use_iterator iter;
6874 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
6875 if (USE_STMT (use_p) != stmt)
6877 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
6878 if (is_gimple_debug (use_stmt))
6879 continue;
6880 while (is_gimple_assign (use_stmt)
6881 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
6882 && single_imm_use (gimple_assign_lhs (use_stmt),
6883 &use2_p, &use_stmt2))
6884 use_stmt = use_stmt2;
6885 if (gimple_code (use_stmt) != GIMPLE_COND
6886 || gimple_bb (use_stmt) != cond_bb)
6887 return false;
6889 return true;
6892 /* Handle
6893 _4 = x_3 & 31;
6894 if (_4 != 0)
6895 goto <bb 6>;
6896 else
6897 goto <bb 7>;
6898 <bb 6>:
6899 __builtin_unreachable ();
6900 <bb 7>:
6901 x_5 = ASSERT_EXPR <x_3, ...>;
6902 If x_3 has no other immediate uses (checked by caller),
6903 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
6904 from the non-zero bitmask. */
6906 static void
6907 maybe_set_nonzero_bits (basic_block bb, tree var)
6909 edge e = single_pred_edge (bb);
6910 basic_block cond_bb = e->src;
6911 gimple *stmt = last_stmt (cond_bb);
6912 tree cst;
6914 if (stmt == NULL
6915 || gimple_code (stmt) != GIMPLE_COND
6916 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
6917 ? EQ_EXPR : NE_EXPR)
6918 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
6919 || !integer_zerop (gimple_cond_rhs (stmt)))
6920 return;
6922 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
6923 if (!is_gimple_assign (stmt)
6924 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
6925 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
6926 return;
6927 if (gimple_assign_rhs1 (stmt) != var)
6929 gimple *stmt2;
6931 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
6932 return;
6933 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
6934 if (!gimple_assign_cast_p (stmt2)
6935 || gimple_assign_rhs1 (stmt2) != var
6936 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
6937 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
6938 != TYPE_PRECISION (TREE_TYPE (var))))
6939 return;
6941 cst = gimple_assign_rhs2 (stmt);
6942 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
6945 /* Convert range assertion expressions into the implied copies and
6946 copy propagate away the copies. Doing the trivial copy propagation
6947 here avoids the need to run the full copy propagation pass after
6948 VRP.
6950 FIXME, this will eventually lead to copy propagation removing the
6951 names that had useful range information attached to them. For
6952 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
6953 then N_i will have the range [3, +INF].
6955 However, by converting the assertion into the implied copy
6956 operation N_i = N_j, we will then copy-propagate N_j into the uses
6957 of N_i and lose the range information. We may want to hold on to
6958 ASSERT_EXPRs a little while longer as the ranges could be used in
6959 things like jump threading.
6961 The problem with keeping ASSERT_EXPRs around is that passes after
6962 VRP need to handle them appropriately.
6964 Another approach would be to make the range information a first
6965 class property of the SSA_NAME so that it can be queried from
6966 any pass. This is made somewhat more complex by the need for
6967 multiple ranges to be associated with one SSA_NAME. */
6969 static void
6970 remove_range_assertions (void)
6972 basic_block bb;
6973 gimple_stmt_iterator si;
6974 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
6975 a basic block preceeded by GIMPLE_COND branching to it and
6976 __builtin_trap, -1 if not yet checked, 0 otherwise. */
6977 int is_unreachable;
6979 /* Note that the BSI iterator bump happens at the bottom of the
6980 loop and no bump is necessary if we're removing the statement
6981 referenced by the current BSI. */
6982 FOR_EACH_BB_FN (bb, cfun)
6983 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
6985 gimple *stmt = gsi_stmt (si);
6987 if (is_gimple_assign (stmt)
6988 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
6990 tree lhs = gimple_assign_lhs (stmt);
6991 tree rhs = gimple_assign_rhs1 (stmt);
6992 tree var;
6994 var = ASSERT_EXPR_VAR (rhs);
6996 if (TREE_CODE (var) == SSA_NAME
6997 && !POINTER_TYPE_P (TREE_TYPE (lhs))
6998 && SSA_NAME_RANGE_INFO (lhs))
7000 if (is_unreachable == -1)
7002 is_unreachable = 0;
7003 if (single_pred_p (bb)
7004 && assert_unreachable_fallthru_edge_p
7005 (single_pred_edge (bb)))
7006 is_unreachable = 1;
7008 /* Handle
7009 if (x_7 >= 10 && x_7 < 20)
7010 __builtin_unreachable ();
7011 x_8 = ASSERT_EXPR <x_7, ...>;
7012 if the only uses of x_7 are in the ASSERT_EXPR and
7013 in the condition. In that case, we can copy the
7014 range info from x_8 computed in this pass also
7015 for x_7. */
7016 if (is_unreachable
7017 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
7018 single_pred (bb)))
7020 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
7021 SSA_NAME_RANGE_INFO (lhs)->get_min (),
7022 SSA_NAME_RANGE_INFO (lhs)->get_max ());
7023 maybe_set_nonzero_bits (bb, var);
7027 /* Propagate the RHS into every use of the LHS. For SSA names
7028 also propagate abnormals as it merely restores the original
7029 IL in this case (an replace_uses_by would assert). */
7030 if (TREE_CODE (var) == SSA_NAME)
7032 imm_use_iterator iter;
7033 use_operand_p use_p;
7034 gimple *use_stmt;
7035 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
7036 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
7037 SET_USE (use_p, var);
7039 else
7040 replace_uses_by (lhs, var);
7042 /* And finally, remove the copy, it is not needed. */
7043 gsi_remove (&si, true);
7044 release_defs (stmt);
7046 else
7048 if (!is_gimple_debug (gsi_stmt (si)))
7049 is_unreachable = 0;
7050 gsi_next (&si);
7056 /* Return true if STMT is interesting for VRP. */
7058 static bool
7059 stmt_interesting_for_vrp (gimple *stmt)
7061 if (gimple_code (stmt) == GIMPLE_PHI)
7063 tree res = gimple_phi_result (stmt);
7064 return (!virtual_operand_p (res)
7065 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
7066 || POINTER_TYPE_P (TREE_TYPE (res))));
7068 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7070 tree lhs = gimple_get_lhs (stmt);
7072 /* In general, assignments with virtual operands are not useful
7073 for deriving ranges, with the obvious exception of calls to
7074 builtin functions. */
7075 if (lhs && TREE_CODE (lhs) == SSA_NAME
7076 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
7077 || POINTER_TYPE_P (TREE_TYPE (lhs)))
7078 && (is_gimple_call (stmt)
7079 || !gimple_vuse (stmt)))
7080 return true;
7081 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
7082 switch (gimple_call_internal_fn (stmt))
7084 case IFN_ADD_OVERFLOW:
7085 case IFN_SUB_OVERFLOW:
7086 case IFN_MUL_OVERFLOW:
7087 case IFN_ATOMIC_COMPARE_EXCHANGE:
7088 /* These internal calls return _Complex integer type,
7089 but are interesting to VRP nevertheless. */
7090 if (lhs && TREE_CODE (lhs) == SSA_NAME)
7091 return true;
7092 break;
7093 default:
7094 break;
7097 else if (gimple_code (stmt) == GIMPLE_COND
7098 || gimple_code (stmt) == GIMPLE_SWITCH)
7099 return true;
7101 return false;
7104 /* Initialize VRP lattice. */
7106 static void
7107 vrp_initialize_lattice ()
7109 values_propagated = false;
7110 num_vr_values = num_ssa_names;
7111 vr_value = XCNEWVEC (value_range *, num_vr_values);
7112 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
7113 bitmap_obstack_initialize (&vrp_equiv_obstack);
7116 /* Initialization required by ssa_propagate engine. */
7118 static void
7119 vrp_initialize ()
7121 basic_block bb;
7123 FOR_EACH_BB_FN (bb, cfun)
7125 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
7126 gsi_next (&si))
7128 gphi *phi = si.phi ();
7129 if (!stmt_interesting_for_vrp (phi))
7131 tree lhs = PHI_RESULT (phi);
7132 set_value_range_to_varying (get_value_range (lhs));
7133 prop_set_simulate_again (phi, false);
7135 else
7136 prop_set_simulate_again (phi, true);
7139 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
7140 gsi_next (&si))
7142 gimple *stmt = gsi_stmt (si);
7144 /* If the statement is a control insn, then we do not
7145 want to avoid simulating the statement once. Failure
7146 to do so means that those edges will never get added. */
7147 if (stmt_ends_bb_p (stmt))
7148 prop_set_simulate_again (stmt, true);
7149 else if (!stmt_interesting_for_vrp (stmt))
7151 set_defs_to_varying (stmt);
7152 prop_set_simulate_again (stmt, false);
7154 else
7155 prop_set_simulate_again (stmt, true);
7160 /* Return the singleton value-range for NAME or NAME. */
7162 static inline tree
7163 vrp_valueize (tree name)
7165 if (TREE_CODE (name) == SSA_NAME)
7167 value_range *vr = get_value_range (name);
7168 if (vr->type == VR_RANGE
7169 && (TREE_CODE (vr->min) == SSA_NAME
7170 || is_gimple_min_invariant (vr->min))
7171 && vrp_operand_equal_p (vr->min, vr->max))
7172 return vr->min;
7174 return name;
7177 /* Return the singleton value-range for NAME if that is a constant
7178 but signal to not follow SSA edges. */
7180 static inline tree
7181 vrp_valueize_1 (tree name)
7183 if (TREE_CODE (name) == SSA_NAME)
7185 /* If the definition may be simulated again we cannot follow
7186 this SSA edge as the SSA propagator does not necessarily
7187 re-visit the use. */
7188 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
7189 if (!gimple_nop_p (def_stmt)
7190 && prop_simulate_again_p (def_stmt))
7191 return NULL_TREE;
7192 value_range *vr = get_value_range (name);
7193 if (range_int_cst_singleton_p (vr))
7194 return vr->min;
7196 return name;
7199 /* Visit assignment STMT. If it produces an interesting range, record
7200 the range in VR and set LHS to OUTPUT_P. */
7202 static void
7203 vrp_visit_assignment_or_call (gimple *stmt, tree *output_p, value_range *vr)
7205 tree lhs;
7206 enum gimple_code code = gimple_code (stmt);
7207 lhs = gimple_get_lhs (stmt);
7208 *output_p = NULL_TREE;
7210 /* We only keep track of ranges in integral and pointer types. */
7211 if (TREE_CODE (lhs) == SSA_NAME
7212 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
7213 /* It is valid to have NULL MIN/MAX values on a type. See
7214 build_range_type. */
7215 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
7216 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
7217 || POINTER_TYPE_P (TREE_TYPE (lhs))))
7219 *output_p = lhs;
7221 /* Try folding the statement to a constant first. */
7222 tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
7223 vrp_valueize_1);
7224 if (tem)
7226 if (TREE_CODE (tem) == SSA_NAME
7227 && (SSA_NAME_IS_DEFAULT_DEF (tem)
7228 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (tem))))
7230 extract_range_from_ssa_name (vr, tem);
7231 return;
7233 else if (is_gimple_min_invariant (tem))
7235 set_value_range_to_value (vr, tem, NULL);
7236 return;
7239 /* Then dispatch to value-range extracting functions. */
7240 if (code == GIMPLE_CALL)
7241 extract_range_basic (vr, stmt);
7242 else
7243 extract_range_from_assignment (vr, as_a <gassign *> (stmt));
7247 /* Helper that gets the value range of the SSA_NAME with version I
7248 or a symbolic range containing the SSA_NAME only if the value range
7249 is varying or undefined. */
7251 static inline value_range
7252 get_vr_for_comparison (int i)
7254 value_range vr = *get_value_range (ssa_name (i));
7256 /* If name N_i does not have a valid range, use N_i as its own
7257 range. This allows us to compare against names that may
7258 have N_i in their ranges. */
7259 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
7261 vr.type = VR_RANGE;
7262 vr.min = ssa_name (i);
7263 vr.max = ssa_name (i);
7266 return vr;
7269 /* Compare all the value ranges for names equivalent to VAR with VAL
7270 using comparison code COMP. Return the same value returned by
7271 compare_range_with_value, including the setting of
7272 *STRICT_OVERFLOW_P. */
7274 static tree
7275 compare_name_with_value (enum tree_code comp, tree var, tree val,
7276 bool *strict_overflow_p, bool use_equiv_p)
7278 bitmap_iterator bi;
7279 unsigned i;
7280 bitmap e;
7281 tree retval, t;
7282 int used_strict_overflow;
7283 bool sop;
7284 value_range equiv_vr;
7286 /* Get the set of equivalences for VAR. */
7287 e = get_value_range (var)->equiv;
7289 /* Start at -1. Set it to 0 if we do a comparison without relying
7290 on overflow, or 1 if all comparisons rely on overflow. */
7291 used_strict_overflow = -1;
7293 /* Compare vars' value range with val. */
7294 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
7295 sop = false;
7296 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
7297 if (retval)
7298 used_strict_overflow = sop ? 1 : 0;
7300 /* If the equiv set is empty we have done all work we need to do. */
7301 if (e == NULL)
7303 if (retval
7304 && used_strict_overflow > 0)
7305 *strict_overflow_p = true;
7306 return retval;
7309 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
7311 tree name = ssa_name (i);
7312 if (! name)
7313 continue;
7315 if (! use_equiv_p
7316 && ! SSA_NAME_IS_DEFAULT_DEF (name)
7317 && prop_simulate_again_p (SSA_NAME_DEF_STMT (name)))
7318 continue;
7320 equiv_vr = get_vr_for_comparison (i);
7321 sop = false;
7322 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
7323 if (t)
7325 /* If we get different answers from different members
7326 of the equivalence set this check must be in a dead
7327 code region. Folding it to a trap representation
7328 would be correct here. For now just return don't-know. */
7329 if (retval != NULL
7330 && t != retval)
7332 retval = NULL_TREE;
7333 break;
7335 retval = t;
7337 if (!sop)
7338 used_strict_overflow = 0;
7339 else if (used_strict_overflow < 0)
7340 used_strict_overflow = 1;
7344 if (retval
7345 && used_strict_overflow > 0)
7346 *strict_overflow_p = true;
7348 return retval;
7352 /* Given a comparison code COMP and names N1 and N2, compare all the
7353 ranges equivalent to N1 against all the ranges equivalent to N2
7354 to determine the value of N1 COMP N2. Return the same value
7355 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
7356 whether we relied on undefined signed overflow in the comparison. */
7359 static tree
7360 compare_names (enum tree_code comp, tree n1, tree n2,
7361 bool *strict_overflow_p)
7363 tree t, retval;
7364 bitmap e1, e2;
7365 bitmap_iterator bi1, bi2;
7366 unsigned i1, i2;
7367 int used_strict_overflow;
7368 static bitmap_obstack *s_obstack = NULL;
7369 static bitmap s_e1 = NULL, s_e2 = NULL;
7371 /* Compare the ranges of every name equivalent to N1 against the
7372 ranges of every name equivalent to N2. */
7373 e1 = get_value_range (n1)->equiv;
7374 e2 = get_value_range (n2)->equiv;
7376 /* Use the fake bitmaps if e1 or e2 are not available. */
7377 if (s_obstack == NULL)
7379 s_obstack = XNEW (bitmap_obstack);
7380 bitmap_obstack_initialize (s_obstack);
7381 s_e1 = BITMAP_ALLOC (s_obstack);
7382 s_e2 = BITMAP_ALLOC (s_obstack);
7384 if (e1 == NULL)
7385 e1 = s_e1;
7386 if (e2 == NULL)
7387 e2 = s_e2;
7389 /* Add N1 and N2 to their own set of equivalences to avoid
7390 duplicating the body of the loop just to check N1 and N2
7391 ranges. */
7392 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
7393 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
7395 /* If the equivalence sets have a common intersection, then the two
7396 names can be compared without checking their ranges. */
7397 if (bitmap_intersect_p (e1, e2))
7399 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7400 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7402 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
7403 ? boolean_true_node
7404 : boolean_false_node;
7407 /* Start at -1. Set it to 0 if we do a comparison without relying
7408 on overflow, or 1 if all comparisons rely on overflow. */
7409 used_strict_overflow = -1;
7411 /* Otherwise, compare all the equivalent ranges. First, add N1 and
7412 N2 to their own set of equivalences to avoid duplicating the body
7413 of the loop just to check N1 and N2 ranges. */
7414 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
7416 if (! ssa_name (i1))
7417 continue;
7419 value_range vr1 = get_vr_for_comparison (i1);
7421 t = retval = NULL_TREE;
7422 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
7424 if (! ssa_name (i2))
7425 continue;
7427 bool sop = false;
7429 value_range vr2 = get_vr_for_comparison (i2);
7431 t = compare_ranges (comp, &vr1, &vr2, &sop);
7432 if (t)
7434 /* If we get different answers from different members
7435 of the equivalence set this check must be in a dead
7436 code region. Folding it to a trap representation
7437 would be correct here. For now just return don't-know. */
7438 if (retval != NULL
7439 && t != retval)
7441 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7442 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7443 return NULL_TREE;
7445 retval = t;
7447 if (!sop)
7448 used_strict_overflow = 0;
7449 else if (used_strict_overflow < 0)
7450 used_strict_overflow = 1;
7454 if (retval)
7456 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7457 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7458 if (used_strict_overflow > 0)
7459 *strict_overflow_p = true;
7460 return retval;
7464 /* None of the equivalent ranges are useful in computing this
7465 comparison. */
7466 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7467 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7468 return NULL_TREE;
7471 /* Helper function for vrp_evaluate_conditional_warnv & other
7472 optimizers. */
7474 static tree
7475 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
7476 tree op0, tree op1,
7477 bool * strict_overflow_p)
7479 value_range *vr0, *vr1;
7481 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
7482 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
7484 tree res = NULL_TREE;
7485 if (vr0 && vr1)
7486 res = compare_ranges (code, vr0, vr1, strict_overflow_p);
7487 if (!res && vr0)
7488 res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
7489 if (!res && vr1)
7490 res = (compare_range_with_value
7491 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
7492 return res;
7495 /* Helper function for vrp_evaluate_conditional_warnv. */
7497 static tree
7498 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
7499 tree op1, bool use_equiv_p,
7500 bool *strict_overflow_p, bool *only_ranges)
7502 tree ret;
7503 if (only_ranges)
7504 *only_ranges = true;
7506 /* We only deal with integral and pointer types. */
7507 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
7508 && !POINTER_TYPE_P (TREE_TYPE (op0)))
7509 return NULL_TREE;
7511 /* If OP0 CODE OP1 is an overflow comparison, if it can be expressed
7512 as a simple equality test, then prefer that over its current form
7513 for evaluation.
7515 An overflow test which collapses to an equality test can always be
7516 expressed as a comparison of one argument against zero. Overflow
7517 occurs when the chosen argument is zero and does not occur if the
7518 chosen argument is not zero. */
7519 tree x;
7520 if (overflow_comparison_p (code, op0, op1, use_equiv_p, &x))
7522 wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED);
7523 /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0)
7524 B = A - 1; if (A > B) -> B = A - 1; if (A != 0)
7525 B = A + 1; if (B < A) -> B = A + 1; if (B == 0)
7526 B = A + 1; if (B > A) -> B = A + 1; if (B != 0) */
7527 if (integer_zerop (x))
7529 op1 = x;
7530 code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR;
7532 /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0)
7533 B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
7534 B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
7535 B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */
7536 else if (wi::eq_p (x, max - 1))
7538 op0 = op1;
7539 op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
7540 code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR;
7544 if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
7545 (code, op0, op1, strict_overflow_p)))
7546 return ret;
7547 if (only_ranges)
7548 *only_ranges = false;
7549 /* Do not use compare_names during propagation, it's quadratic. */
7550 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME
7551 && use_equiv_p)
7552 return compare_names (code, op0, op1, strict_overflow_p);
7553 else if (TREE_CODE (op0) == SSA_NAME)
7554 return compare_name_with_value (code, op0, op1,
7555 strict_overflow_p, use_equiv_p);
7556 else if (TREE_CODE (op1) == SSA_NAME)
7557 return compare_name_with_value (swap_tree_comparison (code), op1, op0,
7558 strict_overflow_p, use_equiv_p);
7559 return NULL_TREE;
/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
   information.  Return NULL if the conditional can not be evaluated.
   The ranges of all the names equivalent with the operands in COND
   will be used when trying to compute the value.  If the result is
   based on undefined signed overflow, issue a warning if
   appropriate.  */

static tree
vrp_evaluate_conditional (tree_code code, tree op0, tree op1, gimple *stmt)
{
  bool sop;
  tree ret;
  bool only_ranges;

  /* Some passes and foldings leak constants with overflow flag set
     into the IL.  Avoid doing wrong things with these and bail out.  */
  if ((TREE_CODE (op0) == INTEGER_CST
       && TREE_OVERFLOW (op0))
      || (TREE_CODE (op1) == INTEGER_CST
	  && TREE_OVERFLOW (op1)))
    return NULL_TREE;

  /* SOP is set when the folding result relies on assuming signed
     overflow is undefined; ONLY_RANGES is set when the result was
     derived from ranges alone, without using equivalences.  */
  sop = false;
  ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
						 &only_ranges);

  if (ret && sop)
    {
      enum warn_strict_overflow_code wc;
      const char* warnmsg;

      /* Pick the warning text depending on whether the predicate folded
	 all the way to a constant or merely simplified.  */
      if (is_gimple_min_invariant (ret))
	{
	  wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional to constant");
	}
      else
	{
	  wc = WARN_STRICT_OVERFLOW_COMPARISON;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional");
	}

      if (issue_strict_overflow_warning (wc))
	{
	  location_t location;

	  /* Fall back to the global input location when the statement
	     carries no location of its own.  */
	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
	}
    }

  /* -Wtype-limits: only warn when the fold came purely from ranges
     (not equivalences) and the LHS range covers its whole type, i.e.
     the comparison folds for every possible value of OP0.  */
  if (warn_type_limits
      && ret && only_ranges
      && TREE_CODE_CLASS (code) == tcc_comparison
      && TREE_CODE (op0) == SSA_NAME)
    {
      /* If the comparison is being folded and the operand on the LHS
	 is being compared against a constant value that is outside of
	 the natural range of OP0's type, then the predicate will
	 always fold regardless of the value of OP0.  If -Wtype-limits
	 was specified, emit a warning.  */
      tree type = TREE_TYPE (op0);
      value_range *vr0 = get_value_range (op0);

      if (vr0->type == VR_RANGE
	  && INTEGRAL_TYPE_P (type)
	  && vrp_val_is_min (vr0->min)
	  && vrp_val_is_max (vr0->max)
	  && is_gimple_min_invariant (op1))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);

	  warning_at (location, OPT_Wtype_limits,
		      integer_zerop (ret)
		      ? G_("comparison always false "
			   "due to limited range of data type")
		      : G_("comparison always true "
			   "due to limited range of data type"));
	}
    }

  return ret;
}
/* Visit conditional statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P.  Otherwise, set *TAKEN_EDGE_P to NULL.  */

static void
vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
{
  tree val;

  *taken_edge_p = NULL;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      tree use;
      ssa_op_iter i;

      fprintf (dump_file, "\nVisiting conditional with predicate: ");
      print_gimple_stmt (dump_file, stmt, 0);
      fprintf (dump_file, "\nWith known ranges\n");

      /* Dump the currently-known lattice value for every SSA use of
	 the predicate.  */
      FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
	{
	  fprintf (dump_file, "\t");
	  print_generic_expr (dump_file, use);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
	}

      fprintf (dump_file, "\n");
    }

  /* Compute the value of the predicate COND by checking the known
     ranges of each of its operands.

     Note that we cannot evaluate all the equivalent ranges here
     because those ranges may not yet be final and with the current
     propagation strategy, we cannot determine when the value ranges
     of the names in the equivalence set have changed.

     For instance, given the following code fragment

        i_5 = PHI <8, i_13>
	...
	i_14 = ASSERT_EXPR <i_5, i_5 != 0>
	if (i_14 == 1)
	  ...

     Assume that on the first visit to i_14, i_5 has the temporary
     range [8, 8] because the second argument to the PHI function is
     not yet executable.  We derive the range ~[0, 0] for i_14 and the
     equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
     the first time, since i_14 is equivalent to the range [8, 8], we
     determine that the predicate is always false.

     On the next round of propagation, i_13 is determined to be
     VARYING, which causes i_5 to drop down to VARYING.  So, another
     visit to i_14 is scheduled.  In this second visit, we compute the
     exact same range and equivalence set for i_14, namely ~[0, 0] and
     { i_5 }.  But we did not have the previous range for i_5
     registered, so vrp_visit_assignment thinks that the range for
     i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
     is not visited again, which stops propagation from visiting
     statements in the THEN clause of that if().

     To properly fix this we would need to keep the previous range
     value for the names in the equivalence set.  This way we would've
     discovered that from one visit to the other i_5 changed from
     range [8, 8] to VR_VARYING.

     However, fixing this apparent limitation may not be worth the
     additional checking.  Testing on several code bases (GCC, DLV,
     MICO, TRAMP3D and SPEC2000) showed that doing this results in
     4 more predicates folded in SPEC.  */

  /* Evaluate from ranges only (use_equiv_p == false, see above); SOP
     is required by the helper but its value is ignored here.  */
  bool sop;
  val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
						 gimple_cond_lhs (stmt),
						 gimple_cond_rhs (stmt),
						 false, &sop, NULL);
  if (val)
    *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nPredicate evaluates to: ");
      if (val == NULL_TREE)
	fprintf (dump_file, "DON'T KNOW\n");
      else
	print_generic_stmt (dump_file, val);
    }
}
7749 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
7750 that includes the value VAL. The search is restricted to the range
7751 [START_IDX, n - 1] where n is the size of VEC.
7753 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
7754 returned.
7756 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
7757 it is placed in IDX and false is returned.
7759 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
7760 returned. */
7762 static bool
7763 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
7765 size_t n = gimple_switch_num_labels (stmt);
7766 size_t low, high;
7768 /* Find case label for minimum of the value range or the next one.
7769 At each iteration we are searching in [low, high - 1]. */
7771 for (low = start_idx, high = n; high != low; )
7773 tree t;
7774 int cmp;
7775 /* Note that i != high, so we never ask for n. */
7776 size_t i = (high + low) / 2;
7777 t = gimple_switch_label (stmt, i);
7779 /* Cache the result of comparing CASE_LOW and val. */
7780 cmp = tree_int_cst_compare (CASE_LOW (t), val);
7782 if (cmp == 0)
7784 /* Ranges cannot be empty. */
7785 *idx = i;
7786 return true;
7788 else if (cmp > 0)
7789 high = i;
7790 else
7792 low = i + 1;
7793 if (CASE_HIGH (t) != NULL
7794 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
7796 *idx = i;
7797 return true;
7802 *idx = high;
7803 return false;
/* Searches the case label vector VEC for the range of CASE_LABELs that is used
   for values between MIN and MAX.  The first index is placed in MIN_IDX.  The
   last index is placed in MAX_IDX.  If the range of CASE_LABELs is empty
   then MAX_IDX < MIN_IDX.
   Returns true if the default label is not needed.  */

static bool
find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
		       size_t *max_idx)
{
  size_t i, j;
  /* I is the label covering MIN or the next larger one; the second
     search starts at I since MAX >= MIN.  */
  bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
  bool max_take_default = !find_case_label_index (stmt, i, max, &j);

  if (i == j
      && min_take_default
      && max_take_default)
    {
      /* Only the default case label reached.
	 Return an empty range.  */
      *min_idx = 1;
      *max_idx = 0;
      return false;
    }
  else
    {
      bool take_default = min_take_default || max_take_default;
      tree low, high;
      size_t k;

      /* If MAX itself had no label, J points one past the last label
	 intersecting [MIN, MAX]; step back onto it.  */
      if (max_take_default)
	j--;

      /* If the case label range is continuous, we do not need
	 the default case label.  Verify that.  */
      high = CASE_LOW (gimple_switch_label (stmt, i));
      if (CASE_HIGH (gimple_switch_label (stmt, i)))
	high = CASE_HIGH (gimple_switch_label (stmt, i));
      for (k = i + 1; k <= j; ++k)
	{
	  low = CASE_LOW (gimple_switch_label (stmt, k));
	  /* A gap between consecutive labels means some values in
	     [MIN, MAX] fall through to the default label.  */
	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
	    {
	      take_default = true;
	      break;
	    }
	  high = low;
	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
	    high = CASE_HIGH (gimple_switch_label (stmt, k));
	}

      *min_idx = i;
      *max_idx = j;
      return !take_default;
    }
}
/* Searches the case label vector VEC for the ranges of CASE_LABELs that are
   used in range VR.  The indices are placed in MIN_IDX1, MAX_IDX1, MIN_IDX2
   and MAX_IDX2.  If the ranges of CASE_LABELs are empty then
   MAX_IDX1 < MIN_IDX1.  An anti-range may produce two disjoint label
   ranges, hence the second pair of indices.
   Returns true if the default label is not needed.  */

static bool
find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
			size_t *max_idx1, size_t *min_idx2,
			size_t *max_idx2)
{
  size_t i, j, k, l;
  unsigned int n = gimple_switch_num_labels (stmt);
  bool take_default;
  tree case_low, case_high;
  tree min = vr->min, max = vr->max;

  gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);

  take_default = !find_case_label_range (stmt, min, max, &i, &j);

  /* Set second range to empty.  */
  *min_idx2 = 1;
  *max_idx2 = 0;

  if (vr->type == VR_RANGE)
    {
      /* A plain range maps to a single label range [i, j].  */
      *min_idx1 = i;
      *max_idx1 = j;
      return !take_default;
    }

  /* VR is an anti-range from here on: the labels NOT covered by
     [MIN, MAX] are reachable.  Start pessimistically with all of
     them and the default label.  */

  /* Set first range to all case labels.  */
  *min_idx1 = 1;
  *max_idx1 = n - 1;

  if (i > j)
    return false;

  /* Make sure all the values of case labels [i , j] are contained in
     range [MIN, MAX].  Labels only partially covered by the excluded
     range stay reachable, so shrink [i, j] accordingly.  */
  case_low = CASE_LOW (gimple_switch_label (stmt, i));
  case_high = CASE_HIGH (gimple_switch_label (stmt, j));
  if (tree_int_cst_compare (case_low, min) < 0)
    i += 1;
  if (case_high != NULL_TREE
      && tree_int_cst_compare (max, case_high) < 0)
    j -= 1;

  if (i > j)
    return false;

  /* If the range spans case labels [i, j], the corresponding anti-range spans
     the labels [1, i - 1] and [j + 1, n - 1].  */
  k = j + 1;
  l = n - 1;
  if (k > l)
    {
      /* No labels above the excluded block: second piece is empty.  */
      k = 1;
      l = 0;
    }

  j = i - 1;
  i = 1;
  if (i > j)
    {
      /* No labels below the excluded block: promote the second piece
	 to be the first and mark the second empty.  */
      i = k;
      j = l;
      k = 1;
      l = 0;
    }

  *min_idx1 = i;
  *max_idx1 = j;
  *min_idx2 = k;
  *max_idx2 = l;
  return false;
}
/* Visit switch statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P.  Otherwise, *TAKEN_EDGE_P set to NULL.  */

static void
vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
{
  tree op, val;
  value_range *vr;
  size_t i = 0, j = 0, k, l;
  bool take_default;

  *taken_edge_p = NULL;
  op = gimple_switch_index (stmt);
  /* Only SSA names have a tracked range to consult.  */
  if (TREE_CODE (op) != SSA_NAME)
    return;

  vr = get_value_range (op);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting switch expression with operand ");
      print_generic_expr (dump_file, op);
      fprintf (dump_file, " with known range ");
      dump_value_range (dump_file, vr);
      fprintf (dump_file, "\n");
    }

  /* Nothing can be concluded from VARYING/UNDEFINED or symbolic
     bounds.  */
  if ((vr->type != VR_RANGE
       && vr->type != VR_ANTI_RANGE)
      || symbolic_range_p (vr))
    return;

  /* Find the single edge that is taken from the switch expression.
     [i, j] and [k, l] are the (up to two) label ranges reachable
     under VR.  */
  take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);

  /* Check if the range spans no CASE_LABEL.  If so, we only reach the default
     label */
  if (j < i)
    {
      gcc_assert (take_default);
      val = gimple_switch_default_label (stmt);
    }
  else
    {
      /* Check if labels with index i to j and maybe the default label
	 are all reaching the same label.  */

      val = gimple_switch_label (stmt, i);
      if (take_default
	  && CASE_LABEL (gimple_switch_default_label (stmt))
	     != CASE_LABEL (val))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  not a single destination for this "
		     "range\n");
	  return;
	}
      for (++i; i <= j; ++i)
	{
	  if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "  not a single destination for this "
			 "range\n");
	      return;
	    }
	}
      /* Also verify the second label range (for anti-ranges).  */
      for (; k <= l; ++k)
	{
	  if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "  not a single destination for this "
			 "range\n");
	      return;
	    }
	}
    }

  *taken_edge_p = find_edge (gimple_bb (stmt),
			     label_to_block (CASE_LABEL (val)));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  will take edge to ");
      print_generic_stmt (dump_file, CASE_LABEL (val));
    }
}
8031 /* Evaluate statement STMT. If the statement produces a useful range,
8032 set VR and corepsponding OUTPUT_P.
8034 If STMT is a conditional branch and we can determine its truth
8035 value, the taken edge is recorded in *TAKEN_EDGE_P. */
8037 static void
8038 extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
8039 tree *output_p, value_range *vr)
8042 if (dump_file && (dump_flags & TDF_DETAILS))
8044 fprintf (dump_file, "\nVisiting statement:\n");
8045 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
8048 if (!stmt_interesting_for_vrp (stmt))
8049 gcc_assert (stmt_ends_bb_p (stmt));
8050 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
8051 vrp_visit_assignment_or_call (stmt, output_p, vr);
8052 else if (gimple_code (stmt) == GIMPLE_COND)
8053 vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
8054 else if (gimple_code (stmt) == GIMPLE_SWITCH)
8055 vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
/* Evaluate statement STMT.  If the statement produces a useful range,
   return SSA_PROP_INTERESTING and record the SSA name with the
   interesting range into *OUTPUT_P.

   If STMT is a conditional branch and we can determine its truth
   value, the taken edge is recorded in *TAKEN_EDGE_P.

   If STMT produces a varying value, return SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
{
  value_range vr = VR_INITIALIZER;
  tree lhs = gimple_get_lhs (stmt);
  extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);

  if (*output_p)
    {
      /* Fold the new range into the lattice; only a lattice change is
	 interesting to the propagator.  */
      if (update_value_range (*output_p, &vr))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Found new range for ");
	      print_generic_expr (dump_file, *output_p);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, &vr);
	      fprintf (dump_file, "\n");
	    }

	  if (vr.type == VR_VARYING)
	    return SSA_PROP_VARYING;

	  return SSA_PROP_INTERESTING;
	}
      return SSA_PROP_NOT_INTERESTING;
    }

  if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
    switch (gimple_call_internal_fn (stmt))
      {
      case IFN_ADD_OVERFLOW:
      case IFN_SUB_OVERFLOW:
      case IFN_MUL_OVERFLOW:
      case IFN_ATOMIC_COMPARE_EXCHANGE:
	/* These internal calls return _Complex integer type,
	   which VRP does not track, but the immediate uses
	   thereof might be interesting.  */
	if (lhs && TREE_CODE (lhs) == SSA_NAME)
	  {
	    imm_use_iterator iter;
	    use_operand_p use_p;
	    enum ssa_prop_result res = SSA_PROP_VARYING;

	    /* The complex result itself is not tracked.  */
	    set_value_range_to_varying (get_value_range (lhs));

	    FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
	      {
		gimple *use_stmt = USE_STMT (use_p);
		if (!is_gimple_assign (use_stmt))
		  continue;
		enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
		if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
		  continue;
		/* Only direct REALPART/IMAGPART extractions of LHS into
		   an integral SSA name with known type bounds matter.  */
		tree rhs1 = gimple_assign_rhs1 (use_stmt);
		tree use_lhs = gimple_assign_lhs (use_stmt);
		if (TREE_CODE (rhs1) != rhs_code
		    || TREE_OPERAND (rhs1, 0) != lhs
		    || TREE_CODE (use_lhs) != SSA_NAME
		    || !stmt_interesting_for_vrp (use_stmt)
		    || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
			|| !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
			|| !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
		  continue;

		/* If there is a change in the value range for any of the
		   REALPART_EXPR/IMAGPART_EXPR immediate uses, return
		   SSA_PROP_INTERESTING.  If there are any REALPART_EXPR
		   or IMAGPART_EXPR immediate uses, but none of them have
		   a change in their value ranges, return
		   SSA_PROP_NOT_INTERESTING.  If there are no
		   {REAL,IMAG}PART_EXPR uses at all,
		   return SSA_PROP_VARYING.  */
		value_range new_vr = VR_INITIALIZER;
		extract_range_basic (&new_vr, use_stmt);
		value_range *old_vr = get_value_range (use_lhs);
		if (old_vr->type != new_vr.type
		    || !vrp_operand_equal_p (old_vr->min, new_vr.min)
		    || !vrp_operand_equal_p (old_vr->max, new_vr.max)
		    || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
		  res = SSA_PROP_INTERESTING;
		else
		  res = SSA_PROP_NOT_INTERESTING;
		BITMAP_FREE (new_vr.equiv);
		if (res == SSA_PROP_INTERESTING)
		  {
		    *output_p = lhs;
		    return res;
		  }
	      }

	    return res;
	  }
	break;
      default:
	break;
      }

  /* All other statements produce nothing of interest for VRP, so mark
     their outputs varying and prevent further simulation.  */
  set_defs_to_varying (stmt);

  return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
}
8172 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
8173 { VR1TYPE, VR0MIN, VR0MAX } and store the result
8174 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
8175 possible such range. The resulting range is not canonicalized. */
8177 static void
8178 union_ranges (enum value_range_type *vr0type,
8179 tree *vr0min, tree *vr0max,
8180 enum value_range_type vr1type,
8181 tree vr1min, tree vr1max)
8183 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
8184 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
8186 /* [] is vr0, () is vr1 in the following classification comments. */
8187 if (mineq && maxeq)
8189 /* [( )] */
8190 if (*vr0type == vr1type)
8191 /* Nothing to do for equal ranges. */
8193 else if ((*vr0type == VR_RANGE
8194 && vr1type == VR_ANTI_RANGE)
8195 || (*vr0type == VR_ANTI_RANGE
8196 && vr1type == VR_RANGE))
8198 /* For anti-range with range union the result is varying. */
8199 goto give_up;
8201 else
8202 gcc_unreachable ();
8204 else if (operand_less_p (*vr0max, vr1min) == 1
8205 || operand_less_p (vr1max, *vr0min) == 1)
8207 /* [ ] ( ) or ( ) [ ]
8208 If the ranges have an empty intersection, result of the union
8209 operation is the anti-range or if both are anti-ranges
8210 it covers all. */
8211 if (*vr0type == VR_ANTI_RANGE
8212 && vr1type == VR_ANTI_RANGE)
8213 goto give_up;
8214 else if (*vr0type == VR_ANTI_RANGE
8215 && vr1type == VR_RANGE)
8217 else if (*vr0type == VR_RANGE
8218 && vr1type == VR_ANTI_RANGE)
8220 *vr0type = vr1type;
8221 *vr0min = vr1min;
8222 *vr0max = vr1max;
8224 else if (*vr0type == VR_RANGE
8225 && vr1type == VR_RANGE)
8227 /* The result is the convex hull of both ranges. */
8228 if (operand_less_p (*vr0max, vr1min) == 1)
8230 /* If the result can be an anti-range, create one. */
8231 if (TREE_CODE (*vr0max) == INTEGER_CST
8232 && TREE_CODE (vr1min) == INTEGER_CST
8233 && vrp_val_is_min (*vr0min)
8234 && vrp_val_is_max (vr1max))
8236 tree min = int_const_binop (PLUS_EXPR,
8237 *vr0max,
8238 build_int_cst (TREE_TYPE (*vr0max), 1));
8239 tree max = int_const_binop (MINUS_EXPR,
8240 vr1min,
8241 build_int_cst (TREE_TYPE (vr1min), 1));
8242 if (!operand_less_p (max, min))
8244 *vr0type = VR_ANTI_RANGE;
8245 *vr0min = min;
8246 *vr0max = max;
8248 else
8249 *vr0max = vr1max;
8251 else
8252 *vr0max = vr1max;
8254 else
8256 /* If the result can be an anti-range, create one. */
8257 if (TREE_CODE (vr1max) == INTEGER_CST
8258 && TREE_CODE (*vr0min) == INTEGER_CST
8259 && vrp_val_is_min (vr1min)
8260 && vrp_val_is_max (*vr0max))
8262 tree min = int_const_binop (PLUS_EXPR,
8263 vr1max,
8264 build_int_cst (TREE_TYPE (vr1max), 1));
8265 tree max = int_const_binop (MINUS_EXPR,
8266 *vr0min,
8267 build_int_cst (TREE_TYPE (*vr0min), 1));
8268 if (!operand_less_p (max, min))
8270 *vr0type = VR_ANTI_RANGE;
8271 *vr0min = min;
8272 *vr0max = max;
8274 else
8275 *vr0min = vr1min;
8277 else
8278 *vr0min = vr1min;
8281 else
8282 gcc_unreachable ();
8284 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
8285 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
8287 /* [ ( ) ] or [( ) ] or [ ( )] */
8288 if (*vr0type == VR_RANGE
8289 && vr1type == VR_RANGE)
8291 else if (*vr0type == VR_ANTI_RANGE
8292 && vr1type == VR_ANTI_RANGE)
8294 *vr0type = vr1type;
8295 *vr0min = vr1min;
8296 *vr0max = vr1max;
8298 else if (*vr0type == VR_ANTI_RANGE
8299 && vr1type == VR_RANGE)
8301 /* Arbitrarily choose the right or left gap. */
8302 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
8303 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8304 build_int_cst (TREE_TYPE (vr1min), 1));
8305 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
8306 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8307 build_int_cst (TREE_TYPE (vr1max), 1));
8308 else
8309 goto give_up;
8311 else if (*vr0type == VR_RANGE
8312 && vr1type == VR_ANTI_RANGE)
8313 /* The result covers everything. */
8314 goto give_up;
8315 else
8316 gcc_unreachable ();
8318 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
8319 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
8321 /* ( [ ] ) or ([ ] ) or ( [ ]) */
8322 if (*vr0type == VR_RANGE
8323 && vr1type == VR_RANGE)
8325 *vr0type = vr1type;
8326 *vr0min = vr1min;
8327 *vr0max = vr1max;
8329 else if (*vr0type == VR_ANTI_RANGE
8330 && vr1type == VR_ANTI_RANGE)
8332 else if (*vr0type == VR_RANGE
8333 && vr1type == VR_ANTI_RANGE)
8335 *vr0type = VR_ANTI_RANGE;
8336 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
8338 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8339 build_int_cst (TREE_TYPE (*vr0min), 1));
8340 *vr0min = vr1min;
8342 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
8344 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8345 build_int_cst (TREE_TYPE (*vr0max), 1));
8346 *vr0max = vr1max;
8348 else
8349 goto give_up;
8351 else if (*vr0type == VR_ANTI_RANGE
8352 && vr1type == VR_RANGE)
8353 /* The result covers everything. */
8354 goto give_up;
8355 else
8356 gcc_unreachable ();
8358 else if ((operand_less_p (vr1min, *vr0max) == 1
8359 || operand_equal_p (vr1min, *vr0max, 0))
8360 && operand_less_p (*vr0min, vr1min) == 1
8361 && operand_less_p (*vr0max, vr1max) == 1)
8363 /* [ ( ] ) or [ ]( ) */
8364 if (*vr0type == VR_RANGE
8365 && vr1type == VR_RANGE)
8366 *vr0max = vr1max;
8367 else if (*vr0type == VR_ANTI_RANGE
8368 && vr1type == VR_ANTI_RANGE)
8369 *vr0min = vr1min;
8370 else if (*vr0type == VR_ANTI_RANGE
8371 && vr1type == VR_RANGE)
8373 if (TREE_CODE (vr1min) == INTEGER_CST)
8374 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8375 build_int_cst (TREE_TYPE (vr1min), 1));
8376 else
8377 goto give_up;
8379 else if (*vr0type == VR_RANGE
8380 && vr1type == VR_ANTI_RANGE)
8382 if (TREE_CODE (*vr0max) == INTEGER_CST)
8384 *vr0type = vr1type;
8385 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8386 build_int_cst (TREE_TYPE (*vr0max), 1));
8387 *vr0max = vr1max;
8389 else
8390 goto give_up;
8392 else
8393 gcc_unreachable ();
8395 else if ((operand_less_p (*vr0min, vr1max) == 1
8396 || operand_equal_p (*vr0min, vr1max, 0))
8397 && operand_less_p (vr1min, *vr0min) == 1
8398 && operand_less_p (vr1max, *vr0max) == 1)
8400 /* ( [ ) ] or ( )[ ] */
8401 if (*vr0type == VR_RANGE
8402 && vr1type == VR_RANGE)
8403 *vr0min = vr1min;
8404 else if (*vr0type == VR_ANTI_RANGE
8405 && vr1type == VR_ANTI_RANGE)
8406 *vr0max = vr1max;
8407 else if (*vr0type == VR_ANTI_RANGE
8408 && vr1type == VR_RANGE)
8410 if (TREE_CODE (vr1max) == INTEGER_CST)
8411 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8412 build_int_cst (TREE_TYPE (vr1max), 1));
8413 else
8414 goto give_up;
8416 else if (*vr0type == VR_RANGE
8417 && vr1type == VR_ANTI_RANGE)
8419 if (TREE_CODE (*vr0min) == INTEGER_CST)
8421 *vr0type = vr1type;
8422 *vr0min = vr1min;
8423 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8424 build_int_cst (TREE_TYPE (*vr0min), 1));
8426 else
8427 goto give_up;
8429 else
8430 gcc_unreachable ();
8432 else
8433 goto give_up;
8435 return;
8437 give_up:
8438 *vr0type = VR_VARYING;
8439 *vr0min = NULL_TREE;
8440 *vr0max = NULL_TREE;
/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
   { VR1TYPE, VR1MIN, VR1MAX } and store the result
   in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
   possible such range.  The resulting range is not canonicalized.  */

static void
intersect_ranges (enum value_range_type *vr0type,
		  tree *vr0min, tree *vr0max,
		  enum value_range_type vr1type,
		  tree vr1min, tree vr1max)
{
  bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
  bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);

  /* [] is vr0, () is vr1 in the following classification comments.  */
  if (mineq && maxeq)
    {
      /* [(  )] */
      if (*vr0type == vr1type)
	/* Nothing to do for equal ranges.  */
	;
      else if ((*vr0type == VR_RANGE
		&& vr1type == VR_ANTI_RANGE)
	       || (*vr0type == VR_ANTI_RANGE
		   && vr1type == VR_RANGE))
	{
	  /* For anti-range with range intersection the result is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if (operand_less_p (*vr0max, vr1min) == 1
	   || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
	 If the ranges have an empty intersection, the result of the
	 intersect operation is the range for intersecting an
	 anti-range with a range or empty when intersecting two ranges.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_ANTI_RANGE)
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If the anti-ranges are adjacent to each other merge them.  */
	  if (TREE_CODE (*vr0max) == INTEGER_CST
	      && TREE_CODE (vr1min) == INTEGER_CST
	      && operand_less_p (*vr0max, vr1min) == 1
	      && integer_onep (int_const_binop (MINUS_EXPR,
						vr1min, *vr0max)))
	    *vr0max = vr1max;
	  else if (TREE_CODE (vr1max) == INTEGER_CST
		   && TREE_CODE (*vr0min) == INTEGER_CST
		   && operand_less_p (vr1max, *vr0min) == 1
		   && integer_onep (int_const_binop (MINUS_EXPR,
						     *vr0min, vr1max)))
	    *vr0min = vr1min;
	  /* Else arbitrarily take VR0.  */
	}
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ (  ) ] or [(  ) ] or [ (  )] */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	{
	  /* If both are ranges the result is the inner one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* Choose the right gap if the left one is empty.  */
	  if (mineq)
	    {
	      /* For 1-bit signed types adding one overflows; subtract
		 -1 instead to step past VR1MAX.  */
	      if (TREE_CODE (vr1max) != INTEGER_CST)
		*vr0min = vr1max;
	      else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
		       && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
		*vr0min
		  = int_const_binop (MINUS_EXPR, vr1max,
				     build_int_cst (TREE_TYPE (vr1max), -1));
	      else
		*vr0min
		  = int_const_binop (PLUS_EXPR, vr1max,
				     build_int_cst (TREE_TYPE (vr1max), 1));
	    }
	  /* Choose the left gap if the right one is empty.  */
	  else if (maxeq)
	    {
	      if (TREE_CODE (vr1min) != INTEGER_CST)
		*vr0max = vr1min;
	      else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
		       && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
		*vr0max
		  = int_const_binop (PLUS_EXPR, vr1min,
				     build_int_cst (TREE_TYPE (vr1min), -1));
	      else
		*vr0max
		  = int_const_binop (MINUS_EXPR, vr1min,
				     build_int_cst (TREE_TYPE (vr1min), 1));
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (*vr0min)
		   && vrp_val_is_max (*vr0max))
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	  /* Else choose the range.  */
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	/* If both are anti-ranges the result is the outer one.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [  ] ) or ([  ] ) or ( [  ]) */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	/* Choose the inner range.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Choose the right gap if the left is empty.  */
	  if (mineq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0max) != INTEGER_CST)
		*vr0min = *vr0max;
	      else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
		*vr0min
		  = int_const_binop (MINUS_EXPR, *vr0max,
				     build_int_cst (TREE_TYPE (*vr0max), -1));
	      else
		*vr0min
		  = int_const_binop (PLUS_EXPR, *vr0max,
				     build_int_cst (TREE_TYPE (*vr0max), 1));
	      *vr0max = vr1max;
	    }
	  /* Choose the left gap if the right is empty.  */
	  else if (maxeq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0min) != INTEGER_CST)
		*vr0max = *vr0min;
	      else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
		*vr0max
		  = int_const_binop (PLUS_EXPR, *vr0min,
				     build_int_cst (TREE_TYPE (*vr0min), -1));
	      else
		*vr0max
		  = int_const_binop (MINUS_EXPR, *vr0min,
				     build_int_cst (TREE_TYPE (*vr0min), 1));
	      *vr0min = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (vr1min)
		   && vrp_val_is_max (vr1max))
	    ;
	  /* Choose the anti-range if it is ~[0,0], that range is special
	     enough to special case when vr1's range is relatively wide.  */
	  else if (*vr0min == *vr0max
		   && integer_zerop (*vr0min)
		   && (TYPE_PRECISION (TREE_TYPE (*vr0min))
		       == TYPE_PRECISION (ptr_type_node))
		   && TREE_CODE (vr1max) == INTEGER_CST
		   && TREE_CODE (vr1min) == INTEGER_CST
		   && (wi::clz (wi::sub (vr1max, vr1min))
		       < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
	    ;
	  /* Else choose the range.  */
	  else
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If both are anti-ranges the result is the outer one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (vr1type == VR_ANTI_RANGE
	       && *vr0type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
	    || operand_equal_p (vr1min, *vr0max, 0))
	   && operand_less_p (*vr0min, vr1min) == 1)
    {
      /* [  (  ]  ) or [  ](  ) */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else
	    *vr0max = vr1min;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Note *VR0MIN is derived from the original *VR0MAX before
	     *VR0MAX is overwritten.  */
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
				       build_int_cst (TREE_TYPE (*vr0max), 1));
	  else
	    *vr0min = *vr0max;
	  *vr0max = vr1max;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
	    || operand_equal_p (*vr0min, vr1max, 0))
	   && operand_less_p (vr1min, *vr0min) == 1)
    {
      /* (  [  )  ] or (  )[  ] */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    *vr0min = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Likewise, *VR0MAX is derived from the original *VR0MIN
	     before *VR0MIN is overwritten.  */
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
				       build_int_cst (TREE_TYPE (*vr0min), 1));
	  else
	    *vr0max = *vr0min;
	  *vr0min = vr1min;
	}
      else
	gcc_unreachable ();
    }

  /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as
     result for the intersection.  That's always a conservative
     correct estimate unless VR1 is a constant singleton range
     in which case we choose that.  */
  if (vr1type == VR_RANGE
      && is_gimple_min_invariant (vr1min)
      && vrp_operand_equal_p (vr1min, vr1max))
    {
      *vr0type = vr1type;
      *vr0min = vr1min;
      *vr0max = vr1max;
    }

  return;
}
8763 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
8764 in *VR0. This may not be the smallest possible such range.
   Worker for vrp_intersect_ranges; VR0 is updated in place and also
   receives the union of the two equivalence sets.  */
8766 static void
8767 vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
8769 value_range saved;
8771 /* If either range is VR_VARYING the other one wins. */
8772 if (vr1->type == VR_VARYING)
8773 return;
8774 if (vr0->type == VR_VARYING)
8776 copy_value_range (vr0, vr1);
8777 return;
8780 /* When either range is VR_UNDEFINED the resulting range is
8781 VR_UNDEFINED, too. */
8782 if (vr0->type == VR_UNDEFINED)
8783 return;
8784 if (vr1->type == VR_UNDEFINED)
8786 set_value_range_to_undefined (vr0);
8787 return;
8790 /* Save the original vr0 so we can return it as conservative intersection
8791 result when our worker turns things to varying. */
8792 saved = *vr0;
8793 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
8794 vr1->type, vr1->min, vr1->max);
8795 /* Make sure to canonicalize the result though as the inversion of a
8796 VR_RANGE can still be a VR_RANGE. */
8797 set_and_canonicalize_value_range (vr0, vr0->type,
8798 vr0->min, vr0->max, vr0->equiv);
8799 /* If that failed, use the saved original VR0. */
8800 if (vr0->type == VR_VARYING)
8802 *vr0 = saved;
8803 return;
8805 /* If the result is VR_UNDEFINED there is no need to mess with
8806 the equivalencies. */
8807 if (vr0->type == VR_UNDEFINED)
8808 return;
8810 /* The resulting set of equivalences for range intersection is the union of
8811 the two sets. */
8812 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8813 bitmap_ior_into (vr0->equiv, vr1->equiv);
8814 else if (vr1->equiv && !vr0->equiv)
/* VR0 had no equivalence set: allocate a fresh bitmap on the VRP
   obstack and copy VR1's set so VR0 owns an independent bitmap.  */
8816 vr0->equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
8817 bitmap_copy (vr0->equiv, vr1->equiv);
8821 void
8822 vrp_intersect_ranges (value_range *vr0, value_range *vr1)
8824 if (dump_file && (dump_flags & TDF_DETAILS))
8826 fprintf (dump_file, "Intersecting\n ");
8827 dump_value_range (dump_file, vr0);
8828 fprintf (dump_file, "\nand\n ");
8829 dump_value_range (dump_file, vr1);
8830 fprintf (dump_file, "\n");
8832 vrp_intersect_ranges_1 (vr0, vr1);
8833 if (dump_file && (dump_flags & TDF_DETAILS))
8835 fprintf (dump_file, "to\n ");
8836 dump_value_range (dump_file, vr0);
8837 fprintf (dump_file, "\n");
8841 /* Meet operation for value ranges. Given two value ranges VR0 and
8842 VR1, store in VR0 a range that contains both VR0 and VR1. This
8843 may not be the smallest possible such range. */
8845 static void
8846 vrp_meet_1 (value_range *vr0, const value_range *vr1)
8848 value_range saved;
8850 if (vr0->type == VR_UNDEFINED)
8852 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
8853 return;
8856 if (vr1->type == VR_UNDEFINED)
8858 /* VR0 already has the resulting range. */
8859 return;
8862 if (vr0->type == VR_VARYING)
8864 /* Nothing to do. VR0 already has the resulting range. */
8865 return;
8868 if (vr1->type == VR_VARYING)
8870 set_value_range_to_varying (vr0);
8871 return;
/* Remember the original VR0; union_ranges overwrites *VR0 but the
   fallback below still needs the pre-union bounds.  */
8874 saved = *vr0;
8875 union_ranges (&vr0->type, &vr0->min, &vr0->max,
8876 vr1->type, vr1->min, vr1->max);
8877 if (vr0->type == VR_VARYING)
8879 /* Failed to find an efficient meet. Before giving up and setting
8880 the result to VARYING, see if we can at least derive a useful
8881 anti-range. FIXME, all this nonsense about distinguishing
8882 anti-ranges from ranges is necessary because of the odd
8883 semantics of range_includes_zero_p and friends. */
8884 if (((saved.type == VR_RANGE
8885 && range_includes_zero_p (saved.min, saved.max) == 0)
8886 || (saved.type == VR_ANTI_RANGE
8887 && range_includes_zero_p (saved.min, saved.max) == 1))
8888 && ((vr1->type == VR_RANGE
8889 && range_includes_zero_p (vr1->min, vr1->max) == 0)
8890 || (vr1->type == VR_ANTI_RANGE
8891 && range_includes_zero_p (vr1->min, vr1->max) == 1)))
/* Both inputs exclude zero, so the meet can at least be ~[0, 0].  */
8893 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
8895 /* Since this meet operation did not result from the meeting of
8896 two equivalent names, VR0 cannot have any equivalences. */
8897 if (vr0->equiv)
8898 bitmap_clear (vr0->equiv);
8899 return;
8902 set_value_range_to_varying (vr0);
8903 return;
8905 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
8906 vr0->equiv);
8907 if (vr0->type == VR_VARYING)
8908 return;
8910 /* The resulting set of equivalences is always the intersection of
8911 the two sets. */
8912 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8913 bitmap_and_into (vr0->equiv, vr1->equiv);
8914 else if (vr0->equiv && !vr1->equiv)
8915 bitmap_clear (vr0->equiv);
8918 void
8919 vrp_meet (value_range *vr0, const value_range *vr1)
8921 if (dump_file && (dump_flags & TDF_DETAILS))
8923 fprintf (dump_file, "Meeting\n ");
8924 dump_value_range (dump_file, vr0);
8925 fprintf (dump_file, "\nand\n ");
8926 dump_value_range (dump_file, vr1);
8927 fprintf (dump_file, "\n");
8929 vrp_meet_1 (vr0, vr1);
8930 if (dump_file && (dump_flags & TDF_DETAILS))
8932 fprintf (dump_file, "to\n ");
8933 dump_value_range (dump_file, vr0);
8934 fprintf (dump_file, "\n");
8939 /* Visit all arguments for PHI node PHI that flow through executable
8940 edges. If a valid value range can be derived from all the incoming
8941 value ranges, set a new range in VR_RESULT. */
8943 static void
8944 extract_range_from_phi_node (gphi *phi, value_range *vr_result)
8946 size_t i;
8947 tree lhs = PHI_RESULT (phi);
8948 value_range *lhs_vr = get_value_range (lhs);
8949 bool first = true;
8950 int edges, old_edges;
8951 struct loop *l;
8953 if (dump_file && (dump_flags & TDF_DETAILS))
8955 fprintf (dump_file, "\nVisiting PHI node: ");
8956 print_gimple_stmt (dump_file, phi, 0, dump_flags);
/* Set when some argument on a DFS back edge is defined by a statement
   that the propagator will simulate again, i.e. the backedge value may
   still change in a later iteration.  */
8959 bool may_simulate_backedge_again = false;
8960 edges = 0;
8961 for (i = 0; i < gimple_phi_num_args (phi); i++)
8963 edge e = gimple_phi_arg_edge (phi, i);
8965 if (dump_file && (dump_flags & TDF_DETAILS))
8967 fprintf (dump_file,
8968 " Argument #%d (%d -> %d %sexecutable)\n",
8969 (int) i, e->src->index, e->dest->index,
8970 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
8973 if (e->flags & EDGE_EXECUTABLE)
8975 tree arg = PHI_ARG_DEF (phi, i);
8976 value_range vr_arg;
8978 ++edges;
8980 if (TREE_CODE (arg) == SSA_NAME)
8982 /* See if we are eventually going to change one of the args. */
8983 gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
8984 if (! gimple_nop_p (def_stmt)
8985 && prop_simulate_again_p (def_stmt)
8986 && e->flags & EDGE_DFS_BACK)
8987 may_simulate_backedge_again = true;
8989 vr_arg = *(get_value_range (arg));
8990 /* Do not allow equivalences or symbolic ranges to leak in from
8991 backedges. That creates invalid equivalencies.
8992 See PR53465 and PR54767. */
8993 if (e->flags & EDGE_DFS_BACK)
8995 if (vr_arg.type == VR_RANGE
8996 || vr_arg.type == VR_ANTI_RANGE)
8998 vr_arg.equiv = NULL;
8999 if (symbolic_range_p (&vr_arg))
9001 vr_arg.type = VR_VARYING;
9002 vr_arg.min = NULL_TREE;
9003 vr_arg.max = NULL_TREE;
9007 else
9009 /* If the non-backedge arguments range is VR_VARYING then
9010 we can still try recording a simple equivalence. */
9011 if (vr_arg.type == VR_VARYING)
9013 vr_arg.type = VR_RANGE;
9014 vr_arg.min = arg;
9015 vr_arg.max = arg;
9016 vr_arg.equiv = NULL;
9020 else
/* Constant argument: use the singleton range [arg, arg], stripping
   any TREE_OVERFLOW marker first.  */
9022 if (TREE_OVERFLOW_P (arg))
9023 arg = drop_tree_overflow (arg);
9025 vr_arg.type = VR_RANGE;
9026 vr_arg.min = arg;
9027 vr_arg.max = arg;
9028 vr_arg.equiv = NULL;
9031 if (dump_file && (dump_flags & TDF_DETAILS))
9033 fprintf (dump_file, "\t");
9034 print_generic_expr (dump_file, arg, dump_flags);
9035 fprintf (dump_file, ": ");
9036 dump_value_range (dump_file, &vr_arg);
9037 fprintf (dump_file, "\n");
/* Accumulate the meet over all executable incoming edges; once the
   result degrades to VARYING no further argument can improve it.  */
9040 if (first)
9041 copy_value_range (vr_result, &vr_arg);
9042 else
9043 vrp_meet (vr_result, &vr_arg);
9044 first = false;
9046 if (vr_result->type == VR_VARYING)
9047 break;
9051 if (vr_result->type == VR_VARYING)
9052 goto varying;
9053 else if (vr_result->type == VR_UNDEFINED)
9054 goto update_range;
9056 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
9057 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
9059 /* To prevent infinite iterations in the algorithm, derive ranges
9060 when the new value is slightly bigger or smaller than the
9061 previous one. We don't do this if we have seen a new executable
9062 edge; this helps us avoid an infinity for conditionals
9063 which are not in a loop. If the old value-range was VR_UNDEFINED
9064 use the updated range and iterate one more time. If we will not
9065 simulate this PHI again via the backedge allow us to iterate. */
9066 if (edges > 0
9067 && gimple_phi_num_args (phi) > 1
9068 && edges == old_edges
9069 && lhs_vr->type != VR_UNDEFINED
9070 && may_simulate_backedge_again)
9072 /* Compare old and new ranges, fall back to varying if the
9073 values are not comparable. */
9074 int cmp_min = compare_values (lhs_vr->min, vr_result->min);
9075 if (cmp_min == -2)
9076 goto varying;
9077 int cmp_max = compare_values (lhs_vr->max, vr_result->max);
9078 if (cmp_max == -2)
9079 goto varying;
9081 /* For non VR_RANGE or for pointers fall back to varying if
9082 the range changed. */
9083 if ((lhs_vr->type != VR_RANGE || vr_result->type != VR_RANGE
9084 || POINTER_TYPE_P (TREE_TYPE (lhs)))
9085 && (cmp_min != 0 || cmp_max != 0))
9086 goto varying;
9088 /* If the new minimum is larger than the previous one
9089 retain the old value. If the new minimum value is smaller
9090 than the previous one and not -INF go all the way to -INF + 1.
9091 In the first case, to avoid infinite bouncing between different
9092 minimums, and in the other case to avoid iterating millions of
9093 times to reach -INF. Going to -INF + 1 also lets the following
9094 iteration compute whether there will be any overflow, at the
9095 expense of one additional iteration. */
9096 if (cmp_min < 0)
9097 vr_result->min = lhs_vr->min;
9098 else if (cmp_min > 0
9099 && !vrp_val_is_min (vr_result->min))
9100 vr_result->min
9101 = int_const_binop (PLUS_EXPR,
9102 vrp_val_min (TREE_TYPE (vr_result->min)),
9103 build_int_cst (TREE_TYPE (vr_result->min), 1));
9105 /* Similarly for the maximum value. */
9106 if (cmp_max > 0)
9107 vr_result->max = lhs_vr->max;
9108 else if (cmp_max < 0
9109 && !vrp_val_is_max (vr_result->max))
9110 vr_result->max
9111 = int_const_binop (MINUS_EXPR,
9112 vrp_val_max (TREE_TYPE (vr_result->min)),
9113 build_int_cst (TREE_TYPE (vr_result->min), 1));
9115 /* If we dropped either bound to +-INF then if this is a loop
9116 PHI node SCEV may known more about its value-range. */
9117 if (cmp_min > 0 || cmp_min < 0
9118 || cmp_max < 0 || cmp_max > 0)
9119 goto scev_check;
9121 goto infinite_check;
9124 goto update_range;
9126 varying:
9127 set_value_range_to_varying (vr_result);
9129 scev_check:
9130 /* If this is a loop PHI node SCEV may known more about its value-range.
9131 scev_check can be reached from two paths, one is a fall through from above
9132 "varying" label, the other is direct goto from code block which tries to
9133 avoid infinite simulation. */
9134 if ((l = loop_containing_stmt (phi))
9135 && l->header == gimple_bb (phi))
9136 adjust_range_with_scev (vr_result, l, phi, lhs);
9138 infinite_check:
9139 /* If we will end up with a (-INF, +INF) range, set it to
9140 VARYING. Same if the previous max value was invalid for
9141 the type and we end up with vr_result.min > vr_result.max. */
9142 if ((vr_result->type == VR_RANGE || vr_result->type == VR_ANTI_RANGE)
9143 && !((vrp_val_is_max (vr_result->max) && vrp_val_is_min (vr_result->min))
9144 || compare_values (vr_result->min, vr_result->max) > 0))
9146 else
9147 set_value_range_to_varying (vr_result);
9149 /* If the new range is different than the previous value, keep
9150 iterating. */
9151 update_range:
9152 return;
9155 /* Visit all arguments for PHI node PHI that flow through executable
9156 edges. If a valid value range can be derived from all the incoming
9157 value ranges, set a new range for the LHS of PHI. */
9159 static enum ssa_prop_result
9160 vrp_visit_phi_node (gphi *phi)
9162 tree lhs = PHI_RESULT (phi);
9163 value_range vr_result = VR_INITIALIZER;
9164 extract_range_from_phi_node (phi, &vr_result);
9165 if (update_value_range (lhs, &vr_result))
9167 if (dump_file && (dump_flags & TDF_DETAILS))
9169 fprintf (dump_file, "Found new range for ");
9170 print_generic_expr (dump_file, lhs);
9171 fprintf (dump_file, ": ");
9172 dump_value_range (dump_file, &vr_result);
9173 fprintf (dump_file, "\n");
9176 if (vr_result.type == VR_VARYING)
9177 return SSA_PROP_VARYING;
9179 return SSA_PROP_INTERESTING;
9182 /* Nothing changed, don't add outgoing edges. */
9183 return SSA_PROP_NOT_INTERESTING;
9186 /* Simplify boolean operations if the source is known
9187 to be already a boolean.
   GSI/STMT is an EQ_EXPR or NE_EXPR assignment whose operands both have
   a [0, 1] value range.  Returns true if the statement was rewritten.  */
9188 static bool
9189 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9191 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9192 tree lhs, op0, op1;
9193 bool need_conversion;
9195 /* We handle only !=/== case here. */
9196 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
9198 op0 = gimple_assign_rhs1 (stmt);
9199 if (!op_with_boolean_value_range_p (op0))
9200 return false;
9202 op1 = gimple_assign_rhs2 (stmt);
9203 if (!op_with_boolean_value_range_p (op1))
9204 return false;
9206 /* Reduce number of cases to handle to NE_EXPR. As there is no
9207 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
9208 if (rhs_code == EQ_EXPR)
/* Turn A == C into A != (C ^ 1) for constant C; give up otherwise.  */
9210 if (TREE_CODE (op1) == INTEGER_CST)
9211 op1 = int_const_binop (BIT_XOR_EXPR, op1,
9212 build_int_cst (TREE_TYPE (op1), 1));
9213 else
9214 return false;
9217 lhs = gimple_assign_lhs (stmt);
9218 need_conversion
9219 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
9221 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
9222 if (need_conversion
9223 && !TYPE_UNSIGNED (TREE_TYPE (op0))
9224 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
9225 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
9226 return false;
9228 /* For A != 0 we can substitute A itself. */
9229 if (integer_zerop (op1))
9230 gimple_assign_set_rhs_with_ops (gsi,
9231 need_conversion
9232 ? NOP_EXPR : TREE_CODE (op0), op0);
9233 /* For A != B we substitute A ^ B. Either with conversion. */
9234 else if (need_conversion)
9236 tree tem = make_ssa_name (TREE_TYPE (op0));
9237 gassign *newop
9238 = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
9239 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
/* The XOR of two boolean-range values is itself in [0, 1]; record
   that range on the temporary when its type can hold it.  */
9240 if (INTEGRAL_TYPE_P (TREE_TYPE (tem))
9241 && TYPE_PRECISION (TREE_TYPE (tem)) > 1)
9242 set_range_info (tem, VR_RANGE,
9243 wi::zero (TYPE_PRECISION (TREE_TYPE (tem))),
9244 wi::one (TYPE_PRECISION (TREE_TYPE (tem))));
9245 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
9247 /* Or without. */
9248 else
9249 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
9250 update_stmt (gsi_stmt (*gsi));
9251 fold_stmt (gsi, follow_single_use_edges);
9253 return true;
9256 /* Simplify a division or modulo operator to a right shift or bitwise and
9257 if the first operand is unsigned or is greater than zero and the second
9258 operand is an exact power of two. For TRUNC_MOD_EXPR op0 % op1 with
9259 constant op1 (op1min = op1) or with op1 in [op1min, op1max] range,
9260 optimize it into just op0 if op0's range is known to be a subset of
9261 [-op1min + 1, op1min - 1] for signed and [0, op1min - 1] for unsigned
9262 modulo.
   Returns true if the statement was rewritten.  */
9264 static bool
9265 simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9267 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9268 tree val = NULL;
9269 tree op0 = gimple_assign_rhs1 (stmt);
9270 tree op1 = gimple_assign_rhs2 (stmt);
9271 tree op0min = NULL_TREE, op0max = NULL_TREE;
9272 tree op1min = op1;
9273 value_range *vr = NULL;
/* Determine constant bounds for OP0, either from the constant itself
   or from its recorded value range.  */
9275 if (TREE_CODE (op0) == INTEGER_CST)
9277 op0min = op0;
9278 op0max = op0;
9280 else
9282 vr = get_value_range (op0);
9283 if (range_int_cst_p (vr))
9285 op0min = vr->min;
9286 op0max = vr->max;
/* For modulo by an SSA name, a lower bound on OP1 is enough to prove
   OP0 % OP1 == OP0 below.  */
9290 if (rhs_code == TRUNC_MOD_EXPR
9291 && TREE_CODE (op1) == SSA_NAME)
9293 value_range *vr1 = get_value_range (op1);
9294 if (range_int_cst_p (vr1))
9295 op1min = vr1->min;
9297 if (rhs_code == TRUNC_MOD_EXPR
9298 && TREE_CODE (op1min) == INTEGER_CST
9299 && tree_int_cst_sgn (op1min) == 1
9300 && op0max
9301 && tree_int_cst_lt (op0max, op1min))
9303 if (TYPE_UNSIGNED (TREE_TYPE (op0))
9304 || tree_int_cst_sgn (op0min) >= 0
9305 || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1min), op1min),
9306 op0min))
9308 /* If op0 already has the range op0 % op1 has,
9309 then TRUNC_MOD_EXPR won't change anything. */
9310 gimple_assign_set_rhs_from_tree (gsi, op0)
9311 return true;
9315 if (TREE_CODE (op0) != SSA_NAME)
9316 return false;
9318 if (!integer_pow2p (op1))
9320 /* X % -Y can be only optimized into X % Y either if
9321 X is not INT_MIN, or Y is not -1. Fold it now, as after
9322 remove_range_assertions the range info might be not available
9323 anymore. */
9324 if (rhs_code == TRUNC_MOD_EXPR
9325 && fold_stmt (gsi, follow_single_use_edges))
9326 return true;
9327 return false;
/* The shift/mask transform requires OP0 to be non-negative: trivially
   true for unsigned types, otherwise proven from the value range (and
   possibly relying on undefined signed overflow, hence the warning).  */
9330 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
9331 val = integer_one_node;
9332 else
9334 bool sop = false;
9336 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
9338 if (val
9339 && sop
9340 && integer_onep (val)
9341 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9343 location_t location;
9345 if (!gimple_has_location (stmt))
9346 location = input_location;
9347 else
9348 location = gimple_location (stmt);
9349 warning_at (location, OPT_Wstrict_overflow,
9350 "assuming signed overflow does not occur when "
9351 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
9355 if (val && integer_onep (val))
9357 tree t;
9359 if (rhs_code == TRUNC_DIV_EXPR)
/* X / 2**N  ->  X >> N.  */
9361 t = build_int_cst (integer_type_node, tree_log2 (op1));
9362 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
9363 gimple_assign_set_rhs1 (stmt, op0);
9364 gimple_assign_set_rhs2 (stmt, t);
9366 else
/* X % 2**N  ->  X & (2**N - 1).  */
9368 t = build_int_cst (TREE_TYPE (op1), 1);
9369 t = int_const_binop (MINUS_EXPR, op1, t);
9370 t = fold_convert (TREE_TYPE (op0), t);
9372 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
9373 gimple_assign_set_rhs1 (stmt, op0);
9374 gimple_assign_set_rhs2 (stmt, t);
9377 update_stmt (stmt);
9378 fold_stmt (gsi, follow_single_use_edges);
9379 return true;
9382 return false;
9385 /* Simplify a min or max if the ranges of the two operands are
9386 disjoint. Return true if we do simplify. */
9388 static bool
9389 simplify_min_or_max_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9391 tree op0 = gimple_assign_rhs1 (stmt);
9392 tree op1 = gimple_assign_rhs2 (stmt);
9393 bool sop = false;
9394 tree val;
/* Try to decide OP0 <= OP1; if that is inconclusive, try the strict
   OP0 < OP1 instead.  A non-NULL VAL means the comparison result is
   known for the whole ranges of the operands.  */
9396 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9397 (LE_EXPR, op0, op1, &sop));
9398 if (!val)
9400 sop = false;
9401 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9402 (LT_EXPR, op0, op1, &sop));
9405 if (val)
9407 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9409 location_t location;
9411 if (!gimple_has_location (stmt))
9412 location = input_location;
9413 else
9414 location = gimple_location (stmt);
9415 warning_at (location, OPT_Wstrict_overflow,
9416 "assuming signed overflow does not occur when "
9417 "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
9420 /* VAL == TRUE -> OP0 < or <= op1
9421 VAL == FALSE -> OP0 > or >= op1. */
9422 tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
9423 == integer_zerop (val)) ? op0 : op1;
9424 gimple_assign_set_rhs_from_tree (gsi, res);
9425 return true;
9428 return false;
9431 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
9432 ABS_EXPR. If the operand is <= 0, then simplify the
9433 ABS_EXPR into a NEGATE_EXPR.
   Returns true if the statement was rewritten.  */
9435 static bool
9436 simplify_abs_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9438 tree op = gimple_assign_rhs1 (stmt);
9439 value_range *vr = get_value_range (op);
9441 if (vr)
9443 tree val = NULL;
9444 bool sop = false;
9446 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
9447 if (!val)
9449 /* The range is neither <= 0 nor > 0. Now see if it is
9450 either < 0 or >= 0. */
9451 sop = false;
9452 val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
9453 &sop);
9456 if (val)
9458 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9460 location_t location;
9462 if (!gimple_has_location (stmt))
9463 location = input_location;
9464 else
9465 location = gimple_location (stmt);
9466 warning_at (location, OPT_Wstrict_overflow,
9467 "assuming signed overflow does not occur when "
9468 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
/* A zero VAL means OP is known non-negative: drop the ABS_EXPR
   entirely.  Otherwise OP is known non-positive: use -OP.  */
9471 gimple_assign_set_rhs1 (stmt, op);
9472 if (integer_zerop (val))
9473 gimple_assign_set_rhs_code (stmt, SSA_NAME);
9474 else
9475 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
9476 update_stmt (stmt);
9477 fold_stmt (gsi, follow_single_use_edges);
9478 return true;
9482 return false;
9485 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
9486 If all the bits that are being cleared by & are already
9487 known to be zero from VR, or all the bits that are being
9488 set by | are already known to be one from VR, the bit
9489 operation is redundant.
   Returns true if the statement was rewritten.  */
9491 static bool
9492 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9494 tree op0 = gimple_assign_rhs1 (stmt);
9495 tree op1 = gimple_assign_rhs2 (stmt);
9496 tree op = NULL_TREE;
9497 value_range vr0 = VR_INITIALIZER;
9498 value_range vr1 = VR_INITIALIZER;
9499 wide_int may_be_nonzero0, may_be_nonzero1;
9500 wide_int must_be_nonzero0, must_be_nonzero1;
9501 wide_int mask;
9503 if (TREE_CODE (op0) == SSA_NAME)
9504 vr0 = *(get_value_range (op0));
9505 else if (is_gimple_min_invariant (op0))
9506 set_value_range_to_value (&vr0, op0, NULL);
9507 else
9508 return false;
9510 if (TREE_CODE (op1) == SSA_NAME)
9511 vr1 = *(get_value_range (op1));
9512 else if (is_gimple_min_invariant (op1))
9513 set_value_range_to_value (&vr1, op1, NULL);
9514 else
9515 return false;
9517 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
9518 &must_be_nonzero0))
9519 return false;
9520 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
9521 &must_be_nonzero1))
9522 return false;
9524 switch (gimple_assign_rhs_code (stmt))
9526 case BIT_AND_EXPR:
/* OP0 & OP1 == OP0 when every possibly-nonzero bit of OP0 is known
   to be set in OP1 (and symmetrically for OP1).  */
9527 mask = may_be_nonzero0.and_not (must_be_nonzero1);
9528 if (mask == 0)
9530 op = op0;
9531 break;
9533 mask = may_be_nonzero1.and_not (must_be_nonzero0);
9534 if (mask == 0)
9536 op = op1;
9537 break;
9539 break;
9540 case BIT_IOR_EXPR:
/* OP0 | OP1 == OP1 when every possibly-nonzero bit of OP0 is known
   to be set in OP1 already (and symmetrically for OP0).  */
9541 mask = may_be_nonzero0.and_not (must_be_nonzero1);
9542 if (mask == 0)
9544 op = op1;
9545 break;
9547 mask = may_be_nonzero1.and_not (must_be_nonzero0);
9548 if (mask == 0)
9550 op = op0;
9551 break;
9553 break;
9554 default:
9555 gcc_unreachable ();
9558 if (op == NULL_TREE)
9559 return false;
9561 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
9562 update_stmt (gsi_stmt (*gsi));
9563 return true;
9566 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
9567 a known value range VR.
9569 If there is one and only one value which will satisfy the
9570 conditional, then return that value. Else return NULL.
9572 If signed overflow must be undefined for the value to satisfy
9573 the conditional, then set *STRICT_OVERFLOW_P to true. */
9575 static tree
9576 test_for_singularity (enum tree_code cond_code, tree op0,
9577 tree op1, value_range *vr)
9579 tree min = NULL;
9580 tree max = NULL;
9582 /* Extract minimum/maximum values which satisfy the conditional as it was
9583 written. */
9584 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
9586 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
9588 max = op1;
9589 if (cond_code == LT_EXPR)
/* OP0 < OP1 is equivalent to OP0 <= OP1 - 1 for integral types.  */
9591 tree one = build_int_cst (TREE_TYPE (op0), 1);
9592 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
9593 /* Signal to compare_values_warnv this expr doesn't overflow. */
9594 if (EXPR_P (max))
9595 TREE_NO_WARNING (max) = 1;
9598 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
9600 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
9602 min = op1;
9603 if (cond_code == GT_EXPR)
/* OP0 > OP1 is equivalent to OP0 >= OP1 + 1 for integral types.  */
9605 tree one = build_int_cst (TREE_TYPE (op0), 1);
9606 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
9607 /* Signal to compare_values_warnv this expr doesn't overflow. */
9608 if (EXPR_P (min))
9609 TREE_NO_WARNING (min) = 1;
9613 /* Now refine the minimum and maximum values using any
9614 value range information we have for op0. */
9615 if (min && max)
9617 if (compare_values (vr->min, min) == 1)
9618 min = vr->min;
9619 if (compare_values (vr->max, max) == -1)
9620 max = vr->max;
9622 /* If the new min/max values have converged to a single value,
9623 then there is only one value which can satisfy the condition,
9624 return that value. */
9625 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
9626 return min;
9628 return NULL;
9631 /* Return whether the value range *VR fits in an integer type specified
9632 by DEST_PRECISION and DEST_SGN.  */
9634 static bool
9635 range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
9637 tree src_type;
9638 unsigned src_precision;
9639 widest_int tem;
9640 signop src_sgn;
9642 /* We can only handle integral and pointer types. */
9643 src_type = TREE_TYPE (vr->min);
9644 if (!INTEGRAL_TYPE_P (src_type)
9645 && !POINTER_TYPE_P (src_type))
9646 return false;
9648 /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
9649 and so is an identity transform. */
9650 src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
9651 src_sgn = TYPE_SIGN (src_type);
9652 if ((src_precision < dest_precision
9653 && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
9654 || (src_precision == dest_precision && src_sgn == dest_sgn))
9655 return true;
9657 /* Now we can only handle ranges with constant bounds. */
9658 if (vr->type != VR_RANGE
9659 || TREE_CODE (vr->min) != INTEGER_CST
9660 || TREE_CODE (vr->max) != INTEGER_CST)
9661 return false;
9663 /* For sign changes, the MSB of the wide_int has to be clear.
9664 An unsigned value with its MSB set cannot be represented by
9665 a signed wide_int, while a negative value cannot be represented
9666 by an unsigned wide_int. */
9667 if (src_sgn != dest_sgn
9668 && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
9669 return false;
9671 /* Then we can perform the conversion on both ends and compare
9672 the result for equality. */
9673 tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
9674 if (tem != wi::to_widest (vr->min))
9675 return false;
9676 tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
9677 if (tem != wi::to_widest (vr->max))
9678 return false;
9680 return true;
9683 /* Simplify a conditional using a relational operator to an equality
9684 test if the range information indicates only one value can satisfy
9685 the original conditional.
   Returns true if STMT was changed.  */
9687 static bool
9688 simplify_cond_using_ranges_1 (gcond *stmt)
9690 tree op0 = gimple_cond_lhs (stmt);
9691 tree op1 = gimple_cond_rhs (stmt);
9692 enum tree_code cond_code = gimple_cond_code (stmt);
9694 if (cond_code != NE_EXPR
9695 && cond_code != EQ_EXPR
9696 && TREE_CODE (op0) == SSA_NAME
9697 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
9698 && is_gimple_min_invariant (op1))
9700 value_range *vr = get_value_range (op0);
9702 /* If we have range information for OP0, then we might be
9703 able to simplify this conditional. */
9704 if (vr->type == VR_RANGE)
9706 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
9707 if (new_tree)
9709 if (dump_file)
9711 fprintf (dump_file, "Simplified relational ");
9712 print_gimple_stmt (dump_file, stmt, 0);
9713 fprintf (dump_file, " into ");
/* Only NEW_TREE satisfies the condition, so test OP0 == NEW_TREE.  */
9716 gimple_cond_set_code (stmt, EQ_EXPR);
9717 gimple_cond_set_lhs (stmt, op0);
9718 gimple_cond_set_rhs (stmt, new_tree);
9720 update_stmt (stmt);
9722 if (dump_file)
9724 print_gimple_stmt (dump_file, stmt, 0);
9725 fprintf (dump_file, "\n");
9728 return true;
9731 /* Try again after inverting the condition. We only deal
9732 with integral types here, so no need to worry about
9733 issues with inverting FP comparisons. */
9734 new_tree = test_for_singularity
9735 (invert_tree_comparison (cond_code, false),
9736 op0, op1, vr);
9737 if (new_tree)
9739 if (dump_file)
9741 fprintf (dump_file, "Simplified relational ");
9742 print_gimple_stmt (dump_file, stmt, 0);
9743 fprintf (dump_file, " into ");
/* Only NEW_TREE satisfies the inverted condition, so the original
   condition is equivalent to OP0 != NEW_TREE.  */
9746 gimple_cond_set_code (stmt, NE_EXPR);
9747 gimple_cond_set_lhs (stmt, op0);
9748 gimple_cond_set_rhs (stmt, new_tree);
9750 update_stmt (stmt);
9752 if (dump_file)
9754 print_gimple_stmt (dump_file, stmt, 0);
9755 fprintf (dump_file, "\n");
9758 return true;
9762 return false;
9765 /* STMT is a conditional at the end of a basic block.
9767 If the conditional is of the form SSA_NAME op constant and the SSA_NAME
9768 was set via a type conversion, try to replace the SSA_NAME with the RHS
9769 of the type conversion. Doing so makes the conversion dead which helps
9770 subsequent passes. */
9772 static void
9773 simplify_cond_using_ranges_2 (gcond *stmt)
9775 tree op0 = gimple_cond_lhs (stmt);
9776 tree op1 = gimple_cond_rhs (stmt);
9778 /* If we have a comparison of an SSA_NAME (OP0) against a constant,
9779 see if OP0 was set by a type conversion where the source of
9780 the conversion is another SSA_NAME with a range that fits
9781 into the range of OP0's type.
9783 If so, the conversion is redundant as the earlier SSA_NAME can be
9784 used for the comparison directly if we just massage the constant in the
9785 comparison. */
9786 if (TREE_CODE (op0) == SSA_NAME
9787 && TREE_CODE (op1) == INTEGER_CST)
9789 gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
9790 tree innerop;
9792 if (!is_gimple_assign (def_stmt)
9793 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
9794 return;
9796 innerop = gimple_assign_rhs1 (def_stmt);
9798 if (TREE_CODE (innerop) == SSA_NAME
9799 && !POINTER_TYPE_P (TREE_TYPE (innerop))
9800 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
9801 && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
9803 value_range *vr = get_value_range (innerop);
/* The rewrite is valid only if INNEROP's whole range survives the
   conversion to OP0's type and the constant fits INNEROP's type.  */
9805 if (range_int_cst_p (vr)
9806 && range_fits_type_p (vr,
9807 TYPE_PRECISION (TREE_TYPE (op0)),
9808 TYPE_SIGN (TREE_TYPE (op0)))
9809 && int_fits_type_p (op1, TREE_TYPE (innerop)))
9811 tree newconst = fold_convert (TREE_TYPE (innerop), op1);
9812 gimple_cond_set_lhs (stmt, innerop);
9813 gimple_cond_set_rhs (stmt, newconst);
9814 update_stmt (stmt);
9815 if (dump_file && (dump_flags & TDF_DETAILS))
9817 fprintf (dump_file, "Folded into: ");
9818 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
9819 fprintf (dump_file, "\n");
9826 /* Simplify a switch statement using the value range of the switch
9827 argument. */
9829 static bool
9830 simplify_switch_using_ranges (gswitch *stmt)
9832 tree op = gimple_switch_index (stmt);
9833 value_range *vr = NULL;
9834 bool take_default;
9835 edge e;
9836 edge_iterator ei;
/* [i, j] and [k, l] delimit the (up to two) runs of case-label indices
   that intersect OP's value range; either run may end up empty.  */
9837 size_t i = 0, j = 0, n, n2;
9838 tree vec2;
9839 switch_update su;
9840 size_t k = 1, l = 0;
9842 if (TREE_CODE (op) == SSA_NAME)
9844 vr = get_value_range (op);
9846 /* We can only handle integer ranges. */
9847 if ((vr->type != VR_RANGE
9848 && vr->type != VR_ANTI_RANGE)
9849 || symbolic_range_p (vr))
9850 return false;
9852 /* Find case label for min/max of the value range. */
9853 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
9855 else if (TREE_CODE (op) == INTEGER_CST)
9857 take_default = !find_case_label_index (stmt, 1, op, &i);
9858 if (take_default)
/* No matching label: make the [i, j] run empty.  */
9860 i = 1;
9861 j = 0;
9863 else
9865 j = i;
9868 else
9869 return false;
9871 n = gimple_switch_num_labels (stmt);
9873 /* We can truncate the case label ranges that partially overlap with OP's
9874 value range. */
9875 size_t min_idx = 1, max_idx = 0;
9876 if (vr != NULL)
9877 find_case_label_range (stmt, vr->min, vr->max, &min_idx, &max_idx);
9878 if (min_idx <= max_idx)
9880 tree min_label = gimple_switch_label (stmt, min_idx);
9881 tree max_label = gimple_switch_label (stmt, max_idx);
9883 /* Avoid changing the type of the case labels when truncating. */
9884 tree case_label_type = TREE_TYPE (CASE_LOW (min_label));
9885 tree vr_min = fold_convert (case_label_type, vr->min);
9886 tree vr_max = fold_convert (case_label_type, vr->max);
9888 if (vr->type == VR_RANGE)
9890 /* If OP's value range is [2,8] and the low label range is
9891 0 ... 3, truncate the label's range to 2 .. 3. */
9892 if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
9893 && CASE_HIGH (min_label) != NULL_TREE
9894 && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
9895 CASE_LOW (min_label) = vr_min;
9897 /* If OP's value range is [2,8] and the high label range is
9898 7 ... 10, truncate the label's range to 7 .. 8. */
9899 if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
9900 && CASE_HIGH (max_label) != NULL_TREE
9901 && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
9902 CASE_HIGH (max_label) = vr_max;
9904 else if (vr->type == VR_ANTI_RANGE)
9906 tree one_cst = build_one_cst (case_label_type);
9908 if (min_label == max_label)
9910 /* If OP's value range is ~[7,8] and the label's range is
9911 7 ... 10, truncate the label's range to 9 ... 10. */
9912 if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) == 0
9913 && CASE_HIGH (min_label) != NULL_TREE
9914 && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) > 0)
9915 CASE_LOW (min_label)
9916 = int_const_binop (PLUS_EXPR, vr_max, one_cst);
9918 /* If OP's value range is ~[7,8] and the label's range is
9919 5 ... 8, truncate the label's range to 5 ... 6. */
9920 if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
9921 && CASE_HIGH (min_label) != NULL_TREE
9922 && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) == 0)
9923 CASE_HIGH (min_label)
9924 = int_const_binop (MINUS_EXPR, vr_min, one_cst);
9926 else
9928 /* If OP's value range is ~[2,8] and the low label range is
9929 0 ... 3, truncate the label's range to 0 ... 1. */
9930 if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
9931 && CASE_HIGH (min_label) != NULL_TREE
9932 && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
9933 CASE_HIGH (min_label)
9934 = int_const_binop (MINUS_EXPR, vr_min, one_cst);
9936 /* If OP's value range is ~[2,8] and the high label range is
9937 7 ... 10, truncate the label's range to 9 ... 10. */
9938 if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
9939 && CASE_HIGH (max_label) != NULL_TREE
9940 && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
9941 CASE_LOW (max_label)
9942 = int_const_binop (PLUS_EXPR, vr_max, one_cst);
9946 /* Canonicalize singleton case ranges. */
9947 if (tree_int_cst_equal (CASE_LOW (min_label), CASE_HIGH (min_label)))
9948 CASE_HIGH (min_label) = NULL_TREE;
9949 if (tree_int_cst_equal (CASE_LOW (max_label), CASE_HIGH (max_label)))
9950 CASE_HIGH (max_label) = NULL_TREE;
9953 /* We can also eliminate case labels that lie completely outside OP's value
9954 range. */
9956 /* Bail out if this is just all edges taken. */
9957 if (i == 1
9958 && j == n - 1
9959 && take_default)
9960 return false;
9962 /* Build a new vector of taken case labels. */
9963 vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
9964 n2 = 0;
9966 /* Add the default edge, if necessary. */
9967 if (take_default)
9968 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
9970 for (; i <= j; ++i, ++n2)
9971 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
9973 for (; k <= l; ++k, ++n2)
9974 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
9976 /* Mark needed edges. */
/* An AUX value of -1 marks a successor edge as still reachable from the
   reduced label vector; all other successors become removal candidates.  */
9977 for (i = 0; i < n2; ++i)
9979 e = find_edge (gimple_bb (stmt),
9980 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
9981 e->aux = (void *)-1;
9984 /* Queue not needed edges for later removal. */
9985 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
9987 if (e->aux == (void *)-1)
9989 e->aux = NULL;
9990 continue;
9993 if (dump_file && (dump_flags & TDF_DETAILS))
9995 fprintf (dump_file, "removing unreachable case label\n");
9997 to_remove_edges.safe_push (e);
9998 e->flags &= ~EDGE_EXECUTABLE;
10001 /* And queue an update for the stmt. */
10002 su.stmt = stmt;
10003 su.vec = vec2;
10004 to_update_switch_stmts.safe_push (su);
/* Return false: the switch itself is not modified here; the queued
   entry in to_update_switch_stmts is applied later by the pass.  */
10005 return false;
10008 /* Simplify an integral conversion from an SSA name in STMT. */
/* STMT computes (FINALTYPE)(MIDDLETYPE)INNEROP.  If dropping the middle
   conversion cannot change the result for any value of INNEROP in its
   recorded range, rewrite STMT to convert INNEROP directly and return
   true; otherwise return false.  */
10010 static bool
10011 simplify_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
10013 tree innerop, middleop, finaltype;
10014 gimple *def_stmt;
10015 signop inner_sgn, middle_sgn, final_sgn;
10016 unsigned inner_prec, middle_prec, final_prec;
10017 widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
10019 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
10020 if (!INTEGRAL_TYPE_P (finaltype))
10021 return false;
10022 middleop = gimple_assign_rhs1 (stmt);
10023 def_stmt = SSA_NAME_DEF_STMT (middleop);
10024 if (!is_gimple_assign (def_stmt)
10025 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
10026 return false;
10027 innerop = gimple_assign_rhs1 (def_stmt);
10028 if (TREE_CODE (innerop) != SSA_NAME
10029 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
10030 return false;
10032 /* Get the value-range of the inner operand. Use get_range_info in
10033 case innerop was created during substitute-and-fold. */
10034 wide_int imin, imax;
10035 if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))
10036 || get_range_info (innerop, &imin, &imax) != VR_RANGE)
10037 return false;
10038 innermin = widest_int::from (imin, TYPE_SIGN (TREE_TYPE (innerop)));
10039 innermax = widest_int::from (imax, TYPE_SIGN (TREE_TYPE (innerop)));
10041 /* Simulate the conversion chain to check if the result is equal if
10042 the middle conversion is removed. */
10043 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
10044 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
10045 final_prec = TYPE_PRECISION (finaltype);
10047 /* If the first conversion is not injective, the second must not
10048 be widening. */
10049 if (wi::gtu_p (innermax - innermin,
10050 wi::mask <widest_int> (middle_prec, false))
10051 && middle_prec < final_prec)
10052 return false;
10053 /* We also want a medium value so that we can track the effect that
10054 narrowing conversions with sign change have. */
10055 inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
10056 if (inner_sgn == UNSIGNED)
10057 innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
10058 else
10059 innermed = 0;
/* If the candidate medium value lies outside [innermin, innermax],
   fall back to innermin so the check below degenerates harmlessly.  */
10060 if (wi::cmp (innermin, innermed, inner_sgn) >= 0
10061 || wi::cmp (innermed, innermax, inner_sgn) >= 0)
10062 innermed = innermin;
10064 middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
10065 middlemin = wi::ext (innermin, middle_prec, middle_sgn);
10066 middlemed = wi::ext (innermed, middle_prec, middle_sgn);
10067 middlemax = wi::ext (innermax, middle_prec, middle_sgn);
10069 /* Require that the final conversion applied to both the original
10070 and the intermediate range produces the same result. */
10071 final_sgn = TYPE_SIGN (finaltype);
10072 if (wi::ext (middlemin, final_prec, final_sgn)
10073 != wi::ext (innermin, final_prec, final_sgn)
10074 || wi::ext (middlemed, final_prec, final_sgn)
10075 != wi::ext (innermed, final_prec, final_sgn)
10076 || wi::ext (middlemax, final_prec, final_sgn)
10077 != wi::ext (innermax, final_prec, final_sgn))
10078 return false;
/* Safe: bypass the intermediate conversion and refold.  */
10080 gimple_assign_set_rhs1 (stmt, innerop);
10081 fold_stmt (gsi, follow_single_use_edges);
10082 return true;
10085 /* Simplify a conversion from integral SSA name to float in STMT. */
/* When the operand's value range is known to fit a signed type (or a
   narrower mode), insert an integral conversion first so that the
   int-to-float conversion can use a supported (signed / narrower)
   instruction.  Returns true if STMT was changed.  */
10087 static bool
10088 simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
10089 gimple *stmt)
10091 tree rhs1 = gimple_assign_rhs1 (stmt);
10092 value_range *vr = get_value_range (rhs1);
10093 machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
10094 machine_mode mode;
10095 tree tem;
10096 gassign *conv;
10098 /* We can only handle constant ranges. */
10099 if (vr->type != VR_RANGE
10100 || TREE_CODE (vr->min) != INTEGER_CST
10101 || TREE_CODE (vr->max) != INTEGER_CST)
10102 return false;
10104 /* First check if we can use a signed type in place of an unsigned. */
10105 if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
10106 && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
10107 != CODE_FOR_nothing)
10108 && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
10109 mode = TYPE_MODE (TREE_TYPE (rhs1));
10110 /* If we can do the conversion in the current input mode do nothing. */
10111 else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
10112 TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
10113 return false;
10114 /* Otherwise search for a mode we can use, starting from the narrowest
10115 integer mode available. */
10116 else
10118 mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
10121 /* If we cannot do a signed conversion to float from mode
10122 or if the value-range does not fit in the signed type
10123 try with a wider mode. */
10124 if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
10125 && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
10126 break;
10128 mode = GET_MODE_WIDER_MODE (mode);
10129 /* But do not widen the input. Instead leave that to the
10130 optabs expansion code. */
10131 if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
10132 return false;
10134 while (mode != VOIDmode);
10135 if (mode == VOIDmode)
10136 return false;
10139 /* It works, insert a truncation or sign-change before the
10140 float conversion. */
10141 tem = make_ssa_name (build_nonstandard_integer_type
10142 (GET_MODE_PRECISION (mode), 0))
10143 conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
10144 gsi_insert_before (gsi, conv, GSI_SAME_STMT);
10145 gimple_assign_set_rhs1 (stmt, tem);
10146 fold_stmt (gsi, follow_single_use_edges);
10148 return true;
10151 /* Simplify an internal fn call using ranges if possible. */
/* Replace a UBSAN_CHECK_{ADD,SUB,MUL} or IFN_{ADD,SUB,MUL}_OVERFLOW call
   whose overflow status is statically known from the operand ranges with
   plain arithmetic; for the *_OVERFLOW forms the result is a COMPLEX_EXPR
   carrying the (known) overflow flag.  Returns true if STMT was replaced.  */
10153 static bool
10154 simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
10156 enum tree_code subcode;
10157 bool is_ubsan = false;
10158 bool ovf = false;
10159 switch (gimple_call_internal_fn (stmt))
10161 case IFN_UBSAN_CHECK_ADD:
10162 subcode = PLUS_EXPR;
10163 is_ubsan = true;
10164 break;
10165 case IFN_UBSAN_CHECK_SUB:
10166 subcode = MINUS_EXPR;
10167 is_ubsan = true;
10168 break;
10169 case IFN_UBSAN_CHECK_MUL:
10170 subcode = MULT_EXPR;
10171 is_ubsan = true;
10172 break;
10173 case IFN_ADD_OVERFLOW:
10174 subcode = PLUS_EXPR;
10175 break;
10176 case IFN_SUB_OVERFLOW:
10177 subcode = MINUS_EXPR;
10178 break;
10179 case IFN_MUL_OVERFLOW:
10180 subcode = MULT_EXPR;
10181 break;
10182 default:
10183 return false;
10186 tree op0 = gimple_call_arg (stmt, 0);
10187 tree op1 = gimple_call_arg (stmt, 1);
10188 tree type;
10189 if (is_ubsan)
10191 type = TREE_TYPE (op0);
10192 if (VECTOR_TYPE_P (type))
10193 return false;
10195 else if (gimple_call_lhs (stmt) == NULL_TREE)
10196 return false;
10197 else
/* For the *_OVERFLOW forms the LHS is a complex value; the scalar
   element type is what the arithmetic is done in.  */
10198 type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
/* Only simplify when the overflow status can be proven; for the ubsan
   checks a proven overflow must keep the check (it has to trap).  */
10199 if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
10200 || (is_ubsan && ovf))
10201 return false;
10203 gimple *g;
10204 location_t loc = gimple_location (stmt);
10205 if (is_ubsan)
10206 g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
10207 else
10209 int prec = TYPE_PRECISION (type);
10210 tree utype = type;
/* Do the arithmetic in an unsigned type of the same precision when the
   operation may overflow or the operand types do not match TYPE, and
   convert back to TYPE afterwards.  */
10211 if (ovf
10212 || !useless_type_conversion_p (type, TREE_TYPE (op0))
10213 || !useless_type_conversion_p (type, TREE_TYPE (op1)))
10214 utype = build_nonstandard_integer_type (prec, 1);
10215 if (TREE_CODE (op0) == INTEGER_CST)
10216 op0 = fold_convert (utype, op0);
10217 else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
10219 g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
10220 gimple_set_location (g, loc);
10221 gsi_insert_before (gsi, g, GSI_SAME_STMT);
10222 op0 = gimple_assign_lhs (g);
10224 if (TREE_CODE (op1) == INTEGER_CST)
10225 op1 = fold_convert (utype, op1);
10226 else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
10228 g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
10229 gimple_set_location (g, loc);
10230 gsi_insert_before (gsi, g, GSI_SAME_STMT);
10231 op1 = gimple_assign_lhs (g);
10233 g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
10234 gimple_set_location (g, loc);
10235 gsi_insert_before (gsi, g, GSI_SAME_STMT);
10236 if (utype != type)
10238 g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
10239 gimple_assign_lhs (g));
10240 gimple_set_location (g, loc);
10241 gsi_insert_before (gsi, g, GSI_SAME_STMT);
10243 g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
10244 gimple_assign_lhs (g),
10245 build_int_cst (type, ovf));
10247 gimple_set_location (g, loc);
10248 gsi_replace (gsi, g, false);
10249 return true;
10252 /* Return true if VAR is a two-valued variable. Set a and b with the
10253 two-values when it is true. Return false otherwise. */
10255 static bool
10256 two_valued_val_range_p (tree var, tree *a, tree *b)
10258 value_range *vr = get_value_range (var);
10259 if ((vr->type != VR_RANGE
10260 && vr->type != VR_ANTI_RANGE)
10261 || TREE_CODE (vr->min) != INTEGER_CST
10262 || TREE_CODE (vr->max) != INTEGER_CST)
10263 return false;
10265 if (vr->type == VR_RANGE
10266 && wi::sub (vr->max, vr->min) == 1)
10268 *a = vr->min;
10269 *b = vr->max;
10270 return true;
10273 /* ~[TYPE_MIN + 1, TYPE_MAX - 1] */
10274 if (vr->type == VR_ANTI_RANGE
10275 && wi::sub (vr->min, vrp_val_min (TREE_TYPE (var))) == 1
10276 && wi::sub (vrp_val_max (TREE_TYPE (var)), vr->max) == 1)
10278 *a = vrp_val_min (TREE_TYPE (var));
10279 *b = vrp_val_max (TREE_TYPE (var));
10280 return true;
10283 return false;
10286 /* Simplify STMT using ranges if possible. */
/* Central dispatcher: try each range-based simplification that matches
   STMT's shape (assign by rhs code, cond, switch, internal call) and
   return true if any of them changed the statement.  */
10288 static bool
10289 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
10291 gimple *stmt = gsi_stmt (*gsi);
10292 if (is_gimple_assign (stmt))
10294 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
10295 tree rhs1 = gimple_assign_rhs1 (stmt);
10296 tree rhs2 = gimple_assign_rhs2 (stmt);
10297 tree lhs = gimple_assign_lhs (stmt);
10298 tree val1 = NULL_TREE, val2 = NULL_TREE;
10299 use_operand_p use_p;
10300 gimple *use_stmt;
10302 /* Convert:
10303 LHS = CST BINOP VAR
10304 Where VAR is two-valued and LHS is used in GIMPLE_COND only
10306 LHS = VAR == VAL1 ? (CST BINOP VAL1) : (CST BINOP VAL2)
10308 Also handles:
10309 LHS = VAR BINOP CST
10310 Where VAR is two-valued and LHS is used in GIMPLE_COND only
10312 LHS = VAR == VAL1 ? (VAL1 BINOP CST) : (VAL2 BINOP CST) */
10314 if (TREE_CODE_CLASS (rhs_code) == tcc_binary
10315 && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
10316 && ((TREE_CODE (rhs1) == INTEGER_CST
10317 && TREE_CODE (rhs2) == SSA_NAME)
10318 || (TREE_CODE (rhs2) == INTEGER_CST
10319 && TREE_CODE (rhs1) == SSA_NAME))
10320 && single_imm_use (lhs, &use_p, &use_stmt)
10321 && gimple_code (use_stmt) == GIMPLE_COND)
10324 tree new_rhs1 = NULL_TREE;
10325 tree new_rhs2 = NULL_TREE;
10326 tree cmp_var = NULL_TREE;
10328 if (TREE_CODE (rhs2) == SSA_NAME
10329 && two_valued_val_range_p (rhs2, &val1, &val2))
10331 /* Optimize RHS1 OP [VAL1, VAL2]. */
10332 new_rhs1 = int_const_binop (rhs_code, rhs1, val1);
10333 new_rhs2 = int_const_binop (rhs_code, rhs1, val2);
10334 cmp_var = rhs2;
10336 else if (TREE_CODE (rhs1) == SSA_NAME
10337 && two_valued_val_range_p (rhs1, &val1, &val2))
10339 /* Optimize [VAL1, VAL2] OP RHS2. */
10340 new_rhs1 = int_const_binop (rhs_code, val1, rhs2);
10341 new_rhs2 = int_const_binop (rhs_code, val2, rhs2);
10342 cmp_var = rhs1;
10345 /* If we could not find two-vals or the optimization is invalid as
10346 in divide by zero, new_rhs1 / new_rhs2 will be NULL_TREE. */
10347 if (new_rhs1 && new_rhs2)
10349 tree cond = build2 (EQ_EXPR, boolean_type_node, cmp_var, val1);
10350 gimple_assign_set_rhs_with_ops (gsi,
10351 COND_EXPR, cond,
10352 new_rhs1,
10353 new_rhs2);
10354 update_stmt (gsi_stmt (*gsi));
10355 fold_stmt (gsi, follow_single_use_edges);
10356 return true;
10360 switch (rhs_code)
10362 case EQ_EXPR:
10363 case NE_EXPR:
10364 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
10365 if the RHS is zero or one, and the LHS are known to be boolean
10366 values. */
10367 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10368 return simplify_truth_ops_using_ranges (gsi, stmt);
10369 break;
10371 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
10372 and BIT_AND_EXPR respectively if the first operand is greater
10373 than zero and the second operand is an exact power of two.
10374 Also optimize TRUNC_MOD_EXPR away if the second operand is
10375 constant and the first operand already has the right value
10376 range. */
10377 case TRUNC_DIV_EXPR:
10378 case TRUNC_MOD_EXPR:
10379 if ((TREE_CODE (rhs1) == SSA_NAME
10380 || TREE_CODE (rhs1) == INTEGER_CST)
10381 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10382 return simplify_div_or_mod_using_ranges (gsi, stmt);
10383 break;
10385 /* Transform ABS (X) into X or -X as appropriate. */
10386 case ABS_EXPR:
10387 if (TREE_CODE (rhs1) == SSA_NAME
10388 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10389 return simplify_abs_using_ranges (gsi, stmt);
10390 break;
10392 case BIT_AND_EXPR:
10393 case BIT_IOR_EXPR:
10394 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
10395 if all the bits being cleared are already cleared or
10396 all the bits being set are already set. */
10397 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10398 return simplify_bit_ops_using_ranges (gsi, stmt);
10399 break;
10401 CASE_CONVERT:
10402 if (TREE_CODE (rhs1) == SSA_NAME
10403 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10404 return simplify_conversion_using_ranges (gsi, stmt);
10405 break;
10407 case FLOAT_EXPR:
10408 if (TREE_CODE (rhs1) == SSA_NAME
10409 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10410 return simplify_float_conversion_using_ranges (gsi, stmt);
10411 break;
10413 case MIN_EXPR:
10414 case MAX_EXPR:
10415 return simplify_min_or_max_using_ranges (gsi, stmt);
10417 default:
10418 break;
10421 else if (gimple_code (stmt) == GIMPLE_COND)
10422 return simplify_cond_using_ranges_1 (as_a <gcond *> (stmt));
10423 else if (gimple_code (stmt) == GIMPLE_SWITCH)
10424 return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
10425 else if (is_gimple_call (stmt)
10426 && gimple_call_internal_p (stmt))
10427 return simplify_internal_call_using_ranges (gsi, stmt);
10429 return false;
10432 /* If the statement pointed by SI has a predicate whose value can be
10433 computed using the value range information computed by VRP, compute
10434 its value and return true. Otherwise, return false. */
10436 static bool
10437 fold_predicate_in (gimple_stmt_iterator *si)
10439 bool assignment_p = false;
10440 tree val;
10441 gimple *stmt = gsi_stmt (*si);
/* Handle both comparison assignments (LHS = a < b) and GIMPLE_COND
   statements; anything else cannot carry a foldable predicate.  */
10443 if (is_gimple_assign (stmt)
10444 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
10446 assignment_p = true;
10447 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
10448 gimple_assign_rhs1 (stmt),
10449 gimple_assign_rhs2 (stmt),
10450 stmt);
10452 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
10453 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
10454 gimple_cond_lhs (cond_stmt),
10455 gimple_cond_rhs (cond_stmt),
10456 stmt);
10457 else
10458 return false;
10460 if (val)
/* For assignments convert the boolean result to the LHS type.  */
10462 if (assignment_p)
10463 val = fold_convert (gimple_expr_type (stmt), val);
10465 if (dump_file)
10467 fprintf (dump_file, "Folding predicate ");
10468 print_gimple_expr (dump_file, stmt, 0);
10469 fprintf (dump_file, " to ");
10470 print_generic_expr (dump_file, val);
10471 fprintf (dump_file, "\n");
10474 if (is_gimple_assign (stmt))
10475 gimple_assign_set_rhs_from_tree (si, val);
10476 else
10478 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
10479 gcond *cond_stmt = as_a <gcond *> (stmt);
10480 if (integer_zerop (val))
10481 gimple_cond_make_false (cond_stmt);
10482 else if (integer_onep (val))
10483 gimple_cond_make_true (cond_stmt);
10484 else
10485 gcc_unreachable ();
10488 return true;
10491 return false;
10494 /* Callback for substitute_and_fold folding the stmt at *SI. */
10496 static bool
10497 vrp_fold_stmt (gimple_stmt_iterator *si)
10499 if (fold_predicate_in (si))
10500 return true;
10502 return simplify_stmt_using_ranges (si);
10505 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
10506 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
10507 BB. If no such ASSERT_EXPR is found, return OP. */
10509 static tree
10510 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
10512 imm_use_iterator imm_iter;
10513 gimple *use_stmt;
10514 use_operand_p use_p;
10516 if (TREE_CODE (op) == SSA_NAME)
10518 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
10520 use_stmt = USE_STMT (use_p);
10521 if (use_stmt != stmt
10522 && gimple_assign_single_p (use_stmt)
10523 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
10524 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
10525 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
10526 return gimple_assign_lhs (use_stmt);
10529 return op;
10532 /* A trivial wrapper so that we can present the generic jump threading
10533 code with a simple API for simplifying statements. STMT is the
10534 statement we want to simplify, WITHIN_STMT provides the location
10535 for any overflow warnings. */
10537 static tree
10538 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
10539 class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED,
10540 basic_block bb)
10542 /* First see if the conditional is in the hash table. */
10543 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
10544 if (cached_lhs && is_gimple_min_invariant (cached_lhs))
10545 return cached_lhs;
10547 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
10549 tree op0 = gimple_cond_lhs (cond_stmt);
10550 op0 = lhs_of_dominating_assert (op0, bb, stmt);
10552 tree op1 = gimple_cond_rhs (cond_stmt);
10553 op1 = lhs_of_dominating_assert (op1, bb, stmt);
10555 return vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
10556 op0, op1, within_stmt);
10559 /* We simplify a switch statement by trying to determine which case label
10560 will be taken. If we are successful then we return the corresponding
10561 CASE_LABEL_EXPR. */
10562 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
10564 tree op = gimple_switch_index (switch_stmt);
10565 if (TREE_CODE (op) != SSA_NAME)
10566 return NULL_TREE;
10568 op = lhs_of_dominating_assert (op, bb, stmt);
10570 value_range *vr = get_value_range (op);
10571 if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
10572 || symbolic_range_p (vr))
10573 return NULL_TREE;
10575 if (vr->type == VR_RANGE)
10577 size_t i, j;
10578 /* Get the range of labels that contain a part of the operand's
10579 value range. */
10580 find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);
10582 /* Is there only one such label? */
10583 if (i == j)
10585 tree label = gimple_switch_label (switch_stmt, i);
10587 /* The i'th label will be taken only if the value range of the
10588 operand is entirely within the bounds of this label. */
10589 if (CASE_HIGH (label) != NULL_TREE
10590 ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
10591 && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
10592 : (tree_int_cst_equal (CASE_LOW (label), vr->min)
10593 && tree_int_cst_equal (vr->min, vr->max)))
10594 return label;
10597 /* If there are no such labels then the default label will be
10598 taken. */
10599 if (i > j)
10600 return gimple_switch_label (switch_stmt, 0);
10603 if (vr->type == VR_ANTI_RANGE)
10605 unsigned n = gimple_switch_num_labels (switch_stmt);
10606 tree min_label = gimple_switch_label (switch_stmt, 1);
10607 tree max_label = gimple_switch_label (switch_stmt, n - 1);
10609 /* The default label will be taken only if the anti-range of the
10610 operand is entirely outside the bounds of all the (non-default)
10611 case labels. */
10612 if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
10613 && (CASE_HIGH (max_label) != NULL_TREE
10614 ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
10615 : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
10616 return gimple_switch_label (switch_stmt, 0);
10619 return NULL_TREE;
10622 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
10624 value_range new_vr = VR_INITIALIZER;
10625 tree lhs = gimple_assign_lhs (assign_stmt);
10627 if (TREE_CODE (lhs) == SSA_NAME
10628 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
10629 || POINTER_TYPE_P (TREE_TYPE (lhs))))
10631 extract_range_from_assignment (&new_vr, assign_stmt);
10632 if (range_int_cst_singleton_p (&new_vr))
10633 return new_vr.min;
10637 return NULL_TREE;
/* Dominator walk used for jump threading: before_dom_children records
   equivalences implied by ASSERT_EXPRs; after_dom_children invokes the
   generic threader and unwinds the recorded state.  */
10640 class vrp_dom_walker : public dom_walker
10642 public:
10643 vrp_dom_walker (cdi_direction direction,
10644 class const_and_copies *const_and_copies,
10645 class avail_exprs_stack *avail_exprs_stack)
10646 : dom_walker (direction, true),
10647 m_const_and_copies (const_and_copies),
10648 m_avail_exprs_stack (avail_exprs_stack),
10649 m_dummy_cond (NULL) {}
10651 virtual edge before_dom_children (basic_block);
10652 virtual void after_dom_children (basic_block);
10654 private:
10655 class const_and_copies *m_const_and_copies;
10656 class avail_exprs_stack *m_avail_exprs_stack;
/* Reusable scratch condition, built lazily on first use in
   after_dom_children.  */
10658 gcond *m_dummy_cond;
10661 /* Called before processing dominator children of BB. We want to look
10662 at ASSERT_EXPRs and record information from them in the appropriate
10663 tables.
10665 We could look at other statements here. It's not seen as likely
10666 to significantly increase the jump threads we discover. */
10668 edge
10669 vrp_dom_walker::before_dom_children (basic_block bb)
10671 gimple_stmt_iterator gsi;
/* Markers let after_dom_children unwind exactly the state recorded
   for this block.  */
10673 m_avail_exprs_stack->push_marker ();
10674 m_const_and_copies->push_marker ();
10675 for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
10677 gimple *stmt = gsi_stmt (gsi);
10678 if (gimple_assign_single_p (stmt)
10679 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
10681 tree rhs1 = gimple_assign_rhs1 (stmt);
/* Record the assert's condition, its inverse, and derived conditions
   in the expression table, plus LHS == first operand as a copy.  */
10682 tree cond = TREE_OPERAND (rhs1, 1);
10683 tree inverted = invert_truthvalue (cond);
10684 vec<cond_equivalence> p;
10685 p.create (3);
10686 record_conditions (&p, cond, inverted);
10687 for (unsigned int i = 0; i < p.length (); i++)
10688 m_avail_exprs_stack->record_cond (&p[i]);
10690 tree lhs = gimple_assign_lhs (stmt);
10691 m_const_and_copies->record_const_or_copy (lhs,
10692 TREE_OPERAND (rhs1, 0));
10693 p.release ();
10694 continue;
/* Only the leading run of ASSERT_EXPR assignments is examined.  */
10696 break;
10698 return NULL;
10701 /* Called after processing dominator children of BB. This is where we
10702 actually call into the threader. */
10703 void
10704 vrp_dom_walker::after_dom_children (basic_block bb)
/* Build the reusable dummy condition on first use.  */
10706 if (!m_dummy_cond)
10707 m_dummy_cond = gimple_build_cond (NE_EXPR,
10708 integer_zero_node, integer_zero_node,
10709 NULL, NULL);
10711 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
10712 m_avail_exprs_stack,
10713 simplify_stmt_for_jump_threading);
/* Unwind the equivalences recorded for BB in before_dom_children.  */
10715 m_avail_exprs_stack->pop_to_marker ();
10716 m_const_and_copies->pop_to_marker ();
10719 /* Blocks which have more than one predecessor and more than
10720 one successor present jump threading opportunities, i.e.,
10721 when the block is reached from a specific predecessor, we
10722 may be able to determine which of the outgoing edges will
10723 be traversed. When this optimization applies, we are able
10724 to avoid conditionals at runtime and we may expose secondary
10725 optimization opportunities.
10727 This routine is effectively a driver for the generic jump
10728 threading code. It basically just presents the generic code
10729 with edges that may be suitable for jump threading.
10731 Unlike DOM, we do not iterate VRP if jump threading was successful.
10732 While iterating may expose new opportunities for VRP, it is expected
10733 those opportunities would be very limited and the compile time cost
10734 to expose those opportunities would be significant.
10736 As jump threading opportunities are discovered, they are registered
10737 for later realization. */
10739 static void
10740 identify_jump_threads (void)
10742 int i;
10743 edge e;
10745 /* Ugh. When substituting values earlier in this pass we can
10746 wipe the dominance information. So rebuild the dominator
10747 information as we need it within the jump threading code. */
10748 calculate_dominance_info (CDI_DOMINATORS);
10750 /* We do not allow VRP information to be used for jump threading
10751 across a back edge in the CFG. Otherwise it becomes too
10752 difficult to avoid eliminating loop exit tests. Of course
10753 EDGE_DFS_BACK is not accurate at this time so we have to
10754 recompute it. */
10755 mark_dfs_back_edges ();
10757 /* Do not thread across edges we are about to remove. Just marking
10758 them as EDGE_IGNORE will do. */
10759 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10760 e->flags |= EDGE_IGNORE;
10762 /* Allocate our unwinder stack to unwind any temporary equivalences
10763 that might be recorded. */
10764 const_and_copies *equiv_stack = new const_and_copies ();
10766 hash_table<expr_elt_hasher> *avail_exprs
10767 = new hash_table<expr_elt_hasher> (1024);
10768 avail_exprs_stack *avail_exprs_stack
10769 = new class avail_exprs_stack (avail_exprs);
/* Walk the whole dominator tree starting at the CFG entry block.  */
10771 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
10772 walker.walk (cfun->cfg->x_entry_block_ptr);
10774 /* Clear EDGE_IGNORE. */
10775 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10776 e->flags &= ~EDGE_IGNORE;
10778 /* We do not actually update the CFG or SSA graphs at this point as
10779 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
10780 handle ASSERT_EXPRs gracefully. */
10781 delete equiv_stack;
10782 delete avail_exprs;
10783 delete avail_exprs_stack;
10786 /* Free VRP lattice. */
/* Release all memory held by the VRP lattice (value ranges, PHI edge
   counters, equivalence bitmaps) and reset the globals to NULL.  */
10788 static void
10789 vrp_free_lattice ()
10791 /* Free allocated memory. */
10792 free (vr_value);
10793 free (vr_phi_edge_counts);
10794 bitmap_obstack_release (&vrp_equiv_obstack);
10795 vrp_value_range_pool.release ();
10797 /* So that we can distinguish between VRP data being available
10798 and not available. */
10799 vr_value = NULL;
10800 vr_phi_edge_counts = NULL;
10803 /* Traverse all the blocks folding conditionals with known ranges. */
/* Finalize the VRP pass: export the computed ranges into the global SSA
   range/non-null info, substitute-and-fold using the lattice, and
   optionally emit array-bounds warnings.  */
10805 static void
10806 vrp_finalize (bool warn_array_bounds_p)
10808 size_t i;
10810 values_propagated = true;
10812 if (dump_file)
10814 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
10815 dump_all_value_ranges (dump_file);
10816 fprintf (dump_file, "\n");
10819 /* Set value range to non pointer SSA_NAMEs. */
10820 for (i = 0; i < num_vr_values; i++)
10821 if (vr_value[i])
10823 tree name = ssa_name (i);
/* Skip names with no usable constant range.  */
10825 if (!name
10826 || (vr_value[i]->type == VR_VARYING)
10827 || (vr_value[i]->type == VR_UNDEFINED)
10828 || (TREE_CODE (vr_value[i]->min) != INTEGER_CST)
10829 || (TREE_CODE (vr_value[i]->max) != INTEGER_CST))
10830 continue;
/* For pointers record non-nullness when the range excludes zero;
   for integers record the full range.  */
10832 if (POINTER_TYPE_P (TREE_TYPE (name))
10833 && ((vr_value[i]->type == VR_RANGE
10834 && range_includes_zero_p (vr_value[i]->min,
10835 vr_value[i]->max) == 0)
10836 || (vr_value[i]->type == VR_ANTI_RANGE
10837 && range_includes_zero_p (vr_value[i]->min,
10838 vr_value[i]->max) == 1)))
10839 set_ptr_nonnull (name);
10840 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
10841 set_range_info (name, vr_value[i]->type, vr_value[i]->min,
10842 vr_value[i]->max);
/* Fold statements throughout the function using the lattice.  */
10845 substitute_and_fold (op_with_constant_singleton_value_range, vrp_fold_stmt);
10847 if (warn_array_bounds && warn_array_bounds_p)
10848 check_all_array_refs ();
10851 /* evrp_dom_walker visits the basic blocks in the dominance order and set
10852 the Value Ranges (VR) for SSA_NAMEs in the scope. Use this VR to
10853 discover more VRs. */
10855 class evrp_dom_walker : public dom_walker
10857 public:
10858 evrp_dom_walker ()
10859 : dom_walker (CDI_DOMINATORS), stack (10)
10861 need_eh_cleanup = BITMAP_ALLOC (NULL);
10863 ~evrp_dom_walker ()
10865 BITMAP_FREE (need_eh_cleanup);
10867 virtual edge before_dom_children (basic_block);
10868 virtual void after_dom_children (basic_block);
10869 void push_value_range (tree var, value_range *vr);
10870 value_range *pop_value_range (tree var);
10871 value_range *try_find_new_range (tree, tree op, tree_code code, tree limit);
10873 /* Cond_stack holds the old VR. */
/* Pairs of (SSA name, previous range) so scoped ranges can be unwound
   when leaving a dominator subtree; a NULL_TREE entry marks a block
   boundary (NOTE(review): marker convention inferred from pushes of
   (NULL_TREE, NULL) elsewhere — confirm against pop_value_range).  */
10874 auto_vec<std::pair <tree, value_range*> > stack;
/* Blocks that may need EH cleanup after statements are changed.  */
10875 bitmap need_eh_cleanup;
/* Statements queued for fixup/removal; presumably consumed after the
   walk completes (consumers are outside this excerpt).  */
10876 auto_vec<gimple *> stmts_to_fixup;
10877 auto_vec<gimple *> stmts_to_remove;
10880 /* Find new range for NAME such that (OP CODE LIMIT) is true. */
10882 value_range *
10883 evrp_dom_walker::try_find_new_range (tree name,
10884 tree op, tree_code code, tree limit)
10886 value_range vr = VR_INITIALIZER;
10887 value_range *old_vr = get_value_range (name);
10889 /* Discover VR when condition is true. */
10890 extract_range_for_var_from_comparison_expr (name, code, op,
10891 limit, &vr);
10892 /* If we found any usable VR, set the VR to ssa_name and create a
10893 PUSH old value in the stack with the old VR. */
10894 if (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE)
10896 if (old_vr->type == vr.type
10897 && vrp_operand_equal_p (old_vr->min, vr.min)
10898 && vrp_operand_equal_p (old_vr->max, vr.max))
10899 return NULL;
10900 value_range *new_vr = vrp_value_range_pool.allocate ();
10901 *new_vr = vr;
10902 return new_vr;
10904 return NULL;
/* See if there is any new scope is entered with new VR and set that VR to
   ssa_name before visiting the statements in the scope.

   Visits BB in dominance order: records ranges implied by the single
   controlling predicate (if any), evaluates PHIs and stmts against the
   lattice, folds what it can, and queues dead stmts / noreturn fixups
   on the walker's vectors.  Returns the known-taken outgoing edge if a
   condition in BB folded, else NULL.  */

edge
evrp_dom_walker::before_dom_children (basic_block bb)
{
  tree op0 = NULL_TREE;
  edge_iterator ei;
  edge e;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Visiting BB%d\n", bb->index);

  /* Frame marker: everything pushed above this entry belongs to BB and
     is popped in after_dom_children.  */
  stack.safe_push (std::make_pair (NULL_TREE, (value_range *)NULL));

  /* Find the single non-backedge predecessor, if there is one; its
     terminating condition controls entry to BB.  */
  edge pred_e = NULL;
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* Ignore simple backedges from this to allow recording conditions
	 in loop headers.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
	continue;
      if (! pred_e)
	pred_e = e;
      else
	{
	  pred_e = NULL;
	  break;
	}
    }
  if (pred_e)
    {
      gimple *stmt = last_stmt (pred_e->src);
      if (stmt
	  && gimple_code (stmt) == GIMPLE_COND
	  && (op0 = gimple_cond_lhs (stmt))
	  && TREE_CODE (op0) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))
	      || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Visiting controlling predicate ");
	      print_gimple_stmt (dump_file, stmt, 0);
	    }
	  /* Entering a new scope.  Try to see if we can find a VR
	     here.  */
	  tree op1 = gimple_cond_rhs (stmt);
	  if (TREE_OVERFLOW_P (op1))
	    op1 = drop_tree_overflow (op1);
	  tree_code code = gimple_cond_code (stmt);

	  auto_vec<assert_info, 8> asserts;
	  register_edge_assert_for (op0, pred_e, code, op0, op1, asserts);
	  if (TREE_CODE (op1) == SSA_NAME)
	    register_edge_assert_for (op1, pred_e, code, op0, op1, asserts);

	  auto_vec<std::pair<tree, value_range *>, 8> vrs;
	  for (unsigned i = 0; i < asserts.length (); ++i)
	    {
	      value_range *vr = try_find_new_range (asserts[i].name,
						    asserts[i].expr,
						    asserts[i].comp_code,
						    asserts[i].val);
	      if (vr)
		vrs.safe_push (std::make_pair (asserts[i].name, vr));
	    }
	  /* Push updated ranges only after finding all of them to avoid
	     ordering issues that can lead to worse ranges.  */
	  for (unsigned i = 0; i < vrs.length (); ++i)
	    push_value_range (vrs[i].first, vrs[i].second);
	}
    }

  /* Visit PHI stmts and discover any new VRs possible.  */
  bool has_unvisited_preds = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_EXECUTABLE
	&& !(e->src->flags & BB_VISITED))
      {
	has_unvisited_preds = true;
	break;
      }

  for (gphi_iterator gpi = gsi_start_phis (bb);
       !gsi_end_p (gpi); gsi_next (&gpi))
    {
      gphi *phi = gpi.phi ();
      tree lhs = PHI_RESULT (phi);
      if (virtual_operand_p (lhs))
	continue;
      value_range vr_result = VR_INITIALIZER;
      bool interesting = stmt_interesting_for_vrp (phi);
      if (interesting && dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Visiting PHI node ");
	  print_gimple_stmt (dump_file, phi, 0);
	}
      if (!has_unvisited_preds
	  && interesting)
	extract_range_from_phi_node (phi, &vr_result);
      else
	{
	  set_value_range_to_varying (&vr_result);
	  /* When we have an unvisited executable predecessor we can't
	     use PHI arg ranges which may be still UNDEFINED but have
	     to use VARYING for them.  But we can still resort to
	     SCEV for loop header PHIs.  */
	  struct loop *l;
	  if (interesting
	      && (l = loop_containing_stmt (phi))
	      && l->header == gimple_bb (phi))
	    adjust_range_with_scev (&vr_result, l, phi, lhs);
	}
      update_value_range (lhs, &vr_result);

      /* Mark PHIs whose lhs we fully propagate for removal.  */
      tree val = op_with_constant_singleton_value_range (lhs);
      if (val && may_propagate_copy (lhs, val))
	{
	  stmts_to_remove.safe_push (phi);
	  continue;
	}

      /* Set the SSA with the value range.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
	{
	  if ((vr_result.type == VR_RANGE
	       || vr_result.type == VR_ANTI_RANGE)
	      && (TREE_CODE (vr_result.min) == INTEGER_CST)
	      && (TREE_CODE (vr_result.max) == INTEGER_CST))
	    set_range_info (lhs,
			    vr_result.type, vr_result.min, vr_result.max);
	}
      else if (POINTER_TYPE_P (TREE_TYPE (lhs))
	       && ((vr_result.type == VR_RANGE
		    && range_includes_zero_p (vr_result.min,
					      vr_result.max) == 0)
		   || (vr_result.type == VR_ANTI_RANGE
		       && range_includes_zero_p (vr_result.min,
						 vr_result.max) == 1)))
	set_ptr_nonnull (lhs);
    }

  edge taken_edge = NULL;

  /* Visit all other stmts and discover any new VRs possible.  */
  for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
       !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      tree output = NULL_TREE;
      gimple *old_stmt = stmt;
      bool was_noreturn = (is_gimple_call (stmt)
			   && gimple_call_noreturn_p (stmt));

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Visiting stmt ");
	  print_gimple_stmt (dump_file, stmt, 0);
	}

      if (gcond *cond = dyn_cast <gcond *> (stmt))
	{
	  /* A decided conditional is rewritten to a constant truth
	     value here; the CFG edge removal happens later via the
	     pass's TODO_cleanup_cfg.  */
	  vrp_visit_cond_stmt (cond, &taken_edge);
	  if (taken_edge)
	    {
	      if (taken_edge->flags & EDGE_TRUE_VALUE)
		gimple_cond_make_true (cond);
	      else if (taken_edge->flags & EDGE_FALSE_VALUE)
		gimple_cond_make_false (cond);
	      else
		gcc_unreachable ();
	      update_stmt (stmt);
	    }
	}
      else if (stmt_interesting_for_vrp (stmt))
	{
	  edge taken_edge;
	  value_range vr = VR_INITIALIZER;
	  extract_range_from_stmt (stmt, &taken_edge, &output, &vr);
	  if (output
	      && (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE))
	    {
	      update_value_range (output, &vr);
	      vr = *get_value_range (output);

	      /* Mark stmts whose output we fully propagate for removal.  */
	      tree val;
	      if ((val = op_with_constant_singleton_value_range (output))
		  && may_propagate_copy (output, val)
		  && !stmt_could_throw_p (stmt)
		  && !gimple_has_side_effects (stmt))
		{
		  stmts_to_remove.safe_push (stmt);
		  continue;
		}

	      /* Set the SSA with the value range.  */
	      if (INTEGRAL_TYPE_P (TREE_TYPE (output)))
		{
		  if ((vr.type == VR_RANGE
		       || vr.type == VR_ANTI_RANGE)
		      && (TREE_CODE (vr.min) == INTEGER_CST)
		      && (TREE_CODE (vr.max) == INTEGER_CST))
		    set_range_info (output, vr.type, vr.min, vr.max);
		}
	      else if (POINTER_TYPE_P (TREE_TYPE (output))
		       && ((vr.type == VR_RANGE
			    && range_includes_zero_p (vr.min,
						      vr.max) == 0)
			   || (vr.type == VR_ANTI_RANGE
			       && range_includes_zero_p (vr.min,
							 vr.max) == 1)))
		set_ptr_nonnull (output);
	    }
	  else
	    set_defs_to_varying (stmt);
	}
      else
	set_defs_to_varying (stmt);

      /* See if we can derive a range for any of STMT's operands.  */
      tree op;
      ssa_op_iter i;
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	{
	  tree value;
	  enum tree_code comp_code;

	  /* If OP is used in such a way that we can infer a value
	     range for it, and we don't find a previous assertion for
	     it, create a new assertion location node for OP.  */
	  if (infer_value_range (stmt, op, &comp_code, &value))
	    {
	      /* If we are able to infer a nonzero value range for OP,
		 then walk backwards through the use-def chain to see if OP
		 was set via a typecast.
		 If so, then we can also infer a nonzero value range
		 for the operand of the NOP_EXPR.  */
	      if (comp_code == NE_EXPR && integer_zerop (value))
		{
		  tree t = op;
		  gimple *def_stmt = SSA_NAME_DEF_STMT (t);
		  while (is_gimple_assign (def_stmt)
			 && CONVERT_EXPR_CODE_P
			      (gimple_assign_rhs_code (def_stmt))
			 && TREE_CODE
			      (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
			 && POINTER_TYPE_P
			      (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
		    {
		      t = gimple_assign_rhs1 (def_stmt);
		      def_stmt = SSA_NAME_DEF_STMT (t);

		      /* Add VR when (T COMP_CODE value) condition is
			 true.  */
		      value_range *op_range
			= try_find_new_range (t, t, comp_code, value);
		      if (op_range)
			push_value_range (t, op_range);
		    }
		}
	      /* Add VR when (OP COMP_CODE value) condition is true.  */
	      value_range *op_range = try_find_new_range (op, op,
							  comp_code, value);
	      if (op_range)
		push_value_range (op, op_range);
	    }
	}

      /* Try folding stmts with the VR discovered.  */
      bool did_replace
	= replace_uses_in (stmt, op_with_constant_singleton_value_range);
      if (fold_stmt (&gsi, follow_single_use_edges)
	  || did_replace)
	{
	  stmt = gsi_stmt (gsi);
	  update_stmt (stmt);
	  did_replace = true;
	}

      if (did_replace)
	{
	  /* If we cleaned up EH information from the statement,
	     remove EH edges.  */
	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
	    bitmap_set_bit (need_eh_cleanup, bb->index);

	  /* If we turned a not noreturn call into a noreturn one
	     schedule it for fixup.  */
	  if (!was_noreturn
	      && is_gimple_call (stmt)
	      && gimple_call_noreturn_p (stmt))
	    stmts_to_fixup.safe_push (stmt);

	  if (gimple_assign_single_p (stmt))
	    {
	      tree rhs = gimple_assign_rhs1 (stmt);
	      if (TREE_CODE (rhs) == ADDR_EXPR)
		recompute_tree_invariant_for_addr_expr (rhs);
	    }
	}
    }

  /* Visit BB successor PHI nodes and replace PHI args.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      for (gphi_iterator gpi = gsi_start_phis (e->dest);
	   !gsi_end_p (gpi); gsi_next (&gpi))
	{
	  gphi *phi = gpi.phi ();
	  use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
	  tree arg = USE_FROM_PTR (use_p);
	  if (TREE_CODE (arg) != SSA_NAME
	      || virtual_operand_p (arg))
	    continue;
	  tree val = op_with_constant_singleton_value_range (arg);
	  if (val && may_propagate_copy (arg, val))
	    propagate_value (use_p, val);
	}
    }

  bb->flags |= BB_VISITED;

  return taken_edge;
}
11235 /* Restore/pop VRs valid only for BB when we leave BB. */
11237 void
11238 evrp_dom_walker::after_dom_children (basic_block bb ATTRIBUTE_UNUSED)
11240 gcc_checking_assert (!stack.is_empty ());
11241 while (stack.last ().first != NULL_TREE)
11242 pop_value_range (stack.last ().first);
11243 stack.pop ();
11246 /* Push the Value Range of VAR to the stack and update it with new VR. */
11248 void
11249 evrp_dom_walker::push_value_range (tree var, value_range *vr)
11251 if (SSA_NAME_VERSION (var) >= num_vr_values)
11252 return;
11253 if (dump_file && (dump_flags & TDF_DETAILS))
11255 fprintf (dump_file, "pushing new range for ");
11256 print_generic_expr (dump_file, var);
11257 fprintf (dump_file, ": ");
11258 dump_value_range (dump_file, vr);
11259 fprintf (dump_file, "\n");
11261 stack.safe_push (std::make_pair (var, get_value_range (var)));
11262 vr_value[SSA_NAME_VERSION (var)] = vr;
11265 /* Pop the Value Range from the vrp_stack and update VAR with it. */
11267 value_range *
11268 evrp_dom_walker::pop_value_range (tree var)
11270 value_range *vr = stack.last ().second;
11271 gcc_checking_assert (var == stack.last ().first);
11272 if (dump_file && (dump_flags & TDF_DETAILS))
11274 fprintf (dump_file, "popping range for ");
11275 print_generic_expr (dump_file, var);
11276 fprintf (dump_file, ", restoring ");
11277 dump_value_range (dump_file, vr);
11278 fprintf (dump_file, "\n");
11280 vr_value[SSA_NAME_VERSION (var)] = vr;
11281 stack.pop ();
11282 return vr;
11286 /* Main entry point for the early vrp pass which is a simplified non-iterative
11287 version of vrp where basic blocks are visited in dominance order. Value
11288 ranges discovered in early vrp will also be used by ipa-vrp. */
11290 static unsigned int
11291 execute_early_vrp ()
11293 edge e;
11294 edge_iterator ei;
11295 basic_block bb;
11297 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
11298 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
11299 scev_initialize ();
11300 calculate_dominance_info (CDI_DOMINATORS);
11301 FOR_EACH_BB_FN (bb, cfun)
11303 bb->flags &= ~BB_VISITED;
11304 FOR_EACH_EDGE (e, ei, bb->preds)
11305 e->flags |= EDGE_EXECUTABLE;
11307 vrp_initialize_lattice ();
11309 /* Walk stmts in dominance order and propagate VRP. */
11310 evrp_dom_walker walker;
11311 walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
11313 if (dump_file)
11315 fprintf (dump_file, "\nValue ranges after Early VRP:\n\n");
11316 dump_all_value_ranges (dump_file);
11317 fprintf (dump_file, "\n");
11320 /* Remove stmts in reverse order to make debug stmt creation possible. */
11321 while (! walker.stmts_to_remove.is_empty ())
11323 gimple *stmt = walker.stmts_to_remove.pop ();
11324 if (dump_file && dump_flags & TDF_DETAILS)
11326 fprintf (dump_file, "Removing dead stmt ");
11327 print_gimple_stmt (dump_file, stmt, 0);
11328 fprintf (dump_file, "\n");
11330 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
11331 if (gimple_code (stmt) == GIMPLE_PHI)
11332 remove_phi_node (&gsi, true);
11333 else
11335 unlink_stmt_vdef (stmt);
11336 gsi_remove (&gsi, true);
11337 release_defs (stmt);
11341 if (!bitmap_empty_p (walker.need_eh_cleanup))
11342 gimple_purge_all_dead_eh_edges (walker.need_eh_cleanup);
11344 /* Fixup stmts that became noreturn calls. This may require splitting
11345 blocks and thus isn't possible during the dominator walk. Do this
11346 in reverse order so we don't inadvertedly remove a stmt we want to
11347 fixup by visiting a dominating now noreturn call first. */
11348 while (!walker.stmts_to_fixup.is_empty ())
11350 gimple *stmt = walker.stmts_to_fixup.pop ();
11351 fixup_noreturn_call (stmt);
11354 vrp_free_lattice ();
11355 scev_finalize ();
11356 loop_optimizer_finalize ();
11357 return 0;
/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA name
   have equivalent, though distinct ranges.  For instance,

     1	x_9 = p_3->a;
     2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
     3	if (p_4 == q_2)
     4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
     5	endif
     6	if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO, the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */

static unsigned int
execute_vrp (bool warn_array_bounds_p)
{
  int i;
  edge e;
  switch_update *su;

  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ???  This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  to_remove_edges.create (10);
  to_update_switch_stmts.create (5);
  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  /* Run the SSA propagator with VRP's transfer functions, then export
     ranges and fold with the results.  */
  vrp_initialize_lattice ();
  vrp_initialize ();
  ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
  vrp_finalize (warn_array_bounds_p);

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads ();

  /* A comparison of an SSA_NAME against a constant where the SSA_NAME
     was set by a type conversion can often be rewritten to use the
     RHS of the type conversion.

     However, doing so inhibits jump threading through the comparison.
     So that transformation is not performed until after jump threading
     is complete.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple *last = last_stmt (bb);
      if (last && gimple_code (last) == GIMPLE_COND)
	simplify_cond_using_ranges_2 (as_a <gcond *> (last));
    }

  vrp_free_lattice ();

  free_numbers_of_iterations_estimates (cfun);

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  /* We identified all the jump threading opportunities earlier, but could
     not transform the CFG at that time.  This routine transforms the
     CFG and arranges for the dominator tree to be rebuilt if necessary.

     Note the SSA graph update will occur during the normal TODO
     processing by the pass manager.  */
  thread_through_all_blocks (false);

  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    remove_edge (e);
  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
	 make sure to make it a real default label again.  This ensures
	 optimal expansion.  */
      label = gimple_switch_label (su->stmt, 0);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  if (to_remove_edges.length () > 0)
    {
      free_dominance_info (CDI_DOMINATORS);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  to_remove_edges.release ();
  to_update_switch_stmts.release ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}
namespace {

const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

/* The full (iterative) VRP pass; instantiated twice in the pipeline,
   distinguished by the -Warray-bounds pass parameter.  */

class pass_vrp : public gimple_opt_pass
{
public:
  pass_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_vrp (m_ctxt); }
  /* The single pass parameter enables array-bounds warnings.  */
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      warn_array_bounds_p = param;
    }
  virtual bool gate (function *) { return flag_tree_vrp != 0; }
  virtual unsigned int execute (function *)
    { return execute_vrp (warn_array_bounds_p); }

 private:
  /* Whether this instance should emit -Warray-bounds diagnostics.  */
  bool warn_array_bounds_p;
}; // class pass_vrp

} // anon namespace

gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt);
}
namespace {

const pass_data pass_data_early_vrp =
{
  GIMPLE_PASS, /* type */
  "evrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EARLY_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
};

/* The early (non-iterative, dominator-walk) VRP pass.  */

class pass_early_vrp : public gimple_opt_pass
{
public:
  pass_early_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_early_vrp, ctxt)
    {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_early_vrp (m_ctxt); }
  virtual bool gate (function *)
    {
      return flag_tree_vrp != 0;
    }
  virtual unsigned int execute (function *)
    { return execute_early_vrp (); }

}; // class pass_early_vrp

} // anon namespace

gimple_opt_pass *
make_pass_early_vrp (gcc::context *ctxt)
{
  return new pass_early_vrp (ctxt);
}