1 /* Code for range operators.
2 Copyright (C) 2017-2021 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
26 #include "insn-codes.h"
31 #include "tree-pass.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
41 #include "gimple-fold.h"
43 #include "gimple-iterator.h"
44 #include "gimple-walk.h"
49 // Return the upper limit for a type.
51 static inline wide_int
52 max_limit (const_tree type
)
54 return wi::max_value (TYPE_PRECISION (type
) , TYPE_SIGN (type
));
57 // Return the lower limit for a type.
59 static inline wide_int
60 min_limit (const_tree type
)
62 return wi::min_value (TYPE_PRECISION (type
) , TYPE_SIGN (type
));
65 // If the range of either op1 or op2 is undefined, set the result to
66 // varying and return TRUE. If the caller truely cares about a result,
67 // they should pass in a varying if it has an undefined that it wants
68 // treated as a varying.
71 empty_range_varying (irange
&r
, tree type
,
72 const irange
&op1
, const irange
& op2
)
74 if (op1
.undefined_p () || op2
.undefined_p ())
83 // Return false if shifting by OP is undefined behavior. Otherwise, return
84 // true and the range it is to be shifted by. This allows trimming out of
85 // undefined ranges, leaving only valid ranges if there are any.
88 get_shift_range (irange
&r
, tree type
, const irange
&op
)
90 if (op
.undefined_p ())
93 // Build valid range and intersect it with the shift range.
94 r
= value_range (build_int_cst_type (op
.type (), 0),
95 build_int_cst_type (op
.type (), TYPE_PRECISION (type
) - 1));
98 // If there are no valid ranges in the shift range, returned false.
104 // Return TRUE if 0 is within [WMIN, WMAX].
107 wi_includes_zero_p (tree type
, const wide_int
&wmin
, const wide_int
&wmax
)
109 signop sign
= TYPE_SIGN (type
);
110 return wi::le_p (wmin
, 0, sign
) && wi::ge_p (wmax
, 0, sign
);
113 // Return TRUE if [WMIN, WMAX] is the singleton 0.
116 wi_zero_p (tree type
, const wide_int
&wmin
, const wide_int
&wmax
)
118 unsigned prec
= TYPE_PRECISION (type
);
119 return wmin
== wmax
&& wi::eq_p (wmin
, wi::zero (prec
));
122 // Default wide_int fold operation returns [MIN, MAX].
125 range_operator::wi_fold (irange
&r
, tree type
,
126 const wide_int
&lh_lb ATTRIBUTE_UNUSED
,
127 const wide_int
&lh_ub ATTRIBUTE_UNUSED
,
128 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
129 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
131 gcc_checking_assert (irange::supports_type_p (type
));
132 r
.set_varying (type
);
135 // The default for fold is to break all ranges into sub-ranges and
136 // invoke the wi_fold method on each sub-range pair.
139 range_operator::fold_range (irange
&r
, tree type
,
141 const irange
&rh
) const
143 gcc_checking_assert (irange::supports_type_p (type
));
144 if (empty_range_varying (r
, type
, lh
, rh
))
147 unsigned num_lh
= lh
.num_pairs ();
148 unsigned num_rh
= rh
.num_pairs ();
150 // If both ranges are single pairs, fold directly into the result range.
151 if (num_lh
== 1 && num_rh
== 1)
153 wi_fold (r
, type
, lh
.lower_bound (0), lh
.upper_bound (0),
154 rh
.lower_bound (0), rh
.upper_bound (0));
160 for (unsigned x
= 0; x
< num_lh
; ++x
)
161 for (unsigned y
= 0; y
< num_rh
; ++y
)
163 wide_int lh_lb
= lh
.lower_bound (x
);
164 wide_int lh_ub
= lh
.upper_bound (x
);
165 wide_int rh_lb
= rh
.lower_bound (y
);
166 wide_int rh_ub
= rh
.upper_bound (y
);
167 wi_fold (tmp
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
175 // The default for op1_range is to return false.
178 range_operator::op1_range (irange
&r ATTRIBUTE_UNUSED
,
179 tree type ATTRIBUTE_UNUSED
,
180 const irange
&lhs ATTRIBUTE_UNUSED
,
181 const irange
&op2 ATTRIBUTE_UNUSED
) const
186 // The default for op2_range is to return false.
189 range_operator::op2_range (irange
&r ATTRIBUTE_UNUSED
,
190 tree type ATTRIBUTE_UNUSED
,
191 const irange
&lhs ATTRIBUTE_UNUSED
,
192 const irange
&op1 ATTRIBUTE_UNUSED
) const
198 // Create and return a range from a pair of wide-ints that are known
199 // to have overflowed (or underflowed).
202 value_range_from_overflowed_bounds (irange
&r
, tree type
,
203 const wide_int
&wmin
,
204 const wide_int
&wmax
)
206 const signop sgn
= TYPE_SIGN (type
);
207 const unsigned int prec
= TYPE_PRECISION (type
);
209 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
210 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
215 if (wi::cmp (tmin
, tmax
, sgn
) < 0)
218 if (wi::cmp (tmax
, tem
, sgn
) > 0)
221 // If the anti-range would cover nothing, drop to varying.
222 // Likewise if the anti-range bounds are outside of the types
224 if (covers
|| wi::cmp (tmin
, tmax
, sgn
) > 0)
225 r
.set_varying (type
);
228 tree tree_min
= wide_int_to_tree (type
, tmin
);
229 tree tree_max
= wide_int_to_tree (type
, tmax
);
230 r
.set (tree_min
, tree_max
, VR_ANTI_RANGE
);
234 // Create and return a range from a pair of wide-ints. MIN_OVF and
235 // MAX_OVF describe any overflow that might have occurred while
236 // calculating WMIN and WMAX respectively.
239 value_range_with_overflow (irange
&r
, tree type
,
240 const wide_int
&wmin
, const wide_int
&wmax
,
241 wi::overflow_type min_ovf
= wi::OVF_NONE
,
242 wi::overflow_type max_ovf
= wi::OVF_NONE
)
244 const signop sgn
= TYPE_SIGN (type
);
245 const unsigned int prec
= TYPE_PRECISION (type
);
246 const bool overflow_wraps
= TYPE_OVERFLOW_WRAPS (type
);
248 // For one bit precision if max != min, then the range covers all
250 if (prec
== 1 && wi::ne_p (wmax
, wmin
))
252 r
.set_varying (type
);
258 // If overflow wraps, truncate the values and adjust the range,
259 // kind, and bounds appropriately.
260 if ((min_ovf
!= wi::OVF_NONE
) == (max_ovf
!= wi::OVF_NONE
))
262 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
263 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
264 // If the limits are swapped, we wrapped around and cover
266 if (wi::gt_p (tmin
, tmax
, sgn
))
267 r
.set_varying (type
);
269 // No overflow or both overflow or underflow. The range
270 // kind stays normal.
271 r
.set (wide_int_to_tree (type
, tmin
),
272 wide_int_to_tree (type
, tmax
));
276 if ((min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_NONE
)
277 || (max_ovf
== wi::OVF_OVERFLOW
&& min_ovf
== wi::OVF_NONE
))
278 value_range_from_overflowed_bounds (r
, type
, wmin
, wmax
);
280 // Other underflow and/or overflow, drop to VR_VARYING.
281 r
.set_varying (type
);
285 // If both bounds either underflowed or overflowed, then the result
287 if ((min_ovf
== wi::OVF_OVERFLOW
&& max_ovf
== wi::OVF_OVERFLOW
)
288 || (min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_UNDERFLOW
))
294 // If overflow does not wrap, saturate to [MIN, MAX].
295 wide_int new_lb
, new_ub
;
296 if (min_ovf
== wi::OVF_UNDERFLOW
)
297 new_lb
= wi::min_value (prec
, sgn
);
298 else if (min_ovf
== wi::OVF_OVERFLOW
)
299 new_lb
= wi::max_value (prec
, sgn
);
303 if (max_ovf
== wi::OVF_UNDERFLOW
)
304 new_ub
= wi::min_value (prec
, sgn
);
305 else if (max_ovf
== wi::OVF_OVERFLOW
)
306 new_ub
= wi::max_value (prec
, sgn
);
310 r
.set (wide_int_to_tree (type
, new_lb
),
311 wide_int_to_tree (type
, new_ub
));
315 // Create and return a range from a pair of wide-ints. Canonicalize
316 // the case where the bounds are swapped. In which case, we transform
317 // [10,5] into [MIN,5][10,MAX].
320 create_possibly_reversed_range (irange
&r
, tree type
,
321 const wide_int
&new_lb
, const wide_int
&new_ub
)
323 signop s
= TYPE_SIGN (type
);
324 // If the bounds are swapped, treat the result as if an overflow occured.
325 if (wi::gt_p (new_lb
, new_ub
, s
))
326 value_range_from_overflowed_bounds (r
, type
, new_lb
, new_ub
);
328 // Otherwise it's just a normal range.
329 r
.set (wide_int_to_tree (type
, new_lb
), wide_int_to_tree (type
, new_ub
));
332 // Return an irange instance that is a boolean TRUE.
334 static inline int_range
<1>
335 range_true (tree type
)
337 unsigned prec
= TYPE_PRECISION (type
);
338 return int_range
<1> (type
, wi::one (prec
), wi::one (prec
));
341 // Return an irange instance that is a boolean FALSE.
343 static inline int_range
<1>
344 range_false (tree type
)
346 unsigned prec
= TYPE_PRECISION (type
);
347 return int_range
<1> (type
, wi::zero (prec
), wi::zero (prec
));
350 // Return an irange that covers both true and false.
352 static inline int_range
<1>
353 range_true_and_false (tree type
)
355 unsigned prec
= TYPE_PRECISION (type
);
356 return int_range
<1> (type
, wi::zero (prec
), wi::one (prec
));
// Classification of a boolean LHS range: definitely false, definitely
// true, empty (unexecutable), or covering both values.
enum bool_range_state { BRS_FALSE, BRS_TRUE, BRS_EMPTY, BRS_FULL };
361 // Return the summary information about boolean range LHS. Return an
362 // "interesting" range in R. For EMPTY or FULL, return the equivalent
363 // range for TYPE, for BRS_TRUE and BRS false, return the negation of
366 static bool_range_state
367 get_bool_state (irange
&r
, const irange
&lhs
, tree val_type
)
369 // If there is no result, then this is unexecutable.
370 if (lhs
.undefined_p ())
379 // For TRUE, we can't just test for [1,1] because Ada can have
380 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
381 if (lhs
.contains_p (build_zero_cst (lhs
.type ())))
383 r
.set_varying (val_type
);
390 class operator_equal
: public range_operator
393 virtual bool fold_range (irange
&r
, tree type
,
395 const irange
&op2
) const;
396 virtual bool op1_range (irange
&r
, tree type
,
398 const irange
&val
) const;
399 virtual bool op2_range (irange
&r
, tree type
,
401 const irange
&val
) const;
405 operator_equal::fold_range (irange
&r
, tree type
,
407 const irange
&op2
) const
409 if (empty_range_varying (r
, type
, op1
, op2
))
412 // We can be sure the values are always equal or not if both ranges
413 // consist of a single value, and then compare them.
414 if (wi::eq_p (op1
.lower_bound (), op1
.upper_bound ())
415 && wi::eq_p (op2
.lower_bound (), op2
.upper_bound ()))
417 if (wi::eq_p (op1
.lower_bound (), op2
.upper_bound()))
418 r
= range_true (type
);
420 r
= range_false (type
);
424 // If ranges do not intersect, we know the range is not equal,
425 // otherwise we don't know anything for sure.
426 int_range_max tmp
= op1
;
428 if (tmp
.undefined_p ())
429 r
= range_false (type
);
431 r
= range_true_and_false (type
);
437 operator_equal::op1_range (irange
&r
, tree type
,
439 const irange
&op2
) const
441 switch (get_bool_state (r
, lhs
, type
))
444 // If the result is false, the only time we know anything is
445 // if OP2 is a constant.
446 if (wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
452 r
.set_varying (type
);
456 // If it's true, the result is the same as OP2.
467 operator_equal::op2_range (irange
&r
, tree type
,
469 const irange
&op1
) const
471 return operator_equal::op1_range (r
, type
, lhs
, op1
);
475 class operator_not_equal
: public range_operator
478 virtual bool fold_range (irange
&r
, tree type
,
480 const irange
&op2
) const;
481 virtual bool op1_range (irange
&r
, tree type
,
483 const irange
&op2
) const;
484 virtual bool op2_range (irange
&r
, tree type
,
486 const irange
&op1
) const;
490 operator_not_equal::fold_range (irange
&r
, tree type
,
492 const irange
&op2
) const
494 if (empty_range_varying (r
, type
, op1
, op2
))
497 // We can be sure the values are always equal or not if both ranges
498 // consist of a single value, and then compare them.
499 if (wi::eq_p (op1
.lower_bound (), op1
.upper_bound ())
500 && wi::eq_p (op2
.lower_bound (), op2
.upper_bound ()))
502 if (wi::ne_p (op1
.lower_bound (), op2
.upper_bound()))
503 r
= range_true (type
);
505 r
= range_false (type
);
509 // If ranges do not intersect, we know the range is not equal,
510 // otherwise we don't know anything for sure.
511 int_range_max tmp
= op1
;
513 if (tmp
.undefined_p ())
514 r
= range_true (type
);
516 r
= range_true_and_false (type
);
522 operator_not_equal::op1_range (irange
&r
, tree type
,
524 const irange
&op2
) const
526 switch (get_bool_state (r
, lhs
, type
))
529 // If the result is true, the only time we know anything is if
530 // OP2 is a constant.
531 if (wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
537 r
.set_varying (type
);
541 // If its true, the result is the same as OP2.
553 operator_not_equal::op2_range (irange
&r
, tree type
,
555 const irange
&op1
) const
557 return operator_not_equal::op1_range (r
, type
, lhs
, op1
);
560 // (X < VAL) produces the range of [MIN, VAL - 1].
563 build_lt (irange
&r
, tree type
, const wide_int
&val
)
565 wi::overflow_type ov
;
566 wide_int lim
= wi::sub (val
, 1, TYPE_SIGN (type
), &ov
);
568 // If val - 1 underflows, check if X < MIN, which is an empty range.
572 r
= int_range
<1> (type
, min_limit (type
), lim
);
575 // (X <= VAL) produces the range of [MIN, VAL].
578 build_le (irange
&r
, tree type
, const wide_int
&val
)
580 r
= int_range
<1> (type
, min_limit (type
), val
);
583 // (X > VAL) produces the range of [VAL + 1, MAX].
586 build_gt (irange
&r
, tree type
, const wide_int
&val
)
588 wi::overflow_type ov
;
589 wide_int lim
= wi::add (val
, 1, TYPE_SIGN (type
), &ov
);
590 // If val + 1 overflows, check is for X > MAX, which is an empty range.
594 r
= int_range
<1> (type
, lim
, max_limit (type
));
597 // (X >= val) produces the range of [VAL, MAX].
600 build_ge (irange
&r
, tree type
, const wide_int
&val
)
602 r
= int_range
<1> (type
, val
, max_limit (type
));
606 class operator_lt
: public range_operator
609 virtual bool fold_range (irange
&r
, tree type
,
611 const irange
&op2
) const;
612 virtual bool op1_range (irange
&r
, tree type
,
614 const irange
&op2
) const;
615 virtual bool op2_range (irange
&r
, tree type
,
617 const irange
&op1
) const;
621 operator_lt::fold_range (irange
&r
, tree type
,
623 const irange
&op2
) const
625 if (empty_range_varying (r
, type
, op1
, op2
))
628 signop sign
= TYPE_SIGN (op1
.type ());
629 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
631 if (wi::lt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
632 r
= range_true (type
);
633 else if (!wi::lt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
634 r
= range_false (type
);
636 r
= range_true_and_false (type
);
641 operator_lt::op1_range (irange
&r
, tree type
,
643 const irange
&op2
) const
645 switch (get_bool_state (r
, lhs
, type
))
648 build_lt (r
, type
, op2
.upper_bound ());
652 build_ge (r
, type
, op2
.lower_bound ());
662 operator_lt::op2_range (irange
&r
, tree type
,
664 const irange
&op1
) const
666 switch (get_bool_state (r
, lhs
, type
))
669 build_le (r
, type
, op1
.upper_bound ());
673 build_gt (r
, type
, op1
.lower_bound ());
683 class operator_le
: public range_operator
686 virtual bool fold_range (irange
&r
, tree type
,
688 const irange
&op2
) const;
689 virtual bool op1_range (irange
&r
, tree type
,
691 const irange
&op2
) const;
692 virtual bool op2_range (irange
&r
, tree type
,
694 const irange
&op1
) const;
698 operator_le::fold_range (irange
&r
, tree type
,
700 const irange
&op2
) const
702 if (empty_range_varying (r
, type
, op1
, op2
))
705 signop sign
= TYPE_SIGN (op1
.type ());
706 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
708 if (wi::le_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
709 r
= range_true (type
);
710 else if (!wi::le_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
711 r
= range_false (type
);
713 r
= range_true_and_false (type
);
718 operator_le::op1_range (irange
&r
, tree type
,
720 const irange
&op2
) const
722 switch (get_bool_state (r
, lhs
, type
))
725 build_le (r
, type
, op2
.upper_bound ());
729 build_gt (r
, type
, op2
.lower_bound ());
739 operator_le::op2_range (irange
&r
, tree type
,
741 const irange
&op1
) const
743 switch (get_bool_state (r
, lhs
, type
))
746 build_lt (r
, type
, op1
.upper_bound ());
750 build_ge (r
, type
, op1
.lower_bound ());
760 class operator_gt
: public range_operator
763 virtual bool fold_range (irange
&r
, tree type
,
765 const irange
&op2
) const;
766 virtual bool op1_range (irange
&r
, tree type
,
768 const irange
&op2
) const;
769 virtual bool op2_range (irange
&r
, tree type
,
771 const irange
&op1
) const;
775 operator_gt::fold_range (irange
&r
, tree type
,
776 const irange
&op1
, const irange
&op2
) const
778 if (empty_range_varying (r
, type
, op1
, op2
))
781 signop sign
= TYPE_SIGN (op1
.type ());
782 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
784 if (wi::gt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
785 r
= range_true (type
);
786 else if (!wi::gt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
787 r
= range_false (type
);
789 r
= range_true_and_false (type
);
794 operator_gt::op1_range (irange
&r
, tree type
,
795 const irange
&lhs
, const irange
&op2
) const
797 switch (get_bool_state (r
, lhs
, type
))
800 build_gt (r
, type
, op2
.lower_bound ());
804 build_le (r
, type
, op2
.upper_bound ());
814 operator_gt::op2_range (irange
&r
, tree type
,
816 const irange
&op1
) const
818 switch (get_bool_state (r
, lhs
, type
))
821 build_ge (r
, type
, op1
.lower_bound ());
825 build_lt (r
, type
, op1
.upper_bound ());
835 class operator_ge
: public range_operator
838 virtual bool fold_range (irange
&r
, tree type
,
840 const irange
&op2
) const;
841 virtual bool op1_range (irange
&r
, tree type
,
843 const irange
&op2
) const;
844 virtual bool op2_range (irange
&r
, tree type
,
846 const irange
&op1
) const;
850 operator_ge::fold_range (irange
&r
, tree type
,
852 const irange
&op2
) const
854 if (empty_range_varying (r
, type
, op1
, op2
))
857 signop sign
= TYPE_SIGN (op1
.type ());
858 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
860 if (wi::ge_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
861 r
= range_true (type
);
862 else if (!wi::ge_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
863 r
= range_false (type
);
865 r
= range_true_and_false (type
);
870 operator_ge::op1_range (irange
&r
, tree type
,
872 const irange
&op2
) const
874 switch (get_bool_state (r
, lhs
, type
))
877 build_ge (r
, type
, op2
.lower_bound ());
881 build_lt (r
, type
, op2
.upper_bound ());
891 operator_ge::op2_range (irange
&r
, tree type
,
893 const irange
&op1
) const
895 switch (get_bool_state (r
, lhs
, type
))
898 build_gt (r
, type
, op1
.lower_bound ());
902 build_le (r
, type
, op1
.upper_bound ());
912 class operator_plus
: public range_operator
915 virtual bool op1_range (irange
&r
, tree type
,
917 const irange
&op2
) const;
918 virtual bool op2_range (irange
&r
, tree type
,
920 const irange
&op1
) const;
921 virtual void wi_fold (irange
&r
, tree type
,
922 const wide_int
&lh_lb
,
923 const wide_int
&lh_ub
,
924 const wide_int
&rh_lb
,
925 const wide_int
&rh_ub
) const;
929 operator_plus::wi_fold (irange
&r
, tree type
,
930 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
931 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
933 wi::overflow_type ov_lb
, ov_ub
;
934 signop s
= TYPE_SIGN (type
);
935 wide_int new_lb
= wi::add (lh_lb
, rh_lb
, s
, &ov_lb
);
936 wide_int new_ub
= wi::add (lh_ub
, rh_ub
, s
, &ov_ub
);
937 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
941 operator_plus::op1_range (irange
&r
, tree type
,
943 const irange
&op2
) const
945 return range_op_handler (MINUS_EXPR
, type
)->fold_range (r
, type
, lhs
, op2
);
949 operator_plus::op2_range (irange
&r
, tree type
,
951 const irange
&op1
) const
953 return range_op_handler (MINUS_EXPR
, type
)->fold_range (r
, type
, lhs
, op1
);
957 class operator_minus
: public range_operator
960 virtual bool op1_range (irange
&r
, tree type
,
962 const irange
&op2
) const;
963 virtual bool op2_range (irange
&r
, tree type
,
965 const irange
&op1
) const;
966 virtual void wi_fold (irange
&r
, tree type
,
967 const wide_int
&lh_lb
,
968 const wide_int
&lh_ub
,
969 const wide_int
&rh_lb
,
970 const wide_int
&rh_ub
) const;
974 operator_minus::wi_fold (irange
&r
, tree type
,
975 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
976 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
978 wi::overflow_type ov_lb
, ov_ub
;
979 signop s
= TYPE_SIGN (type
);
980 wide_int new_lb
= wi::sub (lh_lb
, rh_ub
, s
, &ov_lb
);
981 wide_int new_ub
= wi::sub (lh_ub
, rh_lb
, s
, &ov_ub
);
982 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
986 operator_minus::op1_range (irange
&r
, tree type
,
988 const irange
&op2
) const
990 return range_op_handler (PLUS_EXPR
, type
)->fold_range (r
, type
, lhs
, op2
);
994 operator_minus::op2_range (irange
&r
, tree type
,
996 const irange
&op1
) const
998 return fold_range (r
, type
, op1
, lhs
);
1002 class operator_min
: public range_operator
1005 virtual void wi_fold (irange
&r
, tree type
,
1006 const wide_int
&lh_lb
,
1007 const wide_int
&lh_ub
,
1008 const wide_int
&rh_lb
,
1009 const wide_int
&rh_ub
) const;
1013 operator_min::wi_fold (irange
&r
, tree type
,
1014 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1015 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1017 signop s
= TYPE_SIGN (type
);
1018 wide_int new_lb
= wi::min (lh_lb
, rh_lb
, s
);
1019 wide_int new_ub
= wi::min (lh_ub
, rh_ub
, s
);
1020 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
1024 class operator_max
: public range_operator
1027 virtual void wi_fold (irange
&r
, tree type
,
1028 const wide_int
&lh_lb
,
1029 const wide_int
&lh_ub
,
1030 const wide_int
&rh_lb
,
1031 const wide_int
&rh_ub
) const;
1035 operator_max::wi_fold (irange
&r
, tree type
,
1036 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1037 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1039 signop s
= TYPE_SIGN (type
);
1040 wide_int new_lb
= wi::max (lh_lb
, rh_lb
, s
);
1041 wide_int new_ub
= wi::max (lh_ub
, rh_ub
, s
);
1042 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
1046 class cross_product_operator
: public range_operator
1049 // Perform an operation between two wide-ints and place the result
1050 // in R. Return true if the operation overflowed.
1051 virtual bool wi_op_overflows (wide_int
&r
,
1054 const wide_int
&) const = 0;
1056 // Calculate the cross product of two sets of sub-ranges and return it.
1057 void wi_cross_product (irange
&r
, tree type
,
1058 const wide_int
&lh_lb
,
1059 const wide_int
&lh_ub
,
1060 const wide_int
&rh_lb
,
1061 const wide_int
&rh_ub
) const;
1064 // Calculate the cross product of two sets of ranges and return it.
1066 // Multiplications, divisions and shifts are a bit tricky to handle,
1067 // depending on the mix of signs we have in the two ranges, we need to
1068 // operate on different values to get the minimum and maximum values
1069 // for the new range. One approach is to figure out all the
1070 // variations of range combinations and do the operations.
1072 // However, this involves several calls to compare_values and it is
1073 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
1074 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP MAX1) and then
1075 // figure the smallest and largest values to form the new range.
1078 cross_product_operator::wi_cross_product (irange
&r
, tree type
,
1079 const wide_int
&lh_lb
,
1080 const wide_int
&lh_ub
,
1081 const wide_int
&rh_lb
,
1082 const wide_int
&rh_ub
) const
1084 wide_int cp1
, cp2
, cp3
, cp4
;
1085 // Default to varying.
1086 r
.set_varying (type
);
1088 // Compute the 4 cross operations, bailing if we get an overflow we
1090 if (wi_op_overflows (cp1
, type
, lh_lb
, rh_lb
))
1092 if (wi::eq_p (lh_lb
, lh_ub
))
1094 else if (wi_op_overflows (cp3
, type
, lh_ub
, rh_lb
))
1096 if (wi::eq_p (rh_lb
, rh_ub
))
1098 else if (wi_op_overflows (cp2
, type
, lh_lb
, rh_ub
))
1100 if (wi::eq_p (lh_lb
, lh_ub
))
1102 else if (wi_op_overflows (cp4
, type
, lh_ub
, rh_ub
))
1106 signop sign
= TYPE_SIGN (type
);
1107 if (wi::gt_p (cp1
, cp2
, sign
))
1108 std::swap (cp1
, cp2
);
1109 if (wi::gt_p (cp3
, cp4
, sign
))
1110 std::swap (cp3
, cp4
);
1112 // Choose min and max from the ordered pairs.
1113 wide_int res_lb
= wi::min (cp1
, cp3
, sign
);
1114 wide_int res_ub
= wi::max (cp2
, cp4
, sign
);
1115 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
1119 class operator_mult
: public cross_product_operator
1122 virtual void wi_fold (irange
&r
, tree type
,
1123 const wide_int
&lh_lb
,
1124 const wide_int
&lh_ub
,
1125 const wide_int
&rh_lb
,
1126 const wide_int
&rh_ub
) const;
1127 virtual bool wi_op_overflows (wide_int
&res
, tree type
,
1128 const wide_int
&w0
, const wide_int
&w1
) const;
1129 virtual bool op1_range (irange
&r
, tree type
,
1131 const irange
&op2
) const;
1132 virtual bool op2_range (irange
&r
, tree type
,
1134 const irange
&op1
) const;
1138 operator_mult::op1_range (irange
&r
, tree type
,
1139 const irange
&lhs
, const irange
&op2
) const
1143 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
1144 // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
1145 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
1146 if (TYPE_OVERFLOW_WRAPS (type
))
1149 if (op2
.singleton_p (&offset
) && !integer_zerop (offset
))
1150 return range_op_handler (TRUNC_DIV_EXPR
, type
)->fold_range (r
, type
,
1156 operator_mult::op2_range (irange
&r
, tree type
,
1157 const irange
&lhs
, const irange
&op1
) const
1159 return operator_mult::op1_range (r
, type
, lhs
, op1
);
1163 operator_mult::wi_op_overflows (wide_int
&res
, tree type
,
1164 const wide_int
&w0
, const wide_int
&w1
) const
1166 wi::overflow_type overflow
= wi::OVF_NONE
;
1167 signop sign
= TYPE_SIGN (type
);
1168 res
= wi::mul (w0
, w1
, sign
, &overflow
);
1169 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
1171 // For multiplication, the sign of the overflow is given
1172 // by the comparison of the signs of the operands.
1173 if (sign
== UNSIGNED
|| w0
.sign_mask () == w1
.sign_mask ())
1174 res
= wi::max_value (w0
.get_precision (), sign
);
1176 res
= wi::min_value (w0
.get_precision (), sign
);
1183 operator_mult::wi_fold (irange
&r
, tree type
,
1184 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1185 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1187 if (TYPE_OVERFLOW_UNDEFINED (type
))
1189 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
1193 // Multiply the ranges when overflow wraps. This is basically fancy
1194 // code so we don't drop to varying with an unsigned
1197 // This test requires 2*prec bits if both operands are signed and
1198 // 2*prec + 2 bits if either is not. Therefore, extend the values
1199 // using the sign of the result to PREC2. From here on out,
1200 // everthing is just signed math no matter what the input types
1203 signop sign
= TYPE_SIGN (type
);
1204 unsigned prec
= TYPE_PRECISION (type
);
1205 widest2_int min0
= widest2_int::from (lh_lb
, sign
);
1206 widest2_int max0
= widest2_int::from (lh_ub
, sign
);
1207 widest2_int min1
= widest2_int::from (rh_lb
, sign
);
1208 widest2_int max1
= widest2_int::from (rh_ub
, sign
);
1209 widest2_int sizem1
= wi::mask
<widest2_int
> (prec
, false);
1210 widest2_int size
= sizem1
+ 1;
1212 // Canonicalize the intervals.
1213 if (sign
== UNSIGNED
)
1215 if (wi::ltu_p (size
, min0
+ max0
))
1220 if (wi::ltu_p (size
, min1
+ max1
))
1227 // Sort the 4 products so that min is in prod0 and max is in
1229 widest2_int prod0
= min0
* min1
;
1230 widest2_int prod1
= min0
* max1
;
1231 widest2_int prod2
= max0
* min1
;
1232 widest2_int prod3
= max0
* max1
;
1234 // min0min1 > max0max1
1236 std::swap (prod0
, prod3
);
1238 // min0max1 > max0min1
1240 std::swap (prod1
, prod2
);
1243 std::swap (prod0
, prod1
);
1246 std::swap (prod2
, prod3
);
1249 prod2
= prod3
- prod0
;
1250 if (wi::geu_p (prod2
, sizem1
))
1251 // The range covers all values.
1252 r
.set_varying (type
);
1255 wide_int new_lb
= wide_int::from (prod0
, prec
, sign
);
1256 wide_int new_ub
= wide_int::from (prod3
, prec
, sign
);
1257 create_possibly_reversed_range (r
, type
, new_lb
, new_ub
);
1262 class operator_div
: public cross_product_operator
1265 operator_div (enum tree_code c
) { code
= c
; }
1266 virtual void wi_fold (irange
&r
, tree type
,
1267 const wide_int
&lh_lb
,
1268 const wide_int
&lh_ub
,
1269 const wide_int
&rh_lb
,
1270 const wide_int
&rh_ub
) const;
1271 virtual bool wi_op_overflows (wide_int
&res
, tree type
,
1272 const wide_int
&, const wide_int
&) const;
1274 enum tree_code code
;
1278 operator_div::wi_op_overflows (wide_int
&res
, tree type
,
1279 const wide_int
&w0
, const wide_int
&w1
) const
1284 wi::overflow_type overflow
= wi::OVF_NONE
;
1285 signop sign
= TYPE_SIGN (type
);
1289 case EXACT_DIV_EXPR
:
1290 // EXACT_DIV_EXPR is implemented as TRUNC_DIV_EXPR in
1291 // operator_exact_divide. No need to handle it here.
1294 case TRUNC_DIV_EXPR
:
1295 res
= wi::div_trunc (w0
, w1
, sign
, &overflow
);
1297 case FLOOR_DIV_EXPR
:
1298 res
= wi::div_floor (w0
, w1
, sign
, &overflow
);
1300 case ROUND_DIV_EXPR
:
1301 res
= wi::div_round (w0
, w1
, sign
, &overflow
);
1304 res
= wi::div_ceil (w0
, w1
, sign
, &overflow
);
1310 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
1312 // For division, the only case is -INF / -1 = +INF.
1313 res
= wi::max_value (w0
.get_precision (), sign
);
1320 operator_div::wi_fold (irange
&r
, tree type
,
1321 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1322 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1324 // If we know we will divide by zero...
1325 if (rh_lb
== 0 && rh_ub
== 0)
1327 r
.set_varying (type
);
1331 const wide_int dividend_min
= lh_lb
;
1332 const wide_int dividend_max
= lh_ub
;
1333 const wide_int divisor_min
= rh_lb
;
1334 const wide_int divisor_max
= rh_ub
;
1335 signop sign
= TYPE_SIGN (type
);
1336 unsigned prec
= TYPE_PRECISION (type
);
1337 wide_int extra_min
, extra_max
;
1339 // If we know we won't divide by zero, just do the division.
1340 if (!wi_includes_zero_p (type
, divisor_min
, divisor_max
))
1342 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
1343 divisor_min
, divisor_max
);
1347 // If flag_non_call_exceptions, we must not eliminate a division by zero.
1348 if (cfun
->can_throw_non_call_exceptions
)
1350 r
.set_varying (type
);
1354 // If we're definitely dividing by zero, there's nothing to do.
1355 if (wi_zero_p (type
, divisor_min
, divisor_max
))
1357 r
.set_varying (type
);
1361 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
1362 // skip any division by zero.
1364 // First divide by the negative numbers, if any.
1365 if (wi::neg_p (divisor_min
, sign
))
1366 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
1367 divisor_min
, wi::minus_one (prec
));
1371 // Then divide by the non-zero positive numbers, if any.
1372 if (wi::gt_p (divisor_max
, wi::zero (prec
), sign
))
1375 wi_cross_product (tmp
, type
, dividend_min
, dividend_max
,
1376 wi::one (prec
), divisor_max
);
1379 // We shouldn't still have undefined here.
1380 gcc_checking_assert (!r
.undefined_p ());
1383 operator_div
op_trunc_div (TRUNC_DIV_EXPR
);
1384 operator_div
op_floor_div (FLOOR_DIV_EXPR
);
1385 operator_div
op_round_div (ROUND_DIV_EXPR
);
1386 operator_div
op_ceil_div (CEIL_DIV_EXPR
);
1389 class operator_exact_divide
: public operator_div
1392 operator_exact_divide () : operator_div (TRUNC_DIV_EXPR
) { }
1393 virtual bool op1_range (irange
&r
, tree type
,
1395 const irange
&op2
) const;
1400 operator_exact_divide::op1_range (irange
&r
, tree type
,
1402 const irange
&op2
) const
1405 // [2, 4] = op1 / [3,3] since its exact divide, no need to worry about
1406 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
1407 // We wont bother trying to enumerate all the in between stuff :-P
1408 // TRUE accuraacy is [6,6][9,9][12,12]. This is unlikely to matter most of
1409 // the time however.
1410 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
1411 if (op2
.singleton_p (&offset
)
1412 && !integer_zerop (offset
))
1413 return range_op_handler (MULT_EXPR
, type
)->fold_range (r
, type
, lhs
, op2
);
1418 class operator_lshift
: public cross_product_operator
1421 virtual bool op1_range (irange
&r
, tree type
,
1423 const irange
&op2
) const;
1424 virtual bool fold_range (irange
&r
, tree type
,
1426 const irange
&op2
) const;
1428 virtual void wi_fold (irange
&r
, tree type
,
1429 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1430 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
1431 virtual bool wi_op_overflows (wide_int
&res
,
1434 const wide_int
&) const;
1437 class operator_rshift
: public cross_product_operator
1440 virtual bool fold_range (irange
&r
, tree type
,
1442 const irange
&op2
) const;
1443 virtual void wi_fold (irange
&r
, tree type
,
1444 const wide_int
&lh_lb
,
1445 const wide_int
&lh_ub
,
1446 const wide_int
&rh_lb
,
1447 const wide_int
&rh_ub
) const;
1448 virtual bool wi_op_overflows (wide_int
&res
,
1451 const wide_int
&w1
) const;
1452 virtual bool op1_range (irange
&, tree type
,
1454 const irange
&op2
) const;
1459 operator_lshift::fold_range (irange
&r
, tree type
,
1461 const irange
&op2
) const
1463 int_range_max shift_range
;
1464 if (!get_shift_range (shift_range
, type
, op2
))
1466 if (op2
.undefined_p ())
1469 r
.set_varying (type
);
1473 // Transform left shifts by constants into multiplies.
1474 if (shift_range
.singleton_p ())
1476 unsigned shift
= shift_range
.lower_bound ().to_uhwi ();
1477 wide_int tmp
= wi::set_bit_in_zero (shift
, TYPE_PRECISION (type
));
1478 int_range
<1> mult (type
, tmp
, tmp
);
1480 // Force wrapping multiplication.
1481 bool saved_flag_wrapv
= flag_wrapv
;
1482 bool saved_flag_wrapv_pointer
= flag_wrapv_pointer
;
1484 flag_wrapv_pointer
= 1;
1485 bool b
= op_mult
.fold_range (r
, type
, op1
, mult
);
1486 flag_wrapv
= saved_flag_wrapv
;
1487 flag_wrapv_pointer
= saved_flag_wrapv_pointer
;
1491 // Otherwise, invoke the generic fold routine.
1492 return range_operator::fold_range (r
, type
, op1
, shift_range
);
1496 operator_lshift::wi_fold (irange
&r
, tree type
,
1497 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1498 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1500 signop sign
= TYPE_SIGN (type
);
1501 unsigned prec
= TYPE_PRECISION (type
);
1502 int overflow_pos
= sign
== SIGNED
? prec
- 1 : prec
;
1503 int bound_shift
= overflow_pos
- rh_ub
.to_shwi ();
1504 // If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
1505 // overflow. However, for that to happen, rh.max needs to be zero,
1506 // which means rh is a singleton range of zero, which means it
1507 // should be handled by the lshift fold_range above.
1508 wide_int bound
= wi::set_bit_in_zero (bound_shift
, prec
);
1509 wide_int complement
= ~(bound
- 1);
1510 wide_int low_bound
, high_bound
;
1511 bool in_bounds
= false;
1513 if (sign
== UNSIGNED
)
1516 high_bound
= complement
;
1517 if (wi::ltu_p (lh_ub
, low_bound
))
1519 // [5, 6] << [1, 2] == [10, 24].
1520 // We're shifting out only zeroes, the value increases
1524 else if (wi::ltu_p (high_bound
, lh_lb
))
1526 // [0xffffff00, 0xffffffff] << [1, 2]
1527 // == [0xfffffc00, 0xfffffffe].
1528 // We're shifting out only ones, the value decreases
1535 // [-1, 1] << [1, 2] == [-4, 4]
1536 low_bound
= complement
;
1538 if (wi::lts_p (lh_ub
, high_bound
)
1539 && wi::lts_p (low_bound
, lh_lb
))
1541 // For non-negative numbers, we're shifting out only zeroes,
1542 // the value increases monotonically. For negative numbers,
1543 // we're shifting out only ones, the value decreases
1550 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
1552 r
.set_varying (type
);
1556 operator_lshift::wi_op_overflows (wide_int
&res
, tree type
,
1557 const wide_int
&w0
, const wide_int
&w1
) const
1559 signop sign
= TYPE_SIGN (type
);
1562 // It's unclear from the C standard whether shifts can overflow.
1563 // The following code ignores overflow; perhaps a C standard
1564 // interpretation ruling is needed.
1565 res
= wi::rshift (w0
, -w1
, sign
);
1568 res
= wi::lshift (w0
, w1
);
1573 operator_lshift::op1_range (irange
&r
,
1576 const irange
&op2
) const
1579 if (op2
.singleton_p (&shift_amount
))
1581 wide_int shift
= wi::to_wide (shift_amount
);
1582 if (wi::lt_p (shift
, 0, SIGNED
))
1584 if (wi::ge_p (shift
, wi::uhwi (TYPE_PRECISION (type
),
1585 TYPE_PRECISION (op2
.type ())),
1594 // Work completely in unsigned mode to start.
1596 if (TYPE_SIGN (type
) == SIGNED
)
1598 int_range_max tmp
= lhs
;
1599 utype
= unsigned_type_for (type
);
1600 range_cast (tmp
, utype
);
1601 op_rshift
.fold_range (r
, utype
, tmp
, op2
);
1604 op_rshift
.fold_range (r
, utype
, lhs
, op2
);
1606 // Start with ranges which can produce the LHS by right shifting the
1607 // result by the shift amount.
1608 // ie [0x08, 0xF0] = op1 << 2 will start with
1609 // [00001000, 11110000] = op1 << 2
1610 // [0x02, 0x4C] aka [00000010, 00111100]
1612 // Then create a range from the LB with the least significant upper bit
1613 // set, to the upper bound with all the bits set.
1614 // This would be [0x42, 0xFC] aka [01000010, 11111100].
1616 // Ideally we do this for each subrange, but just lump them all for now.
1617 unsigned low_bits
= TYPE_PRECISION (utype
)
1618 - TREE_INT_CST_LOW (shift_amount
);
1619 wide_int up_mask
= wi::mask (low_bits
, true, TYPE_PRECISION (utype
));
1620 wide_int new_ub
= wi::bit_or (up_mask
, r
.upper_bound ());
1621 wide_int new_lb
= wi::set_bit (r
.lower_bound (), low_bits
);
1622 int_range
<2> fill_range (utype
, new_lb
, new_ub
);
1623 r
.union_ (fill_range
);
1626 range_cast (r
, type
);
1633 operator_rshift::op1_range (irange
&r
,
1636 const irange
&op2
) const
1639 if (op2
.singleton_p (&shift
))
1641 // Ignore nonsensical shifts.
1642 unsigned prec
= TYPE_PRECISION (type
);
1643 if (wi::ge_p (wi::to_wide (shift
),
1644 wi::uhwi (prec
, TYPE_PRECISION (TREE_TYPE (shift
))),
1647 if (wi::to_wide (shift
) == 0)
1653 // Folding the original operation may discard some impossible
1654 // ranges from the LHS.
1655 int_range_max lhs_refined
;
1656 op_rshift
.fold_range (lhs_refined
, type
, int_range
<1> (type
), op2
);
1657 lhs_refined
.intersect (lhs
);
1658 if (lhs_refined
.undefined_p ())
1663 int_range_max
shift_range (shift
, shift
);
1664 int_range_max lb
, ub
;
1665 op_lshift
.fold_range (lb
, type
, lhs_refined
, shift_range
);
1667 // 0000 0111 = OP1 >> 3
1669 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
1670 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
1671 // right hand side (0x07).
1672 tree mask
= fold_build1 (BIT_NOT_EXPR
, type
,
1673 fold_build2 (LSHIFT_EXPR
, type
,
1674 build_minus_one_cst (type
),
1676 int_range_max
mask_range (build_zero_cst (type
), mask
);
1677 op_plus
.fold_range (ub
, type
, lb
, mask_range
);
1680 if (!lhs_refined
.contains_p (build_zero_cst (type
)))
1682 mask_range
.invert ();
1683 r
.intersect (mask_range
);
1691 operator_rshift::wi_op_overflows (wide_int
&res
,
1694 const wide_int
&w1
) const
1696 signop sign
= TYPE_SIGN (type
);
1698 res
= wi::lshift (w0
, -w1
);
1701 // It's unclear from the C standard whether shifts can overflow.
1702 // The following code ignores overflow; perhaps a C standard
1703 // interpretation ruling is needed.
1704 res
= wi::rshift (w0
, w1
, sign
);
1710 operator_rshift::fold_range (irange
&r
, tree type
,
1712 const irange
&op2
) const
1714 int_range_max shift
;
1715 if (!get_shift_range (shift
, type
, op2
))
1717 if (op2
.undefined_p ())
1720 r
.set_varying (type
);
1724 return range_operator::fold_range (r
, type
, op1
, shift
);
1728 operator_rshift::wi_fold (irange
&r
, tree type
,
1729 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1730 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1732 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
1736 class operator_cast
: public range_operator
1739 virtual bool fold_range (irange
&r
, tree type
,
1741 const irange
&op2
) const;
1742 virtual bool op1_range (irange
&r
, tree type
,
1744 const irange
&op2
) const;
1746 bool truncating_cast_p (const irange
&inner
, const irange
&outer
) const;
1747 bool inside_domain_p (const wide_int
&min
, const wide_int
&max
,
1748 const irange
&outer
) const;
1749 void fold_pair (irange
&r
, unsigned index
, const irange
&inner
,
1750 const irange
&outer
) const;
1753 // Return TRUE if casting from INNER to OUTER is a truncating cast.
1756 operator_cast::truncating_cast_p (const irange
&inner
,
1757 const irange
&outer
) const
1759 return TYPE_PRECISION (outer
.type ()) < TYPE_PRECISION (inner
.type ());
1762 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
1765 operator_cast::inside_domain_p (const wide_int
&min
,
1766 const wide_int
&max
,
1767 const irange
&range
) const
1769 wide_int domain_min
= wi::to_wide (vrp_val_min (range
.type ()));
1770 wide_int domain_max
= wi::to_wide (vrp_val_max (range
.type ()));
1771 signop domain_sign
= TYPE_SIGN (range
.type ());
1772 return (wi::le_p (min
, domain_max
, domain_sign
)
1773 && wi::le_p (max
, domain_max
, domain_sign
)
1774 && wi::ge_p (min
, domain_min
, domain_sign
)
1775 && wi::ge_p (max
, domain_min
, domain_sign
));
1779 // Helper for fold_range which work on a pair at a time.
1782 operator_cast::fold_pair (irange
&r
, unsigned index
,
1783 const irange
&inner
,
1784 const irange
&outer
) const
1786 tree inner_type
= inner
.type ();
1787 tree outer_type
= outer
.type ();
1788 signop inner_sign
= TYPE_SIGN (inner_type
);
1789 unsigned outer_prec
= TYPE_PRECISION (outer_type
);
1791 // check to see if casting from INNER to OUTER is a conversion that
1792 // fits in the resulting OUTER type.
1793 wide_int inner_lb
= inner
.lower_bound (index
);
1794 wide_int inner_ub
= inner
.upper_bound (index
);
1795 if (truncating_cast_p (inner
, outer
))
1797 // We may be able to accomodate a truncating cast if the
1798 // resulting range can be represented in the target type...
1799 if (wi::rshift (wi::sub (inner_ub
, inner_lb
),
1800 wi::uhwi (outer_prec
, TYPE_PRECISION (inner
.type ())),
1803 r
.set_varying (outer_type
);
1807 // ...but we must still verify that the final range fits in the
1808 // domain. This catches -fstrict-enum restrictions where the domain
1809 // range is smaller than what fits in the underlying type.
1810 wide_int min
= wide_int::from (inner_lb
, outer_prec
, inner_sign
);
1811 wide_int max
= wide_int::from (inner_ub
, outer_prec
, inner_sign
);
1812 if (inside_domain_p (min
, max
, outer
))
1813 create_possibly_reversed_range (r
, outer_type
, min
, max
);
1815 r
.set_varying (outer_type
);
1820 operator_cast::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
1821 const irange
&inner
,
1822 const irange
&outer
) const
1824 if (empty_range_varying (r
, type
, inner
, outer
))
1827 gcc_checking_assert (outer
.varying_p ());
1828 gcc_checking_assert (inner
.num_pairs () > 0);
1830 // Avoid a temporary by folding the first pair directly into the result.
1831 fold_pair (r
, 0, inner
, outer
);
1833 // Then process any additonal pairs by unioning with their results.
1834 for (unsigned x
= 1; x
< inner
.num_pairs (); ++x
)
1837 fold_pair (tmp
, x
, inner
, outer
);
1846 operator_cast::op1_range (irange
&r
, tree type
,
1848 const irange
&op2
) const
1850 tree lhs_type
= lhs
.type ();
1851 gcc_checking_assert (types_compatible_p (op2
.type(), type
));
1853 // If we are calculating a pointer, shortcut to what we really care about.
1854 if (POINTER_TYPE_P (type
))
1856 // Conversion from other pointers or a constant (including 0/NULL)
1857 // are straightforward.
1858 if (POINTER_TYPE_P (lhs
.type ())
1859 || (lhs
.singleton_p ()
1860 && TYPE_PRECISION (lhs
.type ()) >= TYPE_PRECISION (type
)))
1863 range_cast (r
, type
);
1867 // If the LHS is not a pointer nor a singleton, then it is
1868 // either VARYING or non-zero.
1869 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
1870 r
.set_nonzero (type
);
1872 r
.set_varying (type
);
1878 if (truncating_cast_p (op2
, lhs
))
1880 if (lhs
.varying_p ())
1881 r
.set_varying (type
);
1884 // We want to insert the LHS as an unsigned value since it
1885 // would not trigger the signed bit of the larger type.
1886 int_range_max converted_lhs
= lhs
;
1887 range_cast (converted_lhs
, unsigned_type_for (lhs_type
));
1888 range_cast (converted_lhs
, type
);
1889 // Start by building the positive signed outer range for the type.
1890 wide_int lim
= wi::set_bit_in_zero (TYPE_PRECISION (lhs_type
),
1891 TYPE_PRECISION (type
));
1892 r
= int_range
<1> (type
, lim
, wi::max_value (TYPE_PRECISION (type
),
1894 // For the signed part, we need to simply union the 2 ranges now.
1895 r
.union_ (converted_lhs
);
1897 // Create maximal negative number outside of LHS bits.
1898 lim
= wi::mask (TYPE_PRECISION (lhs_type
), true,
1899 TYPE_PRECISION (type
));
1900 // Add this to the unsigned LHS range(s).
1901 int_range_max
lim_range (type
, lim
, lim
);
1902 int_range_max lhs_neg
;
1903 range_op_handler (PLUS_EXPR
, type
)->fold_range (lhs_neg
,
1907 // lhs_neg now has all the negative versions of the LHS.
1908 // Now union in all the values from SIGNED MIN (0x80000) to
1909 // lim-1 in order to fill in all the ranges with the upper
1912 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
1913 // we don't need to create a range from min to lim-1
1914 // calculate neg range traps trying to create [lim, lim - 1].
1915 wide_int min_val
= wi::min_value (TYPE_PRECISION (type
), SIGNED
);
1918 int_range_max
neg (type
,
1919 wi::min_value (TYPE_PRECISION (type
),
1922 lhs_neg
.union_ (neg
);
1924 // And finally, munge the signed and unsigned portions.
1927 // And intersect with any known value passed in the extra operand.
1933 if (TYPE_PRECISION (lhs_type
) == TYPE_PRECISION (type
))
1937 // The cast is not truncating, and the range is restricted to
1938 // the range of the RHS by this assignment.
1940 // Cast the range of the RHS to the type of the LHS.
1941 fold_range (tmp
, lhs_type
, int_range
<1> (type
), int_range
<1> (lhs_type
));
1942 // Intersect this with the LHS range will produce the range,
1943 // which will be cast to the RHS type before returning.
1944 tmp
.intersect (lhs
);
1947 // Cast the calculated range to the type of the RHS.
1948 fold_range (r
, type
, tmp
, int_range
<1> (type
));
1953 class operator_logical_and
: public range_operator
1956 virtual bool fold_range (irange
&r
, tree type
,
1958 const irange
&rh
) const;
1959 virtual bool op1_range (irange
&r
, tree type
,
1961 const irange
&op2
) const;
1962 virtual bool op2_range (irange
&r
, tree type
,
1964 const irange
&op1
) const;
1969 operator_logical_and::fold_range (irange
&r
, tree type
,
1971 const irange
&rh
) const
1973 if (empty_range_varying (r
, type
, lh
, rh
))
1976 // 0 && anything is 0.
1977 if ((wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (lh
.upper_bound (), 0))
1978 || (wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (rh
.upper_bound (), 0)))
1979 r
= range_false (type
);
1980 else if (lh
.contains_p (build_zero_cst (lh
.type ()))
1981 || rh
.contains_p (build_zero_cst (rh
.type ())))
1982 // To reach this point, there must be a logical 1 on each side, and
1983 // the only remaining question is whether there is a zero or not.
1984 r
= range_true_and_false (type
);
1986 r
= range_true (type
);
1991 operator_logical_and::op1_range (irange
&r
, tree type
,
1993 const irange
&op2 ATTRIBUTE_UNUSED
) const
1995 switch (get_bool_state (r
, lhs
, type
))
1998 // A true result means both sides of the AND must be true.
1999 r
= range_true (type
);
2002 // Any other result means only one side has to be false, the
2003 // other side can be anything. So we cannott be sure of any
2005 r
= range_true_and_false (type
);
2012 operator_logical_and::op2_range (irange
&r
, tree type
,
2014 const irange
&op1
) const
2016 return operator_logical_and::op1_range (r
, type
, lhs
, op1
);
2020 class operator_bitwise_and
: public range_operator
2023 virtual bool fold_range (irange
&r
, tree type
,
2025 const irange
&rh
) const;
2026 virtual bool op1_range (irange
&r
, tree type
,
2028 const irange
&op2
) const;
2029 virtual bool op2_range (irange
&r
, tree type
,
2031 const irange
&op1
) const;
2032 virtual void wi_fold (irange
&r
, tree type
,
2033 const wide_int
&lh_lb
,
2034 const wide_int
&lh_ub
,
2035 const wide_int
&rh_lb
,
2036 const wide_int
&rh_ub
) const;
2038 void simple_op1_range_solver (irange
&r
, tree type
,
2040 const irange
&op2
) const;
2041 void remove_impossible_ranges (irange
&r
, const irange
&rh
) const;
2045 unsigned_singleton_p (const irange
&op
)
2048 if (op
.singleton_p (&mask
))
2050 wide_int x
= wi::to_wide (mask
);
2051 return wi::ge_p (x
, 0, TYPE_SIGN (op
.type ()));
2056 // Remove any ranges from R that are known to be impossible when an
2057 // range is ANDed with MASK.
2060 operator_bitwise_and::remove_impossible_ranges (irange
&r
,
2061 const irange
&rmask
) const
2063 if (r
.undefined_p () || !unsigned_singleton_p (rmask
))
2066 wide_int mask
= rmask
.lower_bound ();
2067 tree type
= r
.type ();
2068 int prec
= TYPE_PRECISION (type
);
2069 int leading_zeros
= wi::clz (mask
);
2070 int_range_max impossible_ranges
;
2072 /* We know that starting at the most significant bit, any 0 in the
2073 mask means the resulting range cannot contain a 1 in that same
2074 position. This means the following ranges are impossible:
2078 01xx xxxx [0100 0000, 0111 1111]
2079 001x xxxx [0010 0000, 0011 1111]
2080 0000 01xx [0000 0100, 0000 0111]
2081 0000 0001 [0000 0001, 0000 0001]
2083 wide_int one
= wi::one (prec
);
2084 for (int i
= 0; i
< prec
- leading_zeros
- 1; ++i
)
2085 if (wi::bit_and (mask
, wi::lshift (one
, wi::uhwi (i
, prec
))) == 0)
2087 tree lb
= fold_build2 (LSHIFT_EXPR
, type
,
2088 build_one_cst (type
),
2089 build_int_cst (type
, i
));
2090 tree ub_left
= fold_build1 (BIT_NOT_EXPR
, type
,
2091 fold_build2 (LSHIFT_EXPR
, type
,
2092 build_minus_one_cst (type
),
2093 build_int_cst (type
, i
)));
2094 tree ub_right
= fold_build2 (LSHIFT_EXPR
, type
,
2095 build_one_cst (type
),
2096 build_int_cst (type
, i
));
2097 tree ub
= fold_build2 (BIT_IOR_EXPR
, type
, ub_left
, ub_right
);
2098 impossible_ranges
.union_ (int_range
<1> (lb
, ub
));
2100 if (!impossible_ranges
.undefined_p ())
2102 impossible_ranges
.invert ();
2103 r
.intersect (impossible_ranges
);
2108 operator_bitwise_and::fold_range (irange
&r
, tree type
,
2110 const irange
&rh
) const
2112 if (range_operator::fold_range (r
, type
, lh
, rh
))
2114 // FIXME: This is temporarily disabled because, though it
2115 // generates better ranges, it's noticeably slower for evrp.
2116 // remove_impossible_ranges (r, rh);
2123 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
2124 // possible. Basically, see if we can optimize:
2128 // [LB op Z, UB op Z]
2130 // If the optimization was successful, accumulate the range in R and
2134 wi_optimize_and_or (irange
&r
,
2135 enum tree_code code
,
2137 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2138 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
2140 // Calculate the singleton mask among the ranges, if any.
2141 wide_int lower_bound
, upper_bound
, mask
;
2142 if (wi::eq_p (rh_lb
, rh_ub
))
2145 lower_bound
= lh_lb
;
2146 upper_bound
= lh_ub
;
2148 else if (wi::eq_p (lh_lb
, lh_ub
))
2151 lower_bound
= rh_lb
;
2152 upper_bound
= rh_ub
;
2157 // If Z is a constant which (for op | its bitwise not) has n
2158 // consecutive least significant bits cleared followed by m 1
2159 // consecutive bits set immediately above it and either
2160 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
2162 // The least significant n bits of all the values in the range are
2163 // cleared or set, the m bits above it are preserved and any bits
2164 // above these are required to be the same for all values in the
2168 if (code
== BIT_IOR_EXPR
)
2170 if (wi::eq_p (w
, 0))
2171 n
= w
.get_precision ();
2175 w
= ~(w
| wi::mask (n
, false, w
.get_precision ()));
2176 if (wi::eq_p (w
, 0))
2177 m
= w
.get_precision () - n
;
2179 m
= wi::ctz (w
) - n
;
2181 wide_int new_mask
= wi::mask (m
+ n
, true, w
.get_precision ());
2182 if ((new_mask
& lower_bound
) != (new_mask
& upper_bound
))
2185 wide_int res_lb
, res_ub
;
2186 if (code
== BIT_AND_EXPR
)
2188 res_lb
= wi::bit_and (lower_bound
, mask
);
2189 res_ub
= wi::bit_and (upper_bound
, mask
);
2191 else if (code
== BIT_IOR_EXPR
)
2193 res_lb
= wi::bit_or (lower_bound
, mask
);
2194 res_ub
= wi::bit_or (upper_bound
, mask
);
2198 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
2200 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
2201 if (code
== BIT_IOR_EXPR
&& wi::ne_p (mask
, 0))
2204 tmp
.set_nonzero (type
);
2210 // For range [LB, UB] compute two wide_int bit masks.
2212 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
2213 // for all numbers in the range the bit is 0, otherwise it might be 0
2216 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
2217 // for all numbers in the range the bit is 1, otherwise it might be 0
2221 wi_set_zero_nonzero_bits (tree type
,
2222 const wide_int
&lb
, const wide_int
&ub
,
2223 wide_int
&maybe_nonzero
,
2224 wide_int
&mustbe_nonzero
)
2226 signop sign
= TYPE_SIGN (type
);
2228 if (wi::eq_p (lb
, ub
))
2229 maybe_nonzero
= mustbe_nonzero
= lb
;
2230 else if (wi::ge_p (lb
, 0, sign
) || wi::lt_p (ub
, 0, sign
))
2232 wide_int xor_mask
= lb
^ ub
;
2233 maybe_nonzero
= lb
| ub
;
2234 mustbe_nonzero
= lb
& ub
;
2237 wide_int mask
= wi::mask (wi::floor_log2 (xor_mask
), false,
2238 maybe_nonzero
.get_precision ());
2239 maybe_nonzero
= maybe_nonzero
| mask
;
2240 mustbe_nonzero
= wi::bit_and_not (mustbe_nonzero
, mask
);
2245 maybe_nonzero
= wi::minus_one (lb
.get_precision ());
2246 mustbe_nonzero
= wi::zero (lb
.get_precision ());
2251 operator_bitwise_and::wi_fold (irange
&r
, tree type
,
2252 const wide_int
&lh_lb
,
2253 const wide_int
&lh_ub
,
2254 const wide_int
&rh_lb
,
2255 const wide_int
&rh_ub
) const
2257 if (wi_optimize_and_or (r
, BIT_AND_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
2260 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
2261 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
2262 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
2263 maybe_nonzero_lh
, mustbe_nonzero_lh
);
2264 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
2265 maybe_nonzero_rh
, mustbe_nonzero_rh
);
2267 wide_int new_lb
= mustbe_nonzero_lh
& mustbe_nonzero_rh
;
2268 wide_int new_ub
= maybe_nonzero_lh
& maybe_nonzero_rh
;
2269 signop sign
= TYPE_SIGN (type
);
2270 unsigned prec
= TYPE_PRECISION (type
);
2271 // If both input ranges contain only negative values, we can
2272 // truncate the result range maximum to the minimum of the
2273 // input range maxima.
2274 if (wi::lt_p (lh_ub
, 0, sign
) && wi::lt_p (rh_ub
, 0, sign
))
2276 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
2277 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
2279 // If either input range contains only non-negative values
2280 // we can truncate the result range maximum to the respective
2281 // maximum of the input range.
2282 if (wi::ge_p (lh_lb
, 0, sign
))
2283 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
2284 if (wi::ge_p (rh_lb
, 0, sign
))
2285 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
2286 // PR68217: In case of signed & sign-bit-CST should
2287 // result in [-INF, 0] instead of [-INF, INF].
2288 if (wi::gt_p (new_lb
, new_ub
, sign
))
2290 wide_int sign_bit
= wi::set_bit_in_zero (prec
- 1, prec
);
2292 && ((wi::eq_p (lh_lb
, lh_ub
)
2293 && !wi::cmps (lh_lb
, sign_bit
))
2294 || (wi::eq_p (rh_lb
, rh_ub
)
2295 && !wi::cmps (rh_lb
, sign_bit
))))
2297 new_lb
= wi::min_value (prec
, sign
);
2298 new_ub
= wi::zero (prec
);
2301 // If the limits got swapped around, return varying.
2302 if (wi::gt_p (new_lb
, new_ub
,sign
))
2303 r
.set_varying (type
);
2305 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
2309 set_nonzero_range_from_mask (irange
&r
, tree type
, const irange
&lhs
)
2311 if (!lhs
.contains_p (build_zero_cst (type
)))
2312 r
= range_nonzero (type
);
2314 r
.set_varying (type
);
2317 // This was shamelessly stolen from register_edge_assert_for_2 and
2318 // adjusted to work with iranges.
2321 operator_bitwise_and::simple_op1_range_solver (irange
&r
, tree type
,
2323 const irange
&op2
) const
2325 if (!op2
.singleton_p ())
2327 set_nonzero_range_from_mask (r
, type
, lhs
);
2330 unsigned int nprec
= TYPE_PRECISION (type
);
2331 wide_int cst2v
= op2
.lower_bound ();
2332 bool cst2n
= wi::neg_p (cst2v
, TYPE_SIGN (type
));
2335 sgnbit
= wi::set_bit_in_zero (nprec
- 1, nprec
);
2337 sgnbit
= wi::zero (nprec
);
2339 // Solve [lhs.lower_bound (), +INF] = x & MASK.
2341 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
2342 // maximum unsigned value is ~0. For signed comparison, if CST2
2343 // doesn't have the most significant bit set, handle it similarly. If
2344 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
2345 wide_int valv
= lhs
.lower_bound ();
2346 wide_int minv
= valv
& cst2v
, maxv
;
2347 bool we_know_nothing
= false;
2350 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
2351 minv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
2354 // If we can't determine anything on this bound, fall
2355 // through and conservatively solve for the other end point.
2356 we_know_nothing
= true;
2359 maxv
= wi::mask (nprec
- (cst2n
? 1 : 0), false, nprec
);
2360 if (we_know_nothing
)
2361 r
.set_varying (type
);
2363 r
= int_range
<1> (type
, minv
, maxv
);
2365 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
2367 // Minimum unsigned value for <= is 0 and maximum unsigned value is
2368 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
2370 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2372 // For signed comparison, if CST2 doesn't have most significant bit
2373 // set, handle it similarly. If CST2 has MSB set, the maximum is
2374 // the same and minimum is INT_MIN.
2375 valv
= lhs
.upper_bound ();
2376 minv
= valv
& cst2v
;
2381 maxv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
2384 // If we couldn't determine anything on either bound, return
2386 if (we_know_nothing
)
2394 int_range
<1> upper_bits (type
, minv
, maxv
);
2395 r
.intersect (upper_bits
);
2399 operator_bitwise_and::op1_range (irange
&r
, tree type
,
2401 const irange
&op2
) const
2403 if (types_compatible_p (type
, boolean_type_node
))
2404 return op_logical_and
.op1_range (r
, type
, lhs
, op2
);
2407 for (unsigned i
= 0; i
< lhs
.num_pairs (); ++i
)
2409 int_range_max
chunk (lhs
.type (),
2410 lhs
.lower_bound (i
),
2411 lhs
.upper_bound (i
));
2413 simple_op1_range_solver (res
, type
, chunk
, op2
);
2416 if (r
.undefined_p ())
2417 set_nonzero_range_from_mask (r
, type
, lhs
);
2422 operator_bitwise_and::op2_range (irange
&r
, tree type
,
2424 const irange
&op1
) const
2426 return operator_bitwise_and::op1_range (r
, type
, lhs
, op1
);
2430 class operator_logical_or
: public range_operator
2433 virtual bool fold_range (irange
&r
, tree type
,
2435 const irange
&rh
) const;
2436 virtual bool op1_range (irange
&r
, tree type
,
2438 const irange
&op2
) const;
2439 virtual bool op2_range (irange
&r
, tree type
,
2441 const irange
&op1
) const;
2445 operator_logical_or::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
2447 const irange
&rh
) const
2449 if (empty_range_varying (r
, type
, lh
, rh
))
2458 operator_logical_or::op1_range (irange
&r
, tree type
,
2460 const irange
&op2 ATTRIBUTE_UNUSED
) const
2462 switch (get_bool_state (r
, lhs
, type
))
2465 // A false result means both sides of the OR must be false.
2466 r
= range_false (type
);
2469 // Any other result means only one side has to be true, the
2470 // other side can be anything. so we can't be sure of any result
2472 r
= range_true_and_false (type
);
2479 operator_logical_or::op2_range (irange
&r
, tree type
,
2481 const irange
&op1
) const
2483 return operator_logical_or::op1_range (r
, type
, lhs
, op1
);
2487 class operator_bitwise_or
: public range_operator
2490 virtual bool op1_range (irange
&r
, tree type
,
2492 const irange
&op2
) const;
2493 virtual bool op2_range (irange
&r
, tree type
,
2495 const irange
&op1
) const;
2496 virtual void wi_fold (irange
&r
, tree type
,
2497 const wide_int
&lh_lb
,
2498 const wide_int
&lh_ub
,
2499 const wide_int
&rh_lb
,
2500 const wide_int
&rh_ub
) const;
2504 operator_bitwise_or::wi_fold (irange
&r
, tree type
,
2505 const wide_int
&lh_lb
,
2506 const wide_int
&lh_ub
,
2507 const wide_int
&rh_lb
,
2508 const wide_int
&rh_ub
) const
2510 if (wi_optimize_and_or (r
, BIT_IOR_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
2513 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
2514 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
2515 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
2516 maybe_nonzero_lh
, mustbe_nonzero_lh
);
2517 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
2518 maybe_nonzero_rh
, mustbe_nonzero_rh
);
2519 wide_int new_lb
= mustbe_nonzero_lh
| mustbe_nonzero_rh
;
2520 wide_int new_ub
= maybe_nonzero_lh
| maybe_nonzero_rh
;
2521 signop sign
= TYPE_SIGN (type
);
2522 // If the input ranges contain only positive values we can
2523 // truncate the minimum of the result range to the maximum
2524 // of the input range minima.
2525 if (wi::ge_p (lh_lb
, 0, sign
)
2526 && wi::ge_p (rh_lb
, 0, sign
))
2528 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
2529 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
2531 // If either input range contains only negative values
2532 // we can truncate the minimum of the result range to the
2533 // respective minimum range.
2534 if (wi::lt_p (lh_ub
, 0, sign
))
2535 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
2536 if (wi::lt_p (rh_ub
, 0, sign
))
2537 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
2538 // If the limits got swapped around, return varying.
2539 if (wi::gt_p (new_lb
, new_ub
,sign
))
2540 r
.set_varying (type
);
2542 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
2546 operator_bitwise_or::op1_range (irange
&r
, tree type
,
2548 const irange
&op2
) const
2550 // If this is really a logical wi_fold, call that.
2551 if (types_compatible_p (type
, boolean_type_node
))
2552 return op_logical_or
.op1_range (r
, type
, lhs
, op2
);
2556 tree zero
= build_zero_cst (type
);
2557 r
= int_range
<1> (zero
, zero
);
2560 r
.set_varying (type
);
2565 operator_bitwise_or::op2_range (irange
&r
, tree type
,
2567 const irange
&op1
) const
2569 return operator_bitwise_or::op1_range (r
, type
, lhs
, op1
);
2573 class operator_bitwise_xor
: public range_operator
2576 virtual void wi_fold (irange
&r
, tree type
,
2577 const wide_int
&lh_lb
,
2578 const wide_int
&lh_ub
,
2579 const wide_int
&rh_lb
,
2580 const wide_int
&rh_ub
) const;
2581 virtual bool op1_range (irange
&r
, tree type
,
2583 const irange
&op2
) const;
2584 virtual bool op2_range (irange
&r
, tree type
,
2586 const irange
&op1
) const;
2590 operator_bitwise_xor::wi_fold (irange
&r
, tree type
,
2591 const wide_int
&lh_lb
,
2592 const wide_int
&lh_ub
,
2593 const wide_int
&rh_lb
,
2594 const wide_int
&rh_ub
) const
2596 signop sign
= TYPE_SIGN (type
);
2597 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
2598 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
2599 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
2600 maybe_nonzero_lh
, mustbe_nonzero_lh
);
2601 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
2602 maybe_nonzero_rh
, mustbe_nonzero_rh
);
2604 wide_int result_zero_bits
= ((mustbe_nonzero_lh
& mustbe_nonzero_rh
)
2605 | ~(maybe_nonzero_lh
| maybe_nonzero_rh
));
2606 wide_int result_one_bits
2607 = (wi::bit_and_not (mustbe_nonzero_lh
, maybe_nonzero_rh
)
2608 | wi::bit_and_not (mustbe_nonzero_rh
, maybe_nonzero_lh
));
2609 wide_int new_ub
= ~result_zero_bits
;
2610 wide_int new_lb
= result_one_bits
;
2612 // If the range has all positive or all negative values, the result
2613 // is better than VARYING.
2614 if (wi::lt_p (new_lb
, 0, sign
) || wi::ge_p (new_ub
, 0, sign
))
2615 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
2617 r
.set_varying (type
);
2621 operator_bitwise_xor::op1_range (irange
&r
, tree type
,
2623 const irange
&op2
) const
2625 if (lhs
.undefined_p () || lhs
.varying_p ())
2630 if (types_compatible_p (type
, boolean_type_node
))
2632 switch (get_bool_state (r
, lhs
, type
))
2635 if (op2
.varying_p ())
2636 r
.set_varying (type
);
2637 else if (op2
.zero_p ())
2638 r
= range_true (type
);
2640 r
= range_false (type
);
2650 r
.set_varying (type
);
2655 operator_bitwise_xor::op2_range (irange
&r
, tree type
,
2657 const irange
&op1
) const
2659 return operator_bitwise_xor::op1_range (r
, type
, lhs
, op1
);
2662 class operator_trunc_mod
: public range_operator
2665 virtual void wi_fold (irange
&r
, tree type
,
2666 const wide_int
&lh_lb
,
2667 const wide_int
&lh_ub
,
2668 const wide_int
&rh_lb
,
2669 const wide_int
&rh_ub
) const;
2670 virtual bool op1_range (irange
&r
, tree type
,
2672 const irange
&op2
) const;
2673 virtual bool op2_range (irange
&r
, tree type
,
2675 const irange
&op1
) const;
2679 operator_trunc_mod::wi_fold (irange
&r
, tree type
,
2680 const wide_int
&lh_lb
,
2681 const wide_int
&lh_ub
,
2682 const wide_int
&rh_lb
,
2683 const wide_int
&rh_ub
) const
2685 wide_int new_lb
, new_ub
, tmp
;
2686 signop sign
= TYPE_SIGN (type
);
2687 unsigned prec
= TYPE_PRECISION (type
);
2689 // Mod 0 is undefined.
2690 if (wi_zero_p (type
, rh_lb
, rh_ub
))
2692 r
.set_varying (type
);
2696 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
2701 new_ub
= wi::smax (new_ub
, tmp
);
2704 if (sign
== UNSIGNED
)
2705 new_lb
= wi::zero (prec
);
2710 if (wi::gts_p (tmp
, 0))
2711 tmp
= wi::zero (prec
);
2712 new_lb
= wi::smax (new_lb
, tmp
);
2715 if (sign
== SIGNED
&& wi::neg_p (tmp
))
2716 tmp
= wi::zero (prec
);
2717 new_ub
= wi::min (new_ub
, tmp
, sign
);
2719 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
2723 operator_trunc_mod::op1_range (irange
&r
, tree type
,
2725 const irange
&) const
2728 signop sign
= TYPE_SIGN (type
);
2729 unsigned prec
= TYPE_PRECISION (type
);
2730 // (a % b) >= x && x > 0 , then a >= x.
2731 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
2733 r
= value_range (type
, lhs
.lower_bound (), wi::max_value (prec
, sign
));
2736 // (a % b) <= x && x < 0 , then a <= x.
2737 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
2739 r
= value_range (type
, wi::min_value (prec
, sign
), lhs
.upper_bound ());
2746 operator_trunc_mod::op2_range (irange
&r
, tree type
,
2748 const irange
&) const
2751 signop sign
= TYPE_SIGN (type
);
2752 unsigned prec
= TYPE_PRECISION (type
);
2753 // (a % b) >= x && x > 0 , then b is in ~[-x, x] for signed
2754 // or b > x for unsigned.
2755 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
2758 r
= value_range (type
, wi::neg (lhs
.lower_bound ()),
2759 lhs
.lower_bound (), VR_ANTI_RANGE
);
2760 else if (wi::lt_p (lhs
.lower_bound (), wi::max_value (prec
, sign
),
2762 r
= value_range (type
, lhs
.lower_bound () + 1,
2763 wi::max_value (prec
, sign
));
2768 // (a % b) <= x && x < 0 , then b is in ~[x, -x].
2769 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
2771 if (wi::gt_p (lhs
.upper_bound (), wi::min_value (prec
, sign
), sign
))
2772 r
= value_range (type
, lhs
.upper_bound (),
2773 wi::neg (lhs
.upper_bound ()), VR_ANTI_RANGE
);
2782 class operator_logical_not
: public range_operator
2785 virtual bool fold_range (irange
&r
, tree type
,
2787 const irange
&rh
) const;
2788 virtual bool op1_range (irange
&r
, tree type
,
2790 const irange
&op2
) const;
2793 // Folding a logical NOT, oddly enough, involves doing nothing on the
2794 // forward pass through. During the initial walk backwards, the
2795 // logical NOT reversed the desired outcome on the way back, so on the
2796 // way forward all we do is pass the range forward.
2801 // to determine the TRUE branch, walking backward
2802 // if (b_3) if ([1,1])
2803 // b_3 = !b_2 [1,1] = ![0,0]
2804 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
2805 // which is the result we are looking for.. so.. pass it through.
2808 operator_logical_not::fold_range (irange
&r
, tree type
,
2810 const irange
&rh ATTRIBUTE_UNUSED
) const
2812 if (empty_range_varying (r
, type
, lh
, rh
))
2816 if (!lh
.varying_p () && !lh
.undefined_p ())
2823 operator_logical_not::op1_range (irange
&r
,
2826 const irange
&op2
) const
2828 // Logical NOT is involutary...do it again.
2829 return fold_range (r
, type
, lhs
, op2
);
2833 class operator_bitwise_not
: public range_operator
2836 virtual bool fold_range (irange
&r
, tree type
,
2838 const irange
&rh
) const;
2839 virtual bool op1_range (irange
&r
, tree type
,
2841 const irange
&op2
) const;
2845 operator_bitwise_not::fold_range (irange
&r
, tree type
,
2847 const irange
&rh
) const
2849 if (empty_range_varying (r
, type
, lh
, rh
))
2852 if (types_compatible_p (type
, boolean_type_node
))
2853 return op_logical_not
.fold_range (r
, type
, lh
, rh
);
2855 // ~X is simply -1 - X.
2856 int_range
<1> minusone (type
, wi::minus_one (TYPE_PRECISION (type
)),
2857 wi::minus_one (TYPE_PRECISION (type
)));
2858 return range_op_handler (MINUS_EXPR
, type
)->fold_range (r
, type
, minusone
,
2863 operator_bitwise_not::op1_range (irange
&r
, tree type
,
2865 const irange
&op2
) const
2867 if (types_compatible_p (type
, boolean_type_node
))
2868 return op_logical_not
.op1_range (r
, type
, lhs
, op2
);
2870 // ~X is -1 - X and since bitwise NOT is involutary...do it again.
2871 return fold_range (r
, type
, lhs
, op2
);
2875 class operator_cst
: public range_operator
2878 virtual bool fold_range (irange
&r
, tree type
,
2880 const irange
&op2
) const;
2884 operator_cst::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
2886 const irange
&rh ATTRIBUTE_UNUSED
) const
2893 class operator_identity
: public range_operator
2896 virtual bool fold_range (irange
&r
, tree type
,
2898 const irange
&op2
) const;
2899 virtual bool op1_range (irange
&r
, tree type
,
2901 const irange
&op2
) const;
2905 operator_identity::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
2907 const irange
&rh ATTRIBUTE_UNUSED
) const
2914 operator_identity::op1_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
2916 const irange
&op2 ATTRIBUTE_UNUSED
) const
2923 class operator_unknown
: public range_operator
2926 virtual bool fold_range (irange
&r
, tree type
,
2928 const irange
&op2
) const;
2932 operator_unknown::fold_range (irange
&r
, tree type
,
2933 const irange
&lh ATTRIBUTE_UNUSED
,
2934 const irange
&rh ATTRIBUTE_UNUSED
) const
2936 r
.set_varying (type
);
2941 class operator_abs
: public range_operator
2944 virtual void wi_fold (irange
&r
, tree type
,
2945 const wide_int
&lh_lb
,
2946 const wide_int
&lh_ub
,
2947 const wide_int
&rh_lb
,
2948 const wide_int
&rh_ub
) const;
2949 virtual bool op1_range (irange
&r
, tree type
,
2951 const irange
&op2
) const;
2955 operator_abs::wi_fold (irange
&r
, tree type
,
2956 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2957 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
2958 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
2961 signop sign
= TYPE_SIGN (type
);
2962 unsigned prec
= TYPE_PRECISION (type
);
2964 // Pass through LH for the easy cases.
2965 if (sign
== UNSIGNED
|| wi::ge_p (lh_lb
, 0, sign
))
2967 r
= int_range
<1> (type
, lh_lb
, lh_ub
);
2971 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
2973 wide_int min_value
= wi::min_value (prec
, sign
);
2974 wide_int max_value
= wi::max_value (prec
, sign
);
2975 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lh_lb
, min_value
))
2977 r
.set_varying (type
);
2981 // ABS_EXPR may flip the range around, if the original range
2982 // included negative values.
2983 if (wi::eq_p (lh_lb
, min_value
))
2985 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
2986 // returned [-MIN,-MIN] so this preserves that behaviour. PR37078
2987 if (wi::eq_p (lh_ub
, min_value
))
2989 r
= int_range
<1> (type
, min_value
, min_value
);
2995 min
= wi::abs (lh_lb
);
2997 if (wi::eq_p (lh_ub
, min_value
))
3000 max
= wi::abs (lh_ub
);
3002 // If the range contains zero then we know that the minimum value in the
3003 // range will be zero.
3004 if (wi::le_p (lh_lb
, 0, sign
) && wi::ge_p (lh_ub
, 0, sign
))
3006 if (wi::gt_p (min
, max
, sign
))
3008 min
= wi::zero (prec
);
3012 // If the range was reversed, swap MIN and MAX.
3013 if (wi::gt_p (min
, max
, sign
))
3014 std::swap (min
, max
);
3017 // If the new range has its limits swapped around (MIN > MAX), then
3018 // the operation caused one of them to wrap around. The only thing
3019 // we know is that the result is positive.
3020 if (wi::gt_p (min
, max
, sign
))
3022 min
= wi::zero (prec
);
3025 r
= int_range
<1> (type
, min
, max
);
3029 operator_abs::op1_range (irange
&r
, tree type
,
3031 const irange
&op2
) const
3033 if (empty_range_varying (r
, type
, lhs
, op2
))
3035 if (TYPE_UNSIGNED (type
))
3040 // Start with the positives because negatives are an impossible result.
3041 int_range_max positives
= range_positives (type
);
3042 positives
.intersect (lhs
);
3044 // Then add the negative of each pair:
3045 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
3046 for (unsigned i
= 0; i
< positives
.num_pairs (); ++i
)
3047 r
.union_ (int_range
<1> (type
,
3048 -positives
.upper_bound (i
),
3049 -positives
.lower_bound (i
)));
3054 class operator_absu
: public range_operator
3057 virtual void wi_fold (irange
&r
, tree type
,
3058 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3059 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
3063 operator_absu::wi_fold (irange
&r
, tree type
,
3064 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3065 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
3066 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
3068 wide_int new_lb
, new_ub
;
3070 // Pass through VR0 the easy cases.
3071 if (wi::ges_p (lh_lb
, 0))
3078 new_lb
= wi::abs (lh_lb
);
3079 new_ub
= wi::abs (lh_ub
);
3081 // If the range contains zero then we know that the minimum
3082 // value in the range will be zero.
3083 if (wi::ges_p (lh_ub
, 0))
3085 if (wi::gtu_p (new_lb
, new_ub
))
3087 new_lb
= wi::zero (TYPE_PRECISION (type
));
3090 std::swap (new_lb
, new_ub
);
3093 gcc_checking_assert (TYPE_UNSIGNED (type
));
3094 r
= int_range
<1> (type
, new_lb
, new_ub
);
3098 class operator_negate
: public range_operator
3101 virtual bool fold_range (irange
&r
, tree type
,
3103 const irange
&op2
) const;
3104 virtual bool op1_range (irange
&r
, tree type
,
3106 const irange
&op2
) const;
3110 operator_negate::fold_range (irange
&r
, tree type
,
3112 const irange
&rh
) const
3114 if (empty_range_varying (r
, type
, lh
, rh
))
3116 // -X is simply 0 - X.
3117 return range_op_handler (MINUS_EXPR
, type
)->fold_range (r
, type
,
3123 operator_negate::op1_range (irange
&r
, tree type
,
3125 const irange
&op2
) const
3127 // NEGATE is involutory.
3128 return fold_range (r
, type
, lhs
, op2
);
3132 class operator_addr_expr
: public range_operator
3135 virtual bool fold_range (irange
&r
, tree type
,
3137 const irange
&op2
) const;
3138 virtual bool op1_range (irange
&r
, tree type
,
3140 const irange
&op2
) const;
3144 operator_addr_expr::fold_range (irange
&r
, tree type
,
3146 const irange
&rh
) const
3148 if (empty_range_varying (r
, type
, lh
, rh
))
3151 // Return a non-null pointer of the LHS type (passed in op2).
3153 r
= range_zero (type
);
3154 else if (!lh
.contains_p (build_zero_cst (lh
.type ())))
3155 r
= range_nonzero (type
);
3157 r
.set_varying (type
);
3162 operator_addr_expr::op1_range (irange
&r
, tree type
,
3164 const irange
&op2
) const
3166 return operator_addr_expr::fold_range (r
, type
, lhs
, op2
);
3170 class pointer_plus_operator
: public range_operator
3173 virtual void wi_fold (irange
&r
, tree type
,
3174 const wide_int
&lh_lb
,
3175 const wide_int
&lh_ub
,
3176 const wide_int
&rh_lb
,
3177 const wide_int
&rh_ub
) const;
3181 pointer_plus_operator::wi_fold (irange
&r
, tree type
,
3182 const wide_int
&lh_lb
,
3183 const wide_int
&lh_ub
,
3184 const wide_int
&rh_lb
,
3185 const wide_int
&rh_ub
) const
3187 // Check for [0,0] + const, and simply return the const.
3188 if (lh_lb
== 0 && lh_ub
== 0 && rh_lb
== rh_ub
)
3190 tree val
= wide_int_to_tree (type
, rh_lb
);
3195 // For pointer types, we are really only interested in asserting
3196 // whether the expression evaluates to non-NULL.
3198 // With -fno-delete-null-pointer-checks we need to be more
3199 // conservative. As some object might reside at address 0,
3200 // then some offset could be added to it and the same offset
3201 // subtracted again and the result would be NULL.
3203 // static int a[12]; where &a[0] is NULL and
3206 // ptr will be NULL here, even when there is POINTER_PLUS_EXPR
3207 // where the first range doesn't include zero and the second one
3208 // doesn't either. As the second operand is sizetype (unsigned),
3209 // consider all ranges where the MSB could be set as possible
3210 // subtractions where the result might be NULL.
3211 if ((!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
3212 || !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
3213 && !TYPE_OVERFLOW_WRAPS (type
)
3214 && (flag_delete_null_pointer_checks
3215 || !wi::sign_mask (rh_ub
)))
3216 r
= range_nonzero (type
);
3217 else if (lh_lb
== lh_ub
&& lh_lb
== 0
3218 && rh_lb
== rh_ub
&& rh_lb
== 0)
3219 r
= range_zero (type
);
3221 r
.set_varying (type
);
3225 class pointer_min_max_operator
: public range_operator
3228 virtual void wi_fold (irange
& r
, tree type
,
3229 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3230 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
3234 pointer_min_max_operator::wi_fold (irange
&r
, tree type
,
3235 const wide_int
&lh_lb
,
3236 const wide_int
&lh_ub
,
3237 const wide_int
&rh_lb
,
3238 const wide_int
&rh_ub
) const
3240 // For MIN/MAX expressions with pointers, we only care about
3241 // nullness. If both are non null, then the result is nonnull.
3242 // If both are null, then the result is null. Otherwise they
3244 if (!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
3245 && !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
3246 r
= range_nonzero (type
);
3247 else if (wi_zero_p (type
, lh_lb
, lh_ub
) && wi_zero_p (type
, rh_lb
, rh_ub
))
3248 r
= range_zero (type
);
3250 r
.set_varying (type
);
3254 class pointer_and_operator
: public range_operator
3257 virtual void wi_fold (irange
&r
, tree type
,
3258 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3259 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
3263 pointer_and_operator::wi_fold (irange
&r
, tree type
,
3264 const wide_int
&lh_lb
,
3265 const wide_int
&lh_ub
,
3266 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
3267 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
3269 // For pointer types, we are really only interested in asserting
3270 // whether the expression evaluates to non-NULL.
3271 if (wi_zero_p (type
, lh_lb
, lh_ub
) || wi_zero_p (type
, lh_lb
, lh_ub
))
3272 r
= range_zero (type
);
3274 r
.set_varying (type
);
3278 class pointer_or_operator
: public range_operator
3281 virtual bool op1_range (irange
&r
, tree type
,
3283 const irange
&op2
) const;
3284 virtual bool op2_range (irange
&r
, tree type
,
3286 const irange
&op1
) const;
3287 virtual void wi_fold (irange
&r
, tree type
,
3288 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3289 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
3293 pointer_or_operator::op1_range (irange
&r
, tree type
,
3295 const irange
&op2 ATTRIBUTE_UNUSED
) const
3299 tree zero
= build_zero_cst (type
);
3300 r
= int_range
<1> (zero
, zero
);
3303 r
.set_varying (type
);
3308 pointer_or_operator::op2_range (irange
&r
, tree type
,
3310 const irange
&op1
) const
3312 return pointer_or_operator::op1_range (r
, type
, lhs
, op1
);
3316 pointer_or_operator::wi_fold (irange
&r
, tree type
,
3317 const wide_int
&lh_lb
,
3318 const wide_int
&lh_ub
,
3319 const wide_int
&rh_lb
,
3320 const wide_int
&rh_ub
) const
3322 // For pointer types, we are really only interested in asserting
3323 // whether the expression evaluates to non-NULL.
3324 if (!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
3325 && !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
3326 r
= range_nonzero (type
);
3327 else if (wi_zero_p (type
, lh_lb
, lh_ub
) && wi_zero_p (type
, rh_lb
, rh_ub
))
3328 r
= range_zero (type
);
3330 r
.set_varying (type
);
3333 // This implements the range operator tables as local objects in this file.
3335 class range_op_table
3338 inline range_operator
*operator[] (enum tree_code code
);
3340 void set (enum tree_code code
, range_operator
&op
);
3342 range_operator
*m_range_tree
[MAX_TREE_CODES
];
3345 // Return a pointer to the range_operator instance, if there is one
3346 // associated with tree_code CODE.
3349 range_op_table::operator[] (enum tree_code code
)
3351 gcc_checking_assert (code
> 0 && code
< MAX_TREE_CODES
);
3352 return m_range_tree
[code
];
3355 // Add OP to the handler table for CODE.
3358 range_op_table::set (enum tree_code code
, range_operator
&op
)
3360 gcc_checking_assert (m_range_tree
[code
] == NULL
);
3361 m_range_tree
[code
] = &op
;
3364 // Instantiate a range op table for integral operations.
3366 class integral_table
: public range_op_table
3370 } integral_tree_table
;
3372 integral_table::integral_table ()
3374 set (EQ_EXPR
, op_equal
);
3375 set (NE_EXPR
, op_not_equal
);
3376 set (LT_EXPR
, op_lt
);
3377 set (LE_EXPR
, op_le
);
3378 set (GT_EXPR
, op_gt
);
3379 set (GE_EXPR
, op_ge
);
3380 set (PLUS_EXPR
, op_plus
);
3381 set (MINUS_EXPR
, op_minus
);
3382 set (MIN_EXPR
, op_min
);
3383 set (MAX_EXPR
, op_max
);
3384 set (MULT_EXPR
, op_mult
);
3385 set (TRUNC_DIV_EXPR
, op_trunc_div
);
3386 set (FLOOR_DIV_EXPR
, op_floor_div
);
3387 set (ROUND_DIV_EXPR
, op_round_div
);
3388 set (CEIL_DIV_EXPR
, op_ceil_div
);
3389 set (EXACT_DIV_EXPR
, op_exact_div
);
3390 set (LSHIFT_EXPR
, op_lshift
);
3391 set (RSHIFT_EXPR
, op_rshift
);
3392 set (NOP_EXPR
, op_convert
);
3393 set (CONVERT_EXPR
, op_convert
);
3394 set (TRUTH_AND_EXPR
, op_logical_and
);
3395 set (BIT_AND_EXPR
, op_bitwise_and
);
3396 set (TRUTH_OR_EXPR
, op_logical_or
);
3397 set (BIT_IOR_EXPR
, op_bitwise_or
);
3398 set (BIT_XOR_EXPR
, op_bitwise_xor
);
3399 set (TRUNC_MOD_EXPR
, op_trunc_mod
);
3400 set (TRUTH_NOT_EXPR
, op_logical_not
);
3401 set (BIT_NOT_EXPR
, op_bitwise_not
);
3402 set (INTEGER_CST
, op_integer_cst
);
3403 set (SSA_NAME
, op_identity
);
3404 set (PAREN_EXPR
, op_identity
);
3405 set (OBJ_TYPE_REF
, op_identity
);
3406 set (IMAGPART_EXPR
, op_unknown
);
3407 set (POINTER_DIFF_EXPR
, op_unknown
);
3408 set (ABS_EXPR
, op_abs
);
3409 set (ABSU_EXPR
, op_absu
);
3410 set (NEGATE_EXPR
, op_negate
);
3411 set (ADDR_EXPR
, op_addr
);
3414 // Instantiate a range op table for pointer operations.
3416 class pointer_table
: public range_op_table
3420 } pointer_tree_table
;
3422 pointer_table::pointer_table ()
3424 set (BIT_AND_EXPR
, op_pointer_and
);
3425 set (BIT_IOR_EXPR
, op_pointer_or
);
3426 set (MIN_EXPR
, op_ptr_min_max
);
3427 set (MAX_EXPR
, op_ptr_min_max
);
3428 set (POINTER_PLUS_EXPR
, op_pointer_plus
);
3430 set (EQ_EXPR
, op_equal
);
3431 set (NE_EXPR
, op_not_equal
);
3432 set (LT_EXPR
, op_lt
);
3433 set (LE_EXPR
, op_le
);
3434 set (GT_EXPR
, op_gt
);
3435 set (GE_EXPR
, op_ge
);
3436 set (SSA_NAME
, op_identity
);
3437 set (INTEGER_CST
, op_integer_cst
);
3438 set (ADDR_EXPR
, op_addr
);
3439 set (NOP_EXPR
, op_convert
);
3440 set (CONVERT_EXPR
, op_convert
);
3442 set (BIT_NOT_EXPR
, op_bitwise_not
);
3443 set (BIT_XOR_EXPR
, op_bitwise_xor
);
3446 // The tables are hidden and accessed via a simple extern function.
3449 range_op_handler (enum tree_code code
, tree type
)
3451 // First check if there is a pointer specialization.
3452 if (POINTER_TYPE_P (type
))
3453 return pointer_tree_table
[code
];
3454 if (INTEGRAL_TYPE_P (type
))
3455 return integral_tree_table
[code
];
3459 // Cast the range in R to TYPE.
3462 range_cast (irange
&r
, tree type
)
3464 int_range_max tmp
= r
;
3465 range_operator
*op
= range_op_handler (CONVERT_EXPR
, type
);
3466 // Call op_convert, if it fails, the result is varying.
3467 if (!op
->fold_range (r
, type
, tmp
, int_range
<1> (type
)))
3468 r
.set_varying (type
);
3472 #include "selftest.h"
3476 #define INT(N) build_int_cst (integer_type_node, (N))
3477 #define UINT(N) build_int_cstu (unsigned_type_node, (N))
3478 #define INT16(N) build_int_cst (short_integer_type_node, (N))
3479 #define UINT16(N) build_int_cstu (short_unsigned_type_node, (N))
3480 #define SCHAR(N) build_int_cst (signed_char_type_node, (N))
3481 #define UCHAR(N) build_int_cstu (unsigned_char_type_node, (N))
3484 range_op_cast_tests ()
3486 int_range
<1> r0
, r1
, r2
, rold
;
3487 r0
.set_varying (integer_type_node
);
3488 tree maxint
= wide_int_to_tree (integer_type_node
, r0
.upper_bound ());
3490 // If a range is in any way outside of the range for the converted
3491 // to range, default to the range for the new type.
3492 r0
.set_varying (short_integer_type_node
);
3493 tree minshort
= wide_int_to_tree (short_integer_type_node
, r0
.lower_bound ());
3494 tree maxshort
= wide_int_to_tree (short_integer_type_node
, r0
.upper_bound ());
3495 if (TYPE_PRECISION (TREE_TYPE (maxint
))
3496 > TYPE_PRECISION (short_integer_type_node
))
3498 r1
= int_range
<1> (integer_zero_node
, maxint
);
3499 range_cast (r1
, short_integer_type_node
);
3500 ASSERT_TRUE (r1
.lower_bound () == wi::to_wide (minshort
)
3501 && r1
.upper_bound() == wi::to_wide (maxshort
));
3504 // (unsigned char)[-5,-1] => [251,255].
3505 r0
= rold
= int_range
<1> (SCHAR (-5), SCHAR (-1));
3506 range_cast (r0
, unsigned_char_type_node
);
3507 ASSERT_TRUE (r0
== int_range
<1> (UCHAR (251), UCHAR (255)));
3508 range_cast (r0
, signed_char_type_node
);
3509 ASSERT_TRUE (r0
== rold
);
3511 // (signed char)[15, 150] => [-128,-106][15,127].
3512 r0
= rold
= int_range
<1> (UCHAR (15), UCHAR (150));
3513 range_cast (r0
, signed_char_type_node
);
3514 r1
= int_range
<1> (SCHAR (15), SCHAR (127));
3515 r2
= int_range
<1> (SCHAR (-128), SCHAR (-106));
3517 ASSERT_TRUE (r1
== r0
);
3518 range_cast (r0
, unsigned_char_type_node
);
3519 ASSERT_TRUE (r0
== rold
);
3521 // (unsigned char)[-5, 5] => [0,5][251,255].
3522 r0
= rold
= int_range
<1> (SCHAR (-5), SCHAR (5));
3523 range_cast (r0
, unsigned_char_type_node
);
3524 r1
= int_range
<1> (UCHAR (251), UCHAR (255));
3525 r2
= int_range
<1> (UCHAR (0), UCHAR (5));
3527 ASSERT_TRUE (r0
== r1
);
3528 range_cast (r0
, signed_char_type_node
);
3529 ASSERT_TRUE (r0
== rold
);
3531 // (unsigned char)[-5,5] => [0,5][251,255].
3532 r0
= int_range
<1> (INT (-5), INT (5));
3533 range_cast (r0
, unsigned_char_type_node
);
3534 r1
= int_range
<1> (UCHAR (0), UCHAR (5));
3535 r1
.union_ (int_range
<1> (UCHAR (251), UCHAR (255)));
3536 ASSERT_TRUE (r0
== r1
);
3538 // (unsigned char)[5U,1974U] => [0,255].
3539 r0
= int_range
<1> (UINT (5), UINT (1974));
3540 range_cast (r0
, unsigned_char_type_node
);
3541 ASSERT_TRUE (r0
== int_range
<1> (UCHAR (0), UCHAR (255)));
3542 range_cast (r0
, integer_type_node
);
3543 // Going to a wider range should not sign extend.
3544 ASSERT_TRUE (r0
== int_range
<1> (INT (0), INT (255)));
3546 // (unsigned char)[-350,15] => [0,255].
3547 r0
= int_range
<1> (INT (-350), INT (15));
3548 range_cast (r0
, unsigned_char_type_node
);
3549 ASSERT_TRUE (r0
== (int_range
<1>
3550 (TYPE_MIN_VALUE (unsigned_char_type_node
),
3551 TYPE_MAX_VALUE (unsigned_char_type_node
))));
3553 // Casting [-120,20] from signed char to unsigned short.
3554 // => [0, 20][0xff88, 0xffff].
3555 r0
= int_range
<1> (SCHAR (-120), SCHAR (20));
3556 range_cast (r0
, short_unsigned_type_node
);
3557 r1
= int_range
<1> (UINT16 (0), UINT16 (20));
3558 r2
= int_range
<1> (UINT16 (0xff88), UINT16 (0xffff));
3560 ASSERT_TRUE (r0
== r1
);
3561 // A truncating cast back to signed char will work because [-120, 20]
3562 // is representable in signed char.
3563 range_cast (r0
, signed_char_type_node
);
3564 ASSERT_TRUE (r0
== int_range
<1> (SCHAR (-120), SCHAR (20)));
3566 // unsigned char -> signed short
3567 // (signed short)[(unsigned char)25, (unsigned char)250]
3568 // => [(signed short)25, (signed short)250]
3569 r0
= rold
= int_range
<1> (UCHAR (25), UCHAR (250));
3570 range_cast (r0
, short_integer_type_node
);
3571 r1
= int_range
<1> (INT16 (25), INT16 (250));
3572 ASSERT_TRUE (r0
== r1
);
3573 range_cast (r0
, unsigned_char_type_node
);
3574 ASSERT_TRUE (r0
== rold
);
3576 // Test casting a wider signed [-MIN,MAX] to a nar`rower unsigned.
3577 r0
= int_range
<1> (TYPE_MIN_VALUE (long_long_integer_type_node
),
3578 TYPE_MAX_VALUE (long_long_integer_type_node
));
3579 range_cast (r0
, short_unsigned_type_node
);
3580 r1
= int_range
<1> (TYPE_MIN_VALUE (short_unsigned_type_node
),
3581 TYPE_MAX_VALUE (short_unsigned_type_node
));
3582 ASSERT_TRUE (r0
== r1
);
3584 // Casting NONZERO to a narrower type will wrap/overflow so
3585 // it's just the entire range for the narrower type.
3587 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
3588 // is outside of the range of a smaller range, return the full
3590 if (TYPE_PRECISION (integer_type_node
)
3591 > TYPE_PRECISION (short_integer_type_node
))
3593 r0
= range_nonzero (integer_type_node
);
3594 range_cast (r0
, short_integer_type_node
);
3595 r1
= int_range
<1> (TYPE_MIN_VALUE (short_integer_type_node
),
3596 TYPE_MAX_VALUE (short_integer_type_node
));
3597 ASSERT_TRUE (r0
== r1
);
3600 // Casting NONZERO from a narrower signed to a wider signed.
3602 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
3603 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
3604 r0
= range_nonzero (short_integer_type_node
);
3605 range_cast (r0
, integer_type_node
);
3606 r1
= int_range
<1> (INT (-32768), INT (-1));
3607 r2
= int_range
<1> (INT (1), INT (32767));
3609 ASSERT_TRUE (r0
== r1
);
3613 range_op_lshift_tests ()
3615 // Test that 0x808.... & 0x8.... still contains 0x8....
3616 // for a large set of numbers.
3619 tree big_type
= long_long_unsigned_type_node
;
3620 // big_num = 0x808,0000,0000,0000
3621 tree big_num
= fold_build2 (LSHIFT_EXPR
, big_type
,
3622 build_int_cst (big_type
, 0x808),
3623 build_int_cst (big_type
, 48));
3624 op_bitwise_and
.fold_range (res
, big_type
,
3625 int_range
<1> (big_type
),
3626 int_range
<1> (big_num
, big_num
));
3627 // val = 0x8,0000,0000,0000
3628 tree val
= fold_build2 (LSHIFT_EXPR
, big_type
,
3629 build_int_cst (big_type
, 0x8),
3630 build_int_cst (big_type
, 48));
3631 ASSERT_TRUE (res
.contains_p (val
));
3634 if (TYPE_PRECISION (unsigned_type_node
) > 31)
3636 // unsigned VARYING = op1 << 1 should be VARYING.
3637 int_range
<2> lhs (unsigned_type_node
);
3638 int_range
<2> shift (INT (1), INT (1));
3640 op_lshift
.op1_range (op1
, unsigned_type_node
, lhs
, shift
);
3641 ASSERT_TRUE (op1
.varying_p ());
3643 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
3644 int_range
<2> zero (UINT (0), UINT (0));
3645 op_lshift
.op1_range (op1
, unsigned_type_node
, zero
, shift
);
3646 ASSERT_TRUE (op1
.num_pairs () == 2);
3647 // Remove the [0,0] range.
3648 op1
.intersect (zero
);
3649 ASSERT_TRUE (op1
.num_pairs () == 1);
3650 // op1 << 1 should be [0x8000,0x8000] << 1,
3651 // which should result in [0,0].
3652 int_range_max result
;
3653 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
3654 ASSERT_TRUE (result
== zero
);
3656 // signed VARYING = op1 << 1 should be VARYING.
3657 if (TYPE_PRECISION (integer_type_node
) > 31)
3659 // unsigned VARYING = op1 << 1 hould be VARYING.
3660 int_range
<2> lhs (integer_type_node
);
3661 int_range
<2> shift (INT (1), INT (1));
3663 op_lshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
3664 ASSERT_TRUE (op1
.varying_p ());
3666 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
3667 int_range
<2> zero (INT (0), INT (0));
3668 op_lshift
.op1_range (op1
, integer_type_node
, zero
, shift
);
3669 ASSERT_TRUE (op1
.num_pairs () == 2);
3670 // Remove the [0,0] range.
3671 op1
.intersect (zero
);
3672 ASSERT_TRUE (op1
.num_pairs () == 1);
3673 // op1 << 1 shuould be [0x8000,0x8000] << 1,
3674 // which should result in [0,0].
3675 int_range_max result
;
3676 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
3677 ASSERT_TRUE (result
== zero
);
3682 range_op_rshift_tests ()
3684 // unsigned: [3, MAX] = OP1 >> 1
3686 int_range_max
lhs (build_int_cst (unsigned_type_node
, 3),
3687 TYPE_MAX_VALUE (unsigned_type_node
));
3688 int_range_max
one (build_one_cst (unsigned_type_node
),
3689 build_one_cst (unsigned_type_node
));
3691 op_rshift
.op1_range (op1
, unsigned_type_node
, lhs
, one
);
3692 ASSERT_FALSE (op1
.contains_p (UINT (3)));
3695 // signed: [3, MAX] = OP1 >> 1
3697 int_range_max
lhs (INT (3), TYPE_MAX_VALUE (integer_type_node
));
3698 int_range_max
one (INT (1), INT (1));
3700 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
3701 ASSERT_FALSE (op1
.contains_p (INT (-2)));
3704 // This is impossible, so OP1 should be [].
3705 // signed: [MIN, MIN] = OP1 >> 1
3707 int_range_max
lhs (TYPE_MIN_VALUE (integer_type_node
),
3708 TYPE_MIN_VALUE (integer_type_node
));
3709 int_range_max
one (INT (1), INT (1));
3711 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
3712 ASSERT_TRUE (op1
.undefined_p ());
3715 // signed: ~[-1] = OP1 >> 31
3716 if (TYPE_PRECISION (integer_type_node
) > 31)
3718 int_range_max
lhs (INT (-1), INT (-1), VR_ANTI_RANGE
);
3719 int_range_max
shift (INT (31), INT (31));
3721 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
3722 int_range_max negatives
= range_negatives (integer_type_node
);
3723 negatives
.intersect (op1
);
3724 ASSERT_TRUE (negatives
.undefined_p ());
3729 range_op_bitwise_and_tests ()
3732 tree min
= vrp_val_min (integer_type_node
);
3733 tree max
= vrp_val_max (integer_type_node
);
3734 tree tiny
= fold_build2 (PLUS_EXPR
, integer_type_node
, min
,
3735 build_one_cst (integer_type_node
));
3736 int_range_max
i1 (tiny
, max
);
3737 int_range_max
i2 (build_int_cst (integer_type_node
, 255),
3738 build_int_cst (integer_type_node
, 255));
3740 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
3741 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
3742 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
3744 // VARYING = OP1 & 255: OP1 is VARYING
3745 i1
= int_range
<1> (integer_type_node
);
3746 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
3747 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
3753 range_op_rshift_tests ();
3754 range_op_lshift_tests ();
3755 range_op_bitwise_and_tests ();
3756 range_op_cast_tests ();
3759 } // namespace selftest
3761 #endif // CHECKING_P