/* Code for range operators.
   Copyright (C) 2017-2023 Free Software Foundation, Inc.
   Contributed by Andrew MacLeod <amacleod@redhat.com>
   and Aldy Hernandez <aldyh@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-codes.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimple-fold.h"
#include "gimple-walk.h"
#include "value-relation.h"
#include "tree-ssa-ccp.h"
// Convert irange bitmasks into a VALUE MASK pair suitable for calling CCP.

static void
irange_to_masked_value (const irange &r, widest_int &value, widest_int &mask)
{
  if (r.singleton_p ())
    {
      mask = 0;
      value = widest_int::from (r.lower_bound (), TYPE_SIGN (r.type ()));
    }
  else
    {
      mask = widest_int::from (r.get_nonzero_bits (), TYPE_SIGN (r.type ()));
      value = 0;
    }
}
// Update the known bitmasks in R when applying the operation CODE to
// LH and RH.

static void
update_known_bitmask (irange &r, tree_code code,
		      const irange &lh, const irange &rh)
{
  if (r.undefined_p () || lh.undefined_p () || rh.undefined_p ())
    return;

  widest_int value, mask, lh_mask, rh_mask, lh_value, rh_value;
  tree type = r.type ();
  signop sign = TYPE_SIGN (type);
  int prec = TYPE_PRECISION (type);
  signop lh_sign = TYPE_SIGN (lh.type ());
  signop rh_sign = TYPE_SIGN (rh.type ());
  int lh_prec = TYPE_PRECISION (lh.type ());
  int rh_prec = TYPE_PRECISION (rh.type ());

  irange_to_masked_value (lh, lh_value, lh_mask);
  irange_to_masked_value (rh, rh_value, rh_mask);
  bit_value_binop (code, sign, prec, &value, &mask,
		   lh_sign, lh_prec, lh_value, lh_mask,
		   rh_sign, rh_prec, rh_value, rh_mask);
  r.set_nonzero_bits (value | mask);
}
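
// Worked example (added for illustration, not from the original
// sources): for [4,4] * [2,2] both operands are singletons, so
// bit_value_binop is handed constant VALUE/MASK pairs and the
// result's known bits collapse to the exact product, i.e. the
// nonzero bits become 0x8 instead of staying varying.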
// Return the upper limit for a type.

static inline wide_int
max_limit (const_tree type)
{
  return wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
}

// Return the lower limit for a type.

static inline wide_int
min_limit (const_tree type)
{
  return wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
}
// Return false if shifting by OP is undefined behavior.  Otherwise, return
// true and the range it is to be shifted by.  This allows trimming out of
// undefined ranges, leaving only valid ranges if there are any.

static inline bool
get_shift_range (irange &r, tree type, const irange &op)
{
  if (op.undefined_p ())
    return false;

  // Build valid range and intersect it with the shift range.
  r = value_range (build_int_cst_type (op.type (), 0),
		   build_int_cst_type (op.type (), TYPE_PRECISION (type) - 1));
  r.intersect (op);

  // If there are no valid ranges in the shift range, return false.
  if (r.undefined_p ())
    return false;
  return true;
}
// Return TRUE if 0 is within [WMIN, WMAX].

static inline bool
wi_includes_zero_p (tree type, const wide_int &wmin, const wide_int &wmax)
{
  signop sign = TYPE_SIGN (type);
  return wi::le_p (wmin, 0, sign) && wi::ge_p (wmax, 0, sign);
}

// Return TRUE if [WMIN, WMAX] is the singleton 0.

static inline bool
wi_zero_p (tree type, const wide_int &wmin, const wide_int &wmax)
{
  unsigned prec = TYPE_PRECISION (type);
  return wmin == wmax && wi::eq_p (wmin, wi::zero (prec));
}
// Default wide_int fold operation returns [MIN, MAX].

void
range_operator::wi_fold (irange &r, tree type,
			 const wide_int &lh_lb ATTRIBUTE_UNUSED,
			 const wide_int &lh_ub ATTRIBUTE_UNUSED,
			 const wide_int &rh_lb ATTRIBUTE_UNUSED,
			 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
{
  gcc_checking_assert (r.supports_type_p (type));
  r.set_varying (type);
}
// Call wi_fold, except further split small subranges into constants.
// This can provide better precision.  For something like 8 << [0,1],
// instead of [8, 16] we will produce [8,8][16,16].

void
range_operator::wi_fold_in_parts (irange &r, tree type,
				  const wide_int &lh_lb,
				  const wide_int &lh_ub,
				  const wide_int &rh_lb,
				  const wide_int &rh_ub) const
{
  int_range_max tmp;
  widest_int rh_range = wi::sub (widest_int::from (rh_ub, TYPE_SIGN (type)),
				 widest_int::from (rh_lb, TYPE_SIGN (type)));
  widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
				 widest_int::from (lh_lb, TYPE_SIGN (type)));
  // If there are 2, 3, or 4 values in the RH range, do them separately.
  // Call wi_fold_in_parts to check the RH side.
  if (rh_range > 0 && rh_range < 4)
    {
      wi_fold_in_parts (r, type, lh_lb, lh_ub, rh_lb, rh_lb);
      if (rh_range > 1)
	{
	  wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 1, rh_lb + 1);
	  r.union_ (tmp);
	  if (rh_range == 3)
	    {
	      wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 2, rh_lb + 2);
	      r.union_ (tmp);
	    }
	}
      wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_ub, rh_ub);
      r.union_ (tmp);
    }
  // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
  // The RH side has been checked, so no recursion needed.
  else if (lh_range > 0 && lh_range < 4)
    {
      wi_fold (r, type, lh_lb, lh_lb, rh_lb, rh_ub);
      if (lh_range > 1)
	{
	  wi_fold (tmp, type, lh_lb + 1, lh_lb + 1, rh_lb, rh_ub);
	  r.union_ (tmp);
	  if (lh_range == 3)
	    {
	      wi_fold (tmp, type, lh_lb + 2, lh_lb + 2, rh_lb, rh_ub);
	      r.union_ (tmp);
	    }
	}
      wi_fold (tmp, type, lh_ub, lh_ub, rh_lb, rh_ub);
      r.union_ (tmp);
    }
  // Otherwise just call wi_fold.
  else
    wi_fold (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
}
// The default for fold is to break all ranges into sub-ranges and
// invoke the wi_fold method on each sub-range pair.

bool
range_operator::fold_range (irange &r, tree type,
			    const irange &lh,
			    const irange &rh,
			    relation_trio trio) const
{
  gcc_checking_assert (r.supports_type_p (type));
  if (empty_range_varying (r, type, lh, rh))
    return true;

  relation_kind rel = trio.op1_op2 ();
  unsigned num_lh = lh.num_pairs ();
  unsigned num_rh = rh.num_pairs ();

  // If both ranges are single pairs, fold directly into the result range.
  // If the number of subranges grows too high, produce a summary result as the
  // loop becomes exponential with little benefit.  See PR 103821.
  if ((num_lh == 1 && num_rh == 1) || num_lh * num_rh > 12)
    {
      wi_fold_in_parts (r, type, lh.lower_bound (), lh.upper_bound (),
			rh.lower_bound (), rh.upper_bound ());
      op1_op2_relation_effect (r, type, lh, rh, rel);
      update_known_bitmask (r, m_code, lh, rh);
      return true;
    }

  int_range_max tmp;
  r.set_undefined ();
  for (unsigned x = 0; x < num_lh; ++x)
    for (unsigned y = 0; y < num_rh; ++y)
      {
	wide_int lh_lb = lh.lower_bound (x);
	wide_int lh_ub = lh.upper_bound (x);
	wide_int rh_lb = rh.lower_bound (y);
	wide_int rh_ub = rh.upper_bound (y);
	wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb, rh_ub);
	r.union_ (tmp);
	if (r.varying_p ())
	  {
	    op1_op2_relation_effect (r, type, lh, rh, rel);
	    update_known_bitmask (r, m_code, lh, rh);
	    return true;
	  }
      }
  op1_op2_relation_effect (r, type, lh, rh, rel);
  update_known_bitmask (r, m_code, lh, rh);
  return true;
}
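
// Worked example (added for illustration): folding [1,2][10,11] + [5,5]
// visits two subrange pairs, wi_fold_in_parts yields [6,7] and [15,16],
// and their union [6,7][15,16] is returned rather than the summary
// range [6,16].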
// The default for op1_range is to return false.

bool
range_operator::op1_range (irange &r ATTRIBUTE_UNUSED,
			   tree type ATTRIBUTE_UNUSED,
			   const irange &lhs ATTRIBUTE_UNUSED,
			   const irange &op2 ATTRIBUTE_UNUSED,
			   relation_trio) const
{
  return false;
}

// The default for op2_range is to return false.

bool
range_operator::op2_range (irange &r ATTRIBUTE_UNUSED,
			   tree type ATTRIBUTE_UNUSED,
			   const irange &lhs ATTRIBUTE_UNUSED,
			   const irange &op1 ATTRIBUTE_UNUSED,
			   relation_trio) const
{
  return false;
}

// The default relation routines return VREL_VARYING.

relation_kind
range_operator::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
				  const irange &op1 ATTRIBUTE_UNUSED,
				  const irange &op2 ATTRIBUTE_UNUSED,
				  relation_kind rel ATTRIBUTE_UNUSED) const
{
  return VREL_VARYING;
}

relation_kind
range_operator::lhs_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
				  const irange &op1 ATTRIBUTE_UNUSED,
				  const irange &op2 ATTRIBUTE_UNUSED,
				  relation_kind rel ATTRIBUTE_UNUSED) const
{
  return VREL_VARYING;
}

relation_kind
range_operator::op1_op2_relation (const irange &lhs ATTRIBUTE_UNUSED) const
{
  return VREL_VARYING;
}

// Default is no relation affects the LHS.

bool
range_operator::op1_op2_relation_effect (irange &lhs_range ATTRIBUTE_UNUSED,
					 tree type ATTRIBUTE_UNUSED,
					 const irange &op1_range ATTRIBUTE_UNUSED,
					 const irange &op2_range ATTRIBUTE_UNUSED,
					 relation_kind rel ATTRIBUTE_UNUSED) const
{
  return false;
}
// Create and return a range from a pair of wide-ints that are known
// to have overflowed (or underflowed).

static void
value_range_from_overflowed_bounds (irange &r, tree type,
				    const wide_int &wmin,
				    const wide_int &wmax)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  wide_int tmin = wide_int::from (wmin, prec, sgn);
  wide_int tmax = wide_int::from (wmax, prec, sgn);

  bool covers = false;
  wide_int tem = tmin;
  tmin = tmax + 1;
  if (wi::cmp (tmin, tmax, sgn) < 0)
    covers = true;
  tmax = tem - 1;
  if (wi::cmp (tmax, tem, sgn) > 0)
    covers = true;

  // If the anti-range would cover nothing, drop to varying.
  // Likewise if the anti-range bounds are outside of the types
  // values.
  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
    r.set_varying (type);
  else
    {
      tree tree_min = wide_int_to_tree (type, tmin);
      tree tree_max = wide_int_to_tree (type, tmax);
      r.set (tree_min, tree_max, VR_ANTI_RANGE);
    }
}
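
// Worked example (added for illustration): for signed 8-bit bounds
// that wrapped from 120 to -126 (i.e. 120..130 before wrapping), the
// code above computes tmin = -125 and tmax = 119 and returns the
// anti-range ~[-125, 119], which is exactly [-128,-126][120,127].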
// Create and return a range from a pair of wide-ints.  MIN_OVF and
// MAX_OVF describe any overflow that might have occurred while
// calculating WMIN and WMAX respectively.

static void
value_range_with_overflow (irange &r, tree type,
			   const wide_int &wmin, const wide_int &wmax,
			   wi::overflow_type min_ovf = wi::OVF_NONE,
			   wi::overflow_type max_ovf = wi::OVF_NONE)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);
  const bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);

  // For one bit precision if max != min, then the range covers all
  // values.
  if (prec == 1 && wi::ne_p (wmax, wmin))
    {
      r.set_varying (type);
      return;
    }

  if (overflow_wraps)
    {
      // If overflow wraps, truncate the values and adjust the range,
      // kind, and bounds appropriately.
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
	{
	  wide_int tmin = wide_int::from (wmin, prec, sgn);
	  wide_int tmax = wide_int::from (wmax, prec, sgn);
	  // If the limits are swapped, we wrapped around and cover
	  // the entire range.
	  if (wi::gt_p (tmin, tmax, sgn))
	    r.set_varying (type);
	  else
	    // No overflow or both overflow or underflow.  The range
	    // kind stays normal.
	    r.set (wide_int_to_tree (type, tmin),
		   wide_int_to_tree (type, tmax));
	  return;
	}

      if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
	  || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
	value_range_from_overflowed_bounds (r, type, wmin, wmax);
      else
	// Other underflow and/or overflow, drop to VR_VARYING.
	r.set_varying (type);
    }
  else
    {
      // If both bounds either underflowed or overflowed, then the result
      // is undefined.
      if ((min_ovf == wi::OVF_OVERFLOW && max_ovf == wi::OVF_OVERFLOW)
	  || (min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_UNDERFLOW))
	{
	  r.set_undefined ();
	  return;
	}

      // If overflow does not wrap, saturate to [MIN, MAX].
      wide_int new_lb, new_ub;
      if (min_ovf == wi::OVF_UNDERFLOW)
	new_lb = wi::min_value (prec, sgn);
      else if (min_ovf == wi::OVF_OVERFLOW)
	new_lb = wi::max_value (prec, sgn);
      else
	new_lb = wmin;

      if (max_ovf == wi::OVF_UNDERFLOW)
	new_ub = wi::min_value (prec, sgn);
      else if (max_ovf == wi::OVF_OVERFLOW)
	new_ub = wi::max_value (prec, sgn);
      else
	new_ub = wmax;

      r.set (wide_int_to_tree (type, new_lb),
	     wide_int_to_tree (type, new_ub));
    }
}
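
// Worked example (added for illustration): with 8-bit unsigned
// wrapping math, bounds [250, 260] where only the upper bound
// overflowed go through value_range_from_overflowed_bounds and come
// back as ~[5, 249]; if overflow were undefined instead, the result
// would saturate to [250, 255].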
// Create and return a range from a pair of wide-ints.  Canonicalize
// the case where the bounds are swapped.  In which case, we transform
// [10,5] into [MIN,5][10,MAX].

static void
create_possibly_reversed_range (irange &r, tree type,
				const wide_int &new_lb, const wide_int &new_ub)
{
  signop s = TYPE_SIGN (type);
  // If the bounds are swapped, treat the result as if an overflow occurred.
  if (wi::gt_p (new_lb, new_ub, s))
    value_range_from_overflowed_bounds (r, type, new_lb, new_ub);
  else
    // Otherwise it's just a normal range.
    r.set (wide_int_to_tree (type, new_lb), wide_int_to_tree (type, new_ub));
}
// Return the summary information about boolean range LHS.  If EMPTY/FULL,
// return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.

bool_range_state
get_bool_state (vrange &r, const vrange &lhs, tree val_type)
{
  // If there is no result, then this is unexecutable.
  if (lhs.undefined_p ())
    {
      r.set_undefined ();
      return BRS_EMPTY;
    }

  if (lhs.zero_p ())
    return BRS_FALSE;

  // For TRUE, we can't just test for [1,1] because Ada can have
  // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
  if (lhs.contains_p (build_zero_cst (lhs.type ())))
    {
      r.set_varying (val_type);
      return BRS_FULL;
    }

  return BRS_TRUE;
}
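
// Note (added for illustration): callers such as the relational
// op1_range routines switch on this value; an LHS of [0,0] yields
// BRS_FALSE and the caller then builds the range implied by the
// negated condition, while a varying boolean LHS yields BRS_FULL.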
class operator_equal : public range_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
  using range_operator::op2_range;
public:
  virtual bool fold_range (irange &r, tree type,
			   const irange &op1, const irange &op2,
			   relation_trio = TRIO_VARYING) const;
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio = TRIO_VARYING) const;
  virtual bool op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio = TRIO_VARYING) const;
  virtual relation_kind op1_op2_relation (const irange &lhs) const;
};

// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
equal_op1_op2_relation (const irange &lhs)
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 == op2 indicates NE_EXPR.
  if (lhs.zero_p ())
    return VREL_NE;

  // TRUE = op1 == op2 indicates EQ_EXPR.
  if (!lhs.contains_p (build_zero_cst (lhs.type ())))
    return VREL_EQ;
  return VREL_VARYING;
}

relation_kind
operator_equal::op1_op2_relation (const irange &lhs) const
{
  return equal_op1_op2_relation (lhs);
}
bool
operator_equal::fold_range (irange &r, tree type,
			    const irange &op1, const irange &op2,
			    relation_trio rel) const
{
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_EQ))
    return true;

  // We can be sure the values are always equal or not if both ranges
  // consist of a single value, and then compare them.
  if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
      && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
    {
      if (wi::eq_p (op1.lower_bound (), op2.upper_bound ()))
	r = range_true (type);
      else
	r = range_false (type);
    }
  else
    {
      // If ranges do not intersect, we know the range is not equal,
      // otherwise we don't know anything for sure.
      int_range_max tmp = op1;
      tmp.intersect (op2);
      if (tmp.undefined_p ())
	r = range_false (type);
      else
	r = range_true_and_false (type);
    }
  return true;
}
bool
operator_equal::op1_range (irange &r, tree type,
			   const irange &lhs, const irange &op2,
			   relation_trio) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      // If it's true, the result is the same as OP2.
      r = op2;
      break;

    case BRS_FALSE:
      // If the result is false, the only time we know anything is
      // if OP2 is a constant.
      if (wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
	{
	  r = op2;
	  r.invert ();
	}
      else
	r.set_varying (type);
      break;

    default:
      break;
    }
  return true;
}

bool
operator_equal::op2_range (irange &r, tree type,
			   const irange &lhs, const irange &op1,
			   relation_trio rel) const
{
  return operator_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
}
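
// Worked example (added for illustration): for (x == 5) known to be
// false, get_bool_state returns BRS_FALSE and op2 is the singleton
// [5,5], so x is refined to ~[5,5]; if op2 were [5,6] nothing can be
// deduced and x remains varying.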
class operator_not_equal : public range_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
  using range_operator::op2_range;
public:
  virtual bool fold_range (irange &r, tree type,
			   const irange &op1, const irange &op2,
			   relation_trio = TRIO_VARYING) const;
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio = TRIO_VARYING) const;
  virtual bool op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio = TRIO_VARYING) const;
  virtual relation_kind op1_op2_relation (const irange &lhs) const;
};

// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
not_equal_op1_op2_relation (const irange &lhs)
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 != op2 indicates EQ_EXPR.
  if (lhs.zero_p ())
    return VREL_EQ;

  // TRUE = op1 != op2 indicates NE_EXPR.
  if (!lhs.contains_p (build_zero_cst (lhs.type ())))
    return VREL_NE;
  return VREL_VARYING;
}

relation_kind
operator_not_equal::op1_op2_relation (const irange &lhs) const
{
  return not_equal_op1_op2_relation (lhs);
}
bool
operator_not_equal::fold_range (irange &r, tree type,
				const irange &op1, const irange &op2,
				relation_trio rel) const
{
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_NE))
    return true;

  // We can be sure the values are always equal or not if both ranges
  // consist of a single value, and then compare them.
  if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
      && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
    {
      if (wi::ne_p (op1.lower_bound (), op2.upper_bound ()))
	r = range_true (type);
      else
	r = range_false (type);
    }
  else
    {
      // If ranges do not intersect, we know the range is not equal,
      // otherwise we don't know anything for sure.
      int_range_max tmp = op1;
      tmp.intersect (op2);
      if (tmp.undefined_p ())
	r = range_true (type);
      else
	r = range_true_and_false (type);
    }
  return true;
}
bool
operator_not_equal::op1_range (irange &r, tree type,
			       const irange &lhs, const irange &op2,
			       relation_trio) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      // If the result is true, the only time we know anything is if
      // OP2 is a constant.
      if (wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
	{
	  r = op2;
	  r.invert ();
	}
      else
	r.set_varying (type);
      break;

    case BRS_FALSE:
      // If it's false, the result is the same as OP2.
      r = op2;
      break;

    default:
      break;
    }
  return true;
}

bool
operator_not_equal::op2_range (irange &r, tree type,
			       const irange &lhs, const irange &op1,
			       relation_trio rel) const
{
  return operator_not_equal::op1_range (r, type, lhs, op1,
					rel.swap_op1_op2 ());
}
// (X < VAL) produces the range of [MIN, VAL - 1].

static void
build_lt (irange &r, tree type, const wide_int &val)
{
  wi::overflow_type ov;
  wide_int lim;
  signop sgn = TYPE_SIGN (type);

  // Signed 1 bit cannot represent 1 for subtraction.
  if (sgn == SIGNED)
    lim = wi::add (val, -1, sgn, &ov);
  else
    lim = wi::sub (val, 1, sgn, &ov);

  // If val - 1 underflows, check if X < MIN, which is an empty range.
  if (ov)
    r.set_undefined ();
  else
    r = int_range<1> (type, min_limit (type), lim);
}

// (X <= VAL) produces the range of [MIN, VAL].

static void
build_le (irange &r, tree type, const wide_int &val)
{
  r = int_range<1> (type, min_limit (type), val);
}

// (X > VAL) produces the range of [VAL + 1, MAX].

static void
build_gt (irange &r, tree type, const wide_int &val)
{
  wi::overflow_type ov;
  wide_int lim;
  signop sgn = TYPE_SIGN (type);

  // Signed 1 bit cannot represent 1 for addition.
  if (sgn == SIGNED)
    lim = wi::sub (val, -1, sgn, &ov);
  else
    lim = wi::add (val, 1, sgn, &ov);
  // If val + 1 overflows, check if X > MAX, which is an empty range.
  if (ov)
    r.set_undefined ();
  else
    r = int_range<1> (type, lim, max_limit (type));
}

// (X >= VAL) produces the range of [VAL, MAX].

static void
build_ge (irange &r, tree type, const wide_int &val)
{
  r = int_range<1> (type, val, max_limit (type));
}
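
// Worked examples (added for illustration): for unsigned char,
// build_le (r, type, 7) yields [0,7] and build_gt (r, type, 7) yields
// [8,255]; build_lt with VAL 0 and build_gt with VAL 255 both produce
// UNDEFINED, since nothing lies beyond the type limits.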
class operator_lt : public range_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
  using range_operator::op2_range;
public:
  virtual bool fold_range (irange &r, tree type,
			   const irange &op1, const irange &op2,
			   relation_trio = TRIO_VARYING) const;
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio = TRIO_VARYING) const;
  virtual bool op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio = TRIO_VARYING) const;
  virtual relation_kind op1_op2_relation (const irange &lhs) const;
};

// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
lt_op1_op2_relation (const irange &lhs)
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 < op2 indicates GE_EXPR.
  if (lhs.zero_p ())
    return VREL_GE;

  // TRUE = op1 < op2 indicates LT_EXPR.
  if (!lhs.contains_p (build_zero_cst (lhs.type ())))
    return VREL_LT;
  return VREL_VARYING;
}

relation_kind
operator_lt::op1_op2_relation (const irange &lhs) const
{
  return lt_op1_op2_relation (lhs);
}
bool
operator_lt::fold_range (irange &r, tree type,
			 const irange &op1, const irange &op2,
			 relation_trio rel) const
{
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_LT))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  if (wi::lt_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_true (type);
  else if (!wi::lt_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_false (type);
  // Use nonzero bits to determine if < 0 is false.
  else if (op2.zero_p () && !wi::neg_p (op1.get_nonzero_bits (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}
bool
operator_lt::op1_range (irange &r, tree type,
			const irange &lhs, const irange &op2,
			relation_trio) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_lt (r, type, op2.upper_bound ());
      break;

    case BRS_FALSE:
      build_ge (r, type, op2.lower_bound ());
      break;

    default:
      break;
    }
  return true;
}

bool
operator_lt::op2_range (irange &r, tree type,
			const irange &lhs, const irange &op1,
			relation_trio) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_gt (r, type, op1.lower_bound ());
      break;

    case BRS_FALSE:
      build_le (r, type, op1.upper_bound ());
      break;

    default:
      break;
    }
  return true;
}
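
// Worked example (added for illustration): if (x < y) is known TRUE
// and y is [0,10], op1_range narrows x to [MIN, 9] via build_lt on
// y's upper bound; if the test is known FALSE, x becomes [0, MAX] via
// build_ge on y's lower bound.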
class operator_le : public range_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
  using range_operator::op2_range;
public:
  virtual bool fold_range (irange &r, tree type,
			   const irange &op1, const irange &op2,
			   relation_trio = TRIO_VARYING) const;
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio = TRIO_VARYING) const;
  virtual bool op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio = TRIO_VARYING) const;
  virtual relation_kind op1_op2_relation (const irange &lhs) const;
};

// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
le_op1_op2_relation (const irange &lhs)
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 <= op2 indicates GT_EXPR.
  if (lhs.zero_p ())
    return VREL_GT;

  // TRUE = op1 <= op2 indicates LE_EXPR.
  if (!lhs.contains_p (build_zero_cst (lhs.type ())))
    return VREL_LE;
  return VREL_VARYING;
}

relation_kind
operator_le::op1_op2_relation (const irange &lhs) const
{
  return le_op1_op2_relation (lhs);
}
bool
operator_le::fold_range (irange &r, tree type,
			 const irange &op1, const irange &op2,
			 relation_trio rel) const
{
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_LE))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  if (wi::le_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_true (type);
  else if (!wi::le_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}
bool
operator_le::op1_range (irange &r, tree type,
			const irange &lhs, const irange &op2,
			relation_trio) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_le (r, type, op2.upper_bound ());
      break;

    case BRS_FALSE:
      build_gt (r, type, op2.lower_bound ());
      break;

    default:
      break;
    }
  return true;
}

bool
operator_le::op2_range (irange &r, tree type,
			const irange &lhs, const irange &op1,
			relation_trio) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_ge (r, type, op1.lower_bound ());
      break;

    case BRS_FALSE:
      build_lt (r, type, op1.upper_bound ());
      break;

    default:
      break;
    }
  return true;
}
class operator_gt : public range_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
  using range_operator::op2_range;
public:
  virtual bool fold_range (irange &r, tree type,
			   const irange &op1, const irange &op2,
			   relation_trio = TRIO_VARYING) const;
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio = TRIO_VARYING) const;
  virtual bool op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio = TRIO_VARYING) const;
  virtual relation_kind op1_op2_relation (const irange &lhs) const;
};

// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
gt_op1_op2_relation (const irange &lhs)
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 > op2 indicates LE_EXPR.
  if (lhs.zero_p ())
    return VREL_LE;

  // TRUE = op1 > op2 indicates GT_EXPR.
  if (!lhs.contains_p (build_zero_cst (lhs.type ())))
    return VREL_GT;
  return VREL_VARYING;
}

relation_kind
operator_gt::op1_op2_relation (const irange &lhs) const
{
  return gt_op1_op2_relation (lhs);
}
bool
operator_gt::fold_range (irange &r, tree type,
			 const irange &op1, const irange &op2,
			 relation_trio rel) const
{
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_GT))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  if (wi::gt_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_true (type);
  else if (!wi::gt_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}
bool
operator_gt::op1_range (irange &r, tree type,
			const irange &lhs, const irange &op2,
			relation_trio) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_gt (r, type, op2.lower_bound ());
      break;

    case BRS_FALSE:
      build_le (r, type, op2.upper_bound ());
      break;

    default:
      break;
    }
  return true;
}

bool
operator_gt::op2_range (irange &r, tree type,
			const irange &lhs, const irange &op1,
			relation_trio) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_lt (r, type, op1.upper_bound ());
      break;

    case BRS_FALSE:
      build_ge (r, type, op1.lower_bound ());
      break;

    default:
      break;
    }
  return true;
}
class operator_ge : public range_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
  using range_operator::op2_range;
public:
  virtual bool fold_range (irange &r, tree type,
			   const irange &op1, const irange &op2,
			   relation_trio = TRIO_VARYING) const;
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio = TRIO_VARYING) const;
  virtual bool op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio = TRIO_VARYING) const;
  virtual relation_kind op1_op2_relation (const irange &lhs) const;
};

// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
ge_op1_op2_relation (const irange &lhs)
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 >= op2 indicates LT_EXPR.
  if (lhs.zero_p ())
    return VREL_LT;

  // TRUE = op1 >= op2 indicates GE_EXPR.
  if (!lhs.contains_p (build_zero_cst (lhs.type ())))
    return VREL_GE;
  return VREL_VARYING;
}

relation_kind
operator_ge::op1_op2_relation (const irange &lhs) const
{
  return ge_op1_op2_relation (lhs);
}
bool
operator_ge::fold_range (irange &r, tree type,
			 const irange &op1, const irange &op2,
			 relation_trio rel) const
{
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_GE))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  if (wi::ge_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_true (type);
  else if (!wi::ge_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}
bool
operator_ge::op1_range (irange &r, tree type,
			const irange &lhs, const irange &op2,
			relation_trio) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_ge (r, type, op2.lower_bound ());
      break;

    case BRS_FALSE:
      build_lt (r, type, op2.upper_bound ());
      break;

    default:
      break;
    }
  return true;
}

bool
operator_ge::op2_range (irange &r, tree type,
			const irange &lhs, const irange &op1,
			relation_trio) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_le (r, type, op1.upper_bound ());
      break;

    case BRS_FALSE:
      build_gt (r, type, op1.lower_bound ());
      break;

    default:
      break;
    }
  return true;
}
class operator_plus : public range_operator
{
  using range_operator::op1_range;
  using range_operator::op2_range;
  using range_operator::lhs_op1_relation;
  using range_operator::lhs_op2_relation;
public:
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio) const;
  virtual bool op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio) const;
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
  virtual relation_kind lhs_op1_relation (const irange &lhs,
					  const irange &op1,
					  const irange &op2,
					  relation_kind rel) const;
  virtual relation_kind lhs_op2_relation (const irange &lhs,
					  const irange &op1,
					  const irange &op2,
					  relation_kind rel) const;
};
// Check to see if the range of OP2 indicates anything about the relation
// between LHS and OP1.

relation_kind
operator_plus::lhs_op1_relation (const irange &lhs,
				 const irange &op1,
				 const irange &op2,
				 relation_kind) const
{
  if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
    return VREL_VARYING;

  tree type = lhs.type ();
  unsigned prec = TYPE_PRECISION (type);
  wi::overflow_type ovf1, ovf2;
  signop sign = TYPE_SIGN (type);

  // LHS = OP1 + 0 indicates LHS == OP1.
  if (op2.zero_p ())
    return VREL_EQ;

  if (TYPE_OVERFLOW_WRAPS (type))
    {
      wi::add (op1.lower_bound (), op2.lower_bound (), sign, &ovf1);
      wi::add (op1.upper_bound (), op2.upper_bound (), sign, &ovf2);
    }
  else
    ovf1 = ovf2 = wi::OVF_NONE;

  // Never wrapping additions.
  if (!ovf1 && !ovf2)
    {
      // Positive op2 means lhs > op1.
      if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_GT;
      if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_GE;

      // Negative op2 means lhs < op1.
      if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_LT;
      if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_LE;
    }
  // Always wrapping additions.
  else if (ovf1 && ovf1 == ovf2)
    {
      // Positive op2 means lhs < op1.
      if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_LT;
      if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_LE;

      // Negative op2 means lhs > op1.
      if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_GT;
      if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_GE;
    }

  // If op2 does not contain 0, then LHS and OP1 can never be equal.
  if (!range_includes_zero_p (&op2))
    return VREL_NE;

  return VREL_VARYING;
}
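
// Worked example (added for illustration): for unsigned char,
// lhs = op1 + [1,1] yields VREL_GT when neither bound of the addition
// can wrap, but with op1 == [255,255] both bounds wrap and the same
// positive offset yields VREL_LT instead.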
// PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
// operands.

relation_kind
operator_plus::lhs_op2_relation (const irange &lhs, const irange &op1,
				 const irange &op2, relation_kind rel) const
{
  return lhs_op1_relation (lhs, op2, op1, rel);
}
void
operator_plus::wi_fold (irange &r, tree type,
			const wide_int &lh_lb, const wide_int &lh_ub,
			const wide_int &rh_lb, const wide_int &rh_ub) const
{
  wi::overflow_type ov_lb, ov_ub;
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::add (lh_lb, rh_lb, s, &ov_lb);
  wide_int new_ub = wi::add (lh_ub, rh_ub, s, &ov_ub);
  value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
}
// Given addition or subtraction, determine the possible NORMAL ranges and
// OVERFLOW ranges given an OFFSET range.  ADD_P is true for addition.
// Return the relation that exists between the LHS and OP1 in order for the
// NORMAL range to apply.
// A return value of VREL_VARYING means no ranges were applicable.

static relation_kind
plus_minus_ranges (irange &r_ov, irange &r_normal, const irange &offset,
		   bool add_p)
{
  relation_kind kind = VREL_VARYING;
  // For now, only deal with constant adds.  This could be extended to ranges
  // when someone is so motivated.
  if (!offset.singleton_p () || offset.zero_p ())
    return kind;

  // Always work with a positive offset, i.e. a + -2 -> a - 2 and
  // a - -2 -> a + 2.
  wide_int off = offset.lower_bound ();
  if (wi::neg_p (off, SIGNED))
    {
      add_p = !add_p;
      off = wi::neg (off);
    }

  wi::overflow_type ov;
  tree type = offset.type ();
  unsigned prec = TYPE_PRECISION (type);
  wide_int lb;
  wide_int ub;
  // Calculate the normal range and relation for the operation.
  if (add_p)
    {
      // [0, MAX - OFF]
      lb = wi::zero (prec);
      ub = wi::sub (wi::to_wide (vrp_val_max (type)), off, UNSIGNED, &ov);
      kind = VREL_GT;
    }
  else
    {
      // [OFF, MAX]
      lb = off;
      ub = wi::to_wide (vrp_val_max (type));
      kind = VREL_LT;
    }
  int_range<2> normal_range (type, lb, ub);
  int_range<2> ov_range (type, lb, ub, VR_ANTI_RANGE);

  r_ov = ov_range;
  r_normal = normal_range;
  return kind;
}
// Once op1 has been calculated by operator_plus or operator_minus, check
// to see if the relation passed causes any part of the calculation to
// be not possible.  ie
// a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
// and that further refines a_2 to [0, 0].
// R is the value of op1, OP2 is the offset being added/subtracted, REL is the
// relation between LHS and OP1, and ADD_P is true for PLUS, false for
// MINUS.  If any adjustment can be made, R will reflect it.

static void
adjust_op1_for_overflow (irange &r, const irange &op2, relation_kind rel,
			 bool add_p)
{
  if (r.undefined_p ())
    return;
  tree type = r.type ();
  // Check for unsigned overflow and calculate the overflow part.
  signop s = TYPE_SIGN (type);
  if (!TYPE_OVERFLOW_WRAPS (type) || s == SIGNED)
    return;

  // Only work with <, <=, >, >= relations.
  if (!relation_lt_le_gt_ge_p (rel))
    return;

  // Get the ranges for this offset.
  int_range_max normal, overflow;
  relation_kind k = plus_minus_ranges (overflow, normal, op2, add_p);

  // VREL_VARYING means there are no adjustments.
  if (k == VREL_VARYING)
    return;

  // If the relations match use the normal range, otherwise use overflow range.
  if (relation_intersect (k, rel) == k)
    r.intersect (normal);
  else
    r.intersect (overflow);
}
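
// Worked example (added for illustration): for unsigned char
// a_2 = b_3 + 1 with the known relation a_2 < b_3, the only way
// LHS < OP1 is for the addition to wrap, so R is intersected with the
// overflow range [255,255], and a_2 subsequently folds to [0,0].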
bool
operator_plus::op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio trio) const
{
  if (lhs.undefined_p ())
    return false;
  // Start with the default operation.
  range_op_handler minus (MINUS_EXPR, type);
  if (!minus)
    return false;
  bool res = minus.fold_range (r, type, lhs, op2);
  relation_kind rel = trio.lhs_op2 ();
  // Check for a relation refinement.
  if (res)
    adjust_op1_for_overflow (r, op2, rel, true /* PLUS_EXPR */);
  return res;
}

bool
operator_plus::op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio rel) const
{
  return op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
}
class operator_minus : public range_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
  using range_operator::op2_range;
public:
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio) const;
  virtual bool op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio) const;
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
  virtual relation_kind lhs_op1_relation (const irange &lhs,
					  const irange &op1,
					  const irange &op2,
					  relation_kind rel) const;
  virtual bool op1_op2_relation_effect (irange &lhs_range,
					tree type,
					const irange &op1_range,
					const irange &op2_range,
					relation_kind rel) const;
};
void
operator_minus::wi_fold (irange &r, tree type,
			 const wide_int &lh_lb, const wide_int &lh_ub,
			 const wide_int &rh_lb, const wide_int &rh_ub) const
{
  wi::overflow_type ov_lb, ov_ub;
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb);
  wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub);
  value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
}
// Return the relation between LHS and OP1 based on the relation between
// OP1 and OP2.

relation_kind
operator_minus::lhs_op1_relation (const irange &, const irange &op1,
				  const irange &, relation_kind rel) const
{
  if (!op1.undefined_p () && TYPE_SIGN (op1.type ()) == UNSIGNED)
    switch (rel)
      {
      case VREL_GT:
      case VREL_GE:
	return VREL_LE;
      default:
	break;
      }
  return VREL_VARYING;
}
// Check to see if the relation REL between OP1 and OP2 has any effect on the
// LHS of the expression.  If so, apply it to LHS_RANGE.  This is a helper
// function for both MINUS_EXPR and POINTER_DIFF_EXPR.

static bool
minus_op1_op2_relation_effect (irange &lhs_range, tree type,
			       const irange &op1_range ATTRIBUTE_UNUSED,
			       const irange &op2_range ATTRIBUTE_UNUSED,
			       relation_kind rel)
{
  if (rel == VREL_VARYING)
    return false;

  int_range<2> rel_range;
  unsigned prec = TYPE_PRECISION (type);
  signop sgn = TYPE_SIGN (type);

  // == and != produce [0,0] and ~[0,0] regardless of wrapping.
  if (rel == VREL_EQ)
    rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec));
  else if (rel == VREL_NE)
    rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
			      VR_ANTI_RANGE);
  else if (TYPE_OVERFLOW_WRAPS (type))
    {
      switch (rel)
	{
	// For wrapping signed values and unsigned, if op1 > op2 or
	// op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
	case VREL_GT:
	case VREL_LT:
	  rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
				    VR_ANTI_RANGE);
	  break;
	default:
	  return false;
	}
    }
  else
    {
      switch (rel)
	{
	// op1 > op2, op1 - op2 can be restricted to [1, +INF]
	case VREL_GT:
	  rel_range = int_range<2> (type, wi::one (prec),
				    wi::max_value (prec, sgn));
	  break;
	// op1 >= op2, op1 - op2 can be restricted to [0, +INF]
	case VREL_GE:
	  rel_range = int_range<2> (type, wi::zero (prec),
				    wi::max_value (prec, sgn));
	  break;
	// op1 < op2, op1 - op2 can be restricted to [-INF, -1]
	case VREL_LT:
	  rel_range = int_range<2> (type, wi::min_value (prec, sgn),
				    wi::minus_one (prec));
	  break;
	// op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
	case VREL_LE:
	  rel_range = int_range<2> (type, wi::min_value (prec, sgn),
				    wi::zero (prec));
	  break;
	default:
	  return false;
	}
    }
  lhs_range.intersect (rel_range);
  return true;
}
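
// Worked example (added for illustration): for signed int with
// undefined overflow, knowing op1 > op2 lets op1 - op2 be intersected
// with [1, +INF]; under -fwrapv the same relation only supports
// ~[0, 0], since the difference may wrap negative.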
bool
operator_minus::op1_op2_relation_effect (irange &lhs_range, tree type,
					 const irange &op1_range,
					 const irange &op2_range,
					 relation_kind rel) const
{
  return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
					rel);
}

bool
operator_minus::op1_range (irange &r, tree type,
			   const irange &lhs, const irange &op2,
			   relation_trio trio) const
{
  if (lhs.undefined_p ())
    return false;
  // Start with the default operation.
  range_op_handler minus (PLUS_EXPR, type);
  if (!minus)
    return false;
  bool res = minus.fold_range (r, type, lhs, op2);
  relation_kind rel = trio.lhs_op2 ();
  if (res)
    adjust_op1_for_overflow (r, op2, rel, false /* MINUS_EXPR */);
  return res;
}

bool
operator_minus::op2_range (irange &r, tree type,
			   const irange &lhs, const irange &op1,
			   relation_trio) const
{
  if (lhs.undefined_p ())
    return false;
  return fold_range (r, type, op1, lhs);
}
class operator_pointer_diff : public range_operator
{
  virtual bool op1_op2_relation_effect (irange &lhs_range,
					tree type,
					const irange &op1_range,
					const irange &op2_range,
					relation_kind rel) const;
};

bool
operator_pointer_diff::op1_op2_relation_effect (irange &lhs_range, tree type,
						const irange &op1_range,
						const irange &op2_range,
						relation_kind rel) const
{
  return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
					rel);
}
class operator_min : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
};

void
operator_min::wi_fold (irange &r, tree type,
		       const wide_int &lh_lb, const wide_int &lh_ub,
		       const wide_int &rh_lb, const wide_int &rh_ub) const
{
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::min (lh_lb, rh_lb, s);
  wide_int new_ub = wi::min (lh_ub, rh_ub, s);
  value_range_with_overflow (r, type, new_lb, new_ub);
}

class operator_max : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
};

void
operator_max::wi_fold (irange &r, tree type,
		       const wide_int &lh_lb, const wide_int &lh_ub,
		       const wide_int &rh_lb, const wide_int &rh_ub) const
{
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::max (lh_lb, rh_lb, s);
  wide_int new_ub = wi::max (lh_ub, rh_ub, s);
  value_range_with_overflow (r, type, new_lb, new_ub);
}
class cross_product_operator : public range_operator
{
public:
  // Perform an operation between two wide-ints and place the result
  // in R.  Return true if the operation overflowed.
  virtual bool wi_op_overflows (wide_int &r,
				tree type,
				const wide_int &,
				const wide_int &) const = 0;

  // Calculate the cross product of two sets of sub-ranges and return it.
  void wi_cross_product (irange &r, tree type,
			 const wide_int &lh_lb,
			 const wide_int &lh_ub,
			 const wide_int &rh_lb,
			 const wide_int &rh_ub) const;
};

// Calculate the cross product of two sets of ranges and return it.
//
// Multiplications, divisions and shifts are a bit tricky to handle,
// depending on the mix of signs we have in the two ranges, we need to
// operate on different values to get the minimum and maximum values
// for the new range.  One approach is to figure out all the
// variations of range combinations and do the operations.
//
// However, this involves several calls to compare_values and it is
// pretty convoluted.  It's simpler to do the 4 operations (MIN0 OP
// MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1) and then
// figure the smallest and largest values to form the new range.
void
cross_product_operator::wi_cross_product (irange &r, tree type,
					  const wide_int &lh_lb,
					  const wide_int &lh_ub,
					  const wide_int &rh_lb,
					  const wide_int &rh_ub) const
{
  wide_int cp1, cp2, cp3, cp4;
  // Default to varying.
  r.set_varying (type);

  // Compute the 4 cross operations, bailing if we get an overflow we
  // can't handle.
  if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp3 = cp1;
  else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
    return;
  if (wi::eq_p (rh_lb, rh_ub))
    cp2 = cp1;
  else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp4 = cp2;
  else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
    return;

  // Order each pair.
  signop sign = TYPE_SIGN (type);
  if (wi::gt_p (cp1, cp2, sign))
    std::swap (cp1, cp2);
  if (wi::gt_p (cp3, cp4, sign))
    std::swap (cp3, cp4);

  // Choose min and max from the ordered pairs.
  wide_int res_lb = wi::min (cp1, cp3, sign);
  wide_int res_ub = wi::max (cp2, cp4, sign);
  value_range_with_overflow (r, type, res_lb, res_ub);
}
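
// Worked example (added for illustration): multiplying [-2, 3] by
// [5, 10] computes the four products -10, -20, 15 and 30, orders each
// pair, and takes the extremes, yielding [-20, 30].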
class operator_mult : public cross_product_operator
{
  using range_operator::op1_range;
  using range_operator::op2_range;
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const final override;
  virtual bool wi_op_overflows (wide_int &res, tree type,
				const wide_int &w0, const wide_int &w1)
    const final override;
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio) const final override;
  virtual bool op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio) const final override;
};
bool
operator_mult::op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio) const
{
  tree offset;
  if (lhs.undefined_p ())
    return false;

  // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
  // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
  // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
  if (TYPE_OVERFLOW_WRAPS (type))
    return false;

  if (op2.singleton_p (&offset) && !integer_zerop (offset))
    return range_op_handler (TRUNC_DIV_EXPR, type).fold_range (r, type,
							       lhs, op2);
  return false;
}

bool
operator_mult::op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio rel) const
{
  return operator_mult::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
}
bool
operator_mult::wi_op_overflows (wide_int &res, tree type,
				const wide_int &w0, const wide_int &w1) const
{
  wi::overflow_type overflow = wi::OVF_NONE;
  signop sign = TYPE_SIGN (type);
  res = wi::mul (w0, w1, sign, &overflow);
  if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
    {
      // For multiplication, the sign of the overflow is given
      // by the comparison of the signs of the operands.
      if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
	res = wi::max_value (w0.get_precision (), sign);
      else
	res = wi::min_value (w0.get_precision (), sign);
      return false;
    }
  return overflow;
}
void
operator_mult::wi_fold (irange &r, tree type,
			const wide_int &lh_lb, const wide_int &lh_ub,
			const wide_int &rh_lb, const wide_int &rh_ub) const
{
  if (TYPE_OVERFLOW_UNDEFINED (type))
    {
      wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
      return;
    }

  // Multiply the ranges when overflow wraps.  This is basically fancy
  // code so we don't drop to varying with an unsigned multiplication.
  //
  // This test requires 2*prec bits if both operands are signed and
  // 2*prec + 2 bits if either is not.  Therefore, extend the values
  // using the sign of the result to PREC2.  From here on out,
  // everything is just signed math no matter what the input types
  // were.

  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);
  widest2_int min0 = widest2_int::from (lh_lb, sign);
  widest2_int max0 = widest2_int::from (lh_ub, sign);
  widest2_int min1 = widest2_int::from (rh_lb, sign);
  widest2_int max1 = widest2_int::from (rh_ub, sign);
  widest2_int sizem1 = wi::mask <widest2_int> (prec, false);
  widest2_int size = sizem1 + 1;

  // Canonicalize the intervals.
  if (sign == UNSIGNED)
    {
      if (wi::ltu_p (size, min0 + max0))
	{
	  min0 -= size;
	  max0 -= size;
	}
      if (wi::ltu_p (size, min1 + max1))
	{
	  min1 -= size;
	  max1 -= size;
	}
    }

  // Sort the 4 products so that min is in prod0 and max is in
  // prod3.
  widest2_int prod0 = min0 * min1;
  widest2_int prod1 = min0 * max1;
  widest2_int prod2 = max0 * min1;
  widest2_int prod3 = max0 * max1;

  // min0min1 > max0max1
  if (prod0 > prod3)
    std::swap (prod0, prod3);

  // min0max1 > max0min1
  if (prod1 > prod2)
    std::swap (prod1, prod2);

  if (prod0 > prod1)
    std::swap (prod0, prod1);

  if (prod2 > prod3)
    std::swap (prod2, prod3);

  // diff = max - min
  prod2 = prod3 - prod0;
  if (wi::geu_p (prod2, sizem1))
    {
      // Multiplying by X, where X is a power of 2 is [0,0][X,+INF].
      if (TYPE_UNSIGNED (type) && rh_lb == rh_ub
	  && wi::exact_log2 (rh_lb) != -1 && prec > 1)
	{
	  r.set (type, rh_lb, wi::max_value (prec, sign));
	  int_range<2> zero;
	  zero.set_zero (type);
	  r.union_ (zero);
	}
      else
	// The range covers all values.
	r.set_varying (type);
    }
  else
    {
      wide_int new_lb = wide_int::from (prod0, prec, sign);
      wide_int new_ub = wide_int::from (prod3, prec, sign);
      create_possibly_reversed_range (r, type, new_lb, new_ub);
    }
}
class operator_div : public cross_product_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const final override;
  virtual bool wi_op_overflows (wide_int &res, tree type,
				const wide_int &, const wide_int &)
    const final override;
};
bool
operator_div::wi_op_overflows (wide_int &res, tree type,
			       const wide_int &w0, const wide_int &w1) const
{
  if (w1 == 0)
    return true;

  wi::overflow_type overflow = wi::OVF_NONE;
  signop sign = TYPE_SIGN (type);

  switch (m_code)
    {
    case EXACT_DIV_EXPR:
    case TRUNC_DIV_EXPR:
      res = wi::div_trunc (w0, w1, sign, &overflow);
      break;
    case FLOOR_DIV_EXPR:
      res = wi::div_floor (w0, w1, sign, &overflow);
      break;
    case ROUND_DIV_EXPR:
      res = wi::div_round (w0, w1, sign, &overflow);
      break;
    case CEIL_DIV_EXPR:
      res = wi::div_ceil (w0, w1, sign, &overflow);
      break;
    default:
      gcc_unreachable ();
    }

  if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
    {
      // For division, the only case is -INF / -1 = +INF.
      res = wi::max_value (w0.get_precision (), sign);
      return false;
    }
  return overflow;
}
void
operator_div::wi_fold (irange &r, tree type,
		       const wide_int &lh_lb, const wide_int &lh_ub,
		       const wide_int &rh_lb, const wide_int &rh_ub) const
{
  const wide_int dividend_min = lh_lb;
  const wide_int dividend_max = lh_ub;
  const wide_int divisor_min = rh_lb;
  const wide_int divisor_max = rh_ub;
  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);
  wide_int extra_min, extra_max;

  // If we know we won't divide by zero, just do the division.
  if (!wi_includes_zero_p (type, divisor_min, divisor_max))
    {
      wi_cross_product (r, type, dividend_min, dividend_max,
			divisor_min, divisor_max);
      return;
    }

  // If we're definitely dividing by zero, there's nothing to do.
  if (wi_zero_p (type, divisor_min, divisor_max))
    {
      r.set_undefined ();
      return;
    }

  // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
  // skip any division by zero.

  // First divide by the negative numbers, if any.
  if (wi::neg_p (divisor_min, sign))
    wi_cross_product (r, type, dividend_min, dividend_max,
		      divisor_min, wi::minus_one (prec));
  else
    r.set_undefined ();

  // Then divide by the non-zero positive numbers, if any.
  if (wi::gt_p (divisor_max, wi::zero (prec), sign))
    {
      int_range_max tmp;
      wi_cross_product (tmp, type, dividend_min, dividend_max,
			wi::one (prec), divisor_max);
      r.union_ (tmp);
    }
  // We shouldn't still have undefined here.
  gcc_checking_assert (!r.undefined_p ());
}
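
// Worked example (added for illustration): [8,8] / [-2,2] in signed
// math is split into [8,8] / [-2,-1] = [-8,-4] and [8,8] / [1,2] =
// [4,8], and the union [-8,-4][4,8] is returned, skipping the
// division by zero entirely.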
class operator_exact_divide : public operator_div
{
  using range_operator::op1_range;
public:
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio) const;
};

bool
operator_exact_divide::op1_range (irange &r, tree type,
				  const irange &lhs, const irange &op2,
				  relation_trio) const
{
  if (lhs.undefined_p ())
    return false;
  tree offset;
  // [2, 4] = op1 / [3,3]   since it's an exact divide, no need to worry about
  // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
  // We won't bother trying to enumerate all the in-between stuff :-P
  // TRUE accuracy is [6,6][9,9][12,12].  This is unlikely to matter most of
  // the time however.
  // If op2 is a multiple of 2, we would be able to set some non-zero bits.
  if (op2.singleton_p (&offset)
      && !integer_zerop (offset))
    return range_op_handler (MULT_EXPR, type).fold_range (r, type, lhs, op2);
  return false;
}
class operator_lshift : public cross_product_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
public:
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio rel = TRIO_VARYING) const;
  virtual bool fold_range (irange &r, tree type,
			   const irange &op1, const irange &op2,
			   relation_trio rel = TRIO_VARYING) const;
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb, const wide_int &lh_ub,
			const wide_int &rh_lb, const wide_int &rh_ub) const;
  virtual bool wi_op_overflows (wide_int &res,
				tree type,
				const wide_int &,
				const wide_int &) const;
};
class operator_rshift : public cross_product_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
  using range_operator::lhs_op1_relation;
public:
  virtual bool fold_range (irange &r, tree type,
			   const irange &op1, const irange &op2,
			   relation_trio rel = TRIO_VARYING) const;
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
  virtual bool wi_op_overflows (wide_int &res,
				tree type,
				const wide_int &w0,
				const wide_int &w1) const;
  virtual bool op1_range (irange &, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio rel = TRIO_VARYING) const;
  virtual relation_kind lhs_op1_relation (const irange &lhs,
					  const irange &op1,
					  const irange &op2,
					  relation_kind rel) const;
};

relation_kind
operator_rshift::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
				   const irange &op1,
				   const irange &op2,
				   relation_kind) const
{
  // If both operand ranges are >= 0, then the LHS <= op1.
  if (!op1.undefined_p () && !op2.undefined_p ()
      && wi::ge_p (op1.lower_bound (), 0, TYPE_SIGN (op1.type ()))
      && wi::ge_p (op2.lower_bound (), 0, TYPE_SIGN (op2.type ())))
    return VREL_LE;
  return VREL_VARYING;
}
bool
operator_lshift::fold_range (irange &r, tree type,
			     const irange &op1, const irange &op2,
			     relation_trio rel) const
{
  int_range_max shift_range;
  if (!get_shift_range (shift_range, type, op2))
    {
      if (op2.undefined_p ())
	r.set_undefined ();
      else
	r.set_varying (type);
      return true;
    }

  // Transform left shifts by constants into multiplies.
  if (shift_range.singleton_p ())
    {
      unsigned shift = shift_range.lower_bound ().to_uhwi ();
      wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type));
      int_range<1> mult (type, tmp, tmp);

      // Force wrapping multiplication.
      bool saved_flag_wrapv = flag_wrapv;
      bool saved_flag_wrapv_pointer = flag_wrapv_pointer;
      flag_wrapv = 1;
      flag_wrapv_pointer = 1;
      bool b = op_mult.fold_range (r, type, op1, mult);
      flag_wrapv = saved_flag_wrapv;
      flag_wrapv_pointer = saved_flag_wrapv_pointer;
      return b;
    }

  // Otherwise, invoke the generic fold routine.
  return range_operator::fold_range (r, type, op1, shift_range, rel);
}
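
// Worked example (added for illustration): [3,5] << 2 is rewritten as
// [3,5] * [4,4] and folded with wrapping semantics, giving [12,20];
// a non-constant shift such as [1,2] falls through to the generic
// subrange fold instead.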
void
operator_lshift::wi_fold (irange &r, tree type,
			  const wide_int &lh_lb, const wide_int &lh_ub,
			  const wide_int &rh_lb, const wide_int &rh_ub) const
{
  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);
  int overflow_pos = sign == SIGNED ? prec - 1 : prec;
  int bound_shift = overflow_pos - rh_ub.to_shwi ();
  // If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
  // overflow.  However, for that to happen, rh.max needs to be zero,
  // which means rh is a singleton range of zero, which means we simply return
  // [lh_lb, lh_ub] as the range.
  if (wi::eq_p (rh_ub, rh_lb) && wi::eq_p (rh_ub, 0))
    {
      r = int_range<2> (type, lh_lb, lh_ub);
      return;
    }

  wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
  wide_int complement = ~(bound - 1);
  wide_int low_bound, high_bound;
  bool in_bounds = false;

  if (sign == UNSIGNED)
    {
      low_bound = bound;
      high_bound = complement;
      if (wi::ltu_p (lh_ub, low_bound))
	{
	  // [5, 6] << [1, 2] == [10, 24].
	  // We're shifting out only zeroes, the value increases
	  // monotonically.
	  in_bounds = true;
	}
      else if (wi::ltu_p (high_bound, lh_lb))
	{
	  // [0xffffff00, 0xffffffff] << [1, 2]
	  // == [0xfffffc00, 0xfffffffe].
	  // We're shifting out only ones, the value decreases
	  // monotonically.
	  in_bounds = true;
	}
    }
  else
    {
      // [-1, 1] << [1, 2] == [-4, 4]
      low_bound = complement;
      high_bound = bound;
      if (wi::lts_p (lh_ub, high_bound)
	  && wi::lts_p (low_bound, lh_lb))
	{
	  // For non-negative numbers, we're shifting out only zeroes,
	  // the value increases monotonically.  For negative numbers,
	  // we're shifting out only ones, the value decreases
	  // monotonically.
	  in_bounds = true;
	}
    }

  if (in_bounds)
    wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
  else
    r.set_varying (type);
}
bool
operator_lshift::wi_op_overflows (wide_int &res, tree type,
				  const wide_int &w0, const wide_int &w1) const
{
  signop sign = TYPE_SIGN (type);
  if (wi::neg_p (w1))
    {
      // It's unclear from the C standard whether shifts can overflow.
      // The following code ignores overflow; perhaps a C standard
      // interpretation ruling is needed.
      res = wi::rshift (w0, -w1, sign);
    }
  else
    res = wi::lshift (w0, w1);
  return false;
}
bool
operator_lshift::op1_range (irange &r,
			    tree type,
			    const irange &lhs,
			    const irange &op2,
			    relation_trio) const
{
  tree shift_amount;

  if (lhs.undefined_p ())
    return false;

  if (!lhs.contains_p (build_zero_cst (type)))
    r.set_nonzero (type);
  else
    r.set_varying (type);

  if (op2.singleton_p (&shift_amount))
    {
      wide_int shift = wi::to_wide (shift_amount);
      if (wi::lt_p (shift, 0, SIGNED))
	return false;
      if (wi::ge_p (shift, wi::uhwi (TYPE_PRECISION (type),
				     TYPE_PRECISION (op2.type ())),
		    UNSIGNED))
	return false;

      // Work completely in unsigned mode to start.
      tree utype = type;
      int_range_max tmp_range;
      if (TYPE_SIGN (type) == SIGNED)
	{
	  int_range_max tmp = lhs;
	  utype = unsigned_type_for (type);
	  range_cast (tmp, utype);
	  op_rshift.fold_range (tmp_range, utype, tmp, op2);
	}
      else
	op_rshift.fold_range (tmp_range, utype, lhs, op2);

      // Start with ranges which can produce the LHS by right shifting the
      // result by the shift amount.
      // ie   [0x08, 0xF0] = op1 << 2 will start with
      //      [00001000, 11110000] = op1 << 2
      //  [0x02, 0x4C] aka [00000010, 00111100]
      //
      // Then create a range from the LB with the least significant upper bit
      // set, to the upper bound with all the bits set.
      // This would be [0x42, 0xFC] aka [01000010, 11111100].
      //
      // Ideally we do this for each subrange, but just lump them all for now.
      unsigned low_bits = TYPE_PRECISION (utype)
			  - TREE_INT_CST_LOW (shift_amount);
      wide_int up_mask = wi::mask (low_bits, true, TYPE_PRECISION (utype));
      wide_int new_ub = wi::bit_or (up_mask, tmp_range.upper_bound ());
      wide_int new_lb = wi::set_bit (tmp_range.lower_bound (), low_bits);
      int_range<2> fill_range (utype, new_lb, new_ub);
      tmp_range.union_ (fill_range);

      if (utype != type)
	range_cast (tmp_range, type);

      r.intersect (tmp_range);
      return true;
    }

  return !r.varying_p ();
}
bool
operator_rshift::op1_range (irange &r,
			    tree type,
			    const irange &lhs,
			    const irange &op2,
			    relation_trio) const
{
  tree shift;
  if (lhs.undefined_p ())
    return false;
  if (op2.singleton_p (&shift))
    {
      // Ignore nonsensical shifts.
      unsigned prec = TYPE_PRECISION (type);
      if (wi::ge_p (wi::to_wide (shift),
		    wi::uhwi (prec, TYPE_PRECISION (TREE_TYPE (shift))),
		    UNSIGNED))
	return false;
      if (wi::to_wide (shift) == 0)
	{
	  r = lhs;
	  return true;
	}

      // Folding the original operation may discard some impossible
      // ranges from the LHS.
      int_range_max lhs_refined;
      op_rshift.fold_range (lhs_refined, type, int_range<1> (type), op2);
      lhs_refined.intersect (lhs);
      if (lhs_refined.undefined_p ())
	{
	  r.set_undefined ();
	  return true;
	}
      int_range_max shift_range (shift, shift);
      int_range_max lb, ub;
      op_lshift.fold_range (lb, type, lhs_refined, shift_range);
      //    LHS
      // 0000 0111 = OP1 >> 3
      //
      // OP1 is anything from 0011 1000 to 0011 1111.  That is, a
      // range from LHS<<3 plus a mask of the 3 bits we shifted on the
      // right hand side (0x07).
      tree mask = fold_build1 (BIT_NOT_EXPR, type,
			       fold_build2 (LSHIFT_EXPR, type,
					    build_minus_one_cst (type),
					    shift));
      int_range_max mask_range (build_zero_cst (type), mask);
      op_plus.fold_range (ub, type, lb, mask_range);
      r = lb;
      r.union_ (ub);
      if (!lhs_refined.contains_p (build_zero_cst (type)))
	{
	  mask_range.invert ();
	  r.intersect (mask_range);
	}
      return true;
    }
  return false;
}
bool
operator_rshift::wi_op_overflows (wide_int &res,
				  tree type,
				  const wide_int &w0,
				  const wide_int &w1) const
{
  signop sign = TYPE_SIGN (type);
  if (wi::neg_p (w1))
    res = wi::lshift (w0, -w1);
  else
    {
      // It's unclear from the C standard whether shifts can overflow.
      // The following code ignores overflow; perhaps a C standard
      // interpretation ruling is needed.
      res = wi::rshift (w0, w1, sign);
    }
  return false;
}
bool
operator_rshift::fold_range (irange &r, tree type,
			     const irange &op1, const irange &op2,
			     relation_trio rel) const
{
  int_range_max shift;
  if (!get_shift_range (shift, type, op2))
    {
      if (op2.undefined_p ())
	r.set_undefined ();
      else
	r.set_varying (type);
      return true;
    }

  return range_operator::fold_range (r, type, op1, shift, rel);
}

void
operator_rshift::wi_fold (irange &r, tree type,
			  const wide_int &lh_lb, const wide_int &lh_ub,
			  const wide_int &rh_lb, const wide_int &rh_ub) const
{
  wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
}
2459 class operator_cast
: public range_operator
2461 using range_operator::fold_range
;
2462 using range_operator::op1_range
;
2464 virtual bool fold_range (irange
&r
, tree type
,
2467 relation_trio rel
= TRIO_VARYING
) const;
2468 virtual bool op1_range (irange
&r
, tree type
,
2471 relation_trio rel
= TRIO_VARYING
) const;
2472 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
2475 relation_kind
) const;
2477 bool truncating_cast_p (const irange
&inner
, const irange
&outer
) const;
2478 bool inside_domain_p (const wide_int
&min
, const wide_int
&max
,
2479 const irange
&outer
) const;
2480 void fold_pair (irange
&r
, unsigned index
, const irange
&inner
,
2481 const irange
&outer
) const;
2484 // Add a partial equivalence between the LHS and op1 for casts.
2487 operator_cast::lhs_op1_relation (const irange
&lhs
,
2489 const irange
&op2 ATTRIBUTE_UNUSED
,
2490 relation_kind
) const
2492 if (lhs
.undefined_p () || op1
.undefined_p ())
2493 return VREL_VARYING
;
2494 unsigned lhs_prec
= TYPE_PRECISION (lhs
.type ());
2495 unsigned op1_prec
= TYPE_PRECISION (op1
.type ());
2496 // If the result gets sign extended into a larger type check first if this
2497 // qualifies as a partial equivalence.
2498 if (TYPE_SIGN (op1
.type ()) == SIGNED
&& lhs_prec
> op1_prec
)
2500 // If the result is sign extended, and the LHS is larger than op1,
2501 // check if op1's range can be negative as the sign extention will
2502 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2503 int_range
<3> negs
= range_negatives (op1
.type ());
2504 negs
.intersect (op1
);
2505 if (!negs
.undefined_p ())
2506 return VREL_VARYING
;
2509 unsigned prec
= MIN (lhs_prec
, op1_prec
);
2510 return bits_to_pe (prec
);
// Return TRUE if casting from INNER to OUTER is a truncating cast.

bool
operator_cast::truncating_cast_p (const irange &inner,
                                  const irange &outer) const
{
  return TYPE_PRECISION (outer.type ()) < TYPE_PRECISION (inner.type ());
}
// Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.

bool
operator_cast::inside_domain_p (const wide_int &min,
                                const wide_int &max,
                                const irange &range) const
{
  wide_int domain_min = wi::to_wide (vrp_val_min (range.type ()));
  wide_int domain_max = wi::to_wide (vrp_val_max (range.type ()));
  signop domain_sign = TYPE_SIGN (range.type ());
  return (wi::le_p (min, domain_max, domain_sign)
          && wi::le_p (max, domain_max, domain_sign)
          && wi::ge_p (min, domain_min, domain_sign)
          && wi::ge_p (max, domain_min, domain_sign));
}
// Helper for fold_range which works on a pair at a time.

void
operator_cast::fold_pair (irange &r, unsigned index,
                          const irange &inner,
                          const irange &outer) const
{
  tree inner_type = inner.type ();
  tree outer_type = outer.type ();
  signop inner_sign = TYPE_SIGN (inner_type);
  unsigned outer_prec = TYPE_PRECISION (outer_type);

  // Check to see if casting from INNER to OUTER is a conversion that
  // fits in the resulting OUTER type.
  wide_int inner_lb = inner.lower_bound (index);
  wide_int inner_ub = inner.upper_bound (index);
  if (truncating_cast_p (inner, outer))
    {
      // We may be able to accommodate a truncating cast if the
      // resulting range can be represented in the target type...
      if (wi::rshift (wi::sub (inner_ub, inner_lb),
                      wi::uhwi (outer_prec, TYPE_PRECISION (inner.type ())),
                      TYPE_SIGN (inner.type ())) != 0)
        {
          r.set_varying (outer_type);
          return;
        }
    }

  // ...but we must still verify that the final range fits in the
  // domain.  This catches -fstrict-enum restrictions where the domain
  // range is smaller than what fits in the underlying type.
  wide_int min = wide_int::from (inner_lb, outer_prec, inner_sign);
  wide_int max = wide_int::from (inner_ub, outer_prec, inner_sign);
  if (inside_domain_p (min, max, outer))
    create_possibly_reversed_range (r, outer_type, min, max);
  else
    r.set_varying (outer_type);
}
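// Illustrative example (not part of the original source): truncating the
// inner range [258, 260] of a 32-bit int to unsigned char spans only three
// values, so (260 - 258) >> 8 == 0 and the cast is accepted; the bounds
// truncate to [2, 4].  A wider inner range such as [5, 300] fails the shift
// test above and the result falls back to varying for the outer type.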
2580 operator_cast::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
2581 const irange
&inner
,
2582 const irange
&outer
,
2583 relation_trio
) const
2585 if (empty_range_varying (r
, type
, inner
, outer
))
2588 gcc_checking_assert (outer
.varying_p ());
2589 gcc_checking_assert (inner
.num_pairs () > 0);
2591 // Avoid a temporary by folding the first pair directly into the result.
2592 fold_pair (r
, 0, inner
, outer
);
2594 // Then process any additonal pairs by unioning with their results.
2595 for (unsigned x
= 1; x
< inner
.num_pairs (); ++x
)
2598 fold_pair (tmp
, x
, inner
, outer
);
2604 // Update the nonzero mask. Truncating casts are problematic unless
2605 // the conversion fits in the resulting outer type.
2606 wide_int nz
= inner
.get_nonzero_bits ();
2607 if (truncating_cast_p (inner
, outer
)
2608 && wi::rshift (nz
, wi::uhwi (TYPE_PRECISION (outer
.type ()),
2609 TYPE_PRECISION (inner
.type ())),
2610 TYPE_SIGN (inner
.type ())) != 0)
2612 nz
= wide_int::from (nz
, TYPE_PRECISION (type
), TYPE_SIGN (inner
.type ()));
2613 r
.set_nonzero_bits (nz
);
2619 operator_cast::op1_range (irange
&r
, tree type
,
2622 relation_trio
) const
2624 if (lhs
.undefined_p ())
2626 tree lhs_type
= lhs
.type ();
2627 gcc_checking_assert (types_compatible_p (op2
.type(), type
));
2629 // If we are calculating a pointer, shortcut to what we really care about.
2630 if (POINTER_TYPE_P (type
))
2632 // Conversion from other pointers or a constant (including 0/NULL)
2633 // are straightforward.
2634 if (POINTER_TYPE_P (lhs
.type ())
2635 || (lhs
.singleton_p ()
2636 && TYPE_PRECISION (lhs
.type ()) >= TYPE_PRECISION (type
)))
2639 range_cast (r
, type
);
2643 // If the LHS is not a pointer nor a singleton, then it is
2644 // either VARYING or non-zero.
2645 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
2646 r
.set_nonzero (type
);
2648 r
.set_varying (type
);
2654 if (truncating_cast_p (op2
, lhs
))
2656 if (lhs
.varying_p ())
2657 r
.set_varying (type
);
2660 // We want to insert the LHS as an unsigned value since it
2661 // would not trigger the signed bit of the larger type.
2662 int_range_max converted_lhs
= lhs
;
2663 range_cast (converted_lhs
, unsigned_type_for (lhs_type
));
2664 range_cast (converted_lhs
, type
);
2665 // Start by building the positive signed outer range for the type.
2666 wide_int lim
= wi::set_bit_in_zero (TYPE_PRECISION (lhs_type
),
2667 TYPE_PRECISION (type
));
2668 r
= int_range
<1> (type
, lim
, wi::max_value (TYPE_PRECISION (type
),
2670 // For the signed part, we need to simply union the 2 ranges now.
2671 r
.union_ (converted_lhs
);
2673 // Create maximal negative number outside of LHS bits.
2674 lim
= wi::mask (TYPE_PRECISION (lhs_type
), true,
2675 TYPE_PRECISION (type
));
2676 // Add this to the unsigned LHS range(s).
2677 int_range_max
lim_range (type
, lim
, lim
);
2678 int_range_max lhs_neg
;
2679 range_op_handler (PLUS_EXPR
, type
).fold_range (lhs_neg
, type
,
2682 // lhs_neg now has all the negative versions of the LHS.
2683 // Now union in all the values from SIGNED MIN (0x80000) to
2684 // lim-1 in order to fill in all the ranges with the upper
2687 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
2688 // we don't need to create a range from min to lim-1
2689 // calculate neg range traps trying to create [lim, lim - 1].
2690 wide_int min_val
= wi::min_value (TYPE_PRECISION (type
), SIGNED
);
2693 int_range_max
neg (type
,
2694 wi::min_value (TYPE_PRECISION (type
),
2697 lhs_neg
.union_ (neg
);
2699 // And finally, munge the signed and unsigned portions.
2702 // And intersect with any known value passed in the extra operand.
2708 if (TYPE_PRECISION (lhs_type
) == TYPE_PRECISION (type
))
2712 // The cast is not truncating, and the range is restricted to
2713 // the range of the RHS by this assignment.
2715 // Cast the range of the RHS to the type of the LHS.
2716 fold_range (tmp
, lhs_type
, int_range
<1> (type
), int_range
<1> (lhs_type
));
2717 // Intersect this with the LHS range will produce the range,
2718 // which will be cast to the RHS type before returning.
2719 tmp
.intersect (lhs
);
2722 // Cast the calculated range to the type of the RHS.
2723 fold_range (r
, type
, tmp
, int_range
<1> (type
));
2728 class operator_logical_and
: public range_operator
2730 using range_operator::fold_range
;
2731 using range_operator::op1_range
;
2732 using range_operator::op2_range
;
2734 virtual bool fold_range (irange
&r
, tree type
,
2737 relation_trio rel
= TRIO_VARYING
) const;
2738 virtual bool op1_range (irange
&r
, tree type
,
2741 relation_trio rel
= TRIO_VARYING
) const;
2742 virtual bool op2_range (irange
&r
, tree type
,
2745 relation_trio rel
= TRIO_VARYING
) const;
2750 operator_logical_and::fold_range (irange
&r
, tree type
,
2753 relation_trio
) const
2755 if (empty_range_varying (r
, type
, lh
, rh
))
2758 // 0 && anything is 0.
2759 if ((wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (lh
.upper_bound (), 0))
2760 || (wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (rh
.upper_bound (), 0)))
2761 r
= range_false (type
);
2762 else if (lh
.contains_p (build_zero_cst (lh
.type ()))
2763 || rh
.contains_p (build_zero_cst (rh
.type ())))
2764 // To reach this point, there must be a logical 1 on each side, and
2765 // the only remaining question is whether there is a zero or not.
2766 r
= range_true_and_false (type
);
2768 r
= range_true (type
);
2773 operator_logical_and::op1_range (irange
&r
, tree type
,
2775 const irange
&op2 ATTRIBUTE_UNUSED
,
2776 relation_trio
) const
2778 switch (get_bool_state (r
, lhs
, type
))
2781 // A true result means both sides of the AND must be true.
2782 r
= range_true (type
);
2785 // Any other result means only one side has to be false, the
2786 // other side can be anything. So we cannot be sure of any
2788 r
= range_true_and_false (type
);
2795 operator_logical_and::op2_range (irange
&r
, tree type
,
2798 relation_trio
) const
2800 return operator_logical_and::op1_range (r
, type
, lhs
, op1
);
2804 class operator_bitwise_and
: public range_operator
2806 using range_operator::op1_range
;
2807 using range_operator::op2_range
;
2809 virtual bool op1_range (irange
&r
, tree type
,
2812 relation_trio rel
= TRIO_VARYING
) const;
2813 virtual bool op2_range (irange
&r
, tree type
,
2816 relation_trio rel
= TRIO_VARYING
) const;
2817 virtual void wi_fold (irange
&r
, tree type
,
2818 const wide_int
&lh_lb
,
2819 const wide_int
&lh_ub
,
2820 const wide_int
&rh_lb
,
2821 const wide_int
&rh_ub
) const;
2822 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
2825 relation_kind
) const;
2827 void simple_op1_range_solver (irange
&r
, tree type
,
2829 const irange
&op2
) const;
2833 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
2834 // by considering the number of leading redundant sign bit copies.
2835 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
2836 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
2838 wi_optimize_signed_bitwise_op (irange
&r
, tree type
,
2839 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2840 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
2842 int lh_clrsb
= MIN (wi::clrsb (lh_lb
), wi::clrsb (lh_ub
));
2843 int rh_clrsb
= MIN (wi::clrsb (rh_lb
), wi::clrsb (rh_ub
));
2844 int new_clrsb
= MIN (lh_clrsb
, rh_clrsb
);
2847 int type_prec
= TYPE_PRECISION (type
);
2848 int rprec
= (type_prec
- new_clrsb
) - 1;
2849 value_range_with_overflow (r
, type
,
2850 wi::mask (rprec
, true, type_prec
),
2851 wi::mask (rprec
, false, type_prec
));
2855 // An AND of 8,16, 32 or 64 bits can produce a partial equivalence between
2859 operator_bitwise_and::lhs_op1_relation (const irange
&lhs
,
2862 relation_kind
) const
2864 if (lhs
.undefined_p () || op1
.undefined_p () || op2
.undefined_p ())
2865 return VREL_VARYING
;
2866 if (!op2
.singleton_p ())
2867 return VREL_VARYING
;
2868 // if val == 0xff or 0xFFFF OR 0Xffffffff OR 0Xffffffffffffffff, return TRUE
2869 int prec1
= TYPE_PRECISION (op1
.type ());
2870 int prec2
= TYPE_PRECISION (op2
.type ());
2872 wide_int mask
= op2
.lower_bound ();
2873 if (wi::eq_p (mask
, wi::mask (8, false, prec2
)))
2875 else if (wi::eq_p (mask
, wi::mask (16, false, prec2
)))
2877 else if (wi::eq_p (mask
, wi::mask (32, false, prec2
)))
2879 else if (wi::eq_p (mask
, wi::mask (64, false, prec2
)))
2881 return bits_to_pe (MIN (prec1
, mask_prec
));
2884 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
2885 // possible. Basically, see if we can optimize:
2889 // [LB op Z, UB op Z]
2891 // If the optimization was successful, accumulate the range in R and
2895 wi_optimize_and_or (irange
&r
,
2896 enum tree_code code
,
2898 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2899 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
2901 // Calculate the singleton mask among the ranges, if any.
2902 wide_int lower_bound
, upper_bound
, mask
;
2903 if (wi::eq_p (rh_lb
, rh_ub
))
2906 lower_bound
= lh_lb
;
2907 upper_bound
= lh_ub
;
2909 else if (wi::eq_p (lh_lb
, lh_ub
))
2912 lower_bound
= rh_lb
;
2913 upper_bound
= rh_ub
;
2918 // If Z is a constant which (for op | its bitwise not) has n
2919 // consecutive least significant bits cleared followed by m 1
2920 // consecutive bits set immediately above it and either
2921 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
2923 // The least significant n bits of all the values in the range are
2924 // cleared or set, the m bits above it are preserved and any bits
2925 // above these are required to be the same for all values in the
2929 if (code
== BIT_IOR_EXPR
)
2931 if (wi::eq_p (w
, 0))
2932 n
= w
.get_precision ();
2936 w
= ~(w
| wi::mask (n
, false, w
.get_precision ()));
2937 if (wi::eq_p (w
, 0))
2938 m
= w
.get_precision () - n
;
2940 m
= wi::ctz (w
) - n
;
2942 wide_int new_mask
= wi::mask (m
+ n
, true, w
.get_precision ());
2943 if ((new_mask
& lower_bound
) != (new_mask
& upper_bound
))
2946 wide_int res_lb
, res_ub
;
2947 if (code
== BIT_AND_EXPR
)
2949 res_lb
= wi::bit_and (lower_bound
, mask
);
2950 res_ub
= wi::bit_and (upper_bound
, mask
);
2952 else if (code
== BIT_IOR_EXPR
)
2954 res_lb
= wi::bit_or (lower_bound
, mask
);
2955 res_ub
= wi::bit_or (upper_bound
, mask
);
2959 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
2961 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
2962 if (code
== BIT_IOR_EXPR
&& wi::ne_p (mask
, 0))
2965 tmp
.set_nonzero (type
);
// For range [LB, UB] compute two wide_int bit masks.
//
// In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
// for all numbers in the range the bit is 0, otherwise it might be 0
// or 1.
//
// In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
// for all numbers in the range the bit is 1, otherwise it might be 0
// or 1.

void
wi_set_zero_nonzero_bits (tree type,
                          const wide_int &lb, const wide_int &ub,
                          wide_int &maybe_nonzero,
                          wide_int &mustbe_nonzero)
{
  signop sign = TYPE_SIGN (type);

  if (wi::eq_p (lb, ub))
    maybe_nonzero = mustbe_nonzero = lb;
  else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
    {
      wide_int xor_mask = lb ^ ub;
      maybe_nonzero = lb | ub;
      mustbe_nonzero = lb & ub;
      if (xor_mask != 0)
        {
          wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
                                    maybe_nonzero.get_precision ());
          maybe_nonzero = maybe_nonzero | mask;
          mustbe_nonzero = wi::bit_and_not (mustbe_nonzero, mask);
        }
    }
  else
    {
      maybe_nonzero = wi::minus_one (lb.get_precision ());
      mustbe_nonzero = wi::zero (lb.get_precision ());
    }
}
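// Worked example (illustrative, not from the original source): for the
// unsigned range [8, 11] (binary 1000 through 1011), lb ^ ub == 0011, so
// the bits below the highest differing bit are treated as unknown:
// MAYBE_NONZERO becomes 1011 (bit 2 is never set) and MUSTBE_NONZERO
// becomes 1000 (only the top bit is set in every value of the range).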
3012 operator_bitwise_and::wi_fold (irange
&r
, tree type
,
3013 const wide_int
&lh_lb
,
3014 const wide_int
&lh_ub
,
3015 const wide_int
&rh_lb
,
3016 const wide_int
&rh_ub
) const
3018 if (wi_optimize_and_or (r
, BIT_AND_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3021 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3022 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3023 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3024 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3025 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3026 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3028 wide_int new_lb
= mustbe_nonzero_lh
& mustbe_nonzero_rh
;
3029 wide_int new_ub
= maybe_nonzero_lh
& maybe_nonzero_rh
;
3030 signop sign
= TYPE_SIGN (type
);
3031 unsigned prec
= TYPE_PRECISION (type
);
3032 // If both input ranges contain only negative values, we can
3033 // truncate the result range maximum to the minimum of the
3034 // input range maxima.
3035 if (wi::lt_p (lh_ub
, 0, sign
) && wi::lt_p (rh_ub
, 0, sign
))
3037 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3038 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3040 // If either input range contains only non-negative values
3041 // we can truncate the result range maximum to the respective
3042 // maximum of the input range.
3043 if (wi::ge_p (lh_lb
, 0, sign
))
3044 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3045 if (wi::ge_p (rh_lb
, 0, sign
))
3046 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3047 // PR68217: In case of signed & sign-bit-CST should
3048 // result in [-INF, 0] instead of [-INF, INF].
3049 if (wi::gt_p (new_lb
, new_ub
, sign
))
3051 wide_int sign_bit
= wi::set_bit_in_zero (prec
- 1, prec
);
3053 && ((wi::eq_p (lh_lb
, lh_ub
)
3054 && !wi::cmps (lh_lb
, sign_bit
))
3055 || (wi::eq_p (rh_lb
, rh_ub
)
3056 && !wi::cmps (rh_lb
, sign_bit
))))
3058 new_lb
= wi::min_value (prec
, sign
);
3059 new_ub
= wi::zero (prec
);
3062 // If the limits got swapped around, return varying.
3063 if (wi::gt_p (new_lb
, new_ub
,sign
))
3066 && wi_optimize_signed_bitwise_op (r
, type
,
3070 r
.set_varying (type
);
3073 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3077 set_nonzero_range_from_mask (irange
&r
, tree type
, const irange
&lhs
)
3079 if (!lhs
.contains_p (build_zero_cst (type
)))
3080 r
= range_nonzero (type
);
3082 r
.set_varying (type
);
3085 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
3086 (otherwise return VAL). VAL and MASK must be zero-extended for
3087 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3088 (to transform signed values into unsigned) and at the end xor
3092 masked_increment (const wide_int
&val_in
, const wide_int
&mask
,
3093 const wide_int
&sgnbit
, unsigned int prec
)
3095 wide_int bit
= wi::one (prec
), res
;
3098 wide_int val
= val_in
^ sgnbit
;
3099 for (i
= 0; i
< prec
; i
++, bit
+= bit
)
3102 if ((res
& bit
) == 0)
3105 res
= wi::bit_and_not (val
+ bit
, res
);
3107 if (wi::gtu_p (res
, val
))
3108 return res
^ sgnbit
;
3110 return val
^ sgnbit
;
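// Worked example (illustrative, not from the original source): with
// VAL == 5 (0101), MASK == 1010 and SGNBIT == 0 at 4 bits of precision,
// the candidates satisfying (RES & MASK) == RES are 0, 2, 8 and 10; the
// smallest one greater than VAL is 8, which is what the loop above
// returns.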
3113 // This was shamelessly stolen from register_edge_assert_for_2 and
3114 // adjusted to work with iranges.
3117 operator_bitwise_and::simple_op1_range_solver (irange
&r
, tree type
,
3119 const irange
&op2
) const
3121 if (!op2
.singleton_p ())
3123 set_nonzero_range_from_mask (r
, type
, lhs
);
3126 unsigned int nprec
= TYPE_PRECISION (type
);
3127 wide_int cst2v
= op2
.lower_bound ();
3128 bool cst2n
= wi::neg_p (cst2v
, TYPE_SIGN (type
));
3131 sgnbit
= wi::set_bit_in_zero (nprec
- 1, nprec
);
3133 sgnbit
= wi::zero (nprec
);
3135 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3137 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3138 // maximum unsigned value is ~0. For signed comparison, if CST2
3139 // doesn't have the most significant bit set, handle it similarly. If
3140 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
3141 wide_int valv
= lhs
.lower_bound ();
3142 wide_int minv
= valv
& cst2v
, maxv
;
3143 bool we_know_nothing
= false;
3146 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3147 minv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3150 // If we can't determine anything on this bound, fall
3151 // through and conservatively solve for the other end point.
3152 we_know_nothing
= true;
3155 maxv
= wi::mask (nprec
- (cst2n
? 1 : 0), false, nprec
);
3156 if (we_know_nothing
)
3157 r
.set_varying (type
);
3159 r
= int_range
<1> (type
, minv
, maxv
);
3161 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3163 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3164 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3166 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3168 // For signed comparison, if CST2 doesn't have most significant bit
3169 // set, handle it similarly. If CST2 has MSB set, the maximum is
3170 // the same and minimum is INT_MIN.
3171 valv
= lhs
.upper_bound ();
3172 minv
= valv
& cst2v
;
3177 maxv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3180 // If we couldn't determine anything on either bound, return
3182 if (we_know_nothing
)
3190 int_range
<1> upper_bits (type
, minv
, maxv
);
3191 r
.intersect (upper_bits
);
3195 operator_bitwise_and::op1_range (irange
&r
, tree type
,
3198 relation_trio
) const
3200 if (lhs
.undefined_p ())
3202 if (types_compatible_p (type
, boolean_type_node
))
3203 return op_logical_and
.op1_range (r
, type
, lhs
, op2
);
3206 for (unsigned i
= 0; i
< lhs
.num_pairs (); ++i
)
3208 int_range_max
chunk (lhs
.type (),
3209 lhs
.lower_bound (i
),
3210 lhs
.upper_bound (i
));
3212 simple_op1_range_solver (res
, type
, chunk
, op2
);
3215 if (r
.undefined_p ())
3216 set_nonzero_range_from_mask (r
, type
, lhs
);
3218 // For 0 = op1 & MASK, op1 is ~MASK.
3219 if (lhs
.zero_p () && op2
.singleton_p ())
3221 wide_int nz
= wi::bit_not (op2
.get_nonzero_bits ());
3222 int_range
<2> tmp (type
);
3223 tmp
.set_nonzero_bits (nz
);
3230 operator_bitwise_and::op2_range (irange
&r
, tree type
,
3233 relation_trio
) const
3235 return operator_bitwise_and::op1_range (r
, type
, lhs
, op1
);
3239 class operator_logical_or
: public range_operator
3241 using range_operator::fold_range
;
3242 using range_operator::op1_range
;
3243 using range_operator::op2_range
;
3245 virtual bool fold_range (irange
&r
, tree type
,
3248 relation_trio rel
= TRIO_VARYING
) const;
3249 virtual bool op1_range (irange
&r
, tree type
,
3252 relation_trio rel
= TRIO_VARYING
) const;
3253 virtual bool op2_range (irange
&r
, tree type
,
3256 relation_trio rel
= TRIO_VARYING
) const;
3260 operator_logical_or::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3263 relation_trio
) const
3265 if (empty_range_varying (r
, type
, lh
, rh
))
3274 operator_logical_or::op1_range (irange
&r
, tree type
,
3276 const irange
&op2 ATTRIBUTE_UNUSED
,
3277 relation_trio
) const
3279 switch (get_bool_state (r
, lhs
, type
))
3282 // A false result means both sides of the OR must be false.
3283 r
= range_false (type
);
3286 // Any other result means only one side has to be true, the
3287 // other side can be anything. so we can't be sure of any result
3289 r
= range_true_and_false (type
);
3296 operator_logical_or::op2_range (irange
&r
, tree type
,
3299 relation_trio
) const
3301 return operator_logical_or::op1_range (r
, type
, lhs
, op1
);
3305 class operator_bitwise_or
: public range_operator
3307 using range_operator::op1_range
;
3308 using range_operator::op2_range
;
3310 virtual bool op1_range (irange
&r
, tree type
,
3313 relation_trio rel
= TRIO_VARYING
) const;
3314 virtual bool op2_range (irange
&r
, tree type
,
3317 relation_trio rel
= TRIO_VARYING
) const;
3318 virtual void wi_fold (irange
&r
, tree type
,
3319 const wide_int
&lh_lb
,
3320 const wide_int
&lh_ub
,
3321 const wide_int
&rh_lb
,
3322 const wide_int
&rh_ub
) const;
3326 operator_bitwise_or::wi_fold (irange
&r
, tree type
,
3327 const wide_int
&lh_lb
,
3328 const wide_int
&lh_ub
,
3329 const wide_int
&rh_lb
,
3330 const wide_int
&rh_ub
) const
3332 if (wi_optimize_and_or (r
, BIT_IOR_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3335 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3336 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3337 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3338 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3339 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3340 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3341 wide_int new_lb
= mustbe_nonzero_lh
| mustbe_nonzero_rh
;
3342 wide_int new_ub
= maybe_nonzero_lh
| maybe_nonzero_rh
;
3343 signop sign
= TYPE_SIGN (type
);
3344 // If the input ranges contain only positive values we can
3345 // truncate the minimum of the result range to the maximum
3346 // of the input range minima.
3347 if (wi::ge_p (lh_lb
, 0, sign
)
3348 && wi::ge_p (rh_lb
, 0, sign
))
3350 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3351 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3353 // If either input range contains only negative values
3354 // we can truncate the minimum of the result range to the
3355 // respective minimum range.
3356 if (wi::lt_p (lh_ub
, 0, sign
))
3357 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3358 if (wi::lt_p (rh_ub
, 0, sign
))
3359 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3360 // If the limits got swapped around, return a conservative range.
3361 if (wi::gt_p (new_lb
, new_ub
, sign
))
3363 // Make sure that nonzero|X is nonzero.
3364 if (wi::gt_p (lh_lb
, 0, sign
)
3365 || wi::gt_p (rh_lb
, 0, sign
)
3366 || wi::lt_p (lh_ub
, 0, sign
)
3367 || wi::lt_p (rh_ub
, 0, sign
))
3368 r
.set_nonzero (type
);
3369 else if (sign
== SIGNED
3370 && wi_optimize_signed_bitwise_op (r
, type
,
3375 r
.set_varying (type
);
3378 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3382 operator_bitwise_or::op1_range (irange
&r
, tree type
,
3385 relation_trio
) const
3387 if (lhs
.undefined_p ())
3389 // If this is really a logical wi_fold, call that.
3390 if (types_compatible_p (type
, boolean_type_node
))
3391 return op_logical_or
.op1_range (r
, type
, lhs
, op2
);
3395 tree zero
= build_zero_cst (type
);
3396 r
= int_range
<1> (zero
, zero
);
3399 r
.set_varying (type
);
3404 operator_bitwise_or::op2_range (irange
&r
, tree type
,
3407 relation_trio
) const
3409 return operator_bitwise_or::op1_range (r
, type
, lhs
, op1
);
3413 class operator_bitwise_xor
: public range_operator
3415 using range_operator::op1_range
;
3416 using range_operator::op2_range
;
3418 virtual void wi_fold (irange
&r
, tree type
,
3419 const wide_int
&lh_lb
,
3420 const wide_int
&lh_ub
,
3421 const wide_int
&rh_lb
,
3422 const wide_int
&rh_ub
) const;
3423 virtual bool op1_range (irange
&r
, tree type
,
3426 relation_trio rel
= TRIO_VARYING
) const;
3427 virtual bool op2_range (irange
&r
, tree type
,
3430 relation_trio rel
= TRIO_VARYING
) const;
3431 virtual bool op1_op2_relation_effect (irange
&lhs_range
,
3433 const irange
&op1_range
,
3434 const irange
&op2_range
,
3435 relation_kind rel
) const;
3439 operator_bitwise_xor::wi_fold (irange
&r
, tree type
,
3440 const wide_int
&lh_lb
,
3441 const wide_int
&lh_ub
,
3442 const wide_int
&rh_lb
,
3443 const wide_int
&rh_ub
) const
3445 signop sign
= TYPE_SIGN (type
);
3446 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3447 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3448 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3449 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3450 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3451 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3453 wide_int result_zero_bits
= ((mustbe_nonzero_lh
& mustbe_nonzero_rh
)
3454 | ~(maybe_nonzero_lh
| maybe_nonzero_rh
));
3455 wide_int result_one_bits
3456 = (wi::bit_and_not (mustbe_nonzero_lh
, maybe_nonzero_rh
)
3457 | wi::bit_and_not (mustbe_nonzero_rh
, maybe_nonzero_lh
));
3458 wide_int new_ub
= ~result_zero_bits
;
3459 wide_int new_lb
= result_one_bits
;
3461 // If the range has all positive or all negative values, the result
3462 // is better than VARYING.
3463 if (wi::lt_p (new_lb
, 0, sign
) || wi::ge_p (new_ub
, 0, sign
))
3464 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3465 else if (sign
== SIGNED
3466 && wi_optimize_signed_bitwise_op (r
, type
,
3471 r
.set_varying (type
);
3473 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
3474 if (wi::lt_p (lh_ub
, rh_lb
, sign
)
3475 || wi::lt_p (rh_ub
, lh_lb
, sign
)
3476 || wi::ne_p (result_one_bits
, 0))
3479 tmp
.set_nonzero (type
);
3485 operator_bitwise_xor::op1_op2_relation_effect (irange
&lhs_range
,
3489 relation_kind rel
) const
3491 if (rel
== VREL_VARYING
)
3494 int_range
<2> rel_range
;
3499 rel_range
.set_zero (type
);
3502 rel_range
.set_nonzero (type
);
3508 lhs_range
.intersect (rel_range
);
3513 operator_bitwise_xor::op1_range (irange
&r
, tree type
,
3516 relation_trio
) const
3518 if (lhs
.undefined_p () || lhs
.varying_p ())
3523 if (types_compatible_p (type
, boolean_type_node
))
3525 switch (get_bool_state (r
, lhs
, type
))
3528 if (op2
.varying_p ())
3529 r
.set_varying (type
);
3530 else if (op2
.zero_p ())
3531 r
= range_true (type
);
3532 // See get_bool_state for the rationale
3533 else if (op2
.contains_p (build_zero_cst (op2
.type ())))
3534 r
= range_true_and_false (type
);
3536 r
= range_false (type
);
3546 r
.set_varying (type
);
3551 operator_bitwise_xor::op2_range (irange
&r
, tree type
,
3554 relation_trio
) const
3556 return operator_bitwise_xor::op1_range (r
, type
, lhs
, op1
);
3559 class operator_trunc_mod
: public range_operator
3561 using range_operator::op1_range
;
3562 using range_operator::op2_range
;
3564 virtual void wi_fold (irange
&r
, tree type
,
3565 const wide_int
&lh_lb
,
3566 const wide_int
&lh_ub
,
3567 const wide_int
&rh_lb
,
3568 const wide_int
&rh_ub
) const;
3569 virtual bool op1_range (irange
&r
, tree type
,
3572 relation_trio
) const;
3573 virtual bool op2_range (irange
&r
, tree type
,
3576 relation_trio
) const;
3580 operator_trunc_mod::wi_fold (irange
&r
, tree type
,
3581 const wide_int
&lh_lb
,
3582 const wide_int
&lh_ub
,
3583 const wide_int
&rh_lb
,
3584 const wide_int
&rh_ub
) const
3586 wide_int new_lb
, new_ub
, tmp
;
3587 signop sign
= TYPE_SIGN (type
);
3588 unsigned prec
= TYPE_PRECISION (type
);
3590 // Mod 0 is undefined.
3591 if (wi_zero_p (type
, rh_lb
, rh_ub
))
3597 // Check for constant and try to fold.
3598 if (lh_lb
== lh_ub
&& rh_lb
== rh_ub
)
3600 wi::overflow_type ov
= wi::OVF_NONE
;
3601 tmp
= wi::mod_trunc (lh_lb
, rh_lb
, sign
, &ov
);
3602 if (ov
== wi::OVF_NONE
)
3604 r
= int_range
<2> (type
, tmp
, tmp
);
3609 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
3614 new_ub
= wi::smax (new_ub
, tmp
);
3617 if (sign
== UNSIGNED
)
3618 new_lb
= wi::zero (prec
);
3623 if (wi::gts_p (tmp
, 0))
3624 tmp
= wi::zero (prec
);
3625 new_lb
= wi::smax (new_lb
, tmp
);
3628 if (sign
== SIGNED
&& wi::neg_p (tmp
))
3629 tmp
= wi::zero (prec
);
3630 new_ub
= wi::min (new_ub
, tmp
, sign
);
3632 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
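// Illustrative example (not from the original source): for an unsigned
// modulus such as [0, 100] % [1, 8], the bound ABS (A % B) < ABS (B)
// limits the result to [0, 7], further clamped by the dividend's upper
// bound when that is smaller (e.g. [0, 3] % [1, 8] stays [0, 3]).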
3636 operator_trunc_mod::op1_range (irange
&r
, tree type
,
3639 relation_trio
) const
3641 if (lhs
.undefined_p ())
3644 signop sign
= TYPE_SIGN (type
);
3645 unsigned prec
= TYPE_PRECISION (type
);
3646 // (a % b) >= x && x > 0 , then a >= x.
3647 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
3649 r
= value_range (type
, lhs
.lower_bound (), wi::max_value (prec
, sign
));
3652 // (a % b) <= x && x < 0 , then a <= x.
3653 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
3655 r
= value_range (type
, wi::min_value (prec
, sign
), lhs
.upper_bound ());
3662 operator_trunc_mod::op2_range (irange
&r
, tree type
,
3665 relation_trio
) const
3667 if (lhs
.undefined_p ())
3670 signop sign
= TYPE_SIGN (type
);
3671 unsigned prec
= TYPE_PRECISION (type
);
3672 // (a % b) >= x && x > 0 , then b is in ~[-x, x] for signed
3673 // or b > x for unsigned.
3674 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
3677 r
= value_range (type
, wi::neg (lhs
.lower_bound ()),
3678 lhs
.lower_bound (), VR_ANTI_RANGE
);
3679 else if (wi::lt_p (lhs
.lower_bound (), wi::max_value (prec
, sign
),
3681 r
= value_range (type
, lhs
.lower_bound () + 1,
3682 wi::max_value (prec
, sign
));
3687 // (a % b) <= x && x < 0 , then b is in ~[x, -x].
3688 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
3690 if (wi::gt_p (lhs
.upper_bound (), wi::min_value (prec
, sign
), sign
))
3691 r
= value_range (type
, lhs
.upper_bound (),
3692 wi::neg (lhs
.upper_bound ()), VR_ANTI_RANGE
);
3701 class operator_logical_not
: public range_operator
3703 using range_operator::fold_range
;
3704 using range_operator::op1_range
;
3706 virtual bool fold_range (irange
&r
, tree type
,
3709 relation_trio rel
= TRIO_VARYING
) const;
3710 virtual bool op1_range (irange
&r
, tree type
,
3713 relation_trio rel
= TRIO_VARYING
) const;
3716 // Folding a logical NOT, oddly enough, involves doing nothing on the
3717 // forward pass through. During the initial walk backwards, the
3718 // logical NOT reversed the desired outcome on the way back, so on the
3719 // way forward all we do is pass the range forward.
3724 // to determine the TRUE branch, walking backward
3725 // if (b_3) if ([1,1])
3726 // b_3 = !b_2 [1,1] = ![0,0]
3727 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
3728 // which is the result we are looking for.. so.. pass it through.
3731 operator_logical_not::fold_range (irange
&r
, tree type
,
3733 const irange
&rh ATTRIBUTE_UNUSED
,
3734 relation_trio
) const
3736 if (empty_range_varying (r
, type
, lh
, rh
))
3740 if (!lh
.varying_p () && !lh
.undefined_p ())
3747 operator_logical_not::op1_range (irange
&r
,
3751 relation_trio
) const
3753 // Logical NOT is involutary...do it again.
3754 return fold_range (r
, type
, lhs
, op2
);
class operator_bitwise_not : public range_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
public:
  virtual bool fold_range (irange &r, tree type,
                           const irange &lh,
                           const irange &rh,
                           relation_trio rel = TRIO_VARYING) const;
  virtual bool op1_range (irange &r, tree type,
                          const irange &lhs,
                          const irange &op2,
                          relation_trio rel = TRIO_VARYING) const;
} op_bitwise_not;

bool
operator_bitwise_not::fold_range (irange &r, tree type,
                                  const irange &lh,
                                  const irange &rh,
                                  relation_trio) const
{
  if (empty_range_varying (r, type, lh, rh))
    return true;

  if (types_compatible_p (type, boolean_type_node))
    return op_logical_not.fold_range (r, type, lh, rh);

  // ~X is simply -1 - X.
  int_range<1> minusone (type, wi::minus_one (TYPE_PRECISION (type)),
                         wi::minus_one (TYPE_PRECISION (type)));
  return range_op_handler (MINUS_EXPR, type).fold_range (r, type, minusone,
                                                         lh);
}
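// Illustrative example (not part of the original source): using the
// identity ~X == -1 - X, folding ~[5, 10] in a signed type yields
// [-1 - 10, -1 - 5], i.e. [-11, -6].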
bool
operator_bitwise_not::op1_range (irange &r, tree type,
                                 const irange &lhs,
                                 const irange &op2,
                                 relation_trio) const
{
  if (lhs.undefined_p ())
    return false;
  if (types_compatible_p (type, boolean_type_node))
    return op_logical_not.op1_range (r, type, lhs, op2);

  // ~X is -1 - X and since bitwise NOT is involutory...do it again.
  return fold_range (r, type, lhs, op2);
}
3807 class operator_cst
: public range_operator
3809 using range_operator::fold_range
;
3811 virtual bool fold_range (irange
&r
, tree type
,
3814 relation_trio rel
= TRIO_VARYING
) const;
3818 operator_cst::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3820 const irange
&rh ATTRIBUTE_UNUSED
,
3821 relation_trio
) const
3828 class operator_identity
: public range_operator
3830 using range_operator::fold_range
;
3831 using range_operator::op1_range
;
3832 using range_operator::lhs_op1_relation
;
3834 virtual bool fold_range (irange
&r
, tree type
,
3837 relation_trio rel
= TRIO_VARYING
) const;
3838 virtual bool op1_range (irange
&r
, tree type
,
3841 relation_trio rel
= TRIO_VARYING
) const;
3842 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
3845 relation_kind rel
) const;
3848 // Determine if there is a relationship between LHS and OP1.
3851 operator_identity::lhs_op1_relation (const irange
&lhs
,
3852 const irange
&op1 ATTRIBUTE_UNUSED
,
3853 const irange
&op2 ATTRIBUTE_UNUSED
,
3854 relation_kind
) const
3856 if (lhs
.undefined_p ())
3857 return VREL_VARYING
;
3858 // Simply a copy, so they are equivalent.
3863 operator_identity::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3865 const irange
&rh ATTRIBUTE_UNUSED
,
3866 relation_trio
) const
3873 operator_identity::op1_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3875 const irange
&op2 ATTRIBUTE_UNUSED
,
3876 relation_trio
) const
3883 class operator_unknown
: public range_operator
3885 using range_operator::fold_range
;
3887 virtual bool fold_range (irange
&r
, tree type
,
3890 relation_trio rel
= TRIO_VARYING
) const;
3894 operator_unknown::fold_range (irange
&r
, tree type
,
3895 const irange
&lh ATTRIBUTE_UNUSED
,
3896 const irange
&rh ATTRIBUTE_UNUSED
,
3897 relation_trio
) const
3899 r
.set_varying (type
);
3904 class operator_abs
: public range_operator
3906 using range_operator::op1_range
;
3908 virtual void wi_fold (irange
&r
, tree type
,
3909 const wide_int
&lh_lb
,
3910 const wide_int
&lh_ub
,
3911 const wide_int
&rh_lb
,
3912 const wide_int
&rh_ub
) const;
3913 virtual bool op1_range (irange
&r
, tree type
,
3916 relation_trio
) const;
3920 operator_abs::wi_fold (irange
&r
, tree type
,
3921 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3922 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
3923 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
3926 signop sign
= TYPE_SIGN (type
);
3927 unsigned prec
= TYPE_PRECISION (type
);
3929 // Pass through LH for the easy cases.
3930 if (sign
== UNSIGNED
|| wi::ge_p (lh_lb
, 0, sign
))
3932 r
= int_range
<1> (type
, lh_lb
, lh_ub
);
3936 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
3938 wide_int min_value
= wi::min_value (prec
, sign
);
3939 wide_int max_value
= wi::max_value (prec
, sign
);
3940 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lh_lb
, min_value
))
3942 r
.set_varying (type
);
3946 // ABS_EXPR may flip the range around, if the original range
3947 // included negative values.
3948 if (wi::eq_p (lh_lb
, min_value
))
3950 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
3951 // returned [-MIN,-MIN] so this preserves that behaviour. PR37078
3952 if (wi::eq_p (lh_ub
, min_value
))
3954 r
= int_range
<1> (type
, min_value
, min_value
);
3960 min
= wi::abs (lh_lb
);
3962 if (wi::eq_p (lh_ub
, min_value
))
3965 max
= wi::abs (lh_ub
);
3967 // If the range contains zero then we know that the minimum value in the
3968 // range will be zero.
3969 if (wi::le_p (lh_lb
, 0, sign
) && wi::ge_p (lh_ub
, 0, sign
))
3971 if (wi::gt_p (min
, max
, sign
))
3973 min
= wi::zero (prec
);
3977 // If the range was reversed, swap MIN and MAX.
3978 if (wi::gt_p (min
, max
, sign
))
3979 std::swap (min
, max
);
3982 // If the new range has its limits swapped around (MIN > MAX), then
3983 // the operation caused one of them to wrap around. The only thing
3984 // we know is that the result is positive.
3985 if (wi::gt_p (min
, max
, sign
))
3987 min
= wi::zero (prec
);
3990 r
= int_range
<1> (type
, min
, max
);
3994 operator_abs::op1_range (irange
&r
, tree type
,
3997 relation_trio
) const
3999 if (empty_range_varying (r
, type
, lhs
, op2
))
4001 if (TYPE_UNSIGNED (type
))
4006 // Start with the positives because negatives are an impossible result.
4007 int_range_max positives
= range_positives (type
);
4008 positives
.intersect (lhs
);
4010 // Then add the negative of each pair:
4011 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4012 for (unsigned i
= 0; i
< positives
.num_pairs (); ++i
)
4013 r
.union_ (int_range
<1> (type
,
4014 -positives
.upper_bound (i
),
4015 -positives
.lower_bound (i
)));
4016 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
4017 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
4018 wide_int min_value
= wi::min_value (TYPE_PRECISION (type
), TYPE_SIGN (type
));
4019 wide_int lb
= lhs
.lower_bound ();
4020 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lb
, min_value
))
4021 r
.union_ (int_range
<2> (type
, lb
, lb
));
4026 class operator_absu
: public range_operator
4029 virtual void wi_fold (irange
&r
, tree type
,
4030 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4031 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4035 operator_absu::wi_fold (irange
&r
, tree type
,
4036 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4037 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4038 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4040 wide_int new_lb
, new_ub
;
4042 // Pass through VR0 the easy cases.
4043 if (wi::ges_p (lh_lb
, 0))
4050 new_lb
= wi::abs (lh_lb
);
4051 new_ub
= wi::abs (lh_ub
);
4053 // If the range contains zero then we know that the minimum
4054 // value in the range will be zero.
4055 if (wi::ges_p (lh_ub
, 0))
4057 if (wi::gtu_p (new_lb
, new_ub
))
4059 new_lb
= wi::zero (TYPE_PRECISION (type
));
4062 std::swap (new_lb
, new_ub
);
4065 gcc_checking_assert (TYPE_UNSIGNED (type
));
4066 r
= int_range
<1> (type
, new_lb
, new_ub
);
class operator_negate : public range_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
public:
  virtual bool fold_range (irange &r, tree type,
                           const irange &lh,
                           const irange &rh,
                           relation_trio rel = TRIO_VARYING) const;
  virtual bool op1_range (irange &r, tree type,
                          const irange &lhs,
                          const irange &op2,
                          relation_trio rel = TRIO_VARYING) const;
} op_negate;

bool
operator_negate::fold_range (irange &r, tree type,
                             const irange &lh,
                             const irange &rh,
                             relation_trio) const
{
  if (empty_range_varying (r, type, lh, rh))
    return true;
  // -X is simply 0 - X.
  return range_op_handler (MINUS_EXPR, type).fold_range (r, type,
                                                         range_zero (type),
                                                         lh);
}
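// Illustrative example (not part of the original source): since
// -X == 0 - X, folding -[5, 10] yields [0 - 10, 0 - 5] == [-10, -5].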
bool
operator_negate::op1_range (irange &r, tree type,
                            const irange &lhs,
                            const irange &op2,
                            relation_trio) const
{
  // NEGATE is involutory.
  return fold_range (r, type, lhs, op2);
}
4109 class operator_addr_expr
: public range_operator
4111 using range_operator::fold_range
;
4112 using range_operator::op1_range
;
4114 virtual bool fold_range (irange
&r
, tree type
,
4117 relation_trio rel
= TRIO_VARYING
) const;
4118 virtual bool op1_range (irange
&r
, tree type
,
4121 relation_trio rel
= TRIO_VARYING
) const;
4125 operator_addr_expr::fold_range (irange
&r
, tree type
,
4128 relation_trio
) const
4130 if (empty_range_varying (r
, type
, lh
, rh
))
4133 // Return a non-null pointer of the LHS type (passed in op2).
4135 r
= range_zero (type
);
4136 else if (!lh
.contains_p (build_zero_cst (lh
.type ())))
4137 r
= range_nonzero (type
);
4139 r
.set_varying (type
);
4144 operator_addr_expr::op1_range (irange
&r
, tree type
,
4147 relation_trio
) const
4149 return operator_addr_expr::fold_range (r
, type
, lhs
, op2
);
4153 class pointer_plus_operator
: public range_operator
4156 virtual void wi_fold (irange
&r
, tree type
,
4157 const wide_int
&lh_lb
,
4158 const wide_int
&lh_ub
,
4159 const wide_int
&rh_lb
,
4160 const wide_int
&rh_ub
) const;
4164 pointer_plus_operator::wi_fold (irange
&r
, tree type
,
4165 const wide_int
&lh_lb
,
4166 const wide_int
&lh_ub
,
4167 const wide_int
&rh_lb
,
4168 const wide_int
&rh_ub
) const
4170 // Check for [0,0] + const, and simply return the const.
4171 if (lh_lb
== 0 && lh_ub
== 0 && rh_lb
== rh_ub
)
4173 tree val
= wide_int_to_tree (type
, rh_lb
);
4178 // For pointer types, we are really only interested in asserting
4179 // whether the expression evaluates to non-NULL.
4181 // With -fno-delete-null-pointer-checks we need to be more
4182 // conservative. As some object might reside at address 0,
4183 // then some offset could be added to it and the same offset
4184 // subtracted again and the result would be NULL.
4186 // static int a[12]; where &a[0] is NULL and
4189 // ptr will be NULL here, even when there is POINTER_PLUS_EXPR
4190 // where the first range doesn't include zero and the second one
4191 // doesn't either. As the second operand is sizetype (unsigned),
4192 // consider all ranges where the MSB could be set as possible
4193 // subtractions where the result might be NULL.
4194 if ((!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
4195 || !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
4196 && !TYPE_OVERFLOW_WRAPS (type
)
4197 && (flag_delete_null_pointer_checks
4198 || !wi::sign_mask (rh_ub
)))
4199 r
= range_nonzero (type
);
4200 else if (lh_lb
== lh_ub
&& lh_lb
== 0
4201 && rh_lb
== rh_ub
&& rh_lb
== 0)
4202 r
= range_zero (type
);
4204 r
.set_varying (type
);
4208 class pointer_min_max_operator
: public range_operator
4211 virtual void wi_fold (irange
& r
, tree type
,
4212 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4213 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4217 pointer_min_max_operator::wi_fold (irange
&r
, tree type
,
4218 const wide_int
&lh_lb
,
4219 const wide_int
&lh_ub
,
4220 const wide_int
&rh_lb
,
4221 const wide_int
&rh_ub
) const
4223 // For MIN/MAX expressions with pointers, we only care about
4224 // nullness. If both are non null, then the result is nonnull.
4225 // If both are null, then the result is null. Otherwise they
4227 if (!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
4228 && !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
4229 r
= range_nonzero (type
);
4230 else if (wi_zero_p (type
, lh_lb
, lh_ub
) && wi_zero_p (type
, rh_lb
, rh_ub
))
4231 r
= range_zero (type
);
4233 r
.set_varying (type
);
4237 class pointer_and_operator
: public range_operator
4240 virtual void wi_fold (irange
&r
, tree type
,
4241 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4242 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4246 pointer_and_operator::wi_fold (irange
&r
, tree type
,
4247 const wide_int
&lh_lb
,
4248 const wide_int
&lh_ub
,
4249 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4250 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4252 // For pointer types, we are really only interested in asserting
4253 // whether the expression evaluates to non-NULL.
4254 if (wi_zero_p (type
, lh_lb
, lh_ub
) || wi_zero_p (type
, lh_lb
, lh_ub
))
4255 r
= range_zero (type
);
4257 r
.set_varying (type
);
4261 class pointer_or_operator
: public range_operator
4263 using range_operator::op1_range
;
4264 using range_operator::op2_range
;
4266 virtual bool op1_range (irange
&r
, tree type
,
4269 relation_trio rel
= TRIO_VARYING
) const;
4270 virtual bool op2_range (irange
&r
, tree type
,
4273 relation_trio rel
= TRIO_VARYING
) const;
4274 virtual void wi_fold (irange
&r
, tree type
,
4275 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4276 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4280 pointer_or_operator::op1_range (irange
&r
, tree type
,
4282 const irange
&op2 ATTRIBUTE_UNUSED
,
4283 relation_trio
) const
4285 if (lhs
.undefined_p ())
4289 tree zero
= build_zero_cst (type
);
4290 r
= int_range
<1> (zero
, zero
);
4293 r
.set_varying (type
);
4298 pointer_or_operator::op2_range (irange
&r
, tree type
,
4301 relation_trio
) const
4303 return pointer_or_operator::op1_range (r
, type
, lhs
, op1
);
4307 pointer_or_operator::wi_fold (irange
&r
, tree type
,
4308 const wide_int
&lh_lb
,
4309 const wide_int
&lh_ub
,
4310 const wide_int
&rh_lb
,
4311 const wide_int
&rh_ub
) const
4313 // For pointer types, we are really only interested in asserting
4314 // whether the expression evaluates to non-NULL.
4315 if (!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
4316 && !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
4317 r
= range_nonzero (type
);
4318 else if (wi_zero_p (type
, lh_lb
, lh_ub
) && wi_zero_p (type
, rh_lb
, rh_ub
))
4319 r
= range_zero (type
);
4321 r
.set_varying (type
);
// Return a pointer to the range_operator instance, if there is one
// associated with tree_code CODE.

range_operator *
range_op_table::operator[] (enum tree_code code)
{
  gcc_checking_assert (code > 0 && code < MAX_TREE_CODES);
  return m_range_tree[code];
}

// Add OP to the handler table for CODE.

void
range_op_table::set (enum tree_code code, range_operator &op)
{
  gcc_checking_assert (m_range_tree[code] == NULL);
  m_range_tree[code] = &op;
  gcc_checking_assert (op.m_code == ERROR_MARK || op.m_code == code);
}

// Shared operators that require separate instantiations because they
// do not share a common tree code.
static operator_cast op_nop, op_convert;
static operator_identity op_ssa, op_paren, op_obj_type;
static operator_unknown op_realpart, op_imagpart;
static pointer_min_max_operator op_ptr_min, op_ptr_max;
static operator_div op_trunc_div;
static operator_div op_floor_div;
static operator_div op_round_div;
static operator_div op_ceil_div;
// Instantiate a range op table for integral operations.

class integral_table : public range_op_table
{
public:
  integral_table ();
} integral_tree_table;

integral_table::integral_table ()
{
  set (EQ_EXPR, op_equal);
  set (NE_EXPR, op_not_equal);
  set (LT_EXPR, op_lt);
  set (LE_EXPR, op_le);
  set (GT_EXPR, op_gt);
  set (GE_EXPR, op_ge);
  set (PLUS_EXPR, op_plus);
  set (MINUS_EXPR, op_minus);
  set (MIN_EXPR, op_min);
  set (MAX_EXPR, op_max);
  set (MULT_EXPR, op_mult);
  set (TRUNC_DIV_EXPR, op_trunc_div);
  set (FLOOR_DIV_EXPR, op_floor_div);
  set (ROUND_DIV_EXPR, op_round_div);
  set (CEIL_DIV_EXPR, op_ceil_div);
  set (EXACT_DIV_EXPR, op_exact_div);
  set (LSHIFT_EXPR, op_lshift);
  set (RSHIFT_EXPR, op_rshift);
  set (NOP_EXPR, op_nop);
  set (CONVERT_EXPR, op_convert);
  set (TRUTH_AND_EXPR, op_logical_and);
  set (BIT_AND_EXPR, op_bitwise_and);
  set (TRUTH_OR_EXPR, op_logical_or);
  set (BIT_IOR_EXPR, op_bitwise_or);
  set (BIT_XOR_EXPR, op_bitwise_xor);
  set (TRUNC_MOD_EXPR, op_trunc_mod);
  set (TRUTH_NOT_EXPR, op_logical_not);
  set (BIT_NOT_EXPR, op_bitwise_not);
  set (INTEGER_CST, op_integer_cst);
  set (SSA_NAME, op_ssa);
  set (PAREN_EXPR, op_paren);
  set (OBJ_TYPE_REF, op_obj_type);
  set (IMAGPART_EXPR, op_imagpart);
  set (REALPART_EXPR, op_realpart);
  set (POINTER_DIFF_EXPR, op_pointer_diff);
  set (ABS_EXPR, op_abs);
  set (ABSU_EXPR, op_absu);
  set (NEGATE_EXPR, op_negate);
  set (ADDR_EXPR, op_addr);
}
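// Illustrative sketch (not part of the original source): a query such as
// range_op_handler (PLUS_EXPR, integer_type_node) ends up in get_handler
// below, which sees an integral type and returns
// integral_tree_table[PLUS_EXPR], i.e. the op_plus instance registered
// above.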
// Instantiate a range op table for pointer operations.

class pointer_table : public range_op_table
{
public:
  pointer_table ();
} pointer_tree_table;

pointer_table::pointer_table ()
{
  set (BIT_AND_EXPR, op_pointer_and);
  set (BIT_IOR_EXPR, op_pointer_or);
  set (MIN_EXPR, op_ptr_min);
  set (MAX_EXPR, op_ptr_max);
  set (POINTER_PLUS_EXPR, op_pointer_plus);

  set (EQ_EXPR, op_equal);
  set (NE_EXPR, op_not_equal);
  set (LT_EXPR, op_lt);
  set (LE_EXPR, op_le);
  set (GT_EXPR, op_gt);
  set (GE_EXPR, op_ge);
  set (SSA_NAME, op_ssa);
  set (INTEGER_CST, op_integer_cst);
  set (ADDR_EXPR, op_addr);
  set (NOP_EXPR, op_nop);
  set (CONVERT_EXPR, op_convert);

  set (BIT_NOT_EXPR, op_bitwise_not);
  set (BIT_XOR_EXPR, op_bitwise_xor);
}
// The tables are hidden and accessed via a simple extern function.

static inline range_operator *
get_handler (enum tree_code code, tree type)
{
  // First check if there is a pointer specialization.
  if (POINTER_TYPE_P (type))
    return pointer_tree_table[code];
  if (INTEGRAL_TYPE_P (type))
    return integral_tree_table[code];
  return NULL;
}

// Return the floating point operator for CODE or NULL if none available.

static inline range_operator_float *
get_float_handler (enum tree_code code, tree)
{
  return (*floating_tree_table)[code];
}
4461 range_op_handler::set_op_handler (tree_code code
, tree type
)
4463 if (irange::supports_p (type
))
4466 m_int
= get_handler (code
, type
);
4467 m_valid
= m_int
!= NULL
;
4469 else if (frange::supports_p (type
))
4472 m_float
= get_float_handler (code
, type
);
4473 m_valid
= m_float
!= NULL
;
4483 range_op_handler::range_op_handler ()
4490 range_op_handler::range_op_handler (tree_code code
, tree type
)
4492 set_op_handler (code
, type
);
4497 range_op_handler::fold_range (vrange
&r
, tree type
,
4500 relation_trio rel
) const
4502 gcc_checking_assert (m_valid
);
4504 return m_int
->fold_range (as_a
<irange
> (r
), type
,
4506 as_a
<irange
> (rh
), rel
);
4508 if (is_a
<irange
> (r
))
4510 if (is_a
<irange
> (rh
))
4511 return m_float
->fold_range (as_a
<irange
> (r
), type
,
4513 as_a
<irange
> (rh
), rel
);
4515 return m_float
->fold_range (as_a
<irange
> (r
), type
,
4517 as_a
<frange
> (rh
), rel
);
4519 return m_float
->fold_range (as_a
<frange
> (r
), type
,
4521 as_a
<frange
> (rh
), rel
);
4525 range_op_handler::op1_range (vrange
&r
, tree type
,
4528 relation_trio rel
) const
4530 gcc_checking_assert (m_valid
);
4532 if (lhs
.undefined_p ())
4535 return m_int
->op1_range (as_a
<irange
> (r
), type
,
4536 as_a
<irange
> (lhs
),
4537 as_a
<irange
> (op2
), rel
);
4539 if (is_a
<irange
> (lhs
))
4540 return m_float
->op1_range (as_a
<frange
> (r
), type
,
4541 as_a
<irange
> (lhs
),
4542 as_a
<frange
> (op2
), rel
);
4543 return m_float
->op1_range (as_a
<frange
> (r
), type
,
4544 as_a
<frange
> (lhs
),
4545 as_a
<frange
> (op2
), rel
);
4549 range_op_handler::op2_range (vrange
&r
, tree type
,
4552 relation_trio rel
) const
4554 gcc_checking_assert (m_valid
);
4555 if (lhs
.undefined_p ())
4558 return m_int
->op2_range (as_a
<irange
> (r
), type
,
4559 as_a
<irange
> (lhs
),
4560 as_a
<irange
> (op1
), rel
);
4562 if (is_a
<irange
> (lhs
))
4563 return m_float
->op2_range (as_a
<frange
> (r
), type
,
4564 as_a
<irange
> (lhs
),
4565 as_a
<frange
> (op1
), rel
);
4566 return m_float
->op2_range (as_a
<frange
> (r
), type
,
4567 as_a
<frange
> (lhs
),
4568 as_a
<frange
> (op1
), rel
);
4572 range_op_handler::lhs_op1_relation (const vrange
&lhs
,
4575 relation_kind rel
) const
4577 gcc_checking_assert (m_valid
);
4579 return m_int
->lhs_op1_relation (as_a
<irange
> (lhs
),
4580 as_a
<irange
> (op1
),
4581 as_a
<irange
> (op2
), rel
);
4583 if (is_a
<irange
> (lhs
))
4584 return m_float
->lhs_op1_relation (as_a
<irange
> (lhs
),
4585 as_a
<frange
> (op1
),
4586 as_a
<frange
> (op2
), rel
);
4587 return m_float
->lhs_op1_relation (as_a
<frange
> (lhs
),
4588 as_a
<frange
> (op1
),
4589 as_a
<frange
> (op2
), rel
);
4593 range_op_handler::lhs_op2_relation (const vrange
&lhs
,
4596 relation_kind rel
) const
4598 gcc_checking_assert (m_valid
);
4600 return m_int
->lhs_op2_relation (as_a
<irange
> (lhs
),
4601 as_a
<irange
> (op1
),
4602 as_a
<irange
> (op2
), rel
);
4604 if (is_a
<irange
> (lhs
))
4605 return m_float
->lhs_op2_relation (as_a
<irange
> (lhs
),
4606 as_a
<frange
> (op1
),
4607 as_a
<frange
> (op2
), rel
);
4608 return m_float
->lhs_op2_relation (as_a
<frange
> (lhs
),
4609 as_a
<frange
> (op1
),
4610 as_a
<frange
> (op2
), rel
);
4614 range_op_handler::op1_op2_relation (const vrange
&lhs
) const
4616 gcc_checking_assert (m_valid
);
4618 return m_int
->op1_op2_relation (as_a
<irange
> (lhs
));
4619 if (is_a
<irange
> (lhs
))
4620 return m_float
->op1_op2_relation (as_a
<irange
> (lhs
));
4621 return m_float
->op1_op2_relation (as_a
<frange
> (lhs
));
// Cast the range in R to TYPE.

void
range_cast (vrange &r, tree type)
{
  Value_Range tmp (r);
  Value_Range varying (type);
  varying.set_varying (type);
  range_op_handler op (CONVERT_EXPR, type);
  // Call op_convert, if it fails, the result is varying.
  if (!op || !op.fold_range (r, type, tmp, varying))
    r.set_varying (type);
}
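// Example use (illustrative only, mirroring the selftests below):
//
//   int_range<1> r (SCHAR (-5), SCHAR (-1));
//   range_cast (r, unsigned_char_type_node);  // r is now [251, 255].
//   range_cast (r, signed_char_type_node);    // back to [-5, -1].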
4643 #include "selftest.h"
4647 #define INT(N) build_int_cst (integer_type_node, (N))
4648 #define UINT(N) build_int_cstu (unsigned_type_node, (N))
4649 #define INT16(N) build_int_cst (short_integer_type_node, (N))
4650 #define UINT16(N) build_int_cstu (short_unsigned_type_node, (N))
4651 #define SCHAR(N) build_int_cst (signed_char_type_node, (N))
4652 #define UCHAR(N) build_int_cstu (unsigned_char_type_node, (N))
4655 range_op_cast_tests ()
4657 int_range
<1> r0
, r1
, r2
, rold
;
4658 r0
.set_varying (integer_type_node
);
4659 tree maxint
= wide_int_to_tree (integer_type_node
, r0
.upper_bound ());
4661 // If a range is in any way outside of the range for the converted
4662 // to range, default to the range for the new type.
4663 r0
.set_varying (short_integer_type_node
);
4664 tree minshort
= wide_int_to_tree (short_integer_type_node
, r0
.lower_bound ());
4665 tree maxshort
= wide_int_to_tree (short_integer_type_node
, r0
.upper_bound ());
4666 if (TYPE_PRECISION (TREE_TYPE (maxint
))
4667 > TYPE_PRECISION (short_integer_type_node
))
4669 r1
= int_range
<1> (integer_zero_node
, maxint
);
4670 range_cast (r1
, short_integer_type_node
);
4671 ASSERT_TRUE (r1
.lower_bound () == wi::to_wide (minshort
)
4672 && r1
.upper_bound() == wi::to_wide (maxshort
));
4675 // (unsigned char)[-5,-1] => [251,255].
4676 r0
= rold
= int_range
<1> (SCHAR (-5), SCHAR (-1));
4677 range_cast (r0
, unsigned_char_type_node
);
4678 ASSERT_TRUE (r0
== int_range
<1> (UCHAR (251), UCHAR (255)));
4679 range_cast (r0
, signed_char_type_node
);
4680 ASSERT_TRUE (r0
== rold
);

  // (signed char)[15, 150] => [-128,-106][15,127].
  r0 = rold = int_range<1> (UCHAR (15), UCHAR (150));
  range_cast (r0, signed_char_type_node);
  r1 = int_range<1> (SCHAR (15), SCHAR (127));
  r2 = int_range<1> (SCHAR (-128), SCHAR (-106));
  r1.union_ (r2);
  ASSERT_TRUE (r1 == r0);
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == rold);

  // (unsigned char)[-5, 5] => [0,5][251,255].
  r0 = rold = int_range<1> (SCHAR (-5), SCHAR (5));
  range_cast (r0, unsigned_char_type_node);
  r1 = int_range<1> (UCHAR (251), UCHAR (255));
  r2 = int_range<1> (UCHAR (0), UCHAR (5));
  r1.union_ (r2);
  ASSERT_TRUE (r0 == r1);
  range_cast (r0, signed_char_type_node);
  ASSERT_TRUE (r0 == rold);

  // (unsigned char)[-5,5] => [0,5][251,255].
  r0 = int_range<1> (INT (-5), INT (5));
  range_cast (r0, unsigned_char_type_node);
  r1 = int_range<1> (UCHAR (0), UCHAR (5));
  r1.union_ (int_range<1> (UCHAR (251), UCHAR (255)));
  ASSERT_TRUE (r0 == r1);

  // (unsigned char)[5U,1974U] => [0,255].
  r0 = int_range<1> (UINT (5), UINT (1974));
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == int_range<1> (UCHAR (0), UCHAR (255)));
  range_cast (r0, integer_type_node);
  // Going to a wider range should not sign extend.
  ASSERT_TRUE (r0 == int_range<1> (INT (0), INT (255)));

  // (unsigned char)[-350,15] => [0,255].
  r0 = int_range<1> (INT (-350), INT (15));
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == (int_range<1>
		      (TYPE_MIN_VALUE (unsigned_char_type_node),
		       TYPE_MAX_VALUE (unsigned_char_type_node))));

  // Casting [-120,20] from signed char to unsigned short.
  // => [0, 20][0xff88, 0xffff].
  r0 = int_range<1> (SCHAR (-120), SCHAR (20));
  range_cast (r0, short_unsigned_type_node);
  r1 = int_range<1> (UINT16 (0), UINT16 (20));
  r2 = int_range<1> (UINT16 (0xff88), UINT16 (0xffff));
  r1.union_ (r2);
  ASSERT_TRUE (r0 == r1);
  // A truncating cast back to signed char will work because [-120, 20]
  // is representable in signed char.
  range_cast (r0, signed_char_type_node);
  ASSERT_TRUE (r0 == int_range<1> (SCHAR (-120), SCHAR (20)));

  // unsigned char -> signed short
  //   (signed short)[(unsigned char)25, (unsigned char)250]
  // => [(signed short)25, (signed short)250]
  r0 = rold = int_range<1> (UCHAR (25), UCHAR (250));
  range_cast (r0, short_integer_type_node);
  r1 = int_range<1> (INT16 (25), INT16 (250));
  ASSERT_TRUE (r0 == r1);
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == rold);

  // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
  r0 = int_range<1> (TYPE_MIN_VALUE (long_long_integer_type_node),
		     TYPE_MAX_VALUE (long_long_integer_type_node));
  range_cast (r0, short_unsigned_type_node);
  r1 = int_range<1> (TYPE_MIN_VALUE (short_unsigned_type_node),
		     TYPE_MAX_VALUE (short_unsigned_type_node));
  ASSERT_TRUE (r0 == r1);

  // Casting NONZERO to a narrower type will wrap/overflow so
  // it's just the entire range for the narrower type.
  //
  // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32].  This is
  // outside of the range of the smaller type, so the conversion returns
  // the full smaller range.
  if (TYPE_PRECISION (integer_type_node)
      > TYPE_PRECISION (short_integer_type_node))
    {
      r0 = range_nonzero (integer_type_node);
      range_cast (r0, short_integer_type_node);
      r1 = int_range<1> (TYPE_MIN_VALUE (short_integer_type_node),
			 TYPE_MAX_VALUE (short_integer_type_node));
      ASSERT_TRUE (r0 == r1);
    }
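
  // For example, the nonzero 32-bit value 0x10000 truncates to 0 in 16
  // bits, so the "hole" at zero cannot survive the narrowing cast and the
  // best available answer is the full 16-bit range.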

  // Casting NONZERO from a narrower signed to a wider signed.
  //
  // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
  // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
  r0 = range_nonzero (short_integer_type_node);
  range_cast (r0, integer_type_node);
  r1 = int_range<1> (INT (-32768), INT (-1));
  r2 = int_range<1> (INT (1), INT (32767));
  r1.union_ (r2);
  ASSERT_TRUE (r0 == r1);
}

static void
range_op_lshift_tests ()
{
  // Test that 0x808.... & 0x8.... still contains 0x8....
  // for a large set of numbers.
  {
    int_range_max res;
    tree big_type = long_long_unsigned_type_node;
    // big_num = 0x808,0000,0000,0000
    tree big_num = fold_build2 (LSHIFT_EXPR, big_type,
				build_int_cst (big_type, 0x808),
				build_int_cst (big_type, 48));
    op_bitwise_and.fold_range (res, big_type,
			       int_range<1> (big_type),
			       int_range<1> (big_num, big_num));
    // val = 0x8,0000,0000,0000
    tree val = fold_build2 (LSHIFT_EXPR, big_type,
			    build_int_cst (big_type, 0x8),
			    build_int_cst (big_type, 48));
    ASSERT_TRUE (res.contains_p (val));
  }
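
  // Worked out: 0x808 << 48 is 0x0808'0000'0000'0000 (bits 51 and 59 set)
  // and 0x8 << 48 is 0x0008'0000'0000'0000 (bit 51 only).  ANDing an
  // unknown 64-bit value with the former can still produce the latter
  // (only bit 51 surviving), so the folded range must contain it.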

  if (TYPE_PRECISION (unsigned_type_node) > 31)
    {
      // unsigned VARYING = op1 << 1 should be VARYING.
      int_range<2> lhs (unsigned_type_node);
      int_range<2> shift (INT (1), INT (1));
      int_range_max op1;
      op_lshift.op1_range (op1, unsigned_type_node, lhs, shift);
      ASSERT_TRUE (op1.varying_p ());

      // 0 = op1 << 1 should be [0,0][0x80000000, 0x80000000].
      int_range<2> zero (UINT (0), UINT (0));
      op_lshift.op1_range (op1, unsigned_type_node, zero, shift);
      ASSERT_TRUE (op1.num_pairs () == 2);
      // Remove the [0,0] range.
      op1.intersect (zero);
      ASSERT_TRUE (op1.num_pairs () == 1);
      // op1 << 1 should be [0x80000000,0x80000000] << 1,
      // which should result in [0,0].
      int_range_max result;
      op_lshift.fold_range (result, unsigned_type_node, op1, shift);
      ASSERT_TRUE (result == zero);
    }
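
  // Rationale for the "0 = op1 << 1" check above: in 32-bit unsigned
  // arithmetic x << 1 == 0 only for x == 0 or x == 0x80000000 (the top
  // bit is shifted out), which is why op1_range reports exactly two
  // candidate pairs.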

  // signed VARYING = op1 << 1 should be VARYING.
  if (TYPE_PRECISION (integer_type_node) > 31)
    {
      // signed VARYING = op1 << 1 should be VARYING.
      int_range<2> lhs (integer_type_node);
      int_range<2> shift (INT (1), INT (1));
      int_range_max op1;
      op_lshift.op1_range (op1, integer_type_node, lhs, shift);
      ASSERT_TRUE (op1.varying_p ());

      // 0 = op1 << 1 should be [0,0][0x80000000, 0x80000000].
      int_range<2> zero (INT (0), INT (0));
      op_lshift.op1_range (op1, integer_type_node, zero, shift);
      ASSERT_TRUE (op1.num_pairs () == 2);
      // Remove the [0,0] range.
      op1.intersect (zero);
      ASSERT_TRUE (op1.num_pairs () == 1);
      // op1 << 1 should be [0x80000000,0x80000000] << 1,
      // which should result in [0,0].
      int_range_max result;
      op_lshift.fold_range (result, unsigned_type_node, op1, shift);
      ASSERT_TRUE (result == zero);
    }
}

static void
range_op_rshift_tests ()
{
  // unsigned: [3, MAX] = OP1 >> 1
  {
    int_range_max lhs (build_int_cst (unsigned_type_node, 3),
		       TYPE_MAX_VALUE (unsigned_type_node));
    int_range_max one (build_one_cst (unsigned_type_node),
		       build_one_cst (unsigned_type_node));
    int_range_max op1;
    op_rshift.op1_range (op1, unsigned_type_node, lhs, one);
    ASSERT_FALSE (op1.contains_p (UINT (3)));
  }

  // signed: [3, MAX] = OP1 >> 1
  {
    int_range_max lhs (INT (3), TYPE_MAX_VALUE (integer_type_node));
    int_range_max one (INT (1), INT (1));
    int_range_max op1;
    op_rshift.op1_range (op1, integer_type_node, lhs, one);
    ASSERT_FALSE (op1.contains_p (INT (-2)));
  }

  // This is impossible, so OP1 should be [].
  // signed: [MIN, MIN] = OP1 >> 1
  {
    int_range_max lhs (TYPE_MIN_VALUE (integer_type_node),
		       TYPE_MIN_VALUE (integer_type_node));
    int_range_max one (INT (1), INT (1));
    int_range_max op1;
    op_rshift.op1_range (op1, integer_type_node, lhs, one);
    ASSERT_TRUE (op1.undefined_p ());
  }
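
  // Why the case above is impossible: an arithmetic right shift by 1 of
  // any 32-bit value lies in [INT_MIN/2, INT_MAX/2], so no OP1 can
  // produce INT_MIN and the computed operand range is empty.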

  // signed: ~[-1] = OP1 >> 31
  if (TYPE_PRECISION (integer_type_node) > 31)
    {
      int_range_max lhs (INT (-1), INT (-1), VR_ANTI_RANGE);
      int_range_max shift (INT (31), INT (31));
      int_range_max op1;
      op_rshift.op1_range (op1, integer_type_node, lhs, shift);
      int_range_max negatives = range_negatives (integer_type_node);
      negatives.intersect (op1);
      ASSERT_TRUE (negatives.undefined_p ());
    }
}

static void
range_op_bitwise_and_tests ()
{
  int_range_max res;
  tree min = vrp_val_min (integer_type_node);
  tree max = vrp_val_max (integer_type_node);
  tree tiny = fold_build2 (PLUS_EXPR, integer_type_node, min,
			   build_one_cst (integer_type_node));
  int_range_max i1 (tiny, max);
  int_range_max i2 (build_int_cst (integer_type_node, 255),
		    build_int_cst (integer_type_node, 255));

  // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
  op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
  ASSERT_TRUE (res == int_range<1> (integer_type_node));

  // VARYING = OP1 & 255: OP1 is VARYING
  i1 = int_range<1> (integer_type_node);
  op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
  ASSERT_TRUE (res == int_range<1> (integer_type_node));

  // For 0 = x & MASK, x is ~MASK.
  {
    int_range<2> zero (integer_zero_node, integer_zero_node);
    int_range<2> mask = int_range<2> (INT (7), INT (7));
    op_bitwise_and.op1_range (res, integer_type_node, zero, mask);
    wide_int inv = wi::shwi (~7U, TYPE_PRECISION (integer_type_node));
    ASSERT_TRUE (res.get_nonzero_bits () == inv);
  }
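
  // Reasoning for the block above: x & 7 == 0 forces the low three bits
  // of x to zero, so the only bits that may still be set in x are ~7,
  // which is exactly what the recorded nonzero-bits mask says.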

  // (NONZERO | X) is nonzero.
  i1.set_nonzero (integer_type_node);
  i2.set_varying (integer_type_node);
  op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
  ASSERT_TRUE (res.nonzero_p ());
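  // OR can only set bits, never clear them, so a known-nonzero operand
  // keeps at least one bit set in the result; the same argument applies
  // to the sign bit in the negative-operand case below.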

  // (NEGATIVE | X) is nonzero.
  i1 = int_range<1> (INT (-5), INT (-3));
  i2.set_varying (integer_type_node);
  op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
  ASSERT_FALSE (res.contains_p (INT (0)));
}

static void
range_relational_tests ()
{
  int_range<2> lhs (unsigned_char_type_node);
  int_range<2> op1 (UCHAR (8), UCHAR (10));
  int_range<2> op2 (UCHAR (20), UCHAR (20));

  // Never-wrapping additions mean LHS > OP1.
  relation_kind code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
  ASSERT_TRUE (code == VREL_GT);
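  // In unsigned char arithmetic [8,10] + [20,20] peaks at 30, far below
  // 256, so the sum can never wrap and is always greater than OP1.  The
  // two cases below show what happens once wrapping is merely possible
  // (nothing can be concluded) or certain ([1,255] + 255 wraps to
  // OP1 - 1, hence LHS < OP1).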

  // Most wrapping additions mean nothing...
  op1 = int_range<2> (UCHAR (8), UCHAR (10));
  op2 = int_range<2> (UCHAR (0), UCHAR (255));
  code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
  ASSERT_TRUE (code == VREL_VARYING);

  // However, always-wrapping additions mean LHS < OP1.
  op1 = int_range<2> (UCHAR (1), UCHAR (255));
  op2 = int_range<2> (UCHAR (255), UCHAR (255));
  code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
  ASSERT_TRUE (code == VREL_LT);
}

void
range_op_tests ()
{
  range_op_rshift_tests ();
  range_op_lshift_tests ();
  range_op_bitwise_and_tests ();
  range_op_cast_tests ();
  range_relational_tests ();

  extern void range_op_float_tests ();
  range_op_float_tests ();
}

} // namespace selftest

#endif // CHECKING_P