1 /* Code for range operators.
2 Copyright (C) 2017-2023 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
26 #include "insn-codes.h"
31 #include "tree-pass.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
44 #include "gimple-walk.h"
47 #include "value-relation.h"
49 #include "tree-ssa-ccp.h"
51 // Convert irange bitmasks into a VALUE MASK pair suitable for calling CCP.
54 irange_to_masked_value (const irange
&r
, widest_int
&value
, widest_int
&mask
)
59 value
= widest_int::from (r
.lower_bound (), TYPE_SIGN (r
.type ()));
63 mask
= widest_int::from (r
.get_nonzero_bits (), TYPE_SIGN (r
.type ()));
68 // Update the known bitmasks in R when applying the operation CODE to
72 update_known_bitmask (irange
&r
, tree_code code
,
73 const irange
&lh
, const irange
&rh
)
75 if (r
.undefined_p () || lh
.undefined_p () || rh
.undefined_p ())
78 widest_int value
, mask
, lh_mask
, rh_mask
, lh_value
, rh_value
;
79 tree type
= r
.type ();
80 signop sign
= TYPE_SIGN (type
);
81 int prec
= TYPE_PRECISION (type
);
82 signop lh_sign
= TYPE_SIGN (lh
.type ());
83 signop rh_sign
= TYPE_SIGN (rh
.type ());
84 int lh_prec
= TYPE_PRECISION (lh
.type ());
85 int rh_prec
= TYPE_PRECISION (rh
.type ());
87 irange_to_masked_value (lh
, lh_value
, lh_mask
);
88 irange_to_masked_value (rh
, rh_value
, rh_mask
);
89 bit_value_binop (code
, sign
, prec
, &value
, &mask
,
90 lh_sign
, lh_prec
, lh_value
, lh_mask
,
91 rh_sign
, rh_prec
, rh_value
, rh_mask
);
92 r
.set_nonzero_bits (value
| mask
);
95 // Return the upper limit for a type.
97 static inline wide_int
98 max_limit (const_tree type
)
100 return wi::max_value (TYPE_PRECISION (type
) , TYPE_SIGN (type
));
103 // Return the lower limit for a type.
105 static inline wide_int
106 min_limit (const_tree type
)
108 return wi::min_value (TYPE_PRECISION (type
) , TYPE_SIGN (type
));
111 // Return false if shifting by OP is undefined behavior. Otherwise, return
112 // true and the range it is to be shifted by. This allows trimming out of
113 // undefined ranges, leaving only valid ranges if there are any.
116 get_shift_range (irange
&r
, tree type
, const irange
&op
)
118 if (op
.undefined_p ())
121 // Build valid range and intersect it with the shift range.
122 r
= value_range (build_int_cst_type (op
.type (), 0),
123 build_int_cst_type (op
.type (), TYPE_PRECISION (type
) - 1));
126 // If there are no valid ranges in the shift range, returned false.
127 if (r
.undefined_p ())
132 // Return TRUE if 0 is within [WMIN, WMAX].
135 wi_includes_zero_p (tree type
, const wide_int
&wmin
, const wide_int
&wmax
)
137 signop sign
= TYPE_SIGN (type
);
138 return wi::le_p (wmin
, 0, sign
) && wi::ge_p (wmax
, 0, sign
);
141 // Return TRUE if [WMIN, WMAX] is the singleton 0.
144 wi_zero_p (tree type
, const wide_int
&wmin
, const wide_int
&wmax
)
146 unsigned prec
= TYPE_PRECISION (type
);
147 return wmin
== wmax
&& wi::eq_p (wmin
, wi::zero (prec
));
150 // Default wide_int fold operation returns [MIN, MAX].
153 range_operator::wi_fold (irange
&r
, tree type
,
154 const wide_int
&lh_lb ATTRIBUTE_UNUSED
,
155 const wide_int
&lh_ub ATTRIBUTE_UNUSED
,
156 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
157 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
159 gcc_checking_assert (r
.supports_type_p (type
));
160 r
.set_varying (type
);
163 // Call wi_fold when both op1 and op2 are equivalent. Further split small
164 // subranges into constants. This can provide better precision.
165 // For x + y, when x == y with a range of [0,4] instead of [0, 8] produce
166 // [0,0][2, 2][4,4][6, 6][8, 8]
167 // LIMIT is the maximum number of elements in range allowed before we
168 // do not process them individually.
171 range_operator::wi_fold_in_parts_equiv (irange
&r
, tree type
,
172 const wide_int
&lh_lb
,
173 const wide_int
&lh_ub
,
174 unsigned limit
) const
177 widest_int lh_range
= wi::sub (widest_int::from (lh_ub
, TYPE_SIGN (type
)),
178 widest_int::from (lh_lb
, TYPE_SIGN (type
)));
179 // if there are 1 to 8 values in the LH range, split them up.
181 if (lh_range
>= 0 && lh_range
< limit
)
183 for (unsigned x
= 0; x
<= lh_range
; x
++)
185 wide_int val
= lh_lb
+ x
;
186 wi_fold (tmp
, type
, val
, val
, val
, val
);
190 // Otherwise just call wi_fold.
192 wi_fold (r
, type
, lh_lb
, lh_ub
, lh_lb
, lh_ub
);
195 // Call wi_fold, except further split small subranges into constants.
196 // This can provide better precision. For something 8 >> [0,1]
197 // Instead of [8, 16], we will produce [8,8][16,16]
200 range_operator::wi_fold_in_parts (irange
&r
, tree type
,
201 const wide_int
&lh_lb
,
202 const wide_int
&lh_ub
,
203 const wide_int
&rh_lb
,
204 const wide_int
&rh_ub
) const
207 widest_int rh_range
= wi::sub (widest_int::from (rh_ub
, TYPE_SIGN (type
)),
208 widest_int::from (rh_lb
, TYPE_SIGN (type
)));
209 widest_int lh_range
= wi::sub (widest_int::from (lh_ub
, TYPE_SIGN (type
)),
210 widest_int::from (lh_lb
, TYPE_SIGN (type
)));
211 // If there are 2, 3, or 4 values in the RH range, do them separately.
212 // Call wi_fold_in_parts to check the RH side.
213 if (rh_range
> 0 && rh_range
< 4)
215 wi_fold_in_parts (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_lb
);
218 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
+ 1, rh_lb
+ 1);
222 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
+ 2, rh_lb
+ 2);
226 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_ub
, rh_ub
);
229 // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
230 // The RH side has been checked, so no recursion needed.
231 else if (lh_range
> 0 && lh_range
< 4)
233 wi_fold (r
, type
, lh_lb
, lh_lb
, rh_lb
, rh_ub
);
236 wi_fold (tmp
, type
, lh_lb
+ 1, lh_lb
+ 1, rh_lb
, rh_ub
);
240 wi_fold (tmp
, type
, lh_lb
+ 2, lh_lb
+ 2, rh_lb
, rh_ub
);
244 wi_fold (tmp
, type
, lh_ub
, lh_ub
, rh_lb
, rh_ub
);
247 // Otherwise just call wi_fold.
249 wi_fold (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
252 // The default for fold is to break all ranges into sub-ranges and
253 // invoke the wi_fold method on each sub-range pair.
256 range_operator::fold_range (irange
&r
, tree type
,
259 relation_trio trio
) const
261 gcc_checking_assert (r
.supports_type_p (type
));
262 if (empty_range_varying (r
, type
, lh
, rh
))
265 relation_kind rel
= trio
.op1_op2 ();
266 unsigned num_lh
= lh
.num_pairs ();
267 unsigned num_rh
= rh
.num_pairs ();
269 // If op1 and op2 are equivalences, then we don't need a complete cross
270 // product, just pairs of matching elements.
271 if (relation_equiv_p (rel
) && lh
== rh
)
275 for (unsigned x
= 0; x
< num_lh
; ++x
)
277 // If the number of subranges is too high, limit subrange creation.
278 unsigned limit
= (r
.num_pairs () > 32) ? 0 : 8;
279 wide_int lh_lb
= lh
.lower_bound (x
);
280 wide_int lh_ub
= lh
.upper_bound (x
);
281 wi_fold_in_parts_equiv (tmp
, type
, lh_lb
, lh_ub
, limit
);
286 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
287 update_known_bitmask (r
, m_code
, lh
, rh
);
291 // If both ranges are single pairs, fold directly into the result range.
292 // If the number of subranges grows too high, produce a summary result as the
293 // loop becomes exponential with little benefit. See PR 103821.
294 if ((num_lh
== 1 && num_rh
== 1) || num_lh
* num_rh
> 12)
296 wi_fold_in_parts (r
, type
, lh
.lower_bound (), lh
.upper_bound (),
297 rh
.lower_bound (), rh
.upper_bound ());
298 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
299 update_known_bitmask (r
, m_code
, lh
, rh
);
305 for (unsigned x
= 0; x
< num_lh
; ++x
)
306 for (unsigned y
= 0; y
< num_rh
; ++y
)
308 wide_int lh_lb
= lh
.lower_bound (x
);
309 wide_int lh_ub
= lh
.upper_bound (x
);
310 wide_int rh_lb
= rh
.lower_bound (y
);
311 wide_int rh_ub
= rh
.upper_bound (y
);
312 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
316 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
317 update_known_bitmask (r
, m_code
, lh
, rh
);
321 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
322 update_known_bitmask (r
, m_code
, lh
, rh
);
326 // The default for op1_range is to return false.
329 range_operator::op1_range (irange
&r ATTRIBUTE_UNUSED
,
330 tree type ATTRIBUTE_UNUSED
,
331 const irange
&lhs ATTRIBUTE_UNUSED
,
332 const irange
&op2 ATTRIBUTE_UNUSED
,
338 // The default for op2_range is to return false.
341 range_operator::op2_range (irange
&r ATTRIBUTE_UNUSED
,
342 tree type ATTRIBUTE_UNUSED
,
343 const irange
&lhs ATTRIBUTE_UNUSED
,
344 const irange
&op1 ATTRIBUTE_UNUSED
,
350 // The default relation routines return VREL_VARYING.
353 range_operator::lhs_op1_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
354 const irange
&op1 ATTRIBUTE_UNUSED
,
355 const irange
&op2 ATTRIBUTE_UNUSED
,
356 relation_kind rel ATTRIBUTE_UNUSED
) const
362 range_operator::lhs_op2_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
363 const irange
&op1 ATTRIBUTE_UNUSED
,
364 const irange
&op2 ATTRIBUTE_UNUSED
,
365 relation_kind rel ATTRIBUTE_UNUSED
) const
371 range_operator::op1_op2_relation (const irange
&lhs ATTRIBUTE_UNUSED
) const
376 // Default is no relation affects the LHS.
379 range_operator::op1_op2_relation_effect (irange
&lhs_range ATTRIBUTE_UNUSED
,
380 tree type ATTRIBUTE_UNUSED
,
381 const irange
&op1_range ATTRIBUTE_UNUSED
,
382 const irange
&op2_range ATTRIBUTE_UNUSED
,
383 relation_kind rel ATTRIBUTE_UNUSED
) const
388 // Create and return a range from a pair of wide-ints that are known
389 // to have overflowed (or underflowed).
392 value_range_from_overflowed_bounds (irange
&r
, tree type
,
393 const wide_int
&wmin
,
394 const wide_int
&wmax
)
396 const signop sgn
= TYPE_SIGN (type
);
397 const unsigned int prec
= TYPE_PRECISION (type
);
399 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
400 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
405 if (wi::cmp (tmin
, tmax
, sgn
) < 0)
408 if (wi::cmp (tmax
, tem
, sgn
) > 0)
411 // If the anti-range would cover nothing, drop to varying.
412 // Likewise if the anti-range bounds are outside of the types
414 if (covers
|| wi::cmp (tmin
, tmax
, sgn
) > 0)
415 r
.set_varying (type
);
418 tree tree_min
= wide_int_to_tree (type
, tmin
);
419 tree tree_max
= wide_int_to_tree (type
, tmax
);
420 r
.set (tree_min
, tree_max
, VR_ANTI_RANGE
);
424 // Create and return a range from a pair of wide-ints. MIN_OVF and
425 // MAX_OVF describe any overflow that might have occurred while
426 // calculating WMIN and WMAX respectively.
429 value_range_with_overflow (irange
&r
, tree type
,
430 const wide_int
&wmin
, const wide_int
&wmax
,
431 wi::overflow_type min_ovf
= wi::OVF_NONE
,
432 wi::overflow_type max_ovf
= wi::OVF_NONE
)
434 const signop sgn
= TYPE_SIGN (type
);
435 const unsigned int prec
= TYPE_PRECISION (type
);
436 const bool overflow_wraps
= TYPE_OVERFLOW_WRAPS (type
);
438 // For one bit precision if max != min, then the range covers all
440 if (prec
== 1 && wi::ne_p (wmax
, wmin
))
442 r
.set_varying (type
);
448 // If overflow wraps, truncate the values and adjust the range,
449 // kind, and bounds appropriately.
450 if ((min_ovf
!= wi::OVF_NONE
) == (max_ovf
!= wi::OVF_NONE
))
452 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
453 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
454 // If the limits are swapped, we wrapped around and cover
456 if (wi::gt_p (tmin
, tmax
, sgn
))
457 r
.set_varying (type
);
459 // No overflow or both overflow or underflow. The range
460 // kind stays normal.
461 r
.set (wide_int_to_tree (type
, tmin
),
462 wide_int_to_tree (type
, tmax
));
466 if ((min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_NONE
)
467 || (max_ovf
== wi::OVF_OVERFLOW
&& min_ovf
== wi::OVF_NONE
))
468 value_range_from_overflowed_bounds (r
, type
, wmin
, wmax
);
470 // Other underflow and/or overflow, drop to VR_VARYING.
471 r
.set_varying (type
);
475 // If both bounds either underflowed or overflowed, then the result
477 if ((min_ovf
== wi::OVF_OVERFLOW
&& max_ovf
== wi::OVF_OVERFLOW
)
478 || (min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_UNDERFLOW
))
484 // If overflow does not wrap, saturate to [MIN, MAX].
485 wide_int new_lb
, new_ub
;
486 if (min_ovf
== wi::OVF_UNDERFLOW
)
487 new_lb
= wi::min_value (prec
, sgn
);
488 else if (min_ovf
== wi::OVF_OVERFLOW
)
489 new_lb
= wi::max_value (prec
, sgn
);
493 if (max_ovf
== wi::OVF_UNDERFLOW
)
494 new_ub
= wi::min_value (prec
, sgn
);
495 else if (max_ovf
== wi::OVF_OVERFLOW
)
496 new_ub
= wi::max_value (prec
, sgn
);
500 r
.set (wide_int_to_tree (type
, new_lb
),
501 wide_int_to_tree (type
, new_ub
));
505 // Create and return a range from a pair of wide-ints. Canonicalize
506 // the case where the bounds are swapped. In which case, we transform
507 // [10,5] into [MIN,5][10,MAX].
510 create_possibly_reversed_range (irange
&r
, tree type
,
511 const wide_int
&new_lb
, const wide_int
&new_ub
)
513 signop s
= TYPE_SIGN (type
);
514 // If the bounds are swapped, treat the result as if an overflow occurred.
515 if (wi::gt_p (new_lb
, new_ub
, s
))
516 value_range_from_overflowed_bounds (r
, type
, new_lb
, new_ub
);
518 // Otherwise it's just a normal range.
519 r
.set (wide_int_to_tree (type
, new_lb
), wide_int_to_tree (type
, new_ub
));
522 // Return the summary information about boolean range LHS. If EMPTY/FULL,
523 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
526 get_bool_state (vrange
&r
, const vrange
&lhs
, tree val_type
)
528 // If there is no result, then this is unexecutable.
529 if (lhs
.undefined_p ())
538 // For TRUE, we can't just test for [1,1] because Ada can have
539 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
540 if (lhs
.contains_p (build_zero_cst (lhs
.type ())))
542 r
.set_varying (val_type
);
550 class operator_equal
: public range_operator
552 using range_operator::fold_range
;
553 using range_operator::op1_range
;
554 using range_operator::op2_range
;
556 virtual bool fold_range (irange
&r
, tree type
,
559 relation_trio
= TRIO_VARYING
) const;
560 virtual bool op1_range (irange
&r
, tree type
,
563 relation_trio
= TRIO_VARYING
) const;
564 virtual bool op2_range (irange
&r
, tree type
,
567 relation_trio
= TRIO_VARYING
) const;
568 virtual relation_kind
op1_op2_relation (const irange
&lhs
) const;
571 // Check if the LHS range indicates a relation between OP1 and OP2.
574 equal_op1_op2_relation (const irange
&lhs
)
576 if (lhs
.undefined_p ())
577 return VREL_UNDEFINED
;
579 // FALSE = op1 == op2 indicates NE_EXPR.
583 // TRUE = op1 == op2 indicates EQ_EXPR.
584 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
590 operator_equal::op1_op2_relation (const irange
&lhs
) const
592 return equal_op1_op2_relation (lhs
);
597 operator_equal::fold_range (irange
&r
, tree type
,
600 relation_trio rel
) const
602 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_EQ
))
605 // We can be sure the values are always equal or not if both ranges
606 // consist of a single value, and then compare them.
607 if (wi::eq_p (op1
.lower_bound (), op1
.upper_bound ())
608 && wi::eq_p (op2
.lower_bound (), op2
.upper_bound ()))
610 if (wi::eq_p (op1
.lower_bound (), op2
.upper_bound()))
611 r
= range_true (type
);
613 r
= range_false (type
);
617 // If ranges do not intersect, we know the range is not equal,
618 // otherwise we don't know anything for sure.
619 int_range_max tmp
= op1
;
621 if (tmp
.undefined_p ())
622 r
= range_false (type
);
624 r
= range_true_and_false (type
);
630 operator_equal::op1_range (irange
&r
, tree type
,
635 switch (get_bool_state (r
, lhs
, type
))
638 // If it's true, the result is the same as OP2.
643 // If the result is false, the only time we know anything is
644 // if OP2 is a constant.
645 if (!op2
.undefined_p ()
646 && wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
652 r
.set_varying (type
);
662 operator_equal::op2_range (irange
&r
, tree type
,
665 relation_trio rel
) const
667 return operator_equal::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
670 class operator_not_equal
: public range_operator
672 using range_operator::fold_range
;
673 using range_operator::op1_range
;
674 using range_operator::op2_range
;
676 virtual bool fold_range (irange
&r
, tree type
,
679 relation_trio
= TRIO_VARYING
) const;
680 virtual bool op1_range (irange
&r
, tree type
,
683 relation_trio
= TRIO_VARYING
) const;
684 virtual bool op2_range (irange
&r
, tree type
,
687 relation_trio
= TRIO_VARYING
) const;
688 virtual relation_kind
op1_op2_relation (const irange
&lhs
) const;
691 // Check if the LHS range indicates a relation between OP1 and OP2.
694 not_equal_op1_op2_relation (const irange
&lhs
)
696 if (lhs
.undefined_p ())
697 return VREL_UNDEFINED
;
699 // FALSE = op1 != op2 indicates EQ_EXPR.
703 // TRUE = op1 != op2 indicates NE_EXPR.
704 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
710 operator_not_equal::op1_op2_relation (const irange
&lhs
) const
712 return not_equal_op1_op2_relation (lhs
);
716 operator_not_equal::fold_range (irange
&r
, tree type
,
719 relation_trio rel
) const
721 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_NE
))
724 // We can be sure the values are always equal or not if both ranges
725 // consist of a single value, and then compare them.
726 if (wi::eq_p (op1
.lower_bound (), op1
.upper_bound ())
727 && wi::eq_p (op2
.lower_bound (), op2
.upper_bound ()))
729 if (wi::ne_p (op1
.lower_bound (), op2
.upper_bound()))
730 r
= range_true (type
);
732 r
= range_false (type
);
736 // If ranges do not intersect, we know the range is not equal,
737 // otherwise we don't know anything for sure.
738 int_range_max tmp
= op1
;
740 if (tmp
.undefined_p ())
741 r
= range_true (type
);
743 r
= range_true_and_false (type
);
749 operator_not_equal::op1_range (irange
&r
, tree type
,
754 switch (get_bool_state (r
, lhs
, type
))
757 // If the result is true, the only time we know anything is if
758 // OP2 is a constant.
759 if (!op2
.undefined_p ()
760 && wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
766 r
.set_varying (type
);
770 // If it's false, the result is the same as OP2.
782 operator_not_equal::op2_range (irange
&r
, tree type
,
785 relation_trio rel
) const
787 return operator_not_equal::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
790 // (X < VAL) produces the range of [MIN, VAL - 1].
793 build_lt (irange
&r
, tree type
, const wide_int
&val
)
795 wi::overflow_type ov
;
797 signop sgn
= TYPE_SIGN (type
);
799 // Signed 1 bit cannot represent 1 for subtraction.
801 lim
= wi::add (val
, -1, sgn
, &ov
);
803 lim
= wi::sub (val
, 1, sgn
, &ov
);
805 // If val - 1 underflows, check if X < MIN, which is an empty range.
809 r
= int_range
<1> (type
, min_limit (type
), lim
);
812 // (X <= VAL) produces the range of [MIN, VAL].
815 build_le (irange
&r
, tree type
, const wide_int
&val
)
817 r
= int_range
<1> (type
, min_limit (type
), val
);
820 // (X > VAL) produces the range of [VAL + 1, MAX].
823 build_gt (irange
&r
, tree type
, const wide_int
&val
)
825 wi::overflow_type ov
;
827 signop sgn
= TYPE_SIGN (type
);
829 // Signed 1 bit cannot represent 1 for addition.
831 lim
= wi::sub (val
, -1, sgn
, &ov
);
833 lim
= wi::add (val
, 1, sgn
, &ov
);
834 // If val + 1 overflows, check is for X > MAX, which is an empty range.
838 r
= int_range
<1> (type
, lim
, max_limit (type
));
841 // (X >= val) produces the range of [VAL, MAX].
844 build_ge (irange
&r
, tree type
, const wide_int
&val
)
846 r
= int_range
<1> (type
, val
, max_limit (type
));
850 class operator_lt
: public range_operator
852 using range_operator::fold_range
;
853 using range_operator::op1_range
;
854 using range_operator::op2_range
;
856 virtual bool fold_range (irange
&r
, tree type
,
859 relation_trio
= TRIO_VARYING
) const;
860 virtual bool op1_range (irange
&r
, tree type
,
863 relation_trio
= TRIO_VARYING
) const;
864 virtual bool op2_range (irange
&r
, tree type
,
867 relation_trio
= TRIO_VARYING
) const;
868 virtual relation_kind
op1_op2_relation (const irange
&lhs
) const;
871 // Check if the LHS range indicates a relation between OP1 and OP2.
874 lt_op1_op2_relation (const irange
&lhs
)
876 if (lhs
.undefined_p ())
877 return VREL_UNDEFINED
;
879 // FALSE = op1 < op2 indicates GE_EXPR.
883 // TRUE = op1 < op2 indicates LT_EXPR.
884 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
890 operator_lt::op1_op2_relation (const irange
&lhs
) const
892 return lt_op1_op2_relation (lhs
);
896 operator_lt::fold_range (irange
&r
, tree type
,
899 relation_trio rel
) const
901 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_LT
))
904 signop sign
= TYPE_SIGN (op1
.type ());
905 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
907 if (wi::lt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
908 r
= range_true (type
);
909 else if (!wi::lt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
910 r
= range_false (type
);
911 // Use nonzero bits to determine if < 0 is false.
912 else if (op2
.zero_p () && !wi::neg_p (op1
.get_nonzero_bits (), sign
))
913 r
= range_false (type
);
915 r
= range_true_and_false (type
);
920 operator_lt::op1_range (irange
&r
, tree type
,
925 if (op2
.undefined_p ())
928 switch (get_bool_state (r
, lhs
, type
))
931 build_lt (r
, type
, op2
.upper_bound ());
935 build_ge (r
, type
, op2
.lower_bound ());
945 operator_lt::op2_range (irange
&r
, tree type
,
950 if (op1
.undefined_p ())
953 switch (get_bool_state (r
, lhs
, type
))
956 build_gt (r
, type
, op1
.lower_bound ());
960 build_le (r
, type
, op1
.upper_bound ());
970 class operator_le
: public range_operator
972 using range_operator::fold_range
;
973 using range_operator::op1_range
;
974 using range_operator::op2_range
;
976 virtual bool fold_range (irange
&r
, tree type
,
979 relation_trio
= TRIO_VARYING
) const;
980 virtual bool op1_range (irange
&r
, tree type
,
983 relation_trio
= TRIO_VARYING
) const;
984 virtual bool op2_range (irange
&r
, tree type
,
987 relation_trio
= TRIO_VARYING
) const;
988 virtual relation_kind
op1_op2_relation (const irange
&lhs
) const;
991 // Check if the LHS range indicates a relation between OP1 and OP2.
994 le_op1_op2_relation (const irange
&lhs
)
996 if (lhs
.undefined_p ())
997 return VREL_UNDEFINED
;
999 // FALSE = op1 <= op2 indicates GT_EXPR.
1003 // TRUE = op1 <= op2 indicates LE_EXPR.
1004 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
1006 return VREL_VARYING
;
1010 operator_le::op1_op2_relation (const irange
&lhs
) const
1012 return le_op1_op2_relation (lhs
);
1016 operator_le::fold_range (irange
&r
, tree type
,
1019 relation_trio rel
) const
1021 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_LE
))
1024 signop sign
= TYPE_SIGN (op1
.type ());
1025 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1027 if (wi::le_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1028 r
= range_true (type
);
1029 else if (!wi::le_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1030 r
= range_false (type
);
1032 r
= range_true_and_false (type
);
1037 operator_le::op1_range (irange
&r
, tree type
,
1040 relation_trio
) const
1042 if (op2
.undefined_p ())
1045 switch (get_bool_state (r
, lhs
, type
))
1048 build_le (r
, type
, op2
.upper_bound ());
1052 build_gt (r
, type
, op2
.lower_bound ());
1062 operator_le::op2_range (irange
&r
, tree type
,
1065 relation_trio
) const
1067 if (op1
.undefined_p ())
1070 switch (get_bool_state (r
, lhs
, type
))
1073 build_ge (r
, type
, op1
.lower_bound ());
1077 build_lt (r
, type
, op1
.upper_bound ());
1087 class operator_gt
: public range_operator
1089 using range_operator::fold_range
;
1090 using range_operator::op1_range
;
1091 using range_operator::op2_range
;
1093 virtual bool fold_range (irange
&r
, tree type
,
1096 relation_trio
= TRIO_VARYING
) const;
1097 virtual bool op1_range (irange
&r
, tree type
,
1100 relation_trio
= TRIO_VARYING
) const;
1101 virtual bool op2_range (irange
&r
, tree type
,
1104 relation_trio
= TRIO_VARYING
) const;
1105 virtual relation_kind
op1_op2_relation (const irange
&lhs
) const;
1108 // Check if the LHS range indicates a relation between OP1 and OP2.
1111 gt_op1_op2_relation (const irange
&lhs
)
1113 if (lhs
.undefined_p ())
1114 return VREL_UNDEFINED
;
1116 // FALSE = op1 > op2 indicates LE_EXPR.
1120 // TRUE = op1 > op2 indicates GT_EXPR.
1121 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
1123 return VREL_VARYING
;
1127 operator_gt::op1_op2_relation (const irange
&lhs
) const
1129 return gt_op1_op2_relation (lhs
);
1134 operator_gt::fold_range (irange
&r
, tree type
,
1135 const irange
&op1
, const irange
&op2
,
1136 relation_trio rel
) const
1138 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_GT
))
1141 signop sign
= TYPE_SIGN (op1
.type ());
1142 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1144 if (wi::gt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1145 r
= range_true (type
);
1146 else if (!wi::gt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1147 r
= range_false (type
);
1149 r
= range_true_and_false (type
);
1154 operator_gt::op1_range (irange
&r
, tree type
,
1155 const irange
&lhs
, const irange
&op2
,
1156 relation_trio
) const
1158 if (op2
.undefined_p ())
1161 switch (get_bool_state (r
, lhs
, type
))
1164 build_gt (r
, type
, op2
.lower_bound ());
1168 build_le (r
, type
, op2
.upper_bound ());
1178 operator_gt::op2_range (irange
&r
, tree type
,
1181 relation_trio
) const
1183 if (op1
.undefined_p ())
1186 switch (get_bool_state (r
, lhs
, type
))
1189 build_lt (r
, type
, op1
.upper_bound ());
1193 build_ge (r
, type
, op1
.lower_bound ());
1203 class operator_ge
: public range_operator
1205 using range_operator::fold_range
;
1206 using range_operator::op1_range
;
1207 using range_operator::op2_range
;
1209 virtual bool fold_range (irange
&r
, tree type
,
1212 relation_trio
= TRIO_VARYING
) const;
1213 virtual bool op1_range (irange
&r
, tree type
,
1216 relation_trio
= TRIO_VARYING
) const;
1217 virtual bool op2_range (irange
&r
, tree type
,
1220 relation_trio
= TRIO_VARYING
) const;
1221 virtual relation_kind
op1_op2_relation (const irange
&lhs
) const;
1224 // Check if the LHS range indicates a relation between OP1 and OP2.
1227 ge_op1_op2_relation (const irange
&lhs
)
1229 if (lhs
.undefined_p ())
1230 return VREL_UNDEFINED
;
1232 // FALSE = op1 >= op2 indicates LT_EXPR.
1236 // TRUE = op1 >= op2 indicates GE_EXPR.
1237 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
1239 return VREL_VARYING
;
1243 operator_ge::op1_op2_relation (const irange
&lhs
) const
1245 return ge_op1_op2_relation (lhs
);
1249 operator_ge::fold_range (irange
&r
, tree type
,
1252 relation_trio rel
) const
1254 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_GE
))
1257 signop sign
= TYPE_SIGN (op1
.type ());
1258 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1260 if (wi::ge_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1261 r
= range_true (type
);
1262 else if (!wi::ge_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1263 r
= range_false (type
);
1265 r
= range_true_and_false (type
);
1270 operator_ge::op1_range (irange
&r
, tree type
,
1273 relation_trio
) const
1275 if (op2
.undefined_p ())
1278 switch (get_bool_state (r
, lhs
, type
))
1281 build_ge (r
, type
, op2
.lower_bound ());
1285 build_lt (r
, type
, op2
.upper_bound ());
1295 operator_ge::op2_range (irange
&r
, tree type
,
1298 relation_trio
) const
1300 if (op1
.undefined_p ())
1303 switch (get_bool_state (r
, lhs
, type
))
1306 build_le (r
, type
, op1
.upper_bound ());
1310 build_gt (r
, type
, op1
.lower_bound ());
1320 class operator_plus
: public range_operator
1322 using range_operator::op1_range
;
1323 using range_operator::op2_range
;
1324 using range_operator::lhs_op1_relation
;
1325 using range_operator::lhs_op2_relation
;
1327 virtual bool op1_range (irange
&r
, tree type
,
1330 relation_trio
) const;
1331 virtual bool op2_range (irange
&r
, tree type
,
1334 relation_trio
) const;
1335 virtual void wi_fold (irange
&r
, tree type
,
1336 const wide_int
&lh_lb
,
1337 const wide_int
&lh_ub
,
1338 const wide_int
&rh_lb
,
1339 const wide_int
&rh_ub
) const;
1340 virtual relation_kind
lhs_op1_relation (const irange
&lhs
, const irange
&op1
,
1342 relation_kind rel
) const;
1343 virtual relation_kind
lhs_op2_relation (const irange
&lhs
, const irange
&op1
,
1345 relation_kind rel
) const;
1348 // Check to see if the range of OP2 indicates anything about the relation
1349 // between LHS and OP1.
1352 operator_plus::lhs_op1_relation (const irange
&lhs
,
1355 relation_kind
) const
1357 if (lhs
.undefined_p () || op1
.undefined_p () || op2
.undefined_p ())
1358 return VREL_VARYING
;
1360 tree type
= lhs
.type ();
1361 unsigned prec
= TYPE_PRECISION (type
);
1362 wi::overflow_type ovf1
, ovf2
;
1363 signop sign
= TYPE_SIGN (type
);
1365 // LHS = OP1 + 0 indicates LHS == OP1.
1369 if (TYPE_OVERFLOW_WRAPS (type
))
1371 wi::add (op1
.lower_bound (), op2
.lower_bound (), sign
, &ovf1
);
1372 wi::add (op1
.upper_bound (), op2
.upper_bound (), sign
, &ovf2
);
1375 ovf1
= ovf2
= wi::OVF_NONE
;
1377 // Never wrapping additions.
1380 // Positive op2 means lhs > op1.
1381 if (wi::gt_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1383 if (wi::ge_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1386 // Negative op2 means lhs < op1.
1387 if (wi::lt_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1389 if (wi::le_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1392 // Always wrapping additions.
1393 else if (ovf1
&& ovf1
== ovf2
)
1395 // Positive op2 means lhs < op1.
1396 if (wi::gt_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1398 if (wi::ge_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1401 // Negative op2 means lhs > op1.
1402 if (wi::lt_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1404 if (wi::le_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1408 // If op2 does not contain 0, then LHS and OP1 can never be equal.
1409 if (!range_includes_zero_p (&op2
))
1412 return VREL_VARYING
;
1415 // PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
1419 operator_plus::lhs_op2_relation (const irange
&lhs
, const irange
&op1
,
1420 const irange
&op2
, relation_kind rel
) const
1422 return lhs_op1_relation (lhs
, op2
, op1
, rel
);
1426 operator_plus::wi_fold (irange
&r
, tree type
,
1427 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1428 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1430 wi::overflow_type ov_lb
, ov_ub
;
1431 signop s
= TYPE_SIGN (type
);
1432 wide_int new_lb
= wi::add (lh_lb
, rh_lb
, s
, &ov_lb
);
1433 wide_int new_ub
= wi::add (lh_ub
, rh_ub
, s
, &ov_ub
);
1434 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
1437 // Given addition or subtraction, determine the possible NORMAL ranges and
1438 // OVERFLOW ranges given an OFFSET range. ADD_P is true for addition.
1439 // Return the relation that exists between the LHS and OP1 in order for the
1440 // NORMAL range to apply.
1441 // a return value of VREL_VARYING means no ranges were applicable.
1443 static relation_kind
1444 plus_minus_ranges (irange
&r_ov
, irange
&r_normal
, const irange
&offset
,
1447 relation_kind kind
= VREL_VARYING
;
1448 // For now, only deal with constant adds. This could be extended to ranges
1449 // when someone is so motivated.
1450 if (!offset
.singleton_p () || offset
.zero_p ())
1453 // Always work with a positive offset. ie a+ -2 -> a-2 and a- -2 > a+2
1454 wide_int off
= offset
.lower_bound ();
1455 if (wi::neg_p (off
, SIGNED
))
1458 off
= wi::neg (off
);
1461 wi::overflow_type ov
;
1462 tree type
= offset
.type ();
1463 unsigned prec
= TYPE_PRECISION (type
);
1466 // calculate the normal range and relation for the operation.
1470 lb
= wi::zero (prec
);
1471 ub
= wi::sub (wi::to_wide (vrp_val_max (type
)), off
, UNSIGNED
, &ov
);
1478 ub
= wi::to_wide (vrp_val_max (type
));
1481 int_range
<2> normal_range (type
, lb
, ub
);
1482 int_range
<2> ov_range (type
, lb
, ub
, VR_ANTI_RANGE
);
1485 r_normal
= normal_range
;
// Once op1 has been calculated by operator_plus or operator_minus, check
// to see if the relation passed causes any part of the calculation to
// be not possible.  ie
// a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
// and that further refines a_2 to [0, 0].
// R is the value of op1, OP2 is the offset being added/subtracted, REL is the
// relation between LHS relation OP1 and ADD_P is true for PLUS, false for
// MINUS.    IF any adjustment can be made, R will reflect it.

static void
adjust_op1_for_overflow (irange &r, const irange &op2, relation_kind rel,
			 bool add_p)
{
  if (r.undefined_p ())
    return;
  tree type = r.type ();
  // Check for unsigned overflow and calculate the overflow part.
  // Only wrapping unsigned types can be refined this way.
  signop s = TYPE_SIGN (type);
  if (!TYPE_OVERFLOW_WRAPS (type) || s == SIGNED)
    return;

  // Only work with <, <=, >, >= relations.
  if (!relation_lt_le_gt_ge_p (rel))
    return;

  // Get the ranges for this offset.
  int_range_max normal, overflow;
  relation_kind k = plus_minus_ranges (overflow, normal, op2, add_p);

  // VREL_VARYING means there are no adjustments.
  if (k == VREL_VARYING)
    return;

  // If the relations match use the normal range, otherwise use overflow range.
  if (relation_intersect (k, rel) == k)
    r.intersect (normal);
  else
    r.intersect (overflow);
}
// Derive the range of OP1 from LHS = OP1 + OP2 by solving
// OP1 = LHS - OP2, then refine the result with any known LHS/OP1
// relation (see adjust_op1_for_overflow).

bool
operator_plus::op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio trio) const
{
  if (lhs.undefined_p ())
    return false;
  // Start with the default operation.
  range_op_handler minus (MINUS_EXPR, type);
  if (!minus)
    return false;
  bool res = minus.fold_range (r, type, lhs, op2);
  relation_kind rel = trio.lhs_op1 ();
  // Check for a relation refinement.
  if (res)
    adjust_op1_for_overflow (r, op2, rel, true /* PLUS_EXPR */);
  return res;
}
// Derive the range of OP2 from LHS = OP1 + OP2.  Addition commutes, so
// solve exactly as for op1 with the relation trio's operands swapped.

bool
operator_plus::op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio rel) const
{
  return op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
}
// Widening signed addition (WIDEN_PLUS_EXPR with a signed first operand):
// the result type is wider than the operand types, so the sum can never
// wrap.  Exported through ptr_op_widen_plus_signed for the dispatch table.
class operator_widen_plus_signed : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
} op_widen_plus_signed;
range_operator *ptr_op_widen_plus_signed = &op_widen_plus_signed;
void
operator_widen_plus_signed::wi_fold (irange &r, tree type,
				     const wide_int &lh_lb,
				     const wide_int &lh_ub,
				     const wide_int &rh_lb,
				     const wide_int &rh_ub) const
{
  wi::overflow_type ov_lb, ov_ub;
  signop s = TYPE_SIGN (type);

  // The first operand is sign-extended to twice its precision; the second
  // follows the sign of the (widened) result type.
  wide_int lh_wlb
    = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
  wide_int lh_wub
    = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
  wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
  wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);

  // At double precision the addition cannot wrap, so the endpoint sums
  // form the exact result range.
  wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
  wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);

  r = int_range<2> (type, new_lb, new_ub);
}
// Widening unsigned addition (WIDEN_PLUS_EXPR with an unsigned first
// operand): the widened result type guarantees the sum cannot wrap.
// Exported through ptr_op_widen_plus_unsigned for the dispatch table.
class operator_widen_plus_unsigned : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
} op_widen_plus_unsigned;
range_operator *ptr_op_widen_plus_unsigned = &op_widen_plus_unsigned;
void
operator_widen_plus_unsigned::wi_fold (irange &r, tree type,
				       const wide_int &lh_lb,
				       const wide_int &lh_ub,
				       const wide_int &rh_lb,
				       const wide_int &rh_ub) const
{
  wi::overflow_type ov_lb, ov_ub;
  signop s = TYPE_SIGN (type);

  // The first operand is zero-extended to twice its precision; the second
  // follows the sign of the (widened) result type.
  wide_int lh_wlb
    = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
  wide_int lh_wub
    = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
  wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
  wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);

  // At double precision the addition cannot wrap, so the endpoint sums
  // form the exact result range.
  wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
  wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);

  r = int_range<2> (type, new_lb, new_ub);
}
1627 class operator_minus
: public range_operator
1629 using range_operator::fold_range
;
1630 using range_operator::op1_range
;
1631 using range_operator::op2_range
;
1633 virtual bool op1_range (irange
&r
, tree type
,
1636 relation_trio
) const;
1637 virtual bool op2_range (irange
&r
, tree type
,
1640 relation_trio
) const;
1641 virtual void wi_fold (irange
&r
, tree type
,
1642 const wide_int
&lh_lb
,
1643 const wide_int
&lh_ub
,
1644 const wide_int
&rh_lb
,
1645 const wide_int
&rh_ub
) const;
1646 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
1649 relation_kind rel
) const;
1650 virtual bool op1_op2_relation_effect (irange
&lhs_range
,
1652 const irange
&op1_range
,
1653 const irange
&op2_range
,
1654 relation_kind rel
) const;
// Fold MINUS_EXPR over the endpoint pairs [LH_LB, LH_UB] - [RH_LB, RH_UB]
// and store the resulting range in R.

void
operator_minus::wi_fold (irange &r, tree type,
			 const wide_int &lh_lb, const wide_int &lh_ub,
			 const wide_int &rh_lb, const wide_int &rh_ub) const
{
  wi::overflow_type ov_lb, ov_ub;
  signop s = TYPE_SIGN (type);
  // Subtraction is anti-monotonic in the second operand: the smallest
  // result is lh_lb - rh_ub and the largest is lh_ub - rh_lb.  Overflow
  // of either endpoint is resolved by value_range_with_overflow.
  wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb);
  wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub);
  value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
}
1670 // Return the relation between LHS and OP1 based on the relation between
1674 operator_minus::lhs_op1_relation (const irange
&, const irange
&op1
,
1675 const irange
&, relation_kind rel
) const
1677 if (!op1
.undefined_p () && TYPE_SIGN (op1
.type ()) == UNSIGNED
)
1686 return VREL_VARYING
;
1689 // Check to see if the relation REL between OP1 and OP2 has any effect on the
1690 // LHS of the expression. If so, apply it to LHS_RANGE. This is a helper
1691 // function for both MINUS_EXPR and POINTER_DIFF_EXPR.
1694 minus_op1_op2_relation_effect (irange
&lhs_range
, tree type
,
1695 const irange
&op1_range ATTRIBUTE_UNUSED
,
1696 const irange
&op2_range ATTRIBUTE_UNUSED
,
1699 if (rel
== VREL_VARYING
)
1702 int_range
<2> rel_range
;
1703 unsigned prec
= TYPE_PRECISION (type
);
1704 signop sgn
= TYPE_SIGN (type
);
1706 // == and != produce [0,0] and ~[0,0] regardless of wrapping.
1708 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
));
1709 else if (rel
== VREL_NE
)
1710 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
),
1712 else if (TYPE_OVERFLOW_WRAPS (type
))
1716 // For wrapping signed values and unsigned, if op1 > op2 or
1717 // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
1720 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
),
1731 // op1 > op2, op1 - op2 can be restricted to [1, +INF]
1733 rel_range
= int_range
<2> (type
, wi::one (prec
),
1734 wi::max_value (prec
, sgn
));
1736 // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
1738 rel_range
= int_range
<2> (type
, wi::zero (prec
),
1739 wi::max_value (prec
, sgn
));
1741 // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
1743 rel_range
= int_range
<2> (type
, wi::min_value (prec
, sgn
),
1744 wi::minus_one (prec
));
1746 // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
1748 rel_range
= int_range
<2> (type
, wi::min_value (prec
, sgn
),
1755 lhs_range
.intersect (rel_range
);
// Apply the effect of the OP1/OP2 relation REL to LHS_RANGE by deferring
// to the helper shared with POINTER_DIFF_EXPR.

bool
operator_minus::op1_op2_relation_effect (irange &lhs_range, tree type,
					 const irange &op1_range,
					 const irange &op2_range,
					 relation_kind rel) const
{
  return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
					rel);
}
// Derive the range of OP1 from LHS = OP1 - OP2 by solving
// OP1 = LHS + OP2, then refine the result with any known LHS/OP1
// relation (see adjust_op1_for_overflow).

bool
operator_minus::op1_range (irange &r, tree type,
			   const irange &lhs, const irange &op2,
			   relation_trio trio) const
{
  if (lhs.undefined_p ())
    return false;
  // Start with the default operation.
  range_op_handler minus (PLUS_EXPR, type);
  if (!minus)
    return false;
  bool res = minus.fold_range (r, type, lhs, op2);
  relation_kind rel = trio.lhs_op1 ();
  // Check for a relation refinement; add_p is false for MINUS.
  if (res)
    adjust_op1_for_overflow (r, op2, rel, false /* PLUS_EXPR */);
  return res;
}
// Derive the range of OP2 from LHS = OP1 - OP2 by solving
// OP2 = OP1 - LHS with the forward fold.

bool
operator_minus::op2_range (irange &r, tree type,
			   const irange &lhs, const irange &op1,
			   relation_trio) const
{
  if (lhs.undefined_p ())
    return false;
  return fold_range (r, type, op1, lhs);
}
1801 class operator_pointer_diff
: public range_operator
1803 virtual bool op1_op2_relation_effect (irange
&lhs_range
,
1805 const irange
&op1_range
,
1806 const irange
&op2_range
,
1807 relation_kind rel
) const;
// POINTER_DIFF_EXPR behaves like MINUS_EXPR with respect to operand
// relations, so defer to the shared helper.

bool
operator_pointer_diff::op1_op2_relation_effect (irange &lhs_range, tree type,
						const irange &op1_range,
						const irange &op2_range,
						relation_kind rel) const
{
  return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
					rel);
}
1821 class operator_min
: public range_operator
1824 virtual void wi_fold (irange
&r
, tree type
,
1825 const wide_int
&lh_lb
,
1826 const wide_int
&lh_ub
,
1827 const wide_int
&rh_lb
,
1828 const wide_int
&rh_ub
) const;
// Fold MIN_EXPR: MIN is monotonic in both operands, so the result range
// is [min(lh_lb, rh_lb), min(lh_ub, rh_ub)].  MIN cannot overflow.

void
operator_min::wi_fold (irange &r, tree type,
		       const wide_int &lh_lb, const wide_int &lh_ub,
		       const wide_int &rh_lb, const wide_int &rh_ub) const
{
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::min (lh_lb, rh_lb, s);
  wide_int new_ub = wi::min (lh_ub, rh_ub, s);
  value_range_with_overflow (r, type, new_lb, new_ub);
}
1843 class operator_max
: public range_operator
1846 virtual void wi_fold (irange
&r
, tree type
,
1847 const wide_int
&lh_lb
,
1848 const wide_int
&lh_ub
,
1849 const wide_int
&rh_lb
,
1850 const wide_int
&rh_ub
) const;
// Fold MAX_EXPR: MAX is monotonic in both operands, so the result range
// is [max(lh_lb, rh_lb), max(lh_ub, rh_ub)].  MAX cannot overflow.

void
operator_max::wi_fold (irange &r, tree type,
		       const wide_int &lh_lb, const wide_int &lh_ub,
		       const wide_int &rh_lb, const wide_int &rh_ub) const
{
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::max (lh_lb, rh_lb, s);
  wide_int new_ub = wi::max (lh_ub, rh_ub, s);
  value_range_with_overflow (r, type, new_lb, new_ub);
}
1865 class cross_product_operator
: public range_operator
1868 // Perform an operation between two wide-ints and place the result
1869 // in R. Return true if the operation overflowed.
1870 virtual bool wi_op_overflows (wide_int
&r
,
1873 const wide_int
&) const = 0;
1875 // Calculate the cross product of two sets of sub-ranges and return it.
1876 void wi_cross_product (irange
&r
, tree type
,
1877 const wide_int
&lh_lb
,
1878 const wide_int
&lh_ub
,
1879 const wide_int
&rh_lb
,
1880 const wide_int
&rh_ub
) const;
// Calculate the cross product of two sets of ranges and return it.
//
// Multiplications, divisions and shifts are a bit tricky to handle,
// depending on the mix of signs we have in the two ranges, we need to
// operate on different values to get the minimum and maximum values
// for the new range.  One approach is to figure out all the
// variations of range combinations and do the operations.
//
// However, this involves several calls to compare_values and it is
// pretty convoluted.  It's simpler to do the 4 operations (MIN0 OP
// MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP MAX1) and then
// figure the smallest and largest values to form the new range.

void
cross_product_operator::wi_cross_product (irange &r, tree type,
					  const wide_int &lh_lb,
					  const wide_int &lh_ub,
					  const wide_int &rh_lb,
					  const wide_int &rh_ub) const
{
  wide_int cp1, cp2, cp3, cp4;
  // Default to varying.
  r.set_varying (type);

  // Compute the 4 cross operations, bailing if we get an overflow we
  // can't handle; singleton endpoints let us reuse an already computed
  // product instead of recomputing it.
  if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp3 = cp1;
  else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
    return;
  if (wi::eq_p (rh_lb, rh_ub))
    cp2 = cp1;
  else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp4 = cp2;
  else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
    return;

  // Order each pair so (cp1, cp3) hold the low results and (cp2, cp4)
  // the high ones.
  signop sign = TYPE_SIGN (type);
  if (wi::gt_p (cp1, cp2, sign))
    std::swap (cp1, cp2);
  if (wi::gt_p (cp3, cp4, sign))
    std::swap (cp3, cp4);

  // Choose min and max from the ordered pairs.
  wide_int res_lb = wi::min (cp1, cp3, sign);
  wide_int res_ub = wi::max (cp2, cp4, sign);
  value_range_with_overflow (r, type, res_lb, res_ub);
}
1938 class operator_mult
: public cross_product_operator
1940 using range_operator::op1_range
;
1941 using range_operator::op2_range
;
1943 virtual void wi_fold (irange
&r
, tree type
,
1944 const wide_int
&lh_lb
,
1945 const wide_int
&lh_ub
,
1946 const wide_int
&rh_lb
,
1947 const wide_int
&rh_ub
) const final override
;
1948 virtual bool wi_op_overflows (wide_int
&res
, tree type
,
1949 const wide_int
&w0
, const wide_int
&w1
)
1950 const final override
;
1951 virtual bool op1_range (irange
&r
, tree type
,
1954 relation_trio
) const final override
;
1955 virtual bool op2_range (irange
&r
, tree type
,
1958 relation_trio
) const final override
;
// Derive the range of OP1 from LHS = OP1 * OP2 by dividing, when that
// is sound for the type's overflow semantics.

bool
operator_mult::op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio) const
{
  tree offset;
  if (lhs.undefined_p ())
    return false;

  // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
  // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
  // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
  if (TYPE_OVERFLOW_WRAPS (type))
    return false;

  // Only a non-zero constant multiplier can be inverted by division.
  if (op2.singleton_p (&offset) && !integer_zerop (offset))
    return range_op_handler (TRUNC_DIV_EXPR, type).fold_range (r, type,
							       lhs, op2);
  return false;
}
// Multiplication commutes, so derive OP2 exactly as OP1 with the
// relation trio's operands swapped.

bool
operator_mult::op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio rel) const
{
  return operator_mult::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
}
// Perform W0 * W1 into RES.  Return TRUE if the multiplication overflowed
// in a way the caller cannot handle; with undefined-overflow types the
// result saturates instead and FALSE is returned.

bool
operator_mult::wi_op_overflows (wide_int &res, tree type,
				const wide_int &w0, const wide_int &w1) const
{
  wi::overflow_type overflow = wi::OVF_NONE;
  signop sign = TYPE_SIGN (type);
  res = wi::mul (w0, w1, sign, &overflow);
  if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
    {
      // For multiplication, the sign of the overflow is given
      // by the comparison of the signs of the operands.
      if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
	res = wi::max_value (w0.get_precision (), sign);
      else
	res = wi::min_value (w0.get_precision (), sign);
      return false;
    }
  return overflow;
}
2011 operator_mult::wi_fold (irange
&r
, tree type
,
2012 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2013 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2015 if (TYPE_OVERFLOW_UNDEFINED (type
))
2017 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2021 // Multiply the ranges when overflow wraps. This is basically fancy
2022 // code so we don't drop to varying with an unsigned
2025 // This test requires 2*prec bits if both operands are signed and
2026 // 2*prec + 2 bits if either is not. Therefore, extend the values
2027 // using the sign of the result to PREC2. From here on out,
2028 // everything is just signed math no matter what the input types
2031 signop sign
= TYPE_SIGN (type
);
2032 unsigned prec
= TYPE_PRECISION (type
);
2033 widest2_int min0
= widest2_int::from (lh_lb
, sign
);
2034 widest2_int max0
= widest2_int::from (lh_ub
, sign
);
2035 widest2_int min1
= widest2_int::from (rh_lb
, sign
);
2036 widest2_int max1
= widest2_int::from (rh_ub
, sign
);
2037 widest2_int sizem1
= wi::mask
<widest2_int
> (prec
, false);
2038 widest2_int size
= sizem1
+ 1;
2040 // Canonicalize the intervals.
2041 if (sign
== UNSIGNED
)
2043 if (wi::ltu_p (size
, min0
+ max0
))
2048 if (wi::ltu_p (size
, min1
+ max1
))
2055 // Sort the 4 products so that min is in prod0 and max is in
2057 widest2_int prod0
= min0
* min1
;
2058 widest2_int prod1
= min0
* max1
;
2059 widest2_int prod2
= max0
* min1
;
2060 widest2_int prod3
= max0
* max1
;
2062 // min0min1 > max0max1
2064 std::swap (prod0
, prod3
);
2066 // min0max1 > max0min1
2068 std::swap (prod1
, prod2
);
2071 std::swap (prod0
, prod1
);
2074 std::swap (prod2
, prod3
);
2077 prod2
= prod3
- prod0
;
2078 if (wi::geu_p (prod2
, sizem1
))
2080 // Multiplying by X, where X is a power of 2 is [0,0][X,+INF].
2081 if (TYPE_UNSIGNED (type
) && rh_lb
== rh_ub
2082 && wi::exact_log2 (rh_lb
) != -1 && prec
> 1)
2084 r
.set (type
, rh_lb
, wi::max_value (prec
, sign
));
2086 zero
.set_zero (type
);
2090 // The range covers all values.
2091 r
.set_varying (type
);
2095 wide_int new_lb
= wide_int::from (prod0
, prec
, sign
);
2096 wide_int new_ub
= wide_int::from (prod3
, prec
, sign
);
2097 create_possibly_reversed_range (r
, type
, new_lb
, new_ub
);
// Widening signed multiplication (WIDEN_MULT_EXPR with a signed first
// operand): the result type is wide enough that the product cannot wrap.
// Exported through ptr_op_widen_mult_signed for the dispatch table.
class operator_widen_mult_signed : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub)
    const;
} op_widen_mult_signed;
range_operator *ptr_op_widen_mult_signed = &op_widen_mult_signed;
void
operator_widen_mult_signed::wi_fold (irange &r, tree type,
				     const wide_int &lh_lb,
				     const wide_int &lh_ub,
				     const wide_int &rh_lb,
				     const wide_int &rh_ub) const
{
  signop s = TYPE_SIGN (type);

  // Sign-extend the first operand to double precision; the second follows
  // the sign of the (widened) result type.
  wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
  wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
  wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
  wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);

  /* We don't expect a widening multiplication to be able to overflow but range
     calculations for multiplications are complicated.  After widening the
     operands lets call the base class.  */
  return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
}
// Widening unsigned multiplication (WIDEN_MULT_EXPR with an unsigned
// first operand): the widened result type prevents wrapping.
// Exported through ptr_op_widen_mult_unsigned for the dispatch table.
class operator_widen_mult_unsigned : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub)
    const;
} op_widen_mult_unsigned;
range_operator *ptr_op_widen_mult_unsigned = &op_widen_mult_unsigned;
void
operator_widen_mult_unsigned::wi_fold (irange &r, tree type,
				       const wide_int &lh_lb,
				       const wide_int &lh_ub,
				       const wide_int &rh_lb,
				       const wide_int &rh_ub) const
{
  signop s = TYPE_SIGN (type);

  // Zero-extend the first operand to double precision; the second follows
  // the sign of the (widened) result type.
  wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
  wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
  wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
  wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);

  /* We don't expect a widening multiplication to be able to overflow but range
     calculations for multiplications are complicated.  After widening the
     operands lets call the base class.  */
  return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
}
2166 class operator_div
: public cross_product_operator
2169 virtual void wi_fold (irange
&r
, tree type
,
2170 const wide_int
&lh_lb
,
2171 const wide_int
&lh_ub
,
2172 const wide_int
&rh_lb
,
2173 const wide_int
&rh_ub
) const final override
;
2174 virtual bool wi_op_overflows (wide_int
&res
, tree type
,
2175 const wide_int
&, const wide_int
&)
2176 const final override
;
2180 operator_div::wi_op_overflows (wide_int
&res
, tree type
,
2181 const wide_int
&w0
, const wide_int
&w1
) const
2186 wi::overflow_type overflow
= wi::OVF_NONE
;
2187 signop sign
= TYPE_SIGN (type
);
2191 case EXACT_DIV_EXPR
:
2192 case TRUNC_DIV_EXPR
:
2193 res
= wi::div_trunc (w0
, w1
, sign
, &overflow
);
2195 case FLOOR_DIV_EXPR
:
2196 res
= wi::div_floor (w0
, w1
, sign
, &overflow
);
2198 case ROUND_DIV_EXPR
:
2199 res
= wi::div_round (w0
, w1
, sign
, &overflow
);
2202 res
= wi::div_ceil (w0
, w1
, sign
, &overflow
);
2208 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
2210 // For division, the only case is -INF / -1 = +INF.
2211 res
= wi::max_value (w0
.get_precision (), sign
);
2218 operator_div::wi_fold (irange
&r
, tree type
,
2219 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2220 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2222 const wide_int dividend_min
= lh_lb
;
2223 const wide_int dividend_max
= lh_ub
;
2224 const wide_int divisor_min
= rh_lb
;
2225 const wide_int divisor_max
= rh_ub
;
2226 signop sign
= TYPE_SIGN (type
);
2227 unsigned prec
= TYPE_PRECISION (type
);
2228 wide_int extra_min
, extra_max
;
2230 // If we know we won't divide by zero, just do the division.
2231 if (!wi_includes_zero_p (type
, divisor_min
, divisor_max
))
2233 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
2234 divisor_min
, divisor_max
);
2238 // If we're definitely dividing by zero, there's nothing to do.
2239 if (wi_zero_p (type
, divisor_min
, divisor_max
))
2245 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2246 // skip any division by zero.
2248 // First divide by the negative numbers, if any.
2249 if (wi::neg_p (divisor_min
, sign
))
2250 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
2251 divisor_min
, wi::minus_one (prec
));
2255 // Then divide by the non-zero positive numbers, if any.
2256 if (wi::gt_p (divisor_max
, wi::zero (prec
), sign
))
2259 wi_cross_product (tmp
, type
, dividend_min
, dividend_max
,
2260 wi::one (prec
), divisor_max
);
2263 // We shouldn't still have undefined here.
2264 gcc_checking_assert (!r
.undefined_p ());
2268 class operator_exact_divide
: public operator_div
2270 using range_operator::op1_range
;
2272 virtual bool op1_range (irange
&r
, tree type
,
2275 relation_trio
) const;
// Derive the range of OP1 from LHS = OP1 EXACT_DIV OP2.  Since the
// division is exact there are no remainders, so OP1 = LHS * OP2 when
// OP2 is a non-zero constant.

bool
operator_exact_divide::op1_range (irange &r, tree type,
				  const irange &lhs,
				  const irange &op2,
				  relation_trio) const
{
  if (lhs.undefined_p ())
    return false;
  tree offset;
  // [2, 4] = op1 / [3,3]   since its exact divide, no need to worry about
  // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
  // We wont bother trying to enumerate all the in between stuff :-P
  // TRUE accuracy is [6,6][9,9][12,12].  This is unlikely to matter most of
  // the time however.
  // If op2 is a multiple of 2, we would be able to set some non-zero bits.
  if (op2.singleton_p (&offset)
      && !integer_zerop (offset))
    return range_op_handler (MULT_EXPR, type).fold_range (r, type, lhs, op2);
  return false;
}
2301 class operator_lshift
: public cross_product_operator
2303 using range_operator::fold_range
;
2304 using range_operator::op1_range
;
2306 virtual bool op1_range (irange
&r
, tree type
,
2309 relation_trio rel
= TRIO_VARYING
) const;
2310 virtual bool fold_range (irange
&r
, tree type
,
2313 relation_trio rel
= TRIO_VARYING
) const;
2315 virtual void wi_fold (irange
&r
, tree type
,
2316 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2317 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
2318 virtual bool wi_op_overflows (wide_int
&res
,
2321 const wide_int
&) const;
2324 class operator_rshift
: public cross_product_operator
2326 using range_operator::fold_range
;
2327 using range_operator::op1_range
;
2328 using range_operator::lhs_op1_relation
;
2330 virtual bool fold_range (irange
&r
, tree type
,
2333 relation_trio rel
= TRIO_VARYING
) const;
2334 virtual void wi_fold (irange
&r
, tree type
,
2335 const wide_int
&lh_lb
,
2336 const wide_int
&lh_ub
,
2337 const wide_int
&rh_lb
,
2338 const wide_int
&rh_ub
) const;
2339 virtual bool wi_op_overflows (wide_int
&res
,
2342 const wide_int
&w1
) const;
2343 virtual bool op1_range (irange
&, tree type
,
2346 relation_trio rel
= TRIO_VARYING
) const;
2347 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
2350 relation_kind rel
) const;
// Right shifting a non-negative value by a non-negative amount can only
// make it smaller or equal, so report LHS <= OP1 in that case.

relation_kind
operator_rshift::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
				   const irange &op1,
				   const irange &op2,
				   relation_kind) const
{
  // If both operands range are >= 0, then the LHS <= op1.
  if (!op1.undefined_p () && !op2.undefined_p ()
      && wi::ge_p (op1.lower_bound (), 0, TYPE_SIGN (op1.type ()))
      && wi::ge_p (op2.lower_bound (), 0, TYPE_SIGN (op2.type ())))
    return VREL_LE;
  return VREL_VARYING;
}
2369 operator_lshift::fold_range (irange
&r
, tree type
,
2372 relation_trio rel
) const
2374 int_range_max shift_range
;
2375 if (!get_shift_range (shift_range
, type
, op2
))
2377 if (op2
.undefined_p ())
2384 // Transform left shifts by constants into multiplies.
2385 if (shift_range
.singleton_p ())
2387 unsigned shift
= shift_range
.lower_bound ().to_uhwi ();
2388 wide_int tmp
= wi::set_bit_in_zero (shift
, TYPE_PRECISION (type
));
2389 int_range
<1> mult (type
, tmp
, tmp
);
2391 // Force wrapping multiplication.
2392 bool saved_flag_wrapv
= flag_wrapv
;
2393 bool saved_flag_wrapv_pointer
= flag_wrapv_pointer
;
2395 flag_wrapv_pointer
= 1;
2396 bool b
= op_mult
.fold_range (r
, type
, op1
, mult
);
2397 flag_wrapv
= saved_flag_wrapv
;
2398 flag_wrapv_pointer
= saved_flag_wrapv_pointer
;
2402 // Otherwise, invoke the generic fold routine.
2403 return range_operator::fold_range (r
, type
, op1
, shift_range
, rel
);
2407 operator_lshift::wi_fold (irange
&r
, tree type
,
2408 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2409 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2411 signop sign
= TYPE_SIGN (type
);
2412 unsigned prec
= TYPE_PRECISION (type
);
2413 int overflow_pos
= sign
== SIGNED
? prec
- 1 : prec
;
2414 int bound_shift
= overflow_pos
- rh_ub
.to_shwi ();
2415 // If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2416 // overflow. However, for that to happen, rh.max needs to be zero,
2417 // which means rh is a singleton range of zero, which means we simply return
2418 // [lh_lb, lh_ub] as the range.
2419 if (wi::eq_p (rh_ub
, rh_lb
) && wi::eq_p (rh_ub
, 0))
2421 r
= int_range
<2> (type
, lh_lb
, lh_ub
);
2425 wide_int bound
= wi::set_bit_in_zero (bound_shift
, prec
);
2426 wide_int complement
= ~(bound
- 1);
2427 wide_int low_bound
, high_bound
;
2428 bool in_bounds
= false;
2430 if (sign
== UNSIGNED
)
2433 high_bound
= complement
;
2434 if (wi::ltu_p (lh_ub
, low_bound
))
2436 // [5, 6] << [1, 2] == [10, 24].
2437 // We're shifting out only zeroes, the value increases
2441 else if (wi::ltu_p (high_bound
, lh_lb
))
2443 // [0xffffff00, 0xffffffff] << [1, 2]
2444 // == [0xfffffc00, 0xfffffffe].
2445 // We're shifting out only ones, the value decreases
2452 // [-1, 1] << [1, 2] == [-4, 4]
2453 low_bound
= complement
;
2455 if (wi::lts_p (lh_ub
, high_bound
)
2456 && wi::lts_p (low_bound
, lh_lb
))
2458 // For non-negative numbers, we're shifting out only zeroes,
2459 // the value increases monotonically. For negative numbers,
2460 // we're shifting out only ones, the value decreases
2467 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2469 r
.set_varying (type
);
// Perform W0 << W1 into RES.  A negative shift count is treated as a
// right shift.  Shifts are never reported as overflowing.

bool
operator_lshift::wi_op_overflows (wide_int &res, tree type,
				  const wide_int &w0, const wide_int &w1) const
{
  signop sign = TYPE_SIGN (type);
  if (wi::neg_p (w1))
    {
      // It's unclear from the C standard whether shifts can overflow.
      // The following code ignores overflow; perhaps a C standard
      // interpretation ruling is needed.
      res = wi::rshift (w0, -w1, sign);
    }
  else
    res = wi::lshift (w0, w1);
  return false;
}
2490 operator_lshift::op1_range (irange
&r
,
2494 relation_trio
) const
2496 if (lhs
.undefined_p ())
2500 if (!lhs
.contains_p (build_zero_cst (type
)))
2501 r
.set_nonzero (type
);
2503 r
.set_varying (type
);
2505 if (op2
.singleton_p (&shift_amount
))
2507 wide_int shift
= wi::to_wide (shift_amount
);
2508 if (wi::lt_p (shift
, 0, SIGNED
))
2510 if (wi::ge_p (shift
, wi::uhwi (TYPE_PRECISION (type
),
2511 TYPE_PRECISION (op2
.type ())),
2520 // Work completely in unsigned mode to start.
2522 int_range_max tmp_range
;
2523 if (TYPE_SIGN (type
) == SIGNED
)
2525 int_range_max tmp
= lhs
;
2526 utype
= unsigned_type_for (type
);
2527 range_cast (tmp
, utype
);
2528 op_rshift
.fold_range (tmp_range
, utype
, tmp
, op2
);
2531 op_rshift
.fold_range (tmp_range
, utype
, lhs
, op2
);
2533 // Start with ranges which can produce the LHS by right shifting the
2534 // result by the shift amount.
2535 // ie [0x08, 0xF0] = op1 << 2 will start with
2536 // [00001000, 11110000] = op1 << 2
2537 // [0x02, 0x4C] aka [00000010, 00111100]
2539 // Then create a range from the LB with the least significant upper bit
2540 // set, to the upper bound with all the bits set.
2541 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2543 // Ideally we do this for each subrange, but just lump them all for now.
2544 unsigned low_bits
= TYPE_PRECISION (utype
)
2545 - TREE_INT_CST_LOW (shift_amount
);
2546 wide_int up_mask
= wi::mask (low_bits
, true, TYPE_PRECISION (utype
));
2547 wide_int new_ub
= wi::bit_or (up_mask
, tmp_range
.upper_bound ());
2548 wide_int new_lb
= wi::set_bit (tmp_range
.lower_bound (), low_bits
);
2549 int_range
<2> fill_range (utype
, new_lb
, new_ub
);
2550 tmp_range
.union_ (fill_range
);
2553 range_cast (tmp_range
, type
);
2555 r
.intersect (tmp_range
);
2559 return !r
.varying_p ();
2563 operator_rshift::op1_range (irange
&r
,
2567 relation_trio
) const
2570 if (lhs
.undefined_p ())
2572 if (op2
.singleton_p (&shift
))
2574 // Ignore nonsensical shifts.
2575 unsigned prec
= TYPE_PRECISION (type
);
2576 if (wi::ge_p (wi::to_wide (shift
),
2577 wi::uhwi (prec
, TYPE_PRECISION (TREE_TYPE (shift
))),
2580 if (wi::to_wide (shift
) == 0)
2586 // Folding the original operation may discard some impossible
2587 // ranges from the LHS.
2588 int_range_max lhs_refined
;
2589 op_rshift
.fold_range (lhs_refined
, type
, int_range
<1> (type
), op2
);
2590 lhs_refined
.intersect (lhs
);
2591 if (lhs_refined
.undefined_p ())
2596 int_range_max
shift_range (shift
, shift
);
2597 int_range_max lb
, ub
;
2598 op_lshift
.fold_range (lb
, type
, lhs_refined
, shift_range
);
2600 // 0000 0111 = OP1 >> 3
2602 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2603 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2604 // right hand side (0x07).
2605 tree mask
= fold_build1 (BIT_NOT_EXPR
, type
,
2606 fold_build2 (LSHIFT_EXPR
, type
,
2607 build_minus_one_cst (type
),
2609 int_range_max
mask_range (build_zero_cst (type
), mask
);
2610 op_plus
.fold_range (ub
, type
, lb
, mask_range
);
2613 if (!lhs_refined
.contains_p (build_zero_cst (type
)))
2615 mask_range
.invert ();
2616 r
.intersect (mask_range
);
// Perform W0 >> W1 into RES.  A negative shift count is treated as a
// left shift.  Shifts are never reported as overflowing.

bool
operator_rshift::wi_op_overflows (wide_int &res,
				  tree type,
				  const wide_int &w0,
				  const wide_int &w1) const
{
  signop sign = TYPE_SIGN (type);
  if (wi::neg_p (w1))
    res = wi::lshift (w0, -w1);
  else
    {
      // It's unclear from the C standard whether shifts can overflow.
      // The following code ignores overflow; perhaps a C standard
      // interpretation ruling is needed.
      res = wi::rshift (w0, w1, sign);
    }
  return false;
}
2643 operator_rshift::fold_range (irange
&r
, tree type
,
2646 relation_trio rel
) const
2648 int_range_max shift
;
2649 if (!get_shift_range (shift
, type
, op2
))
2651 if (op2
.undefined_p ())
2658 return range_operator::fold_range (r
, type
, op1
, shift
, rel
);
// Fold a right shift of the endpoint pairs via the generic cross
// product of the four endpoint combinations.

void
operator_rshift::wi_fold (irange &r, tree type,
			  const wide_int &lh_lb, const wide_int &lh_ub,
			  const wide_int &rh_lb, const wide_int &rh_ub) const
{
  wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
}
2670 class operator_cast
: public range_operator
2672 using range_operator::fold_range
;
2673 using range_operator::op1_range
;
2675 virtual bool fold_range (irange
&r
, tree type
,
2678 relation_trio rel
= TRIO_VARYING
) const;
2679 virtual bool op1_range (irange
&r
, tree type
,
2682 relation_trio rel
= TRIO_VARYING
) const;
2683 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
2686 relation_kind
) const;
2688 bool truncating_cast_p (const irange
&inner
, const irange
&outer
) const;
2689 bool inside_domain_p (const wide_int
&min
, const wide_int
&max
,
2690 const irange
&outer
) const;
2691 void fold_pair (irange
&r
, unsigned index
, const irange
&inner
,
2692 const irange
&outer
) const;
2695 // Add a partial equivalence between the LHS and op1 for casts.
2698 operator_cast::lhs_op1_relation (const irange
&lhs
,
2700 const irange
&op2 ATTRIBUTE_UNUSED
,
2701 relation_kind
) const
2703 if (lhs
.undefined_p () || op1
.undefined_p ())
2704 return VREL_VARYING
;
2705 unsigned lhs_prec
= TYPE_PRECISION (lhs
.type ());
2706 unsigned op1_prec
= TYPE_PRECISION (op1
.type ());
2707 // If the result gets sign extended into a larger type check first if this
2708 // qualifies as a partial equivalence.
2709 if (TYPE_SIGN (op1
.type ()) == SIGNED
&& lhs_prec
> op1_prec
)
2711 // If the result is sign extended, and the LHS is larger than op1,
2712 // check if op1's range can be negative as the sign extension will
2713 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2714 int_range
<3> negs
= range_negatives (op1
.type ());
2715 negs
.intersect (op1
);
2716 if (!negs
.undefined_p ())
2717 return VREL_VARYING
;
2720 unsigned prec
= MIN (lhs_prec
, op1_prec
);
2721 return bits_to_pe (prec
);
2724 // Return TRUE if casting from INNER to OUTER is a truncating cast.
2727 operator_cast::truncating_cast_p (const irange
&inner
,
2728 const irange
&outer
) const
2730 return TYPE_PRECISION (outer
.type ()) < TYPE_PRECISION (inner
.type ());
2733 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
2736 operator_cast::inside_domain_p (const wide_int
&min
,
2737 const wide_int
&max
,
2738 const irange
&range
) const
2740 wide_int domain_min
= wi::to_wide (vrp_val_min (range
.type ()));
2741 wide_int domain_max
= wi::to_wide (vrp_val_max (range
.type ()));
2742 signop domain_sign
= TYPE_SIGN (range
.type ());
2743 return (wi::le_p (min
, domain_max
, domain_sign
)
2744 && wi::le_p (max
, domain_max
, domain_sign
)
2745 && wi::ge_p (min
, domain_min
, domain_sign
)
2746 && wi::ge_p (max
, domain_min
, domain_sign
));
2750 // Helper for fold_range which work on a pair at a time.
2753 operator_cast::fold_pair (irange
&r
, unsigned index
,
2754 const irange
&inner
,
2755 const irange
&outer
) const
2757 tree inner_type
= inner
.type ();
2758 tree outer_type
= outer
.type ();
2759 signop inner_sign
= TYPE_SIGN (inner_type
);
2760 unsigned outer_prec
= TYPE_PRECISION (outer_type
);
2762 // check to see if casting from INNER to OUTER is a conversion that
2763 // fits in the resulting OUTER type.
2764 wide_int inner_lb
= inner
.lower_bound (index
);
2765 wide_int inner_ub
= inner
.upper_bound (index
);
2766 if (truncating_cast_p (inner
, outer
))
2768 // We may be able to accommodate a truncating cast if the
2769 // resulting range can be represented in the target type...
2770 if (wi::rshift (wi::sub (inner_ub
, inner_lb
),
2771 wi::uhwi (outer_prec
, TYPE_PRECISION (inner
.type ())),
2774 r
.set_varying (outer_type
);
2778 // ...but we must still verify that the final range fits in the
2779 // domain. This catches -fstrict-enum restrictions where the domain
2780 // range is smaller than what fits in the underlying type.
2781 wide_int min
= wide_int::from (inner_lb
, outer_prec
, inner_sign
);
2782 wide_int max
= wide_int::from (inner_ub
, outer_prec
, inner_sign
);
2783 if (inside_domain_p (min
, max
, outer
))
2784 create_possibly_reversed_range (r
, outer_type
, min
, max
);
2786 r
.set_varying (outer_type
);
2791 operator_cast::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
2792 const irange
&inner
,
2793 const irange
&outer
,
2794 relation_trio
) const
2796 if (empty_range_varying (r
, type
, inner
, outer
))
2799 gcc_checking_assert (outer
.varying_p ());
2800 gcc_checking_assert (inner
.num_pairs () > 0);
2802 // Avoid a temporary by folding the first pair directly into the result.
2803 fold_pair (r
, 0, inner
, outer
);
2805 // Then process any additional pairs by unioning with their results.
2806 for (unsigned x
= 1; x
< inner
.num_pairs (); ++x
)
2809 fold_pair (tmp
, x
, inner
, outer
);
2815 // Update the nonzero mask. Truncating casts are problematic unless
2816 // the conversion fits in the resulting outer type.
2817 wide_int nz
= inner
.get_nonzero_bits ();
2818 if (truncating_cast_p (inner
, outer
)
2819 && wi::rshift (nz
, wi::uhwi (TYPE_PRECISION (outer
.type ()),
2820 TYPE_PRECISION (inner
.type ())),
2821 TYPE_SIGN (inner
.type ())) != 0)
2823 nz
= wide_int::from (nz
, TYPE_PRECISION (type
), TYPE_SIGN (inner
.type ()));
2824 r
.set_nonzero_bits (nz
);
2830 operator_cast::op1_range (irange
&r
, tree type
,
2833 relation_trio
) const
2835 if (lhs
.undefined_p ())
2837 tree lhs_type
= lhs
.type ();
2838 gcc_checking_assert (types_compatible_p (op2
.type(), type
));
2840 // If we are calculating a pointer, shortcut to what we really care about.
2841 if (POINTER_TYPE_P (type
))
2843 // Conversion from other pointers or a constant (including 0/NULL)
2844 // are straightforward.
2845 if (POINTER_TYPE_P (lhs
.type ())
2846 || (lhs
.singleton_p ()
2847 && TYPE_PRECISION (lhs
.type ()) >= TYPE_PRECISION (type
)))
2850 range_cast (r
, type
);
2854 // If the LHS is not a pointer nor a singleton, then it is
2855 // either VARYING or non-zero.
2856 if (!lhs
.contains_p (build_zero_cst (lhs
.type ())))
2857 r
.set_nonzero (type
);
2859 r
.set_varying (type
);
2865 if (truncating_cast_p (op2
, lhs
))
2867 if (lhs
.varying_p ())
2868 r
.set_varying (type
);
2871 // We want to insert the LHS as an unsigned value since it
2872 // would not trigger the signed bit of the larger type.
2873 int_range_max converted_lhs
= lhs
;
2874 range_cast (converted_lhs
, unsigned_type_for (lhs_type
));
2875 range_cast (converted_lhs
, type
);
2876 // Start by building the positive signed outer range for the type.
2877 wide_int lim
= wi::set_bit_in_zero (TYPE_PRECISION (lhs_type
),
2878 TYPE_PRECISION (type
));
2879 r
= int_range
<1> (type
, lim
, wi::max_value (TYPE_PRECISION (type
),
2881 // For the signed part, we need to simply union the 2 ranges now.
2882 r
.union_ (converted_lhs
);
2884 // Create maximal negative number outside of LHS bits.
2885 lim
= wi::mask (TYPE_PRECISION (lhs_type
), true,
2886 TYPE_PRECISION (type
));
2887 // Add this to the unsigned LHS range(s).
2888 int_range_max
lim_range (type
, lim
, lim
);
2889 int_range_max lhs_neg
;
2890 range_op_handler (PLUS_EXPR
, type
).fold_range (lhs_neg
, type
,
2893 // lhs_neg now has all the negative versions of the LHS.
2894 // Now union in all the values from SIGNED MIN (0x80000) to
2895 // lim-1 in order to fill in all the ranges with the upper
2898 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
2899 // we don't need to create a range from min to lim-1
2900 // calculate neg range traps trying to create [lim, lim - 1].
2901 wide_int min_val
= wi::min_value (TYPE_PRECISION (type
), SIGNED
);
2904 int_range_max
neg (type
,
2905 wi::min_value (TYPE_PRECISION (type
),
2908 lhs_neg
.union_ (neg
);
2910 // And finally, munge the signed and unsigned portions.
2913 // And intersect with any known value passed in the extra operand.
2919 if (TYPE_PRECISION (lhs_type
) == TYPE_PRECISION (type
))
2923 // The cast is not truncating, and the range is restricted to
2924 // the range of the RHS by this assignment.
2926 // Cast the range of the RHS to the type of the LHS.
2927 fold_range (tmp
, lhs_type
, int_range
<1> (type
), int_range
<1> (lhs_type
));
2928 // Intersect this with the LHS range will produce the range,
2929 // which will be cast to the RHS type before returning.
2930 tmp
.intersect (lhs
);
2933 // Cast the calculated range to the type of the RHS.
2934 fold_range (r
, type
, tmp
, int_range
<1> (type
));
2939 class operator_logical_and
: public range_operator
2941 using range_operator::fold_range
;
2942 using range_operator::op1_range
;
2943 using range_operator::op2_range
;
2945 virtual bool fold_range (irange
&r
, tree type
,
2948 relation_trio rel
= TRIO_VARYING
) const;
2949 virtual bool op1_range (irange
&r
, tree type
,
2952 relation_trio rel
= TRIO_VARYING
) const;
2953 virtual bool op2_range (irange
&r
, tree type
,
2956 relation_trio rel
= TRIO_VARYING
) const;
2961 operator_logical_and::fold_range (irange
&r
, tree type
,
2964 relation_trio
) const
2966 if (empty_range_varying (r
, type
, lh
, rh
))
2969 // 0 && anything is 0.
2970 if ((wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (lh
.upper_bound (), 0))
2971 || (wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (rh
.upper_bound (), 0)))
2972 r
= range_false (type
);
2973 else if (lh
.contains_p (build_zero_cst (lh
.type ()))
2974 || rh
.contains_p (build_zero_cst (rh
.type ())))
2975 // To reach this point, there must be a logical 1 on each side, and
2976 // the only remaining question is whether there is a zero or not.
2977 r
= range_true_and_false (type
);
2979 r
= range_true (type
);
2984 operator_logical_and::op1_range (irange
&r
, tree type
,
2986 const irange
&op2 ATTRIBUTE_UNUSED
,
2987 relation_trio
) const
2989 switch (get_bool_state (r
, lhs
, type
))
2992 // A true result means both sides of the AND must be true.
2993 r
= range_true (type
);
2996 // Any other result means only one side has to be false, the
2997 // other side can be anything. So we cannot be sure of any
2999 r
= range_true_and_false (type
);
3006 operator_logical_and::op2_range (irange
&r
, tree type
,
3009 relation_trio
) const
3011 return operator_logical_and::op1_range (r
, type
, lhs
, op1
);
3015 class operator_bitwise_and
: public range_operator
3017 using range_operator::op1_range
;
3018 using range_operator::op2_range
;
3020 virtual bool op1_range (irange
&r
, tree type
,
3023 relation_trio rel
= TRIO_VARYING
) const;
3024 virtual bool op2_range (irange
&r
, tree type
,
3027 relation_trio rel
= TRIO_VARYING
) const;
3028 virtual void wi_fold (irange
&r
, tree type
,
3029 const wide_int
&lh_lb
,
3030 const wide_int
&lh_ub
,
3031 const wide_int
&rh_lb
,
3032 const wide_int
&rh_ub
) const;
3033 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
3036 relation_kind
) const;
3038 void simple_op1_range_solver (irange
&r
, tree type
,
3040 const irange
&op2
) const;
3044 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
3045 // by considering the number of leading redundant sign bit copies.
3046 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
3047 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
3049 wi_optimize_signed_bitwise_op (irange
&r
, tree type
,
3050 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3051 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
3053 int lh_clrsb
= MIN (wi::clrsb (lh_lb
), wi::clrsb (lh_ub
));
3054 int rh_clrsb
= MIN (wi::clrsb (rh_lb
), wi::clrsb (rh_ub
));
3055 int new_clrsb
= MIN (lh_clrsb
, rh_clrsb
);
3058 int type_prec
= TYPE_PRECISION (type
);
3059 int rprec
= (type_prec
- new_clrsb
) - 1;
3060 value_range_with_overflow (r
, type
,
3061 wi::mask (rprec
, true, type_prec
),
3062 wi::mask (rprec
, false, type_prec
));
3066 // An AND of 8,16, 32 or 64 bits can produce a partial equivalence between
3070 operator_bitwise_and::lhs_op1_relation (const irange
&lhs
,
3073 relation_kind
) const
3075 if (lhs
.undefined_p () || op1
.undefined_p () || op2
.undefined_p ())
3076 return VREL_VARYING
;
3077 if (!op2
.singleton_p ())
3078 return VREL_VARYING
;
3079 // if val == 0xff or 0xFFFF OR 0Xffffffff OR 0Xffffffffffffffff, return TRUE
3080 int prec1
= TYPE_PRECISION (op1
.type ());
3081 int prec2
= TYPE_PRECISION (op2
.type ());
3083 wide_int mask
= op2
.lower_bound ();
3084 if (wi::eq_p (mask
, wi::mask (8, false, prec2
)))
3086 else if (wi::eq_p (mask
, wi::mask (16, false, prec2
)))
3088 else if (wi::eq_p (mask
, wi::mask (32, false, prec2
)))
3090 else if (wi::eq_p (mask
, wi::mask (64, false, prec2
)))
3092 return bits_to_pe (MIN (prec1
, mask_prec
));
3095 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
3096 // possible. Basically, see if we can optimize:
3100 // [LB op Z, UB op Z]
3102 // If the optimization was successful, accumulate the range in R and
3106 wi_optimize_and_or (irange
&r
,
3107 enum tree_code code
,
3109 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3110 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
3112 // Calculate the singleton mask among the ranges, if any.
3113 wide_int lower_bound
, upper_bound
, mask
;
3114 if (wi::eq_p (rh_lb
, rh_ub
))
3117 lower_bound
= lh_lb
;
3118 upper_bound
= lh_ub
;
3120 else if (wi::eq_p (lh_lb
, lh_ub
))
3123 lower_bound
= rh_lb
;
3124 upper_bound
= rh_ub
;
3129 // If Z is a constant which (for op | its bitwise not) has n
3130 // consecutive least significant bits cleared followed by m 1
3131 // consecutive bits set immediately above it and either
3132 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
3134 // The least significant n bits of all the values in the range are
3135 // cleared or set, the m bits above it are preserved and any bits
3136 // above these are required to be the same for all values in the
3140 if (code
== BIT_IOR_EXPR
)
3142 if (wi::eq_p (w
, 0))
3143 n
= w
.get_precision ();
3147 w
= ~(w
| wi::mask (n
, false, w
.get_precision ()));
3148 if (wi::eq_p (w
, 0))
3149 m
= w
.get_precision () - n
;
3151 m
= wi::ctz (w
) - n
;
3153 wide_int new_mask
= wi::mask (m
+ n
, true, w
.get_precision ());
3154 if ((new_mask
& lower_bound
) != (new_mask
& upper_bound
))
3157 wide_int res_lb
, res_ub
;
3158 if (code
== BIT_AND_EXPR
)
3160 res_lb
= wi::bit_and (lower_bound
, mask
);
3161 res_ub
= wi::bit_and (upper_bound
, mask
);
3163 else if (code
== BIT_IOR_EXPR
)
3165 res_lb
= wi::bit_or (lower_bound
, mask
);
3166 res_ub
= wi::bit_or (upper_bound
, mask
);
3170 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
3172 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
3173 if (code
== BIT_IOR_EXPR
&& wi::ne_p (mask
, 0))
3176 tmp
.set_nonzero (type
);
3182 // For range [LB, UB] compute two wide_int bit masks.
3184 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3185 // for all numbers in the range the bit is 0, otherwise it might be 0
3188 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3189 // for all numbers in the range the bit is 1, otherwise it might be 0
3193 wi_set_zero_nonzero_bits (tree type
,
3194 const wide_int
&lb
, const wide_int
&ub
,
3195 wide_int
&maybe_nonzero
,
3196 wide_int
&mustbe_nonzero
)
3198 signop sign
= TYPE_SIGN (type
);
3200 if (wi::eq_p (lb
, ub
))
3201 maybe_nonzero
= mustbe_nonzero
= lb
;
3202 else if (wi::ge_p (lb
, 0, sign
) || wi::lt_p (ub
, 0, sign
))
3204 wide_int xor_mask
= lb
^ ub
;
3205 maybe_nonzero
= lb
| ub
;
3206 mustbe_nonzero
= lb
& ub
;
3209 wide_int mask
= wi::mask (wi::floor_log2 (xor_mask
), false,
3210 maybe_nonzero
.get_precision ());
3211 maybe_nonzero
= maybe_nonzero
| mask
;
3212 mustbe_nonzero
= wi::bit_and_not (mustbe_nonzero
, mask
);
3217 maybe_nonzero
= wi::minus_one (lb
.get_precision ());
3218 mustbe_nonzero
= wi::zero (lb
.get_precision ());
3223 operator_bitwise_and::wi_fold (irange
&r
, tree type
,
3224 const wide_int
&lh_lb
,
3225 const wide_int
&lh_ub
,
3226 const wide_int
&rh_lb
,
3227 const wide_int
&rh_ub
) const
3229 if (wi_optimize_and_or (r
, BIT_AND_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3232 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3233 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3234 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3235 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3236 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3237 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3239 wide_int new_lb
= mustbe_nonzero_lh
& mustbe_nonzero_rh
;
3240 wide_int new_ub
= maybe_nonzero_lh
& maybe_nonzero_rh
;
3241 signop sign
= TYPE_SIGN (type
);
3242 unsigned prec
= TYPE_PRECISION (type
);
3243 // If both input ranges contain only negative values, we can
3244 // truncate the result range maximum to the minimum of the
3245 // input range maxima.
3246 if (wi::lt_p (lh_ub
, 0, sign
) && wi::lt_p (rh_ub
, 0, sign
))
3248 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3249 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3251 // If either input range contains only non-negative values
3252 // we can truncate the result range maximum to the respective
3253 // maximum of the input range.
3254 if (wi::ge_p (lh_lb
, 0, sign
))
3255 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3256 if (wi::ge_p (rh_lb
, 0, sign
))
3257 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3258 // PR68217: In case of signed & sign-bit-CST should
3259 // result in [-INF, 0] instead of [-INF, INF].
3260 if (wi::gt_p (new_lb
, new_ub
, sign
))
3262 wide_int sign_bit
= wi::set_bit_in_zero (prec
- 1, prec
);
3264 && ((wi::eq_p (lh_lb
, lh_ub
)
3265 && !wi::cmps (lh_lb
, sign_bit
))
3266 || (wi::eq_p (rh_lb
, rh_ub
)
3267 && !wi::cmps (rh_lb
, sign_bit
))))
3269 new_lb
= wi::min_value (prec
, sign
);
3270 new_ub
= wi::zero (prec
);
3273 // If the limits got swapped around, return varying.
3274 if (wi::gt_p (new_lb
, new_ub
,sign
))
3277 && wi_optimize_signed_bitwise_op (r
, type
,
3281 r
.set_varying (type
);
3284 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3288 set_nonzero_range_from_mask (irange
&r
, tree type
, const irange
&lhs
)
3290 if (!lhs
.contains_p (build_zero_cst (type
)))
3291 r
= range_nonzero (type
);
3293 r
.set_varying (type
);
3296 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
3297 (otherwise return VAL). VAL and MASK must be zero-extended for
3298 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3299 (to transform signed values into unsigned) and at the end xor
3303 masked_increment (const wide_int
&val_in
, const wide_int
&mask
,
3304 const wide_int
&sgnbit
, unsigned int prec
)
3306 wide_int bit
= wi::one (prec
), res
;
3309 wide_int val
= val_in
^ sgnbit
;
3310 for (i
= 0; i
< prec
; i
++, bit
+= bit
)
3313 if ((res
& bit
) == 0)
3316 res
= wi::bit_and_not (val
+ bit
, res
);
3318 if (wi::gtu_p (res
, val
))
3319 return res
^ sgnbit
;
3321 return val
^ sgnbit
;
3324 // This was shamelessly stolen from register_edge_assert_for_2 and
3325 // adjusted to work with iranges.
3328 operator_bitwise_and::simple_op1_range_solver (irange
&r
, tree type
,
3330 const irange
&op2
) const
3332 if (!op2
.singleton_p ())
3334 set_nonzero_range_from_mask (r
, type
, lhs
);
3337 unsigned int nprec
= TYPE_PRECISION (type
);
3338 wide_int cst2v
= op2
.lower_bound ();
3339 bool cst2n
= wi::neg_p (cst2v
, TYPE_SIGN (type
));
3342 sgnbit
= wi::set_bit_in_zero (nprec
- 1, nprec
);
3344 sgnbit
= wi::zero (nprec
);
3346 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3348 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3349 // maximum unsigned value is ~0. For signed comparison, if CST2
3350 // doesn't have the most significant bit set, handle it similarly. If
3351 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
3352 wide_int valv
= lhs
.lower_bound ();
3353 wide_int minv
= valv
& cst2v
, maxv
;
3354 bool we_know_nothing
= false;
3357 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3358 minv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3361 // If we can't determine anything on this bound, fall
3362 // through and conservatively solve for the other end point.
3363 we_know_nothing
= true;
3366 maxv
= wi::mask (nprec
- (cst2n
? 1 : 0), false, nprec
);
3367 if (we_know_nothing
)
3368 r
.set_varying (type
);
3370 r
= int_range
<1> (type
, minv
, maxv
);
3372 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3374 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3375 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3377 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3379 // For signed comparison, if CST2 doesn't have most significant bit
3380 // set, handle it similarly. If CST2 has MSB set, the maximum is
3381 // the same and minimum is INT_MIN.
3382 valv
= lhs
.upper_bound ();
3383 minv
= valv
& cst2v
;
3388 maxv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3391 // If we couldn't determine anything on either bound, return
3393 if (we_know_nothing
)
3401 int_range
<1> upper_bits (type
, minv
, maxv
);
3402 r
.intersect (upper_bits
);
3406 operator_bitwise_and::op1_range (irange
&r
, tree type
,
3409 relation_trio
) const
3411 if (lhs
.undefined_p ())
3413 if (types_compatible_p (type
, boolean_type_node
))
3414 return op_logical_and
.op1_range (r
, type
, lhs
, op2
);
3417 for (unsigned i
= 0; i
< lhs
.num_pairs (); ++i
)
3419 int_range_max
chunk (lhs
.type (),
3420 lhs
.lower_bound (i
),
3421 lhs
.upper_bound (i
));
3423 simple_op1_range_solver (res
, type
, chunk
, op2
);
3426 if (r
.undefined_p ())
3427 set_nonzero_range_from_mask (r
, type
, lhs
);
3429 // For 0 = op1 & MASK, op1 is ~MASK.
3430 if (lhs
.zero_p () && op2
.singleton_p ())
3432 wide_int nz
= wi::bit_not (op2
.get_nonzero_bits ());
3433 int_range
<2> tmp (type
);
3434 tmp
.set_nonzero_bits (nz
);
3441 operator_bitwise_and::op2_range (irange
&r
, tree type
,
3444 relation_trio
) const
3446 return operator_bitwise_and::op1_range (r
, type
, lhs
, op1
);
3450 class operator_logical_or
: public range_operator
3452 using range_operator::fold_range
;
3453 using range_operator::op1_range
;
3454 using range_operator::op2_range
;
3456 virtual bool fold_range (irange
&r
, tree type
,
3459 relation_trio rel
= TRIO_VARYING
) const;
3460 virtual bool op1_range (irange
&r
, tree type
,
3463 relation_trio rel
= TRIO_VARYING
) const;
3464 virtual bool op2_range (irange
&r
, tree type
,
3467 relation_trio rel
= TRIO_VARYING
) const;
3471 operator_logical_or::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3474 relation_trio
) const
3476 if (empty_range_varying (r
, type
, lh
, rh
))
3485 operator_logical_or::op1_range (irange
&r
, tree type
,
3487 const irange
&op2 ATTRIBUTE_UNUSED
,
3488 relation_trio
) const
3490 switch (get_bool_state (r
, lhs
, type
))
3493 // A false result means both sides of the OR must be false.
3494 r
= range_false (type
);
3497 // Any other result means only one side has to be true, the
3498 // other side can be anything. so we can't be sure of any result
3500 r
= range_true_and_false (type
);
3507 operator_logical_or::op2_range (irange
&r
, tree type
,
3510 relation_trio
) const
3512 return operator_logical_or::op1_range (r
, type
, lhs
, op1
);
3516 class operator_bitwise_or
: public range_operator
3518 using range_operator::op1_range
;
3519 using range_operator::op2_range
;
3521 virtual bool op1_range (irange
&r
, tree type
,
3524 relation_trio rel
= TRIO_VARYING
) const;
3525 virtual bool op2_range (irange
&r
, tree type
,
3528 relation_trio rel
= TRIO_VARYING
) const;
3529 virtual void wi_fold (irange
&r
, tree type
,
3530 const wide_int
&lh_lb
,
3531 const wide_int
&lh_ub
,
3532 const wide_int
&rh_lb
,
3533 const wide_int
&rh_ub
) const;
3537 operator_bitwise_or::wi_fold (irange
&r
, tree type
,
3538 const wide_int
&lh_lb
,
3539 const wide_int
&lh_ub
,
3540 const wide_int
&rh_lb
,
3541 const wide_int
&rh_ub
) const
3543 if (wi_optimize_and_or (r
, BIT_IOR_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3546 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3547 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3548 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3549 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3550 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3551 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3552 wide_int new_lb
= mustbe_nonzero_lh
| mustbe_nonzero_rh
;
3553 wide_int new_ub
= maybe_nonzero_lh
| maybe_nonzero_rh
;
3554 signop sign
= TYPE_SIGN (type
);
3555 // If the input ranges contain only positive values we can
3556 // truncate the minimum of the result range to the maximum
3557 // of the input range minima.
3558 if (wi::ge_p (lh_lb
, 0, sign
)
3559 && wi::ge_p (rh_lb
, 0, sign
))
3561 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3562 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3564 // If either input range contains only negative values
3565 // we can truncate the minimum of the result range to the
3566 // respective minimum range.
3567 if (wi::lt_p (lh_ub
, 0, sign
))
3568 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3569 if (wi::lt_p (rh_ub
, 0, sign
))
3570 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3571 // If the limits got swapped around, return a conservative range.
3572 if (wi::gt_p (new_lb
, new_ub
, sign
))
3574 // Make sure that nonzero|X is nonzero.
3575 if (wi::gt_p (lh_lb
, 0, sign
)
3576 || wi::gt_p (rh_lb
, 0, sign
)
3577 || wi::lt_p (lh_ub
, 0, sign
)
3578 || wi::lt_p (rh_ub
, 0, sign
))
3579 r
.set_nonzero (type
);
3580 else if (sign
== SIGNED
3581 && wi_optimize_signed_bitwise_op (r
, type
,
3586 r
.set_varying (type
);
3589 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3593 operator_bitwise_or::op1_range (irange
&r
, tree type
,
3596 relation_trio
) const
3598 if (lhs
.undefined_p ())
3600 // If this is really a logical wi_fold, call that.
3601 if (types_compatible_p (type
, boolean_type_node
))
3602 return op_logical_or
.op1_range (r
, type
, lhs
, op2
);
3606 tree zero
= build_zero_cst (type
);
3607 r
= int_range
<1> (zero
, zero
);
3610 r
.set_varying (type
);
3615 operator_bitwise_or::op2_range (irange
&r
, tree type
,
3618 relation_trio
) const
3620 return operator_bitwise_or::op1_range (r
, type
, lhs
, op1
);
3624 class operator_bitwise_xor
: public range_operator
3626 using range_operator::op1_range
;
3627 using range_operator::op2_range
;
3629 virtual void wi_fold (irange
&r
, tree type
,
3630 const wide_int
&lh_lb
,
3631 const wide_int
&lh_ub
,
3632 const wide_int
&rh_lb
,
3633 const wide_int
&rh_ub
) const;
3634 virtual bool op1_range (irange
&r
, tree type
,
3637 relation_trio rel
= TRIO_VARYING
) const;
3638 virtual bool op2_range (irange
&r
, tree type
,
3641 relation_trio rel
= TRIO_VARYING
) const;
3642 virtual bool op1_op2_relation_effect (irange
&lhs_range
,
3644 const irange
&op1_range
,
3645 const irange
&op2_range
,
3646 relation_kind rel
) const;
3650 operator_bitwise_xor::wi_fold (irange
&r
, tree type
,
3651 const wide_int
&lh_lb
,
3652 const wide_int
&lh_ub
,
3653 const wide_int
&rh_lb
,
3654 const wide_int
&rh_ub
) const
3656 signop sign
= TYPE_SIGN (type
);
3657 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3658 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3659 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3660 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3661 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3662 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3664 wide_int result_zero_bits
= ((mustbe_nonzero_lh
& mustbe_nonzero_rh
)
3665 | ~(maybe_nonzero_lh
| maybe_nonzero_rh
));
3666 wide_int result_one_bits
3667 = (wi::bit_and_not (mustbe_nonzero_lh
, maybe_nonzero_rh
)
3668 | wi::bit_and_not (mustbe_nonzero_rh
, maybe_nonzero_lh
));
3669 wide_int new_ub
= ~result_zero_bits
;
3670 wide_int new_lb
= result_one_bits
;
3672 // If the range has all positive or all negative values, the result
3673 // is better than VARYING.
3674 if (wi::lt_p (new_lb
, 0, sign
) || wi::ge_p (new_ub
, 0, sign
))
3675 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3676 else if (sign
== SIGNED
3677 && wi_optimize_signed_bitwise_op (r
, type
,
3682 r
.set_varying (type
);
3684 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
3685 if (wi::lt_p (lh_ub
, rh_lb
, sign
)
3686 || wi::lt_p (rh_ub
, lh_lb
, sign
)
3687 || wi::ne_p (result_one_bits
, 0))
3690 tmp
.set_nonzero (type
);
3696 operator_bitwise_xor::op1_op2_relation_effect (irange
&lhs_range
,
3700 relation_kind rel
) const
3702 if (rel
== VREL_VARYING
)
3705 int_range
<2> rel_range
;
3710 rel_range
.set_zero (type
);
3713 rel_range
.set_nonzero (type
);
3719 lhs_range
.intersect (rel_range
);
3724 operator_bitwise_xor::op1_range (irange
&r
, tree type
,
3727 relation_trio
) const
3729 if (lhs
.undefined_p () || lhs
.varying_p ())
3734 if (types_compatible_p (type
, boolean_type_node
))
3736 switch (get_bool_state (r
, lhs
, type
))
3739 if (op2
.varying_p ())
3740 r
.set_varying (type
);
3741 else if (op2
.zero_p ())
3742 r
= range_true (type
);
3743 // See get_bool_state for the rationale
3744 else if (op2
.contains_p (build_zero_cst (op2
.type ())))
3745 r
= range_true_and_false (type
);
3747 r
= range_false (type
);
3757 r
.set_varying (type
);
3762 operator_bitwise_xor::op2_range (irange
&r
, tree type
,
3765 relation_trio
) const
3767 return operator_bitwise_xor::op1_range (r
, type
, lhs
, op1
);
3770 class operator_trunc_mod
: public range_operator
3772 using range_operator::op1_range
;
3773 using range_operator::op2_range
;
3775 virtual void wi_fold (irange
&r
, tree type
,
3776 const wide_int
&lh_lb
,
3777 const wide_int
&lh_ub
,
3778 const wide_int
&rh_lb
,
3779 const wide_int
&rh_ub
) const;
3780 virtual bool op1_range (irange
&r
, tree type
,
3783 relation_trio
) const;
3784 virtual bool op2_range (irange
&r
, tree type
,
3787 relation_trio
) const;
3791 operator_trunc_mod::wi_fold (irange
&r
, tree type
,
3792 const wide_int
&lh_lb
,
3793 const wide_int
&lh_ub
,
3794 const wide_int
&rh_lb
,
3795 const wide_int
&rh_ub
) const
3797 wide_int new_lb
, new_ub
, tmp
;
3798 signop sign
= TYPE_SIGN (type
);
3799 unsigned prec
= TYPE_PRECISION (type
);
3801 // Mod 0 is undefined.
3802 if (wi_zero_p (type
, rh_lb
, rh_ub
))
3808 // Check for constant and try to fold.
3809 if (lh_lb
== lh_ub
&& rh_lb
== rh_ub
)
3811 wi::overflow_type ov
= wi::OVF_NONE
;
3812 tmp
= wi::mod_trunc (lh_lb
, rh_lb
, sign
, &ov
);
3813 if (ov
== wi::OVF_NONE
)
3815 r
= int_range
<2> (type
, tmp
, tmp
);
3820 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
3825 new_ub
= wi::smax (new_ub
, tmp
);
3828 if (sign
== UNSIGNED
)
3829 new_lb
= wi::zero (prec
);
3834 if (wi::gts_p (tmp
, 0))
3835 tmp
= wi::zero (prec
);
3836 new_lb
= wi::smax (new_lb
, tmp
);
3839 if (sign
== SIGNED
&& wi::neg_p (tmp
))
3840 tmp
= wi::zero (prec
);
3841 new_ub
= wi::min (new_ub
, tmp
, sign
);
3843 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3847 operator_trunc_mod::op1_range (irange
&r
, tree type
,
3850 relation_trio
) const
3852 if (lhs
.undefined_p ())
3855 signop sign
= TYPE_SIGN (type
);
3856 unsigned prec
= TYPE_PRECISION (type
);
3857 // (a % b) >= x && x > 0 , then a >= x.
3858 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
3860 r
= value_range (type
, lhs
.lower_bound (), wi::max_value (prec
, sign
));
3863 // (a % b) <= x && x < 0 , then a <= x.
3864 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
3866 r
= value_range (type
, wi::min_value (prec
, sign
), lhs
.upper_bound ());
3873 operator_trunc_mod::op2_range (irange
&r
, tree type
,
3876 relation_trio
) const
3878 if (lhs
.undefined_p ())
3881 signop sign
= TYPE_SIGN (type
);
3882 unsigned prec
= TYPE_PRECISION (type
);
3883 // (a % b) >= x && x > 0 , then b is in ~[-x, x] for signed
3884 // or b > x for unsigned.
3885 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
3888 r
= value_range (type
, wi::neg (lhs
.lower_bound ()),
3889 lhs
.lower_bound (), VR_ANTI_RANGE
);
3890 else if (wi::lt_p (lhs
.lower_bound (), wi::max_value (prec
, sign
),
3892 r
= value_range (type
, lhs
.lower_bound () + 1,
3893 wi::max_value (prec
, sign
));
3898 // (a % b) <= x && x < 0 , then b is in ~[x, -x].
3899 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
3901 if (wi::gt_p (lhs
.upper_bound (), wi::min_value (prec
, sign
), sign
))
3902 r
= value_range (type
, lhs
.upper_bound (),
3903 wi::neg (lhs
.upper_bound ()), VR_ANTI_RANGE
);
3912 class operator_logical_not
: public range_operator
3914 using range_operator::fold_range
;
3915 using range_operator::op1_range
;
3917 virtual bool fold_range (irange
&r
, tree type
,
3920 relation_trio rel
= TRIO_VARYING
) const;
3921 virtual bool op1_range (irange
&r
, tree type
,
3924 relation_trio rel
= TRIO_VARYING
) const;
3927 // Folding a logical NOT, oddly enough, involves doing nothing on the
3928 // forward pass through. During the initial walk backwards, the
3929 // logical NOT reversed the desired outcome on the way back, so on the
3930 // way forward all we do is pass the range forward.
3935 // to determine the TRUE branch, walking backward
3936 // if (b_3) if ([1,1])
3937 // b_3 = !b_2 [1,1] = ![0,0]
3938 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
3939 // which is the result we are looking for.. so.. pass it through.
3942 operator_logical_not::fold_range (irange
&r
, tree type
,
3944 const irange
&rh ATTRIBUTE_UNUSED
,
3945 relation_trio
) const
3947 if (empty_range_varying (r
, type
, lh
, rh
))
3951 if (!lh
.varying_p () && !lh
.undefined_p ())
3958 operator_logical_not::op1_range (irange
&r
,
3962 relation_trio
) const
3964 // Logical NOT is involutary...do it again.
3965 return fold_range (r
, type
, lhs
, op2
);
3969 class operator_bitwise_not
: public range_operator
3971 using range_operator::fold_range
;
3972 using range_operator::op1_range
;
3974 virtual bool fold_range (irange
&r
, tree type
,
3977 relation_trio rel
= TRIO_VARYING
) const;
3978 virtual bool op1_range (irange
&r
, tree type
,
3981 relation_trio rel
= TRIO_VARYING
) const;
3985 operator_bitwise_not::fold_range (irange
&r
, tree type
,
3988 relation_trio
) const
3990 if (empty_range_varying (r
, type
, lh
, rh
))
3993 if (types_compatible_p (type
, boolean_type_node
))
3994 return op_logical_not
.fold_range (r
, type
, lh
, rh
);
3996 // ~X is simply -1 - X.
3997 int_range
<1> minusone (type
, wi::minus_one (TYPE_PRECISION (type
)),
3998 wi::minus_one (TYPE_PRECISION (type
)));
3999 return range_op_handler (MINUS_EXPR
, type
).fold_range (r
, type
, minusone
, lh
);
4003 operator_bitwise_not::op1_range (irange
&r
, tree type
,
4006 relation_trio
) const
4008 if (lhs
.undefined_p ())
4010 if (types_compatible_p (type
, boolean_type_node
))
4011 return op_logical_not
.op1_range (r
, type
, lhs
, op2
);
4013 // ~X is -1 - X and since bitwise NOT is involutary...do it again.
4014 return fold_range (r
, type
, lhs
, op2
);
4018 class operator_cst
: public range_operator
4020 using range_operator::fold_range
;
4022 virtual bool fold_range (irange
&r
, tree type
,
4025 relation_trio rel
= TRIO_VARYING
) const;
4029 operator_cst::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4031 const irange
&rh ATTRIBUTE_UNUSED
,
4032 relation_trio
) const
4039 class operator_identity
: public range_operator
4041 using range_operator::fold_range
;
4042 using range_operator::op1_range
;
4043 using range_operator::lhs_op1_relation
;
4045 virtual bool fold_range (irange
&r
, tree type
,
4048 relation_trio rel
= TRIO_VARYING
) const;
4049 virtual bool op1_range (irange
&r
, tree type
,
4052 relation_trio rel
= TRIO_VARYING
) const;
4053 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
4056 relation_kind rel
) const;
4059 // Determine if there is a relationship between LHS and OP1.
4062 operator_identity::lhs_op1_relation (const irange
&lhs
,
4063 const irange
&op1 ATTRIBUTE_UNUSED
,
4064 const irange
&op2 ATTRIBUTE_UNUSED
,
4065 relation_kind
) const
4067 if (lhs
.undefined_p ())
4068 return VREL_VARYING
;
4069 // Simply a copy, so they are equivalent.
4074 operator_identity::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4076 const irange
&rh ATTRIBUTE_UNUSED
,
4077 relation_trio
) const
4084 operator_identity::op1_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4086 const irange
&op2 ATTRIBUTE_UNUSED
,
4087 relation_trio
) const
4094 class operator_unknown
: public range_operator
4096 using range_operator::fold_range
;
4098 virtual bool fold_range (irange
&r
, tree type
,
4101 relation_trio rel
= TRIO_VARYING
) const;
4105 operator_unknown::fold_range (irange
&r
, tree type
,
4106 const irange
&lh ATTRIBUTE_UNUSED
,
4107 const irange
&rh ATTRIBUTE_UNUSED
,
4108 relation_trio
) const
4110 r
.set_varying (type
);
4115 class operator_abs
: public range_operator
4117 using range_operator::op1_range
;
4119 virtual void wi_fold (irange
&r
, tree type
,
4120 const wide_int
&lh_lb
,
4121 const wide_int
&lh_ub
,
4122 const wide_int
&rh_lb
,
4123 const wide_int
&rh_ub
) const;
4124 virtual bool op1_range (irange
&r
, tree type
,
4127 relation_trio
) const;
4131 operator_abs::wi_fold (irange
&r
, tree type
,
4132 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4133 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4134 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4137 signop sign
= TYPE_SIGN (type
);
4138 unsigned prec
= TYPE_PRECISION (type
);
4140 // Pass through LH for the easy cases.
4141 if (sign
== UNSIGNED
|| wi::ge_p (lh_lb
, 0, sign
))
4143 r
= int_range
<1> (type
, lh_lb
, lh_ub
);
4147 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
4149 wide_int min_value
= wi::min_value (prec
, sign
);
4150 wide_int max_value
= wi::max_value (prec
, sign
);
4151 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lh_lb
, min_value
))
4153 r
.set_varying (type
);
4157 // ABS_EXPR may flip the range around, if the original range
4158 // included negative values.
4159 if (wi::eq_p (lh_lb
, min_value
))
4161 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
4162 // returned [-MIN,-MIN] so this preserves that behavior. PR37078
4163 if (wi::eq_p (lh_ub
, min_value
))
4165 r
= int_range
<1> (type
, min_value
, min_value
);
4171 min
= wi::abs (lh_lb
);
4173 if (wi::eq_p (lh_ub
, min_value
))
4176 max
= wi::abs (lh_ub
);
4178 // If the range contains zero then we know that the minimum value in the
4179 // range will be zero.
4180 if (wi::le_p (lh_lb
, 0, sign
) && wi::ge_p (lh_ub
, 0, sign
))
4182 if (wi::gt_p (min
, max
, sign
))
4184 min
= wi::zero (prec
);
4188 // If the range was reversed, swap MIN and MAX.
4189 if (wi::gt_p (min
, max
, sign
))
4190 std::swap (min
, max
);
4193 // If the new range has its limits swapped around (MIN > MAX), then
4194 // the operation caused one of them to wrap around. The only thing
4195 // we know is that the result is positive.
4196 if (wi::gt_p (min
, max
, sign
))
4198 min
= wi::zero (prec
);
4201 r
= int_range
<1> (type
, min
, max
);
4205 operator_abs::op1_range (irange
&r
, tree type
,
4208 relation_trio
) const
4210 if (empty_range_varying (r
, type
, lhs
, op2
))
4212 if (TYPE_UNSIGNED (type
))
4217 // Start with the positives because negatives are an impossible result.
4218 int_range_max positives
= range_positives (type
);
4219 positives
.intersect (lhs
);
4221 // Then add the negative of each pair:
4222 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4223 for (unsigned i
= 0; i
< positives
.num_pairs (); ++i
)
4224 r
.union_ (int_range
<1> (type
,
4225 -positives
.upper_bound (i
),
4226 -positives
.lower_bound (i
)));
4227 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
4228 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
4229 wide_int min_value
= wi::min_value (TYPE_PRECISION (type
), TYPE_SIGN (type
));
4230 wide_int lb
= lhs
.lower_bound ();
4231 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lb
, min_value
))
4232 r
.union_ (int_range
<2> (type
, lb
, lb
));
4237 class operator_absu
: public range_operator
4240 virtual void wi_fold (irange
&r
, tree type
,
4241 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4242 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4246 operator_absu::wi_fold (irange
&r
, tree type
,
4247 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4248 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4249 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4251 wide_int new_lb
, new_ub
;
4253 // Pass through VR0 the easy cases.
4254 if (wi::ges_p (lh_lb
, 0))
4261 new_lb
= wi::abs (lh_lb
);
4262 new_ub
= wi::abs (lh_ub
);
4264 // If the range contains zero then we know that the minimum
4265 // value in the range will be zero.
4266 if (wi::ges_p (lh_ub
, 0))
4268 if (wi::gtu_p (new_lb
, new_ub
))
4270 new_lb
= wi::zero (TYPE_PRECISION (type
));
4273 std::swap (new_lb
, new_ub
);
4276 gcc_checking_assert (TYPE_UNSIGNED (type
));
4277 r
= int_range
<1> (type
, new_lb
, new_ub
);
4281 class operator_negate
: public range_operator
4283 using range_operator::fold_range
;
4284 using range_operator::op1_range
;
4286 virtual bool fold_range (irange
&r
, tree type
,
4289 relation_trio rel
= TRIO_VARYING
) const;
4290 virtual bool op1_range (irange
&r
, tree type
,
4293 relation_trio rel
= TRIO_VARYING
) const;
4297 operator_negate::fold_range (irange
&r
, tree type
,
4300 relation_trio
) const
4302 if (empty_range_varying (r
, type
, lh
, rh
))
4304 // -X is simply 0 - X.
4305 return range_op_handler (MINUS_EXPR
, type
).fold_range (r
, type
,
4306 range_zero (type
), lh
);
4310 operator_negate::op1_range (irange
&r
, tree type
,
4313 relation_trio
) const
4315 // NEGATE is involutory.
4316 return fold_range (r
, type
, lhs
, op2
);
4320 class operator_addr_expr
: public range_operator
4322 using range_operator::fold_range
;
4323 using range_operator::op1_range
;
4325 virtual bool fold_range (irange
&r
, tree type
,
4328 relation_trio rel
= TRIO_VARYING
) const;
4329 virtual bool op1_range (irange
&r
, tree type
,
4332 relation_trio rel
= TRIO_VARYING
) const;
4336 operator_addr_expr::fold_range (irange
&r
, tree type
,
4339 relation_trio
) const
4341 if (empty_range_varying (r
, type
, lh
, rh
))
4344 // Return a non-null pointer of the LHS type (passed in op2).
4346 r
= range_zero (type
);
4347 else if (!lh
.contains_p (build_zero_cst (lh
.type ())))
4348 r
= range_nonzero (type
);
4350 r
.set_varying (type
);
4355 operator_addr_expr::op1_range (irange
&r
, tree type
,
4358 relation_trio
) const
4360 return operator_addr_expr::fold_range (r
, type
, lhs
, op2
);
4364 class pointer_plus_operator
: public range_operator
4367 virtual void wi_fold (irange
&r
, tree type
,
4368 const wide_int
&lh_lb
,
4369 const wide_int
&lh_ub
,
4370 const wide_int
&rh_lb
,
4371 const wide_int
&rh_ub
) const;
4372 virtual bool op2_range (irange
&r
, tree type
,
4375 relation_trio
= TRIO_VARYING
) const;
4379 pointer_plus_operator::wi_fold (irange
&r
, tree type
,
4380 const wide_int
&lh_lb
,
4381 const wide_int
&lh_ub
,
4382 const wide_int
&rh_lb
,
4383 const wide_int
&rh_ub
) const
4385 // Check for [0,0] + const, and simply return the const.
4386 if (lh_lb
== 0 && lh_ub
== 0 && rh_lb
== rh_ub
)
4388 tree val
= wide_int_to_tree (type
, rh_lb
);
4393 // For pointer types, we are really only interested in asserting
4394 // whether the expression evaluates to non-NULL.
4396 // With -fno-delete-null-pointer-checks we need to be more
4397 // conservative. As some object might reside at address 0,
4398 // then some offset could be added to it and the same offset
4399 // subtracted again and the result would be NULL.
4401 // static int a[12]; where &a[0] is NULL and
4404 // ptr will be NULL here, even when there is POINTER_PLUS_EXPR
4405 // where the first range doesn't include zero and the second one
4406 // doesn't either. As the second operand is sizetype (unsigned),
4407 // consider all ranges where the MSB could be set as possible
4408 // subtractions where the result might be NULL.
4409 if ((!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
4410 || !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
4411 && !TYPE_OVERFLOW_WRAPS (type
)
4412 && (flag_delete_null_pointer_checks
4413 || !wi::sign_mask (rh_ub
)))
4414 r
= range_nonzero (type
);
4415 else if (lh_lb
== lh_ub
&& lh_lb
== 0
4416 && rh_lb
== rh_ub
&& rh_lb
== 0)
4417 r
= range_zero (type
);
4419 r
.set_varying (type
);
4423 pointer_plus_operator::op2_range (irange
&r
, tree type
,
4424 const irange
&lhs ATTRIBUTE_UNUSED
,
4425 const irange
&op1 ATTRIBUTE_UNUSED
,
4426 relation_trio trio
) const
4428 relation_kind rel
= trio
.lhs_op1 ();
4429 r
.set_varying (type
);
4431 // If the LHS and OP1 are equal, the op2 must be zero.
4434 // If the LHS and OP1 are not equal, the offset must be non-zero.
4435 else if (rel
== VREL_NE
)
4436 r
.set_nonzero (type
);
4442 class pointer_min_max_operator
: public range_operator
4445 virtual void wi_fold (irange
& r
, tree type
,
4446 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4447 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4451 pointer_min_max_operator::wi_fold (irange
&r
, tree type
,
4452 const wide_int
&lh_lb
,
4453 const wide_int
&lh_ub
,
4454 const wide_int
&rh_lb
,
4455 const wide_int
&rh_ub
) const
4457 // For MIN/MAX expressions with pointers, we only care about
4458 // nullness. If both are non null, then the result is nonnull.
4459 // If both are null, then the result is null. Otherwise they
4461 if (!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
4462 && !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
4463 r
= range_nonzero (type
);
4464 else if (wi_zero_p (type
, lh_lb
, lh_ub
) && wi_zero_p (type
, rh_lb
, rh_ub
))
4465 r
= range_zero (type
);
4467 r
.set_varying (type
);
4471 class pointer_and_operator
: public range_operator
4474 virtual void wi_fold (irange
&r
, tree type
,
4475 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4476 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4480 pointer_and_operator::wi_fold (irange
&r
, tree type
,
4481 const wide_int
&lh_lb
,
4482 const wide_int
&lh_ub
,
4483 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4484 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4486 // For pointer types, we are really only interested in asserting
4487 // whether the expression evaluates to non-NULL.
4488 if (wi_zero_p (type
, lh_lb
, lh_ub
) || wi_zero_p (type
, lh_lb
, lh_ub
))
4489 r
= range_zero (type
);
4491 r
.set_varying (type
);
4495 class pointer_or_operator
: public range_operator
4497 using range_operator::op1_range
;
4498 using range_operator::op2_range
;
4500 virtual bool op1_range (irange
&r
, tree type
,
4503 relation_trio rel
= TRIO_VARYING
) const;
4504 virtual bool op2_range (irange
&r
, tree type
,
4507 relation_trio rel
= TRIO_VARYING
) const;
4508 virtual void wi_fold (irange
&r
, tree type
,
4509 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4510 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4514 pointer_or_operator::op1_range (irange
&r
, tree type
,
4516 const irange
&op2 ATTRIBUTE_UNUSED
,
4517 relation_trio
) const
4519 if (lhs
.undefined_p ())
4523 tree zero
= build_zero_cst (type
);
4524 r
= int_range
<1> (zero
, zero
);
4527 r
.set_varying (type
);
4532 pointer_or_operator::op2_range (irange
&r
, tree type
,
4535 relation_trio
) const
4537 return pointer_or_operator::op1_range (r
, type
, lhs
, op1
);
4541 pointer_or_operator::wi_fold (irange
&r
, tree type
,
4542 const wide_int
&lh_lb
,
4543 const wide_int
&lh_ub
,
4544 const wide_int
&rh_lb
,
4545 const wide_int
&rh_ub
) const
4547 // For pointer types, we are really only interested in asserting
4548 // whether the expression evaluates to non-NULL.
4549 if (!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
4550 && !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
4551 r
= range_nonzero (type
);
4552 else if (wi_zero_p (type
, lh_lb
, lh_ub
) && wi_zero_p (type
, rh_lb
, rh_ub
))
4553 r
= range_zero (type
);
4555 r
.set_varying (type
);
4558 // Return a pointer to the range_operator instance, if there is one
4559 // associated with tree_code CODE.
4562 range_op_table::operator[] (enum tree_code code
)
4564 gcc_checking_assert (code
> 0 && code
< MAX_TREE_CODES
);
4565 return m_range_tree
[code
];
4568 // Add OP to the handler table for CODE.
4571 range_op_table::set (enum tree_code code
, range_operator
&op
)
4573 gcc_checking_assert (m_range_tree
[code
] == NULL
);
4574 m_range_tree
[code
] = &op
;
4575 gcc_checking_assert (op
.m_code
== ERROR_MARK
|| op
.m_code
== code
);
4579 // Shared operators that require separate instantiations because they
4580 // do not share a common tree code.
4581 static operator_cast op_nop
, op_convert
;
4582 static operator_identity op_ssa
, op_paren
, op_obj_type
;
4583 static operator_unknown op_realpart
, op_imagpart
;
4584 static pointer_min_max_operator op_ptr_min
, op_ptr_max
;
4585 static operator_div op_trunc_div
;
4586 static operator_div op_floor_div
;
4587 static operator_div op_round_div
;
4588 static operator_div op_ceil_div
;
4590 // Instantiate a range op table for integral operations.
4592 class integral_table
: public range_op_table
4596 } integral_tree_table
;
4598 integral_table::integral_table ()
4600 set (EQ_EXPR
, op_equal
);
4601 set (NE_EXPR
, op_not_equal
);
4602 set (LT_EXPR
, op_lt
);
4603 set (LE_EXPR
, op_le
);
4604 set (GT_EXPR
, op_gt
);
4605 set (GE_EXPR
, op_ge
);
4606 set (PLUS_EXPR
, op_plus
);
4607 set (MINUS_EXPR
, op_minus
);
4608 set (MIN_EXPR
, op_min
);
4609 set (MAX_EXPR
, op_max
);
4610 set (MULT_EXPR
, op_mult
);
4611 set (TRUNC_DIV_EXPR
, op_trunc_div
);
4612 set (FLOOR_DIV_EXPR
, op_floor_div
);
4613 set (ROUND_DIV_EXPR
, op_round_div
);
4614 set (CEIL_DIV_EXPR
, op_ceil_div
);
4615 set (EXACT_DIV_EXPR
, op_exact_div
);
4616 set (LSHIFT_EXPR
, op_lshift
);
4617 set (RSHIFT_EXPR
, op_rshift
);
4618 set (NOP_EXPR
, op_nop
);
4619 set (CONVERT_EXPR
, op_convert
);
4620 set (TRUTH_AND_EXPR
, op_logical_and
);
4621 set (BIT_AND_EXPR
, op_bitwise_and
);
4622 set (TRUTH_OR_EXPR
, op_logical_or
);
4623 set (BIT_IOR_EXPR
, op_bitwise_or
);
4624 set (BIT_XOR_EXPR
, op_bitwise_xor
);
4625 set (TRUNC_MOD_EXPR
, op_trunc_mod
);
4626 set (TRUTH_NOT_EXPR
, op_logical_not
);
4627 set (BIT_NOT_EXPR
, op_bitwise_not
);
4628 set (INTEGER_CST
, op_integer_cst
);
4629 set (SSA_NAME
, op_ssa
);
4630 set (PAREN_EXPR
, op_paren
);
4631 set (OBJ_TYPE_REF
, op_obj_type
);
4632 set (IMAGPART_EXPR
, op_imagpart
);
4633 set (REALPART_EXPR
, op_realpart
);
4634 set (POINTER_DIFF_EXPR
, op_pointer_diff
);
4635 set (ABS_EXPR
, op_abs
);
4636 set (ABSU_EXPR
, op_absu
);
4637 set (NEGATE_EXPR
, op_negate
);
4638 set (ADDR_EXPR
, op_addr
);
4641 // Instantiate a range op table for pointer operations.
4643 class pointer_table
: public range_op_table
4647 } pointer_tree_table
;
4649 pointer_table::pointer_table ()
4651 set (BIT_AND_EXPR
, op_pointer_and
);
4652 set (BIT_IOR_EXPR
, op_pointer_or
);
4653 set (MIN_EXPR
, op_ptr_min
);
4654 set (MAX_EXPR
, op_ptr_max
);
4655 set (POINTER_PLUS_EXPR
, op_pointer_plus
);
4657 set (EQ_EXPR
, op_equal
);
4658 set (NE_EXPR
, op_not_equal
);
4659 set (LT_EXPR
, op_lt
);
4660 set (LE_EXPR
, op_le
);
4661 set (GT_EXPR
, op_gt
);
4662 set (GE_EXPR
, op_ge
);
4663 set (SSA_NAME
, op_ssa
);
4664 set (INTEGER_CST
, op_integer_cst
);
4665 set (ADDR_EXPR
, op_addr
);
4666 set (NOP_EXPR
, op_nop
);
4667 set (CONVERT_EXPR
, op_convert
);
4669 set (BIT_NOT_EXPR
, op_bitwise_not
);
4670 set (BIT_XOR_EXPR
, op_bitwise_xor
);
4673 // The tables are hidden and accessed via a simple extern function.
4675 static inline range_operator
*
4676 get_handler (enum tree_code code
, tree type
)
4678 // First check if there is a pointer specialization.
4679 if (POINTER_TYPE_P (type
))
4680 return pointer_tree_table
[code
];
4681 if (INTEGRAL_TYPE_P (type
))
4682 return integral_tree_table
[code
];
4686 // Return the floating point operator for CODE or NULL if none available.
4688 static inline range_operator_float
*
4689 get_float_handler (enum tree_code code
, tree
)
4691 return (*floating_tree_table
)[code
];
4695 range_op_handler::set_op_handler (tree_code code
, tree type
)
4697 if (irange::supports_p (type
))
4700 m_int
= get_handler (code
, type
);
4701 m_valid
= m_int
!= NULL
;
4703 else if (frange::supports_p (type
))
4706 m_float
= get_float_handler (code
, type
);
4707 m_valid
= m_float
!= NULL
;
4717 range_op_handler::range_op_handler ()
4724 range_op_handler::range_op_handler (tree_code code
, tree type
)
4726 set_op_handler (code
, type
);
4731 range_op_handler::fold_range (vrange
&r
, tree type
,
4734 relation_trio rel
) const
4736 gcc_checking_assert (m_valid
);
4738 return m_int
->fold_range (as_a
<irange
> (r
), type
,
4740 as_a
<irange
> (rh
), rel
);
4742 if (is_a
<irange
> (r
))
4744 if (is_a
<irange
> (rh
))
4745 return m_float
->fold_range (as_a
<irange
> (r
), type
,
4747 as_a
<irange
> (rh
), rel
);
4749 return m_float
->fold_range (as_a
<irange
> (r
), type
,
4751 as_a
<frange
> (rh
), rel
);
4753 return m_float
->fold_range (as_a
<frange
> (r
), type
,
4755 as_a
<frange
> (rh
), rel
);
4759 range_op_handler::op1_range (vrange
&r
, tree type
,
4762 relation_trio rel
) const
4764 gcc_checking_assert (m_valid
);
4766 if (lhs
.undefined_p ())
4769 return m_int
->op1_range (as_a
<irange
> (r
), type
,
4770 as_a
<irange
> (lhs
),
4771 as_a
<irange
> (op2
), rel
);
4773 if (is_a
<irange
> (lhs
))
4774 return m_float
->op1_range (as_a
<frange
> (r
), type
,
4775 as_a
<irange
> (lhs
),
4776 as_a
<frange
> (op2
), rel
);
4777 return m_float
->op1_range (as_a
<frange
> (r
), type
,
4778 as_a
<frange
> (lhs
),
4779 as_a
<frange
> (op2
), rel
);
4783 range_op_handler::op2_range (vrange
&r
, tree type
,
4786 relation_trio rel
) const
4788 gcc_checking_assert (m_valid
);
4789 if (lhs
.undefined_p ())
4792 return m_int
->op2_range (as_a
<irange
> (r
), type
,
4793 as_a
<irange
> (lhs
),
4794 as_a
<irange
> (op1
), rel
);
4796 if (is_a
<irange
> (lhs
))
4797 return m_float
->op2_range (as_a
<frange
> (r
), type
,
4798 as_a
<irange
> (lhs
),
4799 as_a
<frange
> (op1
), rel
);
4800 return m_float
->op2_range (as_a
<frange
> (r
), type
,
4801 as_a
<frange
> (lhs
),
4802 as_a
<frange
> (op1
), rel
);
4806 range_op_handler::lhs_op1_relation (const vrange
&lhs
,
4809 relation_kind rel
) const
4811 gcc_checking_assert (m_valid
);
4813 return m_int
->lhs_op1_relation (as_a
<irange
> (lhs
),
4814 as_a
<irange
> (op1
),
4815 as_a
<irange
> (op2
), rel
);
4817 if (is_a
<irange
> (lhs
))
4818 return m_float
->lhs_op1_relation (as_a
<irange
> (lhs
),
4819 as_a
<frange
> (op1
),
4820 as_a
<frange
> (op2
), rel
);
4821 return m_float
->lhs_op1_relation (as_a
<frange
> (lhs
),
4822 as_a
<frange
> (op1
),
4823 as_a
<frange
> (op2
), rel
);
4827 range_op_handler::lhs_op2_relation (const vrange
&lhs
,
4830 relation_kind rel
) const
4832 gcc_checking_assert (m_valid
);
4834 return m_int
->lhs_op2_relation (as_a
<irange
> (lhs
),
4835 as_a
<irange
> (op1
),
4836 as_a
<irange
> (op2
), rel
);
4838 if (is_a
<irange
> (lhs
))
4839 return m_float
->lhs_op2_relation (as_a
<irange
> (lhs
),
4840 as_a
<frange
> (op1
),
4841 as_a
<frange
> (op2
), rel
);
4842 return m_float
->lhs_op2_relation (as_a
<frange
> (lhs
),
4843 as_a
<frange
> (op1
),
4844 as_a
<frange
> (op2
), rel
);
4848 range_op_handler::op1_op2_relation (const vrange
&lhs
) const
4850 gcc_checking_assert (m_valid
);
4852 return m_int
->op1_op2_relation (as_a
<irange
> (lhs
));
4853 if (is_a
<irange
> (lhs
))
4854 return m_float
->op1_op2_relation (as_a
<irange
> (lhs
));
4855 return m_float
->op1_op2_relation (as_a
<frange
> (lhs
));
4858 // Cast the range in R to TYPE.
4861 range_cast (vrange
&r
, tree type
)
4863 Value_Range
tmp (r
);
4864 Value_Range
varying (type
);
4865 varying
.set_varying (type
);
4866 range_op_handler
op (CONVERT_EXPR
, type
);
4867 // Call op_convert, if it fails, the result is varying.
4868 if (!op
|| !op
.fold_range (r
, type
, tmp
, varying
))
4870 r
.set_varying (type
);
4877 #include "selftest.h"
4881 #define INT(N) build_int_cst (integer_type_node, (N))
4882 #define UINT(N) build_int_cstu (unsigned_type_node, (N))
4883 #define INT16(N) build_int_cst (short_integer_type_node, (N))
4884 #define UINT16(N) build_int_cstu (short_unsigned_type_node, (N))
4885 #define SCHAR(N) build_int_cst (signed_char_type_node, (N))
4886 #define UCHAR(N) build_int_cstu (unsigned_char_type_node, (N))
4889 range_op_cast_tests ()
4891 int_range
<1> r0
, r1
, r2
, rold
;
4892 r0
.set_varying (integer_type_node
);
4893 tree maxint
= wide_int_to_tree (integer_type_node
, r0
.upper_bound ());
4895 // If a range is in any way outside of the range for the converted
4896 // to range, default to the range for the new type.
4897 r0
.set_varying (short_integer_type_node
);
4898 tree minshort
= wide_int_to_tree (short_integer_type_node
, r0
.lower_bound ());
4899 tree maxshort
= wide_int_to_tree (short_integer_type_node
, r0
.upper_bound ());
4900 if (TYPE_PRECISION (TREE_TYPE (maxint
))
4901 > TYPE_PRECISION (short_integer_type_node
))
4903 r1
= int_range
<1> (integer_zero_node
, maxint
);
4904 range_cast (r1
, short_integer_type_node
);
4905 ASSERT_TRUE (r1
.lower_bound () == wi::to_wide (minshort
)
4906 && r1
.upper_bound() == wi::to_wide (maxshort
));
4909 // (unsigned char)[-5,-1] => [251,255].
4910 r0
= rold
= int_range
<1> (SCHAR (-5), SCHAR (-1));
4911 range_cast (r0
, unsigned_char_type_node
);
4912 ASSERT_TRUE (r0
== int_range
<1> (UCHAR (251), UCHAR (255)));
4913 range_cast (r0
, signed_char_type_node
);
4914 ASSERT_TRUE (r0
== rold
);
4916 // (signed char)[15, 150] => [-128,-106][15,127].
4917 r0
= rold
= int_range
<1> (UCHAR (15), UCHAR (150));
4918 range_cast (r0
, signed_char_type_node
);
4919 r1
= int_range
<1> (SCHAR (15), SCHAR (127));
4920 r2
= int_range
<1> (SCHAR (-128), SCHAR (-106));
4922 ASSERT_TRUE (r1
== r0
);
4923 range_cast (r0
, unsigned_char_type_node
);
4924 ASSERT_TRUE (r0
== rold
);
4926 // (unsigned char)[-5, 5] => [0,5][251,255].
4927 r0
= rold
= int_range
<1> (SCHAR (-5), SCHAR (5));
4928 range_cast (r0
, unsigned_char_type_node
);
4929 r1
= int_range
<1> (UCHAR (251), UCHAR (255));
4930 r2
= int_range
<1> (UCHAR (0), UCHAR (5));
4932 ASSERT_TRUE (r0
== r1
);
4933 range_cast (r0
, signed_char_type_node
);
4934 ASSERT_TRUE (r0
== rold
);
4936 // (unsigned char)[-5,5] => [0,5][251,255].
4937 r0
= int_range
<1> (INT (-5), INT (5));
4938 range_cast (r0
, unsigned_char_type_node
);
4939 r1
= int_range
<1> (UCHAR (0), UCHAR (5));
4940 r1
.union_ (int_range
<1> (UCHAR (251), UCHAR (255)));
4941 ASSERT_TRUE (r0
== r1
);
4943 // (unsigned char)[5U,1974U] => [0,255].
4944 r0
= int_range
<1> (UINT (5), UINT (1974));
4945 range_cast (r0
, unsigned_char_type_node
);
4946 ASSERT_TRUE (r0
== int_range
<1> (UCHAR (0), UCHAR (255)));
4947 range_cast (r0
, integer_type_node
);
4948 // Going to a wider range should not sign extend.
4949 ASSERT_TRUE (r0
== int_range
<1> (INT (0), INT (255)));
4951 // (unsigned char)[-350,15] => [0,255].
4952 r0
= int_range
<1> (INT (-350), INT (15));
4953 range_cast (r0
, unsigned_char_type_node
);
4954 ASSERT_TRUE (r0
== (int_range
<1>
4955 (TYPE_MIN_VALUE (unsigned_char_type_node
),
4956 TYPE_MAX_VALUE (unsigned_char_type_node
))));
4958 // Casting [-120,20] from signed char to unsigned short.
4959 // => [0, 20][0xff88, 0xffff].
4960 r0
= int_range
<1> (SCHAR (-120), SCHAR (20));
4961 range_cast (r0
, short_unsigned_type_node
);
4962 r1
= int_range
<1> (UINT16 (0), UINT16 (20));
4963 r2
= int_range
<1> (UINT16 (0xff88), UINT16 (0xffff));
4965 ASSERT_TRUE (r0
== r1
);
4966 // A truncating cast back to signed char will work because [-120, 20]
4967 // is representable in signed char.
4968 range_cast (r0
, signed_char_type_node
);
4969 ASSERT_TRUE (r0
== int_range
<1> (SCHAR (-120), SCHAR (20)));
4971 // unsigned char -> signed short
4972 // (signed short)[(unsigned char)25, (unsigned char)250]
4973 // => [(signed short)25, (signed short)250]
4974 r0
= rold
= int_range
<1> (UCHAR (25), UCHAR (250));
4975 range_cast (r0
, short_integer_type_node
);
4976 r1
= int_range
<1> (INT16 (25), INT16 (250));
4977 ASSERT_TRUE (r0
== r1
);
4978 range_cast (r0
, unsigned_char_type_node
);
4979 ASSERT_TRUE (r0
== rold
);
4981 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
4982 r0
= int_range
<1> (TYPE_MIN_VALUE (long_long_integer_type_node
),
4983 TYPE_MAX_VALUE (long_long_integer_type_node
));
4984 range_cast (r0
, short_unsigned_type_node
);
4985 r1
= int_range
<1> (TYPE_MIN_VALUE (short_unsigned_type_node
),
4986 TYPE_MAX_VALUE (short_unsigned_type_node
));
4987 ASSERT_TRUE (r0
== r1
);
4989 // Casting NONZERO to a narrower type will wrap/overflow so
4990 // it's just the entire range for the narrower type.
4992 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
4993 // is outside of the range of a smaller range, return the full
4995 if (TYPE_PRECISION (integer_type_node
)
4996 > TYPE_PRECISION (short_integer_type_node
))
4998 r0
= range_nonzero (integer_type_node
);
4999 range_cast (r0
, short_integer_type_node
);
5000 r1
= int_range
<1> (TYPE_MIN_VALUE (short_integer_type_node
),
5001 TYPE_MAX_VALUE (short_integer_type_node
));
5002 ASSERT_TRUE (r0
== r1
);
5005 // Casting NONZERO from a narrower signed to a wider signed.
5007 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
5008 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
5009 r0
= range_nonzero (short_integer_type_node
);
5010 range_cast (r0
, integer_type_node
);
5011 r1
= int_range
<1> (INT (-32768), INT (-1));
5012 r2
= int_range
<1> (INT (1), INT (32767));
5014 ASSERT_TRUE (r0
== r1
);
5018 range_op_lshift_tests ()
5020 // Test that 0x808.... & 0x8.... still contains 0x8....
5021 // for a large set of numbers.
5024 tree big_type
= long_long_unsigned_type_node
;
5025 // big_num = 0x808,0000,0000,0000
5026 tree big_num
= fold_build2 (LSHIFT_EXPR
, big_type
,
5027 build_int_cst (big_type
, 0x808),
5028 build_int_cst (big_type
, 48));
5029 op_bitwise_and
.fold_range (res
, big_type
,
5030 int_range
<1> (big_type
),
5031 int_range
<1> (big_num
, big_num
));
5032 // val = 0x8,0000,0000,0000
5033 tree val
= fold_build2 (LSHIFT_EXPR
, big_type
,
5034 build_int_cst (big_type
, 0x8),
5035 build_int_cst (big_type
, 48));
5036 ASSERT_TRUE (res
.contains_p (val
));
5039 if (TYPE_PRECISION (unsigned_type_node
) > 31)
5041 // unsigned VARYING = op1 << 1 should be VARYING.
5042 int_range
<2> lhs (unsigned_type_node
);
5043 int_range
<2> shift (INT (1), INT (1));
5045 op_lshift
.op1_range (op1
, unsigned_type_node
, lhs
, shift
);
5046 ASSERT_TRUE (op1
.varying_p ());
5048 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
5049 int_range
<2> zero (UINT (0), UINT (0));
5050 op_lshift
.op1_range (op1
, unsigned_type_node
, zero
, shift
);
5051 ASSERT_TRUE (op1
.num_pairs () == 2);
5052 // Remove the [0,0] range.
5053 op1
.intersect (zero
);
5054 ASSERT_TRUE (op1
.num_pairs () == 1);
5055 // op1 << 1 should be [0x8000,0x8000] << 1,
5056 // which should result in [0,0].
5057 int_range_max result
;
5058 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
5059 ASSERT_TRUE (result
== zero
);
5061 // signed VARYING = op1 << 1 should be VARYING.
5062 if (TYPE_PRECISION (integer_type_node
) > 31)
5064 // unsigned VARYING = op1 << 1 should be VARYING.
5065 int_range
<2> lhs (integer_type_node
);
5066 int_range
<2> shift (INT (1), INT (1));
5068 op_lshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
5069 ASSERT_TRUE (op1
.varying_p ());
5071 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
5072 int_range
<2> zero (INT (0), INT (0));
5073 op_lshift
.op1_range (op1
, integer_type_node
, zero
, shift
);
5074 ASSERT_TRUE (op1
.num_pairs () == 2);
5075 // Remove the [0,0] range.
5076 op1
.intersect (zero
);
5077 ASSERT_TRUE (op1
.num_pairs () == 1);
5078 // op1 << 1 should be [0x8000,0x8000] << 1,
5079 // which should result in [0,0].
5080 int_range_max result
;
5081 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
5082 ASSERT_TRUE (result
== zero
);
5087 range_op_rshift_tests ()
5089 // unsigned: [3, MAX] = OP1 >> 1
5091 int_range_max
lhs (build_int_cst (unsigned_type_node
, 3),
5092 TYPE_MAX_VALUE (unsigned_type_node
));
5093 int_range_max
one (build_one_cst (unsigned_type_node
),
5094 build_one_cst (unsigned_type_node
));
5096 op_rshift
.op1_range (op1
, unsigned_type_node
, lhs
, one
);
5097 ASSERT_FALSE (op1
.contains_p (UINT (3)));
5100 // signed: [3, MAX] = OP1 >> 1
5102 int_range_max
lhs (INT (3), TYPE_MAX_VALUE (integer_type_node
));
5103 int_range_max
one (INT (1), INT (1));
5105 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
5106 ASSERT_FALSE (op1
.contains_p (INT (-2)));
5109 // This is impossible, so OP1 should be [].
5110 // signed: [MIN, MIN] = OP1 >> 1
5112 int_range_max
lhs (TYPE_MIN_VALUE (integer_type_node
),
5113 TYPE_MIN_VALUE (integer_type_node
));
5114 int_range_max
one (INT (1), INT (1));
5116 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
5117 ASSERT_TRUE (op1
.undefined_p ());
5120 // signed: ~[-1] = OP1 >> 31
5121 if (TYPE_PRECISION (integer_type_node
) > 31)
5123 int_range_max
lhs (INT (-1), INT (-1), VR_ANTI_RANGE
);
5124 int_range_max
shift (INT (31), INT (31));
5126 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
5127 int_range_max negatives
= range_negatives (integer_type_node
);
5128 negatives
.intersect (op1
);
5129 ASSERT_TRUE (negatives
.undefined_p ());
5134 range_op_bitwise_and_tests ()
5137 tree min
= vrp_val_min (integer_type_node
);
5138 tree max
= vrp_val_max (integer_type_node
);
5139 tree tiny
= fold_build2 (PLUS_EXPR
, integer_type_node
, min
,
5140 build_one_cst (integer_type_node
));
5141 int_range_max
i1 (tiny
, max
);
5142 int_range_max
i2 (build_int_cst (integer_type_node
, 255),
5143 build_int_cst (integer_type_node
, 255));
5145 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
5146 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
5147 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
5149 // VARYING = OP1 & 255: OP1 is VARYING
5150 i1
= int_range
<1> (integer_type_node
);
5151 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
5152 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
5154 // For 0 = x & MASK, x is ~MASK.
5156 int_range
<2> zero (integer_zero_node
, integer_zero_node
);
5157 int_range
<2> mask
= int_range
<2> (INT (7), INT (7));
5158 op_bitwise_and
.op1_range (res
, integer_type_node
, zero
, mask
);
5159 wide_int inv
= wi::shwi (~7U, TYPE_PRECISION (integer_type_node
));
5160 ASSERT_TRUE (res
.get_nonzero_bits () == inv
);
5163 // (NONZERO | X) is nonzero.
5164 i1
.set_nonzero (integer_type_node
);
5165 i2
.set_varying (integer_type_node
);
5166 op_bitwise_or
.fold_range (res
, integer_type_node
, i1
, i2
);
5167 ASSERT_TRUE (res
.nonzero_p ());
5169 // (NEGATIVE | X) is nonzero.
5170 i1
= int_range
<1> (INT (-5), INT (-3));
5171 i2
.set_varying (integer_type_node
);
5172 op_bitwise_or
.fold_range (res
, integer_type_node
, i1
, i2
);
5173 ASSERT_FALSE (res
.contains_p (INT (0)));
5177 range_relational_tests ()
5179 int_range
<2> lhs (unsigned_char_type_node
);
5180 int_range
<2> op1 (UCHAR (8), UCHAR (10));
5181 int_range
<2> op2 (UCHAR (20), UCHAR (20));
5183 // Never wrapping additions mean LHS > OP1.
5184 relation_kind code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
5185 ASSERT_TRUE (code
== VREL_GT
);
5187 // Most wrapping additions mean nothing...
5188 op1
= int_range
<2> (UCHAR (8), UCHAR (10));
5189 op2
= int_range
<2> (UCHAR (0), UCHAR (255));
5190 code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
5191 ASSERT_TRUE (code
== VREL_VARYING
);
5193 // However, always wrapping additions mean LHS < OP1.
5194 op1
= int_range
<2> (UCHAR (1), UCHAR (255));
5195 op2
= int_range
<2> (UCHAR (255), UCHAR (255));
5196 code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
5197 ASSERT_TRUE (code
== VREL_LT
);
5203 range_op_rshift_tests ();
5204 range_op_lshift_tests ();
5205 range_op_bitwise_and_tests ();
5206 range_op_cast_tests ();
5207 range_relational_tests ();
5209 extern void range_op_float_tests ();
5210 range_op_float_tests ();
5213 } // namespace selftest
5215 #endif // CHECKING_P