1 /* Code for range operators.
2 Copyright (C) 2017-2023 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
26 #include "insn-codes.h"
31 #include "tree-pass.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
44 #include "gimple-walk.h"
47 #include "value-relation.h"
49 #include "tree-ssa-ccp.h"
50 #include "range-op-mixed.h"
52 // Instantiate the operators which apply to multiple types here.
// One shared instance of each operator that applies to multiple range
// types.  The range_op_table constructor below installs these into the
// unified dispatch table.
operator_equal op_equal;
operator_not_equal op_not_equal;
operator_identity op_ident;
operator_cast op_cast;
operator_plus op_plus;
operator_minus op_minus;
operator_negate op_negate;
operator_mult op_mult;
operator_addr_expr op_addr;
operator_bitwise_not op_bitwise_not;
operator_bitwise_xor op_bitwise_xor;
// NOTE(review): op_bitwise_and and op_bitwise_or are instantiated here but
// their table entries are commented out below in favor of hybrid pointer
// versions — confirm against range-op-ptr.cc.
operator_bitwise_and op_bitwise_and;
operator_bitwise_or op_bitwise_or;
// Instantiate a range operator table.
range_op_table operator_table;
79 // Invoke the initialization routines for each class of range.
81 range_op_table::range_op_table ()
83 initialize_integral_ops ();
84 initialize_pointer_ops ();
85 initialize_float_ops ();
87 set (EQ_EXPR
, op_equal
);
88 set (NE_EXPR
, op_not_equal
);
93 set (SSA_NAME
, op_ident
);
94 set (PAREN_EXPR
, op_ident
);
95 set (OBJ_TYPE_REF
, op_ident
);
96 set (REAL_CST
, op_cst
);
97 set (INTEGER_CST
, op_cst
);
98 set (NOP_EXPR
, op_cast
);
99 set (CONVERT_EXPR
, op_cast
);
100 set (PLUS_EXPR
, op_plus
);
101 set (ABS_EXPR
, op_abs
);
102 set (MINUS_EXPR
, op_minus
);
103 set (NEGATE_EXPR
, op_negate
);
104 set (MULT_EXPR
, op_mult
);
106 // Occur in both integer and pointer tables, but currently share
107 // integral implementation.
108 set (ADDR_EXPR
, op_addr
);
109 set (BIT_NOT_EXPR
, op_bitwise_not
);
110 set (BIT_XOR_EXPR
, op_bitwise_xor
);
112 // These are in both integer and pointer tables, but pointer has a different
114 // If commented out, there is a hybrid version in range-op-ptr.cc which
115 // is used until there is a pointer range class. Then we can simply
116 // uncomment the operator here and use the unified version.
118 // set (BIT_AND_EXPR, op_bitwise_and);
119 // set (BIT_IOR_EXPR, op_bitwise_or);
120 // set (MIN_EXPR, op_min);
121 // set (MAX_EXPR, op_max);
// Instantiate a default range operator for opcodes with no entry.  The
// handler constructors below substitute this object so m_operator is
// never NULL.
range_operator default_operator;
128 // Create a default range_op_handler.
130 range_op_handler::range_op_handler ()
132 m_operator
= &default_operator
;
135 // Create a range_op_handler for CODE. Use a default operatoer if CODE
136 // does not have an entry.
138 range_op_handler::range_op_handler (unsigned code
)
140 m_operator
= operator_table
[code
];
142 m_operator
= &default_operator
;
145 // Return TRUE if this handler has a non-default operator.
147 range_op_handler::operator bool () const
149 return m_operator
!= &default_operator
;
152 // Return a pointer to the range operator assocaited with this handler.
153 // If it is a default operator, return NULL.
154 // This is the equivalent of indexing the range table.
157 range_op_handler::range_op () const
159 if (m_operator
!= &default_operator
)
// Create a dispatch pattern for value range discriminators LHS, OP1, and OP2.
// This is used to produce a unique value for each dispatch pattern.  Shift
// values are based on the size of the m_discriminator field in value_range.h.

static constexpr unsigned
dispatch_trio (unsigned lhs, unsigned op1, unsigned op2)
{
  // Pack the three discriminators into disjoint bit fields so each
  // (lhs, op1, op2) combination maps to a distinct value.
  return ((lhs << 8) + (op1 << 4) + (op2));
}
// These are the supported dispatch patterns.  These map to the parameter list
// of the routines in range_operator.  Note the last 3 characters are
// shorthand for the LHS, OP1, and OP2 range discriminator class
// (I = irange, F = frange).

const unsigned RO_III = dispatch_trio (VR_IRANGE, VR_IRANGE, VR_IRANGE);
const unsigned RO_IFI = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_IRANGE);
const unsigned RO_IFF = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_FRANGE);
const unsigned RO_FFF = dispatch_trio (VR_FRANGE, VR_FRANGE, VR_FRANGE);
const unsigned RO_FIF = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_FRANGE);
const unsigned RO_FII = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_IRANGE);
185 // Return a dispatch value for parameter types LHS, OP1 and OP2.
188 range_op_handler::dispatch_kind (const vrange
&lhs
, const vrange
&op1
,
189 const vrange
& op2
) const
191 return dispatch_trio (lhs
.m_discriminator
, op1
.m_discriminator
,
192 op2
.m_discriminator
);
195 // Dispatch a call to fold_range based on the types of R, LH and RH.
198 range_op_handler::fold_range (vrange
&r
, tree type
,
201 relation_trio rel
) const
203 gcc_checking_assert (m_operator
);
204 switch (dispatch_kind (r
, lh
, rh
))
207 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
209 as_a
<irange
> (rh
), rel
);
211 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
213 as_a
<irange
> (rh
), rel
);
215 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
217 as_a
<frange
> (rh
), rel
);
219 return m_operator
->fold_range (as_a
<frange
> (r
), type
,
221 as_a
<frange
> (rh
), rel
);
223 return m_operator
->fold_range (as_a
<frange
> (r
), type
,
225 as_a
<irange
> (rh
), rel
);
231 // Dispatch a call to op1_range based on the types of R, LHS and OP2.
234 range_op_handler::op1_range (vrange
&r
, tree type
,
237 relation_trio rel
) const
239 gcc_checking_assert (m_operator
);
241 if (lhs
.undefined_p ())
243 switch (dispatch_kind (r
, lhs
, op2
))
246 return m_operator
->op1_range (as_a
<irange
> (r
), type
,
248 as_a
<irange
> (op2
), rel
);
250 return m_operator
->op1_range (as_a
<frange
> (r
), type
,
252 as_a
<frange
> (op2
), rel
);
254 return m_operator
->op1_range (as_a
<frange
> (r
), type
,
256 as_a
<frange
> (op2
), rel
);
262 // Dispatch a call to op2_range based on the types of R, LHS and OP1.
265 range_op_handler::op2_range (vrange
&r
, tree type
,
268 relation_trio rel
) const
270 gcc_checking_assert (m_operator
);
271 if (lhs
.undefined_p ())
274 switch (dispatch_kind (r
, lhs
, op1
))
277 return m_operator
->op2_range (as_a
<irange
> (r
), type
,
279 as_a
<irange
> (op1
), rel
);
281 return m_operator
->op2_range (as_a
<frange
> (r
), type
,
283 as_a
<frange
> (op1
), rel
);
285 return m_operator
->op2_range (as_a
<frange
> (r
), type
,
287 as_a
<frange
> (op1
), rel
);
293 // Dispatch a call to lhs_op1_relation based on the types of LHS, OP1 and OP2.
296 range_op_handler::lhs_op1_relation (const vrange
&lhs
,
299 relation_kind rel
) const
301 gcc_checking_assert (m_operator
);
303 switch (dispatch_kind (lhs
, op1
, op2
))
306 return m_operator
->lhs_op1_relation (as_a
<irange
> (lhs
),
308 as_a
<irange
> (op2
), rel
);
310 return m_operator
->lhs_op1_relation (as_a
<irange
> (lhs
),
312 as_a
<frange
> (op2
), rel
);
314 return m_operator
->lhs_op1_relation (as_a
<frange
> (lhs
),
316 as_a
<frange
> (op2
), rel
);
322 // Dispatch a call to lhs_op2_relation based on the types of LHS, OP1 and OP2.
325 range_op_handler::lhs_op2_relation (const vrange
&lhs
,
328 relation_kind rel
) const
330 gcc_checking_assert (m_operator
);
331 switch (dispatch_kind (lhs
, op1
, op2
))
334 return m_operator
->lhs_op2_relation (as_a
<irange
> (lhs
),
336 as_a
<irange
> (op2
), rel
);
338 return m_operator
->lhs_op2_relation (as_a
<irange
> (lhs
),
340 as_a
<frange
> (op2
), rel
);
342 return m_operator
->lhs_op2_relation (as_a
<frange
> (lhs
),
344 as_a
<frange
> (op2
), rel
);
350 // Dispatch a call to op1_op2_relation based on the type of LHS.
353 range_op_handler::op1_op2_relation (const vrange
&lhs
) const
355 gcc_checking_assert (m_operator
);
356 switch (dispatch_kind (lhs
, lhs
, lhs
))
359 return m_operator
->op1_op2_relation (as_a
<irange
> (lhs
));
362 return m_operator
->op1_op2_relation (as_a
<frange
> (lhs
));
370 // Update the known bitmasks in R when applying the operation CODE to
374 update_known_bitmask (irange
&r
, tree_code code
,
375 const irange
&lh
, const irange
&rh
)
377 if (r
.undefined_p () || lh
.undefined_p () || rh
.undefined_p ()
381 widest_int widest_value
, widest_mask
;
382 tree type
= r
.type ();
383 signop sign
= TYPE_SIGN (type
);
384 int prec
= TYPE_PRECISION (type
);
385 irange_bitmask lh_bits
= lh
.get_bitmask ();
386 irange_bitmask rh_bits
= rh
.get_bitmask ();
388 bit_value_binop (code
, sign
, prec
, &widest_value
, &widest_mask
,
389 TYPE_SIGN (lh
.type ()),
390 TYPE_PRECISION (lh
.type ()),
391 widest_int::from (lh_bits
.value (), sign
),
392 widest_int::from (lh_bits
.mask (), sign
),
393 TYPE_SIGN (rh
.type ()),
394 TYPE_PRECISION (rh
.type ()),
395 widest_int::from (rh_bits
.value (), sign
),
396 widest_int::from (rh_bits
.mask (), sign
));
398 wide_int mask
= wide_int::from (widest_mask
, prec
, sign
);
399 wide_int value
= wide_int::from (widest_value
, prec
, sign
);
400 // Bitmasks must have the unknown value bits cleared.
402 irange_bitmask
bm (value
, mask
);
403 r
.update_bitmask (bm
);
406 // Return the upper limit for a type.
408 static inline wide_int
409 max_limit (const_tree type
)
411 return irange_val_max (type
);
414 // Return the lower limit for a type.
416 static inline wide_int
417 min_limit (const_tree type
)
419 return irange_val_min (type
);
422 // Return false if shifting by OP is undefined behavior. Otherwise, return
423 // true and the range it is to be shifted by. This allows trimming out of
424 // undefined ranges, leaving only valid ranges if there are any.
427 get_shift_range (irange
&r
, tree type
, const irange
&op
)
429 if (op
.undefined_p ())
432 // Build valid range and intersect it with the shift range.
433 r
= value_range (op
.type (),
434 wi::shwi (0, TYPE_PRECISION (op
.type ())),
435 wi::shwi (TYPE_PRECISION (type
) - 1, TYPE_PRECISION (op
.type ())));
438 // If there are no valid ranges in the shift range, returned false.
439 if (r
.undefined_p ())
444 // Default wide_int fold operation returns [MIN, MAX].
447 range_operator::wi_fold (irange
&r
, tree type
,
448 const wide_int
&lh_lb ATTRIBUTE_UNUSED
,
449 const wide_int
&lh_ub ATTRIBUTE_UNUSED
,
450 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
451 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
453 gcc_checking_assert (r
.supports_type_p (type
));
454 r
.set_varying (type
);
457 // Call wi_fold when both op1 and op2 are equivalent. Further split small
458 // subranges into constants. This can provide better precision.
459 // For x + y, when x == y with a range of [0,4] instead of [0, 8] produce
460 // [0,0][2, 2][4,4][6, 6][8, 8]
461 // LIMIT is the maximum number of elements in range allowed before we
462 // do not process them individually.
465 range_operator::wi_fold_in_parts_equiv (irange
&r
, tree type
,
466 const wide_int
&lh_lb
,
467 const wide_int
&lh_ub
,
468 unsigned limit
) const
471 widest_int lh_range
= wi::sub (widest_int::from (lh_ub
, TYPE_SIGN (type
)),
472 widest_int::from (lh_lb
, TYPE_SIGN (type
)));
473 // if there are 1 to 8 values in the LH range, split them up.
475 if (lh_range
>= 0 && lh_range
< limit
)
477 for (unsigned x
= 0; x
<= lh_range
; x
++)
479 wide_int val
= lh_lb
+ x
;
480 wi_fold (tmp
, type
, val
, val
, val
, val
);
484 // Otherwise just call wi_fold.
486 wi_fold (r
, type
, lh_lb
, lh_ub
, lh_lb
, lh_ub
);
489 // Call wi_fold, except further split small subranges into constants.
490 // This can provide better precision. For something 8 >> [0,1]
491 // Instead of [8, 16], we will produce [8,8][16,16]
494 range_operator::wi_fold_in_parts (irange
&r
, tree type
,
495 const wide_int
&lh_lb
,
496 const wide_int
&lh_ub
,
497 const wide_int
&rh_lb
,
498 const wide_int
&rh_ub
) const
501 widest_int rh_range
= wi::sub (widest_int::from (rh_ub
, TYPE_SIGN (type
)),
502 widest_int::from (rh_lb
, TYPE_SIGN (type
)));
503 widest_int lh_range
= wi::sub (widest_int::from (lh_ub
, TYPE_SIGN (type
)),
504 widest_int::from (lh_lb
, TYPE_SIGN (type
)));
505 // If there are 2, 3, or 4 values in the RH range, do them separately.
506 // Call wi_fold_in_parts to check the RH side.
507 if (rh_range
> 0 && rh_range
< 4)
509 wi_fold_in_parts (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_lb
);
512 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
+ 1, rh_lb
+ 1);
516 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
+ 2, rh_lb
+ 2);
520 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_ub
, rh_ub
);
523 // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
524 // The RH side has been checked, so no recursion needed.
525 else if (lh_range
> 0 && lh_range
< 4)
527 wi_fold (r
, type
, lh_lb
, lh_lb
, rh_lb
, rh_ub
);
530 wi_fold (tmp
, type
, lh_lb
+ 1, lh_lb
+ 1, rh_lb
, rh_ub
);
534 wi_fold (tmp
, type
, lh_lb
+ 2, lh_lb
+ 2, rh_lb
, rh_ub
);
538 wi_fold (tmp
, type
, lh_ub
, lh_ub
, rh_lb
, rh_ub
);
541 // Otherwise just call wi_fold.
543 wi_fold (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
546 // The default for fold is to break all ranges into sub-ranges and
547 // invoke the wi_fold method on each sub-range pair.
550 range_operator::fold_range (irange
&r
, tree type
,
553 relation_trio trio
) const
555 gcc_checking_assert (r
.supports_type_p (type
));
556 if (empty_range_varying (r
, type
, lh
, rh
))
559 relation_kind rel
= trio
.op1_op2 ();
560 unsigned num_lh
= lh
.num_pairs ();
561 unsigned num_rh
= rh
.num_pairs ();
563 // If op1 and op2 are equivalences, then we don't need a complete cross
564 // product, just pairs of matching elements.
565 if (relation_equiv_p (rel
) && lh
== rh
)
569 for (unsigned x
= 0; x
< num_lh
; ++x
)
571 // If the number of subranges is too high, limit subrange creation.
572 unsigned limit
= (r
.num_pairs () > 32) ? 0 : 8;
573 wide_int lh_lb
= lh
.lower_bound (x
);
574 wide_int lh_ub
= lh
.upper_bound (x
);
575 wi_fold_in_parts_equiv (tmp
, type
, lh_lb
, lh_ub
, limit
);
580 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
581 update_bitmask (r
, lh
, rh
);
585 // If both ranges are single pairs, fold directly into the result range.
586 // If the number of subranges grows too high, produce a summary result as the
587 // loop becomes exponential with little benefit. See PR 103821.
588 if ((num_lh
== 1 && num_rh
== 1) || num_lh
* num_rh
> 12)
590 wi_fold_in_parts (r
, type
, lh
.lower_bound (), lh
.upper_bound (),
591 rh
.lower_bound (), rh
.upper_bound ());
592 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
593 update_bitmask (r
, lh
, rh
);
599 for (unsigned x
= 0; x
< num_lh
; ++x
)
600 for (unsigned y
= 0; y
< num_rh
; ++y
)
602 wide_int lh_lb
= lh
.lower_bound (x
);
603 wide_int lh_ub
= lh
.upper_bound (x
);
604 wide_int rh_lb
= rh
.lower_bound (y
);
605 wide_int rh_ub
= rh
.upper_bound (y
);
606 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
610 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
611 update_bitmask (r
, lh
, rh
);
615 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
616 update_bitmask (r
, lh
, rh
);
620 // The default for op1_range is to return false.
623 range_operator::op1_range (irange
&r ATTRIBUTE_UNUSED
,
624 tree type ATTRIBUTE_UNUSED
,
625 const irange
&lhs ATTRIBUTE_UNUSED
,
626 const irange
&op2 ATTRIBUTE_UNUSED
,
632 // The default for op2_range is to return false.
635 range_operator::op2_range (irange
&r ATTRIBUTE_UNUSED
,
636 tree type ATTRIBUTE_UNUSED
,
637 const irange
&lhs ATTRIBUTE_UNUSED
,
638 const irange
&op1 ATTRIBUTE_UNUSED
,
644 // The default relation routines return VREL_VARYING.
647 range_operator::lhs_op1_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
648 const irange
&op1 ATTRIBUTE_UNUSED
,
649 const irange
&op2 ATTRIBUTE_UNUSED
,
650 relation_kind rel ATTRIBUTE_UNUSED
) const
656 range_operator::lhs_op2_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
657 const irange
&op1 ATTRIBUTE_UNUSED
,
658 const irange
&op2 ATTRIBUTE_UNUSED
,
659 relation_kind rel ATTRIBUTE_UNUSED
) const
665 range_operator::op1_op2_relation (const irange
&lhs ATTRIBUTE_UNUSED
) const
670 // Default is no relation affects the LHS.
673 range_operator::op1_op2_relation_effect (irange
&lhs_range ATTRIBUTE_UNUSED
,
674 tree type ATTRIBUTE_UNUSED
,
675 const irange
&op1_range ATTRIBUTE_UNUSED
,
676 const irange
&op2_range ATTRIBUTE_UNUSED
,
677 relation_kind rel ATTRIBUTE_UNUSED
) const
682 // Apply any known bitmask updates based on this operator.
685 range_operator::update_bitmask (irange
&, const irange
&,
686 const irange
&) const
690 // Create and return a range from a pair of wide-ints that are known
691 // to have overflowed (or underflowed).
694 value_range_from_overflowed_bounds (irange
&r
, tree type
,
695 const wide_int
&wmin
,
696 const wide_int
&wmax
)
698 const signop sgn
= TYPE_SIGN (type
);
699 const unsigned int prec
= TYPE_PRECISION (type
);
701 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
702 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
707 if (wi::cmp (tmin
, tmax
, sgn
) < 0)
710 if (wi::cmp (tmax
, tem
, sgn
) > 0)
713 // If the anti-range would cover nothing, drop to varying.
714 // Likewise if the anti-range bounds are outside of the types
716 if (covers
|| wi::cmp (tmin
, tmax
, sgn
) > 0)
717 r
.set_varying (type
);
719 r
.set (type
, tmin
, tmax
, VR_ANTI_RANGE
);
722 // Create and return a range from a pair of wide-ints. MIN_OVF and
723 // MAX_OVF describe any overflow that might have occurred while
724 // calculating WMIN and WMAX respectively.
727 value_range_with_overflow (irange
&r
, tree type
,
728 const wide_int
&wmin
, const wide_int
&wmax
,
729 wi::overflow_type min_ovf
= wi::OVF_NONE
,
730 wi::overflow_type max_ovf
= wi::OVF_NONE
)
732 const signop sgn
= TYPE_SIGN (type
);
733 const unsigned int prec
= TYPE_PRECISION (type
);
734 const bool overflow_wraps
= TYPE_OVERFLOW_WRAPS (type
);
736 // For one bit precision if max != min, then the range covers all
738 if (prec
== 1 && wi::ne_p (wmax
, wmin
))
740 r
.set_varying (type
);
746 // If overflow wraps, truncate the values and adjust the range,
747 // kind, and bounds appropriately.
748 if ((min_ovf
!= wi::OVF_NONE
) == (max_ovf
!= wi::OVF_NONE
))
750 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
751 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
752 // If the limits are swapped, we wrapped around and cover
754 if (wi::gt_p (tmin
, tmax
, sgn
))
755 r
.set_varying (type
);
757 // No overflow or both overflow or underflow. The range
758 // kind stays normal.
759 r
.set (type
, tmin
, tmax
);
763 if ((min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_NONE
)
764 || (max_ovf
== wi::OVF_OVERFLOW
&& min_ovf
== wi::OVF_NONE
))
765 value_range_from_overflowed_bounds (r
, type
, wmin
, wmax
);
767 // Other underflow and/or overflow, drop to VR_VARYING.
768 r
.set_varying (type
);
772 // If both bounds either underflowed or overflowed, then the result
774 if ((min_ovf
== wi::OVF_OVERFLOW
&& max_ovf
== wi::OVF_OVERFLOW
)
775 || (min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_UNDERFLOW
))
781 // If overflow does not wrap, saturate to [MIN, MAX].
782 wide_int new_lb
, new_ub
;
783 if (min_ovf
== wi::OVF_UNDERFLOW
)
784 new_lb
= wi::min_value (prec
, sgn
);
785 else if (min_ovf
== wi::OVF_OVERFLOW
)
786 new_lb
= wi::max_value (prec
, sgn
);
790 if (max_ovf
== wi::OVF_UNDERFLOW
)
791 new_ub
= wi::min_value (prec
, sgn
);
792 else if (max_ovf
== wi::OVF_OVERFLOW
)
793 new_ub
= wi::max_value (prec
, sgn
);
797 r
.set (type
, new_lb
, new_ub
);
801 // Create and return a range from a pair of wide-ints. Canonicalize
802 // the case where the bounds are swapped. In which case, we transform
803 // [10,5] into [MIN,5][10,MAX].
806 create_possibly_reversed_range (irange
&r
, tree type
,
807 const wide_int
&new_lb
, const wide_int
&new_ub
)
809 signop s
= TYPE_SIGN (type
);
810 // If the bounds are swapped, treat the result as if an overflow occurred.
811 if (wi::gt_p (new_lb
, new_ub
, s
))
812 value_range_from_overflowed_bounds (r
, type
, new_lb
, new_ub
);
814 // Otherwise it's just a normal range.
815 r
.set (type
, new_lb
, new_ub
);
818 // Return the summary information about boolean range LHS. If EMPTY/FULL,
819 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
822 get_bool_state (vrange
&r
, const vrange
&lhs
, tree val_type
)
824 // If there is no result, then this is unexecutable.
825 if (lhs
.undefined_p ())
834 // For TRUE, we can't just test for [1,1] because Ada can have
835 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
836 if (lhs
.contains_p (build_zero_cst (lhs
.type ())))
838 r
.set_varying (val_type
);
845 // ------------------------------------------------------------------------
848 operator_equal::update_bitmask (irange
&r
, const irange
&lh
,
849 const irange
&rh
) const
851 update_known_bitmask (r
, EQ_EXPR
, lh
, rh
);
854 // Check if the LHS range indicates a relation between OP1 and OP2.
857 operator_equal::op1_op2_relation (const irange
&lhs
) const
859 if (lhs
.undefined_p ())
860 return VREL_UNDEFINED
;
862 // FALSE = op1 == op2 indicates NE_EXPR.
866 // TRUE = op1 == op2 indicates EQ_EXPR.
867 if (lhs
.undefined_p () || !contains_zero_p (lhs
))
873 operator_equal::fold_range (irange
&r
, tree type
,
876 relation_trio rel
) const
878 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_EQ
))
881 // We can be sure the values are always equal or not if both ranges
882 // consist of a single value, and then compare them.
883 if (wi::eq_p (op1
.lower_bound (), op1
.upper_bound ())
884 && wi::eq_p (op2
.lower_bound (), op2
.upper_bound ()))
886 if (wi::eq_p (op1
.lower_bound (), op2
.upper_bound()))
887 r
= range_true (type
);
889 r
= range_false (type
);
893 // If ranges do not intersect, we know the range is not equal,
894 // otherwise we don't know anything for sure.
895 int_range_max tmp
= op1
;
897 if (tmp
.undefined_p ())
898 r
= range_false (type
);
900 r
= range_true_and_false (type
);
906 operator_equal::op1_range (irange
&r
, tree type
,
911 switch (get_bool_state (r
, lhs
, type
))
914 // If it's true, the result is the same as OP2.
919 // If the result is false, the only time we know anything is
920 // if OP2 is a constant.
921 if (!op2
.undefined_p ()
922 && wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
928 r
.set_varying (type
);
938 operator_equal::op2_range (irange
&r
, tree type
,
941 relation_trio rel
) const
943 return operator_equal::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
946 // -------------------------------------------------------------------------
949 operator_not_equal::update_bitmask (irange
&r
, const irange
&lh
,
950 const irange
&rh
) const
952 update_known_bitmask (r
, NE_EXPR
, lh
, rh
);
955 // Check if the LHS range indicates a relation between OP1 and OP2.
958 operator_not_equal::op1_op2_relation (const irange
&lhs
) const
960 if (lhs
.undefined_p ())
961 return VREL_UNDEFINED
;
963 // FALSE = op1 != op2 indicates EQ_EXPR.
967 // TRUE = op1 != op2 indicates NE_EXPR.
968 if (lhs
.undefined_p () || !contains_zero_p (lhs
))
974 operator_not_equal::fold_range (irange
&r
, tree type
,
977 relation_trio rel
) const
979 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_NE
))
982 // We can be sure the values are always equal or not if both ranges
983 // consist of a single value, and then compare them.
984 if (wi::eq_p (op1
.lower_bound (), op1
.upper_bound ())
985 && wi::eq_p (op2
.lower_bound (), op2
.upper_bound ()))
987 if (wi::ne_p (op1
.lower_bound (), op2
.upper_bound()))
988 r
= range_true (type
);
990 r
= range_false (type
);
994 // If ranges do not intersect, we know the range is not equal,
995 // otherwise we don't know anything for sure.
996 int_range_max tmp
= op1
;
998 if (tmp
.undefined_p ())
999 r
= range_true (type
);
1001 r
= range_true_and_false (type
);
1007 operator_not_equal::op1_range (irange
&r
, tree type
,
1010 relation_trio
) const
1012 switch (get_bool_state (r
, lhs
, type
))
1015 // If the result is true, the only time we know anything is if
1016 // OP2 is a constant.
1017 if (!op2
.undefined_p ()
1018 && wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
1024 r
.set_varying (type
);
1028 // If it's false, the result is the same as OP2.
1040 operator_not_equal::op2_range (irange
&r
, tree type
,
1043 relation_trio rel
) const
1045 return operator_not_equal::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
1048 // (X < VAL) produces the range of [MIN, VAL - 1].
1051 build_lt (irange
&r
, tree type
, const wide_int
&val
)
1053 wi::overflow_type ov
;
1055 signop sgn
= TYPE_SIGN (type
);
1057 // Signed 1 bit cannot represent 1 for subtraction.
1059 lim
= wi::add (val
, -1, sgn
, &ov
);
1061 lim
= wi::sub (val
, 1, sgn
, &ov
);
1063 // If val - 1 underflows, check if X < MIN, which is an empty range.
1067 r
= int_range
<1> (type
, min_limit (type
), lim
);
1070 // (X <= VAL) produces the range of [MIN, VAL].
1073 build_le (irange
&r
, tree type
, const wide_int
&val
)
1075 r
= int_range
<1> (type
, min_limit (type
), val
);
1078 // (X > VAL) produces the range of [VAL + 1, MAX].
1081 build_gt (irange
&r
, tree type
, const wide_int
&val
)
1083 wi::overflow_type ov
;
1085 signop sgn
= TYPE_SIGN (type
);
1087 // Signed 1 bit cannot represent 1 for addition.
1089 lim
= wi::sub (val
, -1, sgn
, &ov
);
1091 lim
= wi::add (val
, 1, sgn
, &ov
);
1092 // If val + 1 overflows, check is for X > MAX, which is an empty range.
1096 r
= int_range
<1> (type
, lim
, max_limit (type
));
1099 // (X >= val) produces the range of [VAL, MAX].
1102 build_ge (irange
&r
, tree type
, const wide_int
&val
)
1104 r
= int_range
<1> (type
, val
, max_limit (type
));
1109 operator_lt::update_bitmask (irange
&r
, const irange
&lh
,
1110 const irange
&rh
) const
1112 update_known_bitmask (r
, LT_EXPR
, lh
, rh
);
1115 // Check if the LHS range indicates a relation between OP1 and OP2.
1118 operator_lt::op1_op2_relation (const irange
&lhs
) const
1120 if (lhs
.undefined_p ())
1121 return VREL_UNDEFINED
;
1123 // FALSE = op1 < op2 indicates GE_EXPR.
1127 // TRUE = op1 < op2 indicates LT_EXPR.
1128 if (lhs
.undefined_p () || !contains_zero_p (lhs
))
1130 return VREL_VARYING
;
1134 operator_lt::fold_range (irange
&r
, tree type
,
1137 relation_trio rel
) const
1139 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_LT
))
1142 signop sign
= TYPE_SIGN (op1
.type ());
1143 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1145 if (wi::lt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1146 r
= range_true (type
);
1147 else if (!wi::lt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1148 r
= range_false (type
);
1149 // Use nonzero bits to determine if < 0 is false.
1150 else if (op2
.zero_p () && !wi::neg_p (op1
.get_nonzero_bits (), sign
))
1151 r
= range_false (type
);
1153 r
= range_true_and_false (type
);
1158 operator_lt::op1_range (irange
&r
, tree type
,
1161 relation_trio
) const
1163 if (op2
.undefined_p ())
1166 switch (get_bool_state (r
, lhs
, type
))
1169 build_lt (r
, type
, op2
.upper_bound ());
1173 build_ge (r
, type
, op2
.lower_bound ());
1183 operator_lt::op2_range (irange
&r
, tree type
,
1186 relation_trio
) const
1188 if (op1
.undefined_p ())
1191 switch (get_bool_state (r
, lhs
, type
))
1194 build_gt (r
, type
, op1
.lower_bound ());
1198 build_le (r
, type
, op1
.upper_bound ());
1209 operator_le::update_bitmask (irange
&r
, const irange
&lh
,
1210 const irange
&rh
) const
1212 update_known_bitmask (r
, LE_EXPR
, lh
, rh
);
1215 // Check if the LHS range indicates a relation between OP1 and OP2.
1218 operator_le::op1_op2_relation (const irange
&lhs
) const
1220 if (lhs
.undefined_p ())
1221 return VREL_UNDEFINED
;
1223 // FALSE = op1 <= op2 indicates GT_EXPR.
1227 // TRUE = op1 <= op2 indicates LE_EXPR.
1228 if (lhs
.undefined_p () || !contains_zero_p (lhs
))
1230 return VREL_VARYING
;
1234 operator_le::fold_range (irange
&r
, tree type
,
1237 relation_trio rel
) const
1239 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_LE
))
1242 signop sign
= TYPE_SIGN (op1
.type ());
1243 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1245 if (wi::le_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1246 r
= range_true (type
);
1247 else if (!wi::le_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1248 r
= range_false (type
);
1250 r
= range_true_and_false (type
);
1255 operator_le::op1_range (irange
&r
, tree type
,
1258 relation_trio
) const
1260 if (op2
.undefined_p ())
1263 switch (get_bool_state (r
, lhs
, type
))
1266 build_le (r
, type
, op2
.upper_bound ());
1270 build_gt (r
, type
, op2
.lower_bound ());
1280 operator_le::op2_range (irange
&r
, tree type
,
1283 relation_trio
) const
1285 if (op1
.undefined_p ())
1288 switch (get_bool_state (r
, lhs
, type
))
1291 build_ge (r
, type
, op1
.lower_bound ());
1295 build_lt (r
, type
, op1
.upper_bound ());
1306 operator_gt::update_bitmask (irange
&r
, const irange
&lh
,
1307 const irange
&rh
) const
1309 update_known_bitmask (r
, GT_EXPR
, lh
, rh
);
1312 // Check if the LHS range indicates a relation between OP1 and OP2.
1315 operator_gt::op1_op2_relation (const irange
&lhs
) const
1317 if (lhs
.undefined_p ())
1318 return VREL_UNDEFINED
;
1320 // FALSE = op1 > op2 indicates LE_EXPR.
1324 // TRUE = op1 > op2 indicates GT_EXPR.
1325 if (!contains_zero_p (lhs
))
1327 return VREL_VARYING
;
1331 operator_gt::fold_range (irange
&r
, tree type
,
1332 const irange
&op1
, const irange
&op2
,
1333 relation_trio rel
) const
1335 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_GT
))
1338 signop sign
= TYPE_SIGN (op1
.type ());
1339 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1341 if (wi::gt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1342 r
= range_true (type
);
1343 else if (!wi::gt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1344 r
= range_false (type
);
1346 r
= range_true_and_false (type
);
1351 operator_gt::op1_range (irange
&r
, tree type
,
1352 const irange
&lhs
, const irange
&op2
,
1353 relation_trio
) const
1355 if (op2
.undefined_p ())
1358 switch (get_bool_state (r
, lhs
, type
))
1361 build_gt (r
, type
, op2
.lower_bound ());
1365 build_le (r
, type
, op2
.upper_bound ());
1375 operator_gt::op2_range (irange
&r
, tree type
,
1378 relation_trio
) const
1380 if (op1
.undefined_p ())
1383 switch (get_bool_state (r
, lhs
, type
))
1386 build_lt (r
, type
, op1
.upper_bound ());
1390 build_ge (r
, type
, op1
.lower_bound ());
1401 operator_ge::update_bitmask (irange
&r
, const irange
&lh
,
1402 const irange
&rh
) const
1404 update_known_bitmask (r
, GE_EXPR
, lh
, rh
);
1407 // Check if the LHS range indicates a relation between OP1 and OP2.
1410 operator_ge::op1_op2_relation (const irange
&lhs
) const
1412 if (lhs
.undefined_p ())
1413 return VREL_UNDEFINED
;
1415 // FALSE = op1 >= op2 indicates LT_EXPR.
1419 // TRUE = op1 >= op2 indicates GE_EXPR.
1420 if (!contains_zero_p (lhs
))
1422 return VREL_VARYING
;
1426 operator_ge::fold_range (irange
&r
, tree type
,
1429 relation_trio rel
) const
1431 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_GE
))
1434 signop sign
= TYPE_SIGN (op1
.type ());
1435 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1437 if (wi::ge_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1438 r
= range_true (type
);
1439 else if (!wi::ge_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1440 r
= range_false (type
);
1442 r
= range_true_and_false (type
);
1447 operator_ge::op1_range (irange
&r
, tree type
,
1450 relation_trio
) const
1452 if (op2
.undefined_p ())
1455 switch (get_bool_state (r
, lhs
, type
))
1458 build_ge (r
, type
, op2
.lower_bound ());
1462 build_lt (r
, type
, op2
.upper_bound ());
1472 operator_ge::op2_range (irange
&r
, tree type
,
1475 relation_trio
) const
1477 if (op1
.undefined_p ())
1480 switch (get_bool_state (r
, lhs
, type
))
1483 build_le (r
, type
, op1
.upper_bound ());
1487 build_gt (r
, type
, op1
.lower_bound ());
1498 operator_plus::update_bitmask (irange
&r
, const irange
&lh
,
1499 const irange
&rh
) const
1501 update_known_bitmask (r
, PLUS_EXPR
, lh
, rh
);
1504 // Check to see if the range of OP2 indicates anything about the relation
1505 // between LHS and OP1.
1508 operator_plus::lhs_op1_relation (const irange
&lhs
,
1511 relation_kind
) const
1513 if (lhs
.undefined_p () || op1
.undefined_p () || op2
.undefined_p ())
1514 return VREL_VARYING
;
1516 tree type
= lhs
.type ();
1517 unsigned prec
= TYPE_PRECISION (type
);
1518 wi::overflow_type ovf1
, ovf2
;
1519 signop sign
= TYPE_SIGN (type
);
1521 // LHS = OP1 + 0 indicates LHS == OP1.
1525 if (TYPE_OVERFLOW_WRAPS (type
))
1527 wi::add (op1
.lower_bound (), op2
.lower_bound (), sign
, &ovf1
);
1528 wi::add (op1
.upper_bound (), op2
.upper_bound (), sign
, &ovf2
);
1531 ovf1
= ovf2
= wi::OVF_NONE
;
1533 // Never wrapping additions.
1536 // Positive op2 means lhs > op1.
1537 if (wi::gt_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1539 if (wi::ge_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1542 // Negative op2 means lhs < op1.
1543 if (wi::lt_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1545 if (wi::le_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1548 // Always wrapping additions.
1549 else if (ovf1
&& ovf1
== ovf2
)
1551 // Positive op2 means lhs < op1.
1552 if (wi::gt_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1554 if (wi::ge_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1557 // Negative op2 means lhs > op1.
1558 if (wi::lt_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1560 if (wi::le_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1564 // If op2 does not contain 0, then LHS and OP1 can never be equal.
1565 if (!range_includes_zero_p (&op2
))
1568 return VREL_VARYING
;
1571 // PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
1575 operator_plus::lhs_op2_relation (const irange
&lhs
, const irange
&op1
,
1576 const irange
&op2
, relation_kind rel
) const
1578 return lhs_op1_relation (lhs
, op2
, op1
, rel
);
1582 operator_plus::wi_fold (irange
&r
, tree type
,
1583 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1584 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1586 wi::overflow_type ov_lb
, ov_ub
;
1587 signop s
= TYPE_SIGN (type
);
1588 wide_int new_lb
= wi::add (lh_lb
, rh_lb
, s
, &ov_lb
);
1589 wide_int new_ub
= wi::add (lh_ub
, rh_ub
, s
, &ov_ub
);
1590 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
1593 // Given addition or subtraction, determine the possible NORMAL ranges and
1594 // OVERFLOW ranges given an OFFSET range. ADD_P is true for addition.
1595 // Return the relation that exists between the LHS and OP1 in order for the
1596 // NORMAL range to apply.
1597 // a return value of VREL_VARYING means no ranges were applicable.
1599 static relation_kind
1600 plus_minus_ranges (irange
&r_ov
, irange
&r_normal
, const irange
&offset
,
1603 relation_kind kind
= VREL_VARYING
;
1604 // For now, only deal with constant adds. This could be extended to ranges
1605 // when someone is so motivated.
1606 if (!offset
.singleton_p () || offset
.zero_p ())
1609 // Always work with a positive offset. ie a+ -2 -> a-2 and a- -2 > a+2
1610 wide_int off
= offset
.lower_bound ();
1611 if (wi::neg_p (off
, SIGNED
))
1614 off
= wi::neg (off
);
1617 wi::overflow_type ov
;
1618 tree type
= offset
.type ();
1619 unsigned prec
= TYPE_PRECISION (type
);
1622 // calculate the normal range and relation for the operation.
1626 lb
= wi::zero (prec
);
1627 ub
= wi::sub (irange_val_max (type
), off
, UNSIGNED
, &ov
);
1634 ub
= irange_val_max (type
);
1637 int_range
<2> normal_range (type
, lb
, ub
);
1638 int_range
<2> ov_range (type
, lb
, ub
, VR_ANTI_RANGE
);
1641 r_normal
= normal_range
;
1645 // Once op1 has been calculated by operator_plus or operator_minus, check
1646 // to see if the relation passed causes any part of the calculation to
1647 // be not possible. ie
1648 // a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
1649 // and that further refines a_2 to [0, 0].
1650 // R is the value of op1, OP2 is the offset being added/subtracted, REL is the
1651 // relation between LHS relation OP1 and ADD_P is true for PLUS, false for
1652 // MINUS. IF any adjustment can be made, R will reflect it.
1655 adjust_op1_for_overflow (irange
&r
, const irange
&op2
, relation_kind rel
,
1658 if (r
.undefined_p ())
1660 tree type
= r
.type ();
1661 // Check for unsigned overflow and calculate the overflow part.
1662 signop s
= TYPE_SIGN (type
);
1663 if (!TYPE_OVERFLOW_WRAPS (type
) || s
== SIGNED
)
1666 // Only work with <, <=, >, >= relations.
1667 if (!relation_lt_le_gt_ge_p (rel
))
1670 // Get the ranges for this offset.
1671 int_range_max normal
, overflow
;
1672 relation_kind k
= plus_minus_ranges (overflow
, normal
, op2
, add_p
);
1674 // VREL_VARYING means there are no adjustments.
1675 if (k
== VREL_VARYING
)
1678 // If the relations match use the normal range, otherwise use overflow range.
1679 if (relation_intersect (k
, rel
) == k
)
1680 r
.intersect (normal
);
1682 r
.intersect (overflow
);
1687 operator_plus::op1_range (irange
&r
, tree type
,
1690 relation_trio trio
) const
1692 if (lhs
.undefined_p ())
1694 // Start with the default operation.
1695 range_op_handler
minus (MINUS_EXPR
);
1698 bool res
= minus
.fold_range (r
, type
, lhs
, op2
);
1699 relation_kind rel
= trio
.lhs_op1 ();
1700 // Check for a relation refinement.
1702 adjust_op1_for_overflow (r
, op2
, rel
, true /* PLUS_EXPR */);
1707 operator_plus::op2_range (irange
&r
, tree type
,
1710 relation_trio rel
) const
1712 return op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
1715 class operator_widen_plus_signed
: public range_operator
1718 virtual void wi_fold (irange
&r
, tree type
,
1719 const wide_int
&lh_lb
,
1720 const wide_int
&lh_ub
,
1721 const wide_int
&rh_lb
,
1722 const wide_int
&rh_ub
) const;
1723 } op_widen_plus_signed
;
1726 operator_widen_plus_signed::wi_fold (irange
&r
, tree type
,
1727 const wide_int
&lh_lb
,
1728 const wide_int
&lh_ub
,
1729 const wide_int
&rh_lb
,
1730 const wide_int
&rh_ub
) const
1732 wi::overflow_type ov_lb
, ov_ub
;
1733 signop s
= TYPE_SIGN (type
);
1736 = wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, SIGNED
);
1738 = wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, SIGNED
);
1739 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
1740 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
1742 wide_int new_lb
= wi::add (lh_wlb
, rh_wlb
, s
, &ov_lb
);
1743 wide_int new_ub
= wi::add (lh_wub
, rh_wub
, s
, &ov_ub
);
1745 r
= int_range
<2> (type
, new_lb
, new_ub
);
1748 class operator_widen_plus_unsigned
: public range_operator
1751 virtual void wi_fold (irange
&r
, tree type
,
1752 const wide_int
&lh_lb
,
1753 const wide_int
&lh_ub
,
1754 const wide_int
&rh_lb
,
1755 const wide_int
&rh_ub
) const;
1756 } op_widen_plus_unsigned
;
1759 operator_widen_plus_unsigned::wi_fold (irange
&r
, tree type
,
1760 const wide_int
&lh_lb
,
1761 const wide_int
&lh_ub
,
1762 const wide_int
&rh_lb
,
1763 const wide_int
&rh_ub
) const
1765 wi::overflow_type ov_lb
, ov_ub
;
1766 signop s
= TYPE_SIGN (type
);
1769 = wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, UNSIGNED
);
1771 = wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, UNSIGNED
);
1772 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
1773 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
1775 wide_int new_lb
= wi::add (lh_wlb
, rh_wlb
, s
, &ov_lb
);
1776 wide_int new_ub
= wi::add (lh_wub
, rh_wub
, s
, &ov_ub
);
1778 r
= int_range
<2> (type
, new_lb
, new_ub
);
1782 operator_minus::update_bitmask (irange
&r
, const irange
&lh
,
1783 const irange
&rh
) const
1785 update_known_bitmask (r
, MINUS_EXPR
, lh
, rh
);
1789 operator_minus::wi_fold (irange
&r
, tree type
,
1790 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1791 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1793 wi::overflow_type ov_lb
, ov_ub
;
1794 signop s
= TYPE_SIGN (type
);
1795 wide_int new_lb
= wi::sub (lh_lb
, rh_ub
, s
, &ov_lb
);
1796 wide_int new_ub
= wi::sub (lh_ub
, rh_lb
, s
, &ov_ub
);
1797 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
1801 // Return the relation between LHS and OP1 based on the relation between
1805 operator_minus::lhs_op1_relation (const irange
&, const irange
&op1
,
1806 const irange
&, relation_kind rel
) const
1808 if (!op1
.undefined_p () && TYPE_SIGN (op1
.type ()) == UNSIGNED
)
1817 return VREL_VARYING
;
1820 // Check to see if the relation REL between OP1 and OP2 has any effect on the
1821 // LHS of the expression. If so, apply it to LHS_RANGE. This is a helper
1822 // function for both MINUS_EXPR and POINTER_DIFF_EXPR.
1825 minus_op1_op2_relation_effect (irange
&lhs_range
, tree type
,
1826 const irange
&op1_range ATTRIBUTE_UNUSED
,
1827 const irange
&op2_range ATTRIBUTE_UNUSED
,
1830 if (rel
== VREL_VARYING
)
1833 int_range
<2> rel_range
;
1834 unsigned prec
= TYPE_PRECISION (type
);
1835 signop sgn
= TYPE_SIGN (type
);
1837 // == and != produce [0,0] and ~[0,0] regardless of wrapping.
1839 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
));
1840 else if (rel
== VREL_NE
)
1841 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
),
1843 else if (TYPE_OVERFLOW_WRAPS (type
))
1847 // For wrapping signed values and unsigned, if op1 > op2 or
1848 // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
1851 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
),
1862 // op1 > op2, op1 - op2 can be restricted to [1, +INF]
1864 rel_range
= int_range
<2> (type
, wi::one (prec
),
1865 wi::max_value (prec
, sgn
));
1867 // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
1869 rel_range
= int_range
<2> (type
, wi::zero (prec
),
1870 wi::max_value (prec
, sgn
));
1872 // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
1874 rel_range
= int_range
<2> (type
, wi::min_value (prec
, sgn
),
1875 wi::minus_one (prec
));
1877 // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
1879 rel_range
= int_range
<2> (type
, wi::min_value (prec
, sgn
),
1886 lhs_range
.intersect (rel_range
);
1891 operator_minus::op1_op2_relation_effect (irange
&lhs_range
, tree type
,
1892 const irange
&op1_range
,
1893 const irange
&op2_range
,
1894 relation_kind rel
) const
1896 return minus_op1_op2_relation_effect (lhs_range
, type
, op1_range
, op2_range
,
1901 operator_minus::op1_range (irange
&r
, tree type
,
1904 relation_trio trio
) const
1906 if (lhs
.undefined_p ())
1908 // Start with the default operation.
1909 range_op_handler
minus (PLUS_EXPR
);
1912 bool res
= minus
.fold_range (r
, type
, lhs
, op2
);
1913 relation_kind rel
= trio
.lhs_op1 ();
1915 adjust_op1_for_overflow (r
, op2
, rel
, false /* PLUS_EXPR */);
1921 operator_minus::op2_range (irange
&r
, tree type
,
1924 relation_trio
) const
1926 if (lhs
.undefined_p ())
1928 return fold_range (r
, type
, op1
, lhs
);
1932 operator_min::update_bitmask (irange
&r
, const irange
&lh
,
1933 const irange
&rh
) const
1935 update_known_bitmask (r
, MIN_EXPR
, lh
, rh
);
1939 operator_min::wi_fold (irange
&r
, tree type
,
1940 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1941 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1943 signop s
= TYPE_SIGN (type
);
1944 wide_int new_lb
= wi::min (lh_lb
, rh_lb
, s
);
1945 wide_int new_ub
= wi::min (lh_ub
, rh_ub
, s
);
1946 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
1951 operator_max::update_bitmask (irange
&r
, const irange
&lh
,
1952 const irange
&rh
) const
1954 update_known_bitmask (r
, MAX_EXPR
, lh
, rh
);
1958 operator_max::wi_fold (irange
&r
, tree type
,
1959 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1960 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1962 signop s
= TYPE_SIGN (type
);
1963 wide_int new_lb
= wi::max (lh_lb
, rh_lb
, s
);
1964 wide_int new_ub
= wi::max (lh_ub
, rh_ub
, s
);
1965 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
1969 // Calculate the cross product of two sets of ranges and return it.
1971 // Multiplications, divisions and shifts are a bit tricky to handle,
1972 // depending on the mix of signs we have in the two ranges, we need to
1973 // operate on different values to get the minimum and maximum values
1974 // for the new range. One approach is to figure out all the
1975 // variations of range combinations and do the operations.
1977 // However, this involves several calls to compare_values and it is
1978 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
1979 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP MAX1) and then
1980 // figure the smallest and largest values to form the new range.
1983 cross_product_operator::wi_cross_product (irange
&r
, tree type
,
1984 const wide_int
&lh_lb
,
1985 const wide_int
&lh_ub
,
1986 const wide_int
&rh_lb
,
1987 const wide_int
&rh_ub
) const
1989 wide_int cp1
, cp2
, cp3
, cp4
;
1990 // Default to varying.
1991 r
.set_varying (type
);
1993 // Compute the 4 cross operations, bailing if we get an overflow we
1995 if (wi_op_overflows (cp1
, type
, lh_lb
, rh_lb
))
1997 if (wi::eq_p (lh_lb
, lh_ub
))
1999 else if (wi_op_overflows (cp3
, type
, lh_ub
, rh_lb
))
2001 if (wi::eq_p (rh_lb
, rh_ub
))
2003 else if (wi_op_overflows (cp2
, type
, lh_lb
, rh_ub
))
2005 if (wi::eq_p (lh_lb
, lh_ub
))
2007 else if (wi_op_overflows (cp4
, type
, lh_ub
, rh_ub
))
2011 signop sign
= TYPE_SIGN (type
);
2012 if (wi::gt_p (cp1
, cp2
, sign
))
2013 std::swap (cp1
, cp2
);
2014 if (wi::gt_p (cp3
, cp4
, sign
))
2015 std::swap (cp3
, cp4
);
2017 // Choose min and max from the ordered pairs.
2018 wide_int res_lb
= wi::min (cp1
, cp3
, sign
);
2019 wide_int res_ub
= wi::max (cp2
, cp4
, sign
);
2020 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
2025 operator_mult::update_bitmask (irange
&r
, const irange
&lh
,
2026 const irange
&rh
) const
2028 update_known_bitmask (r
, MULT_EXPR
, lh
, rh
);
2032 operator_mult::op1_range (irange
&r
, tree type
,
2033 const irange
&lhs
, const irange
&op2
,
2034 relation_trio
) const
2036 if (lhs
.undefined_p ())
2039 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
2040 // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
2041 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
2042 if (TYPE_OVERFLOW_WRAPS (type
))
2046 if (op2
.singleton_p (offset
) && offset
!= 0)
2047 return range_op_handler (TRUNC_DIV_EXPR
).fold_range (r
, type
, lhs
, op2
);
2052 operator_mult::op2_range (irange
&r
, tree type
,
2053 const irange
&lhs
, const irange
&op1
,
2054 relation_trio rel
) const
2056 return operator_mult::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
2060 operator_mult::wi_op_overflows (wide_int
&res
, tree type
,
2061 const wide_int
&w0
, const wide_int
&w1
) const
2063 wi::overflow_type overflow
= wi::OVF_NONE
;
2064 signop sign
= TYPE_SIGN (type
);
2065 res
= wi::mul (w0
, w1
, sign
, &overflow
);
2066 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
2068 // For multiplication, the sign of the overflow is given
2069 // by the comparison of the signs of the operands.
2070 if (sign
== UNSIGNED
|| w0
.sign_mask () == w1
.sign_mask ())
2071 res
= wi::max_value (w0
.get_precision (), sign
);
2073 res
= wi::min_value (w0
.get_precision (), sign
);
2080 operator_mult::wi_fold (irange
&r
, tree type
,
2081 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2082 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2084 if (TYPE_OVERFLOW_UNDEFINED (type
))
2086 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2090 // Multiply the ranges when overflow wraps. This is basically fancy
2091 // code so we don't drop to varying with an unsigned
2094 // This test requires 2*prec bits if both operands are signed and
2095 // 2*prec + 2 bits if either is not. Therefore, extend the values
2096 // using the sign of the result to PREC2. From here on out,
2097 // everything is just signed math no matter what the input types
2100 signop sign
= TYPE_SIGN (type
);
2101 unsigned prec
= TYPE_PRECISION (type
);
2102 widest2_int min0
= widest2_int::from (lh_lb
, sign
);
2103 widest2_int max0
= widest2_int::from (lh_ub
, sign
);
2104 widest2_int min1
= widest2_int::from (rh_lb
, sign
);
2105 widest2_int max1
= widest2_int::from (rh_ub
, sign
);
2106 widest2_int sizem1
= wi::mask
<widest2_int
> (prec
, false);
2107 widest2_int size
= sizem1
+ 1;
2109 // Canonicalize the intervals.
2110 if (sign
== UNSIGNED
)
2112 if (wi::ltu_p (size
, min0
+ max0
))
2117 if (wi::ltu_p (size
, min1
+ max1
))
2124 // Sort the 4 products so that min is in prod0 and max is in
2126 widest2_int prod0
= min0
* min1
;
2127 widest2_int prod1
= min0
* max1
;
2128 widest2_int prod2
= max0
* min1
;
2129 widest2_int prod3
= max0
* max1
;
2131 // min0min1 > max0max1
2133 std::swap (prod0
, prod3
);
2135 // min0max1 > max0min1
2137 std::swap (prod1
, prod2
);
2140 std::swap (prod0
, prod1
);
2143 std::swap (prod2
, prod3
);
2146 prod2
= prod3
- prod0
;
2147 if (wi::geu_p (prod2
, sizem1
))
2149 // Multiplying by X, where X is a power of 2 is [0,0][X,+INF].
2150 if (TYPE_UNSIGNED (type
) && rh_lb
== rh_ub
2151 && wi::exact_log2 (rh_lb
) != -1 && prec
> 1)
2153 r
.set (type
, rh_lb
, wi::max_value (prec
, sign
));
2155 zero
.set_zero (type
);
2159 // The range covers all values.
2160 r
.set_varying (type
);
2164 wide_int new_lb
= wide_int::from (prod0
, prec
, sign
);
2165 wide_int new_ub
= wide_int::from (prod3
, prec
, sign
);
2166 create_possibly_reversed_range (r
, type
, new_lb
, new_ub
);
2170 class operator_widen_mult_signed
: public range_operator
2173 virtual void wi_fold (irange
&r
, tree type
,
2174 const wide_int
&lh_lb
,
2175 const wide_int
&lh_ub
,
2176 const wide_int
&rh_lb
,
2177 const wide_int
&rh_ub
)
2179 } op_widen_mult_signed
;
2182 operator_widen_mult_signed::wi_fold (irange
&r
, tree type
,
2183 const wide_int
&lh_lb
,
2184 const wide_int
&lh_ub
,
2185 const wide_int
&rh_lb
,
2186 const wide_int
&rh_ub
) const
2188 signop s
= TYPE_SIGN (type
);
2190 wide_int lh_wlb
= wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, SIGNED
);
2191 wide_int lh_wub
= wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, SIGNED
);
2192 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
2193 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
2195 /* We don't expect a widening multiplication to be able to overflow but range
2196 calculations for multiplications are complicated. After widening the
2197 operands lets call the base class. */
2198 return op_mult
.wi_fold (r
, type
, lh_wlb
, lh_wub
, rh_wlb
, rh_wub
);
2202 class operator_widen_mult_unsigned
: public range_operator
2205 virtual void wi_fold (irange
&r
, tree type
,
2206 const wide_int
&lh_lb
,
2207 const wide_int
&lh_ub
,
2208 const wide_int
&rh_lb
,
2209 const wide_int
&rh_ub
)
2211 } op_widen_mult_unsigned
;
2214 operator_widen_mult_unsigned::wi_fold (irange
&r
, tree type
,
2215 const wide_int
&lh_lb
,
2216 const wide_int
&lh_ub
,
2217 const wide_int
&rh_lb
,
2218 const wide_int
&rh_ub
) const
2220 signop s
= TYPE_SIGN (type
);
2222 wide_int lh_wlb
= wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, UNSIGNED
);
2223 wide_int lh_wub
= wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, UNSIGNED
);
2224 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
2225 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
2227 /* We don't expect a widening multiplication to be able to overflow but range
2228 calculations for multiplications are complicated. After widening the
2229 operands lets call the base class. */
2230 return op_mult
.wi_fold (r
, type
, lh_wlb
, lh_wub
, rh_wlb
, rh_wub
);
2233 class operator_div
: public cross_product_operator
2236 operator_div (tree_code div_kind
) { m_code
= div_kind
; }
2237 virtual void wi_fold (irange
&r
, tree type
,
2238 const wide_int
&lh_lb
,
2239 const wide_int
&lh_ub
,
2240 const wide_int
&rh_lb
,
2241 const wide_int
&rh_ub
) const final override
;
2242 virtual bool wi_op_overflows (wide_int
&res
, tree type
,
2243 const wide_int
&, const wide_int
&)
2244 const final override
;
2245 void update_bitmask (irange
&r
, const irange
&lh
, const irange
&rh
) const
2246 { update_known_bitmask (r
, m_code
, lh
, rh
); }
2251 static operator_div
op_trunc_div (TRUNC_DIV_EXPR
);
2252 static operator_div
op_floor_div (FLOOR_DIV_EXPR
);
2253 static operator_div
op_round_div (ROUND_DIV_EXPR
);
2254 static operator_div
op_ceil_div (CEIL_DIV_EXPR
);
2257 operator_div::wi_op_overflows (wide_int
&res
, tree type
,
2258 const wide_int
&w0
, const wide_int
&w1
) const
2263 wi::overflow_type overflow
= wi::OVF_NONE
;
2264 signop sign
= TYPE_SIGN (type
);
2268 case EXACT_DIV_EXPR
:
2269 case TRUNC_DIV_EXPR
:
2270 res
= wi::div_trunc (w0
, w1
, sign
, &overflow
);
2272 case FLOOR_DIV_EXPR
:
2273 res
= wi::div_floor (w0
, w1
, sign
, &overflow
);
2275 case ROUND_DIV_EXPR
:
2276 res
= wi::div_round (w0
, w1
, sign
, &overflow
);
2279 res
= wi::div_ceil (w0
, w1
, sign
, &overflow
);
2285 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
2287 // For division, the only case is -INF / -1 = +INF.
2288 res
= wi::max_value (w0
.get_precision (), sign
);
2295 operator_div::wi_fold (irange
&r
, tree type
,
2296 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2297 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2299 const wide_int dividend_min
= lh_lb
;
2300 const wide_int dividend_max
= lh_ub
;
2301 const wide_int divisor_min
= rh_lb
;
2302 const wide_int divisor_max
= rh_ub
;
2303 signop sign
= TYPE_SIGN (type
);
2304 unsigned prec
= TYPE_PRECISION (type
);
2305 wide_int extra_min
, extra_max
;
2307 // If we know we won't divide by zero, just do the division.
2308 if (!wi_includes_zero_p (type
, divisor_min
, divisor_max
))
2310 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
2311 divisor_min
, divisor_max
);
2315 // If we're definitely dividing by zero, there's nothing to do.
2316 if (wi_zero_p (type
, divisor_min
, divisor_max
))
2322 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2323 // skip any division by zero.
2325 // First divide by the negative numbers, if any.
2326 if (wi::neg_p (divisor_min
, sign
))
2327 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
2328 divisor_min
, wi::minus_one (prec
));
2332 // Then divide by the non-zero positive numbers, if any.
2333 if (wi::gt_p (divisor_max
, wi::zero (prec
), sign
))
2336 wi_cross_product (tmp
, type
, dividend_min
, dividend_max
,
2337 wi::one (prec
), divisor_max
);
2340 // We shouldn't still have undefined here.
2341 gcc_checking_assert (!r
.undefined_p ());
2345 class operator_exact_divide
: public operator_div
2347 using range_operator::op1_range
;
2349 operator_exact_divide () : operator_div (EXACT_DIV_EXPR
) { }
2350 virtual bool op1_range (irange
&r
, tree type
,
2353 relation_trio
) const;
2358 operator_exact_divide::op1_range (irange
&r
, tree type
,
2361 relation_trio
) const
2363 if (lhs
.undefined_p ())
2366 // [2, 4] = op1 / [3,3] since its exact divide, no need to worry about
2367 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2368 // We wont bother trying to enumerate all the in between stuff :-P
2369 // TRUE accuracy is [6,6][9,9][12,12]. This is unlikely to matter most of
2370 // the time however.
2371 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
2372 if (op2
.singleton_p (offset
) && offset
!= 0)
2373 return range_op_handler (MULT_EXPR
).fold_range (r
, type
, lhs
, op2
);
2378 class operator_lshift
: public cross_product_operator
2380 using range_operator::fold_range
;
2381 using range_operator::op1_range
;
2383 virtual bool op1_range (irange
&r
, tree type
,
2386 relation_trio rel
= TRIO_VARYING
) const;
2387 virtual bool fold_range (irange
&r
, tree type
,
2390 relation_trio rel
= TRIO_VARYING
) const;
2392 virtual void wi_fold (irange
&r
, tree type
,
2393 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2394 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
2395 virtual bool wi_op_overflows (wide_int
&res
,
2398 const wide_int
&) const;
2399 void update_bitmask (irange
&r
, const irange
&lh
,
2400 const irange
&rh
) const final override
2401 { update_known_bitmask (r
, LSHIFT_EXPR
, lh
, rh
); }
2404 class operator_rshift
: public cross_product_operator
2406 using range_operator::fold_range
;
2407 using range_operator::op1_range
;
2408 using range_operator::lhs_op1_relation
;
2410 virtual bool fold_range (irange
&r
, tree type
,
2413 relation_trio rel
= TRIO_VARYING
) const;
2414 virtual void wi_fold (irange
&r
, tree type
,
2415 const wide_int
&lh_lb
,
2416 const wide_int
&lh_ub
,
2417 const wide_int
&rh_lb
,
2418 const wide_int
&rh_ub
) const;
2419 virtual bool wi_op_overflows (wide_int
&res
,
2422 const wide_int
&w1
) const;
2423 virtual bool op1_range (irange
&, tree type
,
2426 relation_trio rel
= TRIO_VARYING
) const;
2427 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
2430 relation_kind rel
) const;
2431 void update_bitmask (irange
&r
, const irange
&lh
,
2432 const irange
&rh
) const final override
2433 { update_known_bitmask (r
, RSHIFT_EXPR
, lh
, rh
); }
2438 operator_rshift::lhs_op1_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
2441 relation_kind
) const
2443 // If both operands range are >= 0, then the LHS <= op1.
2444 if (!op1
.undefined_p () && !op2
.undefined_p ()
2445 && wi::ge_p (op1
.lower_bound (), 0, TYPE_SIGN (op1
.type ()))
2446 && wi::ge_p (op2
.lower_bound (), 0, TYPE_SIGN (op2
.type ())))
2448 return VREL_VARYING
;
2452 operator_lshift::fold_range (irange
&r
, tree type
,
2455 relation_trio rel
) const
2457 int_range_max shift_range
;
2458 if (!get_shift_range (shift_range
, type
, op2
))
2460 if (op2
.undefined_p ())
2467 // Transform left shifts by constants into multiplies.
2468 if (shift_range
.singleton_p ())
2470 unsigned shift
= shift_range
.lower_bound ().to_uhwi ();
2471 wide_int tmp
= wi::set_bit_in_zero (shift
, TYPE_PRECISION (type
));
2472 int_range
<1> mult (type
, tmp
, tmp
);
2474 // Force wrapping multiplication.
2475 bool saved_flag_wrapv
= flag_wrapv
;
2476 bool saved_flag_wrapv_pointer
= flag_wrapv_pointer
;
2478 flag_wrapv_pointer
= 1;
2479 bool b
= op_mult
.fold_range (r
, type
, op1
, mult
);
2480 flag_wrapv
= saved_flag_wrapv
;
2481 flag_wrapv_pointer
= saved_flag_wrapv_pointer
;
2485 // Otherwise, invoke the generic fold routine.
2486 return range_operator::fold_range (r
, type
, op1
, shift_range
, rel
);
2490 operator_lshift::wi_fold (irange
&r
, tree type
,
2491 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2492 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2494 signop sign
= TYPE_SIGN (type
);
2495 unsigned prec
= TYPE_PRECISION (type
);
2496 int overflow_pos
= sign
== SIGNED
? prec
- 1 : prec
;
2497 int bound_shift
= overflow_pos
- rh_ub
.to_shwi ();
2498 // If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2499 // overflow. However, for that to happen, rh.max needs to be zero,
2500 // which means rh is a singleton range of zero, which means we simply return
2501 // [lh_lb, lh_ub] as the range.
2502 if (wi::eq_p (rh_ub
, rh_lb
) && wi::eq_p (rh_ub
, 0))
2504 r
= int_range
<2> (type
, lh_lb
, lh_ub
);
2508 wide_int bound
= wi::set_bit_in_zero (bound_shift
, prec
);
2509 wide_int complement
= ~(bound
- 1);
2510 wide_int low_bound
, high_bound
;
2511 bool in_bounds
= false;
2513 if (sign
== UNSIGNED
)
2516 high_bound
= complement
;
2517 if (wi::ltu_p (lh_ub
, low_bound
))
2519 // [5, 6] << [1, 2] == [10, 24].
2520 // We're shifting out only zeroes, the value increases
2524 else if (wi::ltu_p (high_bound
, lh_lb
))
2526 // [0xffffff00, 0xffffffff] << [1, 2]
2527 // == [0xfffffc00, 0xfffffffe].
2528 // We're shifting out only ones, the value decreases
2535 // [-1, 1] << [1, 2] == [-4, 4]
2536 low_bound
= complement
;
2538 if (wi::lts_p (lh_ub
, high_bound
)
2539 && wi::lts_p (low_bound
, lh_lb
))
2541 // For non-negative numbers, we're shifting out only zeroes,
2542 // the value increases monotonically. For negative numbers,
2543 // we're shifting out only ones, the value decreases
2550 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2552 r
.set_varying (type
);
2556 operator_lshift::wi_op_overflows (wide_int
&res
, tree type
,
2557 const wide_int
&w0
, const wide_int
&w1
) const
2559 signop sign
= TYPE_SIGN (type
);
2562 // It's unclear from the C standard whether shifts can overflow.
2563 // The following code ignores overflow; perhaps a C standard
2564 // interpretation ruling is needed.
2565 res
= wi::rshift (w0
, -w1
, sign
);
2568 res
= wi::lshift (w0
, w1
);
2573 operator_lshift::op1_range (irange
&r
,
2577 relation_trio
) const
2579 if (lhs
.undefined_p ())
2582 if (!contains_zero_p (lhs
))
2583 r
.set_nonzero (type
);
2585 r
.set_varying (type
);
2588 if (op2
.singleton_p (shift
))
2590 if (wi::lt_p (shift
, 0, SIGNED
))
2592 if (wi::ge_p (shift
, wi::uhwi (TYPE_PRECISION (type
),
2593 TYPE_PRECISION (op2
.type ())),
2602 // Work completely in unsigned mode to start.
2604 int_range_max tmp_range
;
2605 if (TYPE_SIGN (type
) == SIGNED
)
2607 int_range_max tmp
= lhs
;
2608 utype
= unsigned_type_for (type
);
2609 range_cast (tmp
, utype
);
2610 op_rshift
.fold_range (tmp_range
, utype
, tmp
, op2
);
2613 op_rshift
.fold_range (tmp_range
, utype
, lhs
, op2
);
2615 // Start with ranges which can produce the LHS by right shifting the
2616 // result by the shift amount.
2617 // ie [0x08, 0xF0] = op1 << 2 will start with
2618 // [00001000, 11110000] = op1 << 2
2619 // [0x02, 0x4C] aka [00000010, 00111100]
2621 // Then create a range from the LB with the least significant upper bit
2622 // set, to the upper bound with all the bits set.
2623 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2625 // Ideally we do this for each subrange, but just lump them all for now.
2626 unsigned low_bits
= TYPE_PRECISION (utype
) - shift
.to_uhwi ();
2627 wide_int up_mask
= wi::mask (low_bits
, true, TYPE_PRECISION (utype
));
2628 wide_int new_ub
= wi::bit_or (up_mask
, tmp_range
.upper_bound ());
2629 wide_int new_lb
= wi::set_bit (tmp_range
.lower_bound (), low_bits
);
2630 int_range
<2> fill_range (utype
, new_lb
, new_ub
);
2631 tmp_range
.union_ (fill_range
);
2634 range_cast (tmp_range
, type
);
2636 r
.intersect (tmp_range
);
2640 return !r
.varying_p ();
2644 operator_rshift::op1_range (irange
&r
,
2648 relation_trio
) const
2650 if (lhs
.undefined_p ())
2653 if (op2
.singleton_p (shift
))
2655 // Ignore nonsensical shifts.
2656 unsigned prec
= TYPE_PRECISION (type
);
2657 if (wi::ge_p (shift
,
2658 wi::uhwi (prec
, TYPE_PRECISION (op2
.type ())),
2667 // Folding the original operation may discard some impossible
2668 // ranges from the LHS.
2669 int_range_max lhs_refined
;
2670 op_rshift
.fold_range (lhs_refined
, type
, int_range
<1> (type
), op2
);
2671 lhs_refined
.intersect (lhs
);
2672 if (lhs_refined
.undefined_p ())
2677 int_range_max
shift_range (op2
.type (), shift
, shift
);
2678 int_range_max lb
, ub
;
2679 op_lshift
.fold_range (lb
, type
, lhs_refined
, shift_range
);
2681 // 0000 0111 = OP1 >> 3
2683 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2684 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2685 // right hand side (0x07).
2686 wide_int mask
= wi::bit_not (wi::lshift (wi::minus_one (prec
), shift
));
2687 int_range_max
mask_range (type
,
2688 wi::zero (TYPE_PRECISION (type
)),
2690 op_plus
.fold_range (ub
, type
, lb
, mask_range
);
2693 if (!contains_zero_p (lhs_refined
))
2695 mask_range
.invert ();
2696 r
.intersect (mask_range
);
2704 operator_rshift::wi_op_overflows (wide_int
&res
,
2707 const wide_int
&w1
) const
2709 signop sign
= TYPE_SIGN (type
);
2711 res
= wi::lshift (w0
, -w1
);
2714 // It's unclear from the C standard whether shifts can overflow.
2715 // The following code ignores overflow; perhaps a C standard
2716 // interpretation ruling is needed.
2717 res
= wi::rshift (w0
, w1
, sign
);
2723 operator_rshift::fold_range (irange
&r
, tree type
,
2726 relation_trio rel
) const
2728 int_range_max shift
;
2729 if (!get_shift_range (shift
, type
, op2
))
2731 if (op2
.undefined_p ())
2738 return range_operator::fold_range (r
, type
, op1
, shift
, rel
);
2742 operator_rshift::wi_fold (irange
&r
, tree type
,
2743 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2744 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2746 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2750 // Add a partial equivalence between the LHS and op1 for casts.
2753 operator_cast::lhs_op1_relation (const irange
&lhs
,
2755 const irange
&op2 ATTRIBUTE_UNUSED
,
2756 relation_kind
) const
2758 if (lhs
.undefined_p () || op1
.undefined_p ())
2759 return VREL_VARYING
;
2760 unsigned lhs_prec
= TYPE_PRECISION (lhs
.type ());
2761 unsigned op1_prec
= TYPE_PRECISION (op1
.type ());
2762 // If the result gets sign extended into a larger type check first if this
2763 // qualifies as a partial equivalence.
2764 if (TYPE_SIGN (op1
.type ()) == SIGNED
&& lhs_prec
> op1_prec
)
2766 // If the result is sign extended, and the LHS is larger than op1,
2767 // check if op1's range can be negative as the sign extension will
2768 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2769 int_range
<3> negs
= range_negatives (op1
.type ());
2770 negs
.intersect (op1
);
2771 if (!negs
.undefined_p ())
2772 return VREL_VARYING
;
2775 unsigned prec
= MIN (lhs_prec
, op1_prec
);
2776 return bits_to_pe (prec
);
2779 // Return TRUE if casting from INNER to OUTER is a truncating cast.
2782 operator_cast::truncating_cast_p (const irange
&inner
,
2783 const irange
&outer
) const
2785 return TYPE_PRECISION (outer
.type ()) < TYPE_PRECISION (inner
.type ());
2788 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
2791 operator_cast::inside_domain_p (const wide_int
&min
,
2792 const wide_int
&max
,
2793 const irange
&range
) const
2795 wide_int domain_min
= irange_val_min (range
.type ());
2796 wide_int domain_max
= irange_val_max (range
.type ());
2797 signop domain_sign
= TYPE_SIGN (range
.type ());
2798 return (wi::le_p (min
, domain_max
, domain_sign
)
2799 && wi::le_p (max
, domain_max
, domain_sign
)
2800 && wi::ge_p (min
, domain_min
, domain_sign
)
2801 && wi::ge_p (max
, domain_min
, domain_sign
));
2805 // Helper for fold_range which work on a pair at a time.
2808 operator_cast::fold_pair (irange
&r
, unsigned index
,
2809 const irange
&inner
,
2810 const irange
&outer
) const
2812 tree inner_type
= inner
.type ();
2813 tree outer_type
= outer
.type ();
2814 signop inner_sign
= TYPE_SIGN (inner_type
);
2815 unsigned outer_prec
= TYPE_PRECISION (outer_type
);
2817 // check to see if casting from INNER to OUTER is a conversion that
2818 // fits in the resulting OUTER type.
2819 wide_int inner_lb
= inner
.lower_bound (index
);
2820 wide_int inner_ub
= inner
.upper_bound (index
);
2821 if (truncating_cast_p (inner
, outer
))
2823 // We may be able to accommodate a truncating cast if the
2824 // resulting range can be represented in the target type...
2825 if (wi::rshift (wi::sub (inner_ub
, inner_lb
),
2826 wi::uhwi (outer_prec
, TYPE_PRECISION (inner
.type ())),
2829 r
.set_varying (outer_type
);
2833 // ...but we must still verify that the final range fits in the
2834 // domain. This catches -fstrict-enum restrictions where the domain
2835 // range is smaller than what fits in the underlying type.
2836 wide_int min
= wide_int::from (inner_lb
, outer_prec
, inner_sign
);
2837 wide_int max
= wide_int::from (inner_ub
, outer_prec
, inner_sign
);
2838 if (inside_domain_p (min
, max
, outer
))
2839 create_possibly_reversed_range (r
, outer_type
, min
, max
);
2841 r
.set_varying (outer_type
);
2846 operator_cast::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
2847 const irange
&inner
,
2848 const irange
&outer
,
2849 relation_trio
) const
2851 if (empty_range_varying (r
, type
, inner
, outer
))
2854 gcc_checking_assert (outer
.varying_p ());
2855 gcc_checking_assert (inner
.num_pairs () > 0);
2857 // Avoid a temporary by folding the first pair directly into the result.
2858 fold_pair (r
, 0, inner
, outer
);
2860 // Then process any additional pairs by unioning with their results.
2861 for (unsigned x
= 1; x
< inner
.num_pairs (); ++x
)
2864 fold_pair (tmp
, x
, inner
, outer
);
2870 // Update the bitmask. Truncating casts are problematic unless
2871 // the conversion fits in the resulting outer type.
2872 irange_bitmask bm
= inner
.get_bitmask ();
2873 if (truncating_cast_p (inner
, outer
)
2874 && wi::rshift (bm
.mask (),
2875 wi::uhwi (TYPE_PRECISION (outer
.type ()),
2876 TYPE_PRECISION (inner
.type ())),
2877 TYPE_SIGN (inner
.type ())) != 0)
2879 unsigned prec
= TYPE_PRECISION (type
);
2880 signop sign
= TYPE_SIGN (inner
.type ());
2881 bm
= irange_bitmask (wide_int::from (bm
.value (), prec
, sign
),
2882 wide_int::from (bm
.mask (), prec
, sign
));
2883 r
.update_bitmask (bm
);
2889 operator_cast::op1_range (irange
&r
, tree type
,
2892 relation_trio
) const
2894 if (lhs
.undefined_p ())
2896 tree lhs_type
= lhs
.type ();
2897 gcc_checking_assert (types_compatible_p (op2
.type(), type
));
2899 // If we are calculating a pointer, shortcut to what we really care about.
2900 if (POINTER_TYPE_P (type
))
2902 // Conversion from other pointers or a constant (including 0/NULL)
2903 // are straightforward.
2904 if (POINTER_TYPE_P (lhs
.type ())
2905 || (lhs
.singleton_p ()
2906 && TYPE_PRECISION (lhs
.type ()) >= TYPE_PRECISION (type
)))
2909 range_cast (r
, type
);
2913 // If the LHS is not a pointer nor a singleton, then it is
2914 // either VARYING or non-zero.
2915 if (!contains_zero_p (lhs
))
2916 r
.set_nonzero (type
);
2918 r
.set_varying (type
);
2924 if (truncating_cast_p (op2
, lhs
))
2926 if (lhs
.varying_p ())
2927 r
.set_varying (type
);
2930 // We want to insert the LHS as an unsigned value since it
2931 // would not trigger the signed bit of the larger type.
2932 int_range_max converted_lhs
= lhs
;
2933 range_cast (converted_lhs
, unsigned_type_for (lhs_type
));
2934 range_cast (converted_lhs
, type
);
2935 // Start by building the positive signed outer range for the type.
2936 wide_int lim
= wi::set_bit_in_zero (TYPE_PRECISION (lhs_type
),
2937 TYPE_PRECISION (type
));
2938 create_possibly_reversed_range (r
, type
, lim
,
2939 wi::max_value (TYPE_PRECISION (type
),
2941 // For the signed part, we need to simply union the 2 ranges now.
2942 r
.union_ (converted_lhs
);
2944 // Create maximal negative number outside of LHS bits.
2945 lim
= wi::mask (TYPE_PRECISION (lhs_type
), true,
2946 TYPE_PRECISION (type
));
2947 // Add this to the unsigned LHS range(s).
2948 int_range_max
lim_range (type
, lim
, lim
);
2949 int_range_max lhs_neg
;
2950 range_op_handler (PLUS_EXPR
).fold_range (lhs_neg
, type
,
2951 converted_lhs
, lim_range
);
2952 // lhs_neg now has all the negative versions of the LHS.
2953 // Now union in all the values from SIGNED MIN (0x80000) to
2954 // lim-1 in order to fill in all the ranges with the upper
2957 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
2958 // we don't need to create a range from min to lim-1
2959 // calculate neg range traps trying to create [lim, lim - 1].
2960 wide_int min_val
= wi::min_value (TYPE_PRECISION (type
), SIGNED
);
2963 int_range_max
neg (type
,
2964 wi::min_value (TYPE_PRECISION (type
),
2967 lhs_neg
.union_ (neg
);
2969 // And finally, munge the signed and unsigned portions.
2972 // And intersect with any known value passed in the extra operand.
2978 if (TYPE_PRECISION (lhs_type
) == TYPE_PRECISION (type
))
2982 // The cast is not truncating, and the range is restricted to
2983 // the range of the RHS by this assignment.
2985 // Cast the range of the RHS to the type of the LHS.
2986 fold_range (tmp
, lhs_type
, int_range
<1> (type
), int_range
<1> (lhs_type
));
2987 // Intersect this with the LHS range will produce the range,
2988 // which will be cast to the RHS type before returning.
2989 tmp
.intersect (lhs
);
2992 // Cast the calculated range to the type of the RHS.
2993 fold_range (r
, type
, tmp
, int_range
<1> (type
));
2998 class operator_logical_and
: public range_operator
3000 using range_operator::fold_range
;
3001 using range_operator::op1_range
;
3002 using range_operator::op2_range
;
3004 virtual bool fold_range (irange
&r
, tree type
,
3007 relation_trio rel
= TRIO_VARYING
) const;
3008 virtual bool op1_range (irange
&r
, tree type
,
3011 relation_trio rel
= TRIO_VARYING
) const;
3012 virtual bool op2_range (irange
&r
, tree type
,
3015 relation_trio rel
= TRIO_VARYING
) const;
3020 operator_logical_and::fold_range (irange
&r
, tree type
,
3023 relation_trio
) const
3025 if (empty_range_varying (r
, type
, lh
, rh
))
3028 // 0 && anything is 0.
3029 if ((wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (lh
.upper_bound (), 0))
3030 || (wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (rh
.upper_bound (), 0)))
3031 r
= range_false (type
);
3032 else if (contains_zero_p (lh
) || contains_zero_p (rh
))
3033 // To reach this point, there must be a logical 1 on each side, and
3034 // the only remaining question is whether there is a zero or not.
3035 r
= range_true_and_false (type
);
3037 r
= range_true (type
);
3042 operator_logical_and::op1_range (irange
&r
, tree type
,
3044 const irange
&op2 ATTRIBUTE_UNUSED
,
3045 relation_trio
) const
3047 switch (get_bool_state (r
, lhs
, type
))
3050 // A true result means both sides of the AND must be true.
3051 r
= range_true (type
);
3054 // Any other result means only one side has to be false, the
3055 // other side can be anything. So we cannot be sure of any
3057 r
= range_true_and_false (type
);
3064 operator_logical_and::op2_range (irange
&r
, tree type
,
3067 relation_trio
) const
3069 return operator_logical_and::op1_range (r
, type
, lhs
, op1
);
3074 operator_bitwise_and::update_bitmask (irange
&r
, const irange
&lh
,
3075 const irange
&rh
) const
3077 update_known_bitmask (r
, BIT_AND_EXPR
, lh
, rh
);
3080 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
3081 // by considering the number of leading redundant sign bit copies.
3082 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
3083 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
3085 wi_optimize_signed_bitwise_op (irange
&r
, tree type
,
3086 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3087 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
3089 int lh_clrsb
= MIN (wi::clrsb (lh_lb
), wi::clrsb (lh_ub
));
3090 int rh_clrsb
= MIN (wi::clrsb (rh_lb
), wi::clrsb (rh_ub
));
3091 int new_clrsb
= MIN (lh_clrsb
, rh_clrsb
);
3094 int type_prec
= TYPE_PRECISION (type
);
3095 int rprec
= (type_prec
- new_clrsb
) - 1;
3096 value_range_with_overflow (r
, type
,
3097 wi::mask (rprec
, true, type_prec
),
3098 wi::mask (rprec
, false, type_prec
));
3102 // An AND of 8,16, 32 or 64 bits can produce a partial equivalence between
3106 operator_bitwise_and::lhs_op1_relation (const irange
&lhs
,
3109 relation_kind
) const
3111 if (lhs
.undefined_p () || op1
.undefined_p () || op2
.undefined_p ())
3112 return VREL_VARYING
;
3113 if (!op2
.singleton_p ())
3114 return VREL_VARYING
;
3115 // if val == 0xff or 0xFFFF OR 0Xffffffff OR 0Xffffffffffffffff, return TRUE
3116 int prec1
= TYPE_PRECISION (op1
.type ());
3117 int prec2
= TYPE_PRECISION (op2
.type ());
3119 wide_int mask
= op2
.lower_bound ();
3120 if (wi::eq_p (mask
, wi::mask (8, false, prec2
)))
3122 else if (wi::eq_p (mask
, wi::mask (16, false, prec2
)))
3124 else if (wi::eq_p (mask
, wi::mask (32, false, prec2
)))
3126 else if (wi::eq_p (mask
, wi::mask (64, false, prec2
)))
3128 return bits_to_pe (MIN (prec1
, mask_prec
));
3131 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
3132 // possible. Basically, see if we can optimize:
3136 // [LB op Z, UB op Z]
3138 // If the optimization was successful, accumulate the range in R and
3142 wi_optimize_and_or (irange
&r
,
3143 enum tree_code code
,
3145 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3146 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
3148 // Calculate the singleton mask among the ranges, if any.
3149 wide_int lower_bound
, upper_bound
, mask
;
3150 if (wi::eq_p (rh_lb
, rh_ub
))
3153 lower_bound
= lh_lb
;
3154 upper_bound
= lh_ub
;
3156 else if (wi::eq_p (lh_lb
, lh_ub
))
3159 lower_bound
= rh_lb
;
3160 upper_bound
= rh_ub
;
3165 // If Z is a constant which (for op | its bitwise not) has n
3166 // consecutive least significant bits cleared followed by m 1
3167 // consecutive bits set immediately above it and either
3168 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
3170 // The least significant n bits of all the values in the range are
3171 // cleared or set, the m bits above it are preserved and any bits
3172 // above these are required to be the same for all values in the
3176 if (code
== BIT_IOR_EXPR
)
3178 if (wi::eq_p (w
, 0))
3179 n
= w
.get_precision ();
3183 w
= ~(w
| wi::mask (n
, false, w
.get_precision ()));
3184 if (wi::eq_p (w
, 0))
3185 m
= w
.get_precision () - n
;
3187 m
= wi::ctz (w
) - n
;
3189 wide_int new_mask
= wi::mask (m
+ n
, true, w
.get_precision ());
3190 if ((new_mask
& lower_bound
) != (new_mask
& upper_bound
))
3193 wide_int res_lb
, res_ub
;
3194 if (code
== BIT_AND_EXPR
)
3196 res_lb
= wi::bit_and (lower_bound
, mask
);
3197 res_ub
= wi::bit_and (upper_bound
, mask
);
3199 else if (code
== BIT_IOR_EXPR
)
3201 res_lb
= wi::bit_or (lower_bound
, mask
);
3202 res_ub
= wi::bit_or (upper_bound
, mask
);
3206 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
3208 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
3209 if (code
== BIT_IOR_EXPR
&& wi::ne_p (mask
, 0))
3212 tmp
.set_nonzero (type
);
3218 // For range [LB, UB] compute two wide_int bit masks.
3220 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3221 // for all numbers in the range the bit is 0, otherwise it might be 0
3224 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3225 // for all numbers in the range the bit is 1, otherwise it might be 0
3229 wi_set_zero_nonzero_bits (tree type
,
3230 const wide_int
&lb
, const wide_int
&ub
,
3231 wide_int
&maybe_nonzero
,
3232 wide_int
&mustbe_nonzero
)
3234 signop sign
= TYPE_SIGN (type
);
3236 if (wi::eq_p (lb
, ub
))
3237 maybe_nonzero
= mustbe_nonzero
= lb
;
3238 else if (wi::ge_p (lb
, 0, sign
) || wi::lt_p (ub
, 0, sign
))
3240 wide_int xor_mask
= lb
^ ub
;
3241 maybe_nonzero
= lb
| ub
;
3242 mustbe_nonzero
= lb
& ub
;
3245 wide_int mask
= wi::mask (wi::floor_log2 (xor_mask
), false,
3246 maybe_nonzero
.get_precision ());
3247 maybe_nonzero
= maybe_nonzero
| mask
;
3248 mustbe_nonzero
= wi::bit_and_not (mustbe_nonzero
, mask
);
3253 maybe_nonzero
= wi::minus_one (lb
.get_precision ());
3254 mustbe_nonzero
= wi::zero (lb
.get_precision ());
3259 operator_bitwise_and::wi_fold (irange
&r
, tree type
,
3260 const wide_int
&lh_lb
,
3261 const wide_int
&lh_ub
,
3262 const wide_int
&rh_lb
,
3263 const wide_int
&rh_ub
) const
3265 if (wi_optimize_and_or (r
, BIT_AND_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3268 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3269 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3270 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3271 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3272 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3273 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3275 wide_int new_lb
= mustbe_nonzero_lh
& mustbe_nonzero_rh
;
3276 wide_int new_ub
= maybe_nonzero_lh
& maybe_nonzero_rh
;
3277 signop sign
= TYPE_SIGN (type
);
3278 unsigned prec
= TYPE_PRECISION (type
);
3279 // If both input ranges contain only negative values, we can
3280 // truncate the result range maximum to the minimum of the
3281 // input range maxima.
3282 if (wi::lt_p (lh_ub
, 0, sign
) && wi::lt_p (rh_ub
, 0, sign
))
3284 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3285 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3287 // If either input range contains only non-negative values
3288 // we can truncate the result range maximum to the respective
3289 // maximum of the input range.
3290 if (wi::ge_p (lh_lb
, 0, sign
))
3291 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3292 if (wi::ge_p (rh_lb
, 0, sign
))
3293 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3294 // PR68217: In case of signed & sign-bit-CST should
3295 // result in [-INF, 0] instead of [-INF, INF].
3296 if (wi::gt_p (new_lb
, new_ub
, sign
))
3298 wide_int sign_bit
= wi::set_bit_in_zero (prec
- 1, prec
);
3300 && ((wi::eq_p (lh_lb
, lh_ub
)
3301 && !wi::cmps (lh_lb
, sign_bit
))
3302 || (wi::eq_p (rh_lb
, rh_ub
)
3303 && !wi::cmps (rh_lb
, sign_bit
))))
3305 new_lb
= wi::min_value (prec
, sign
);
3306 new_ub
= wi::zero (prec
);
3309 // If the limits got swapped around, return varying.
3310 if (wi::gt_p (new_lb
, new_ub
,sign
))
3313 && wi_optimize_signed_bitwise_op (r
, type
,
3317 r
.set_varying (type
);
3320 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3324 set_nonzero_range_from_mask (irange
&r
, tree type
, const irange
&lhs
)
3326 if (!contains_zero_p (lhs
))
3327 r
= range_nonzero (type
);
3329 r
.set_varying (type
);
3332 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
3333 (otherwise return VAL). VAL and MASK must be zero-extended for
3334 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3335 (to transform signed values into unsigned) and at the end xor
3339 masked_increment (const wide_int
&val_in
, const wide_int
&mask
,
3340 const wide_int
&sgnbit
, unsigned int prec
)
3342 wide_int bit
= wi::one (prec
), res
;
3345 wide_int val
= val_in
^ sgnbit
;
3346 for (i
= 0; i
< prec
; i
++, bit
+= bit
)
3349 if ((res
& bit
) == 0)
3352 res
= wi::bit_and_not (val
+ bit
, res
);
3354 if (wi::gtu_p (res
, val
))
3355 return res
^ sgnbit
;
3357 return val
^ sgnbit
;
3360 // This was shamelessly stolen from register_edge_assert_for_2 and
3361 // adjusted to work with iranges.
3364 operator_bitwise_and::simple_op1_range_solver (irange
&r
, tree type
,
3366 const irange
&op2
) const
3368 if (!op2
.singleton_p ())
3370 set_nonzero_range_from_mask (r
, type
, lhs
);
3373 unsigned int nprec
= TYPE_PRECISION (type
);
3374 wide_int cst2v
= op2
.lower_bound ();
3375 bool cst2n
= wi::neg_p (cst2v
, TYPE_SIGN (type
));
3378 sgnbit
= wi::set_bit_in_zero (nprec
- 1, nprec
);
3380 sgnbit
= wi::zero (nprec
);
3382 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3384 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3385 // maximum unsigned value is ~0. For signed comparison, if CST2
3386 // doesn't have the most significant bit set, handle it similarly. If
3387 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
3388 wide_int valv
= lhs
.lower_bound ();
3389 wide_int minv
= valv
& cst2v
, maxv
;
3390 bool we_know_nothing
= false;
3393 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3394 minv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3397 // If we can't determine anything on this bound, fall
3398 // through and conservatively solve for the other end point.
3399 we_know_nothing
= true;
3402 maxv
= wi::mask (nprec
- (cst2n
? 1 : 0), false, nprec
);
3403 if (we_know_nothing
)
3404 r
.set_varying (type
);
3406 create_possibly_reversed_range (r
, type
, minv
, maxv
);
3408 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3410 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3411 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3413 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3415 // For signed comparison, if CST2 doesn't have most significant bit
3416 // set, handle it similarly. If CST2 has MSB set, the maximum is
3417 // the same and minimum is INT_MIN.
3418 valv
= lhs
.upper_bound ();
3419 minv
= valv
& cst2v
;
3424 maxv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3427 // If we couldn't determine anything on either bound, return
3429 if (we_know_nothing
)
3437 int_range
<2> upper_bits
;
3438 create_possibly_reversed_range (upper_bits
, type
, minv
, maxv
);
3439 r
.intersect (upper_bits
);
3443 operator_bitwise_and::op1_range (irange
&r
, tree type
,
3446 relation_trio
) const
3448 if (lhs
.undefined_p ())
3450 if (types_compatible_p (type
, boolean_type_node
))
3451 return op_logical_and
.op1_range (r
, type
, lhs
, op2
);
3454 for (unsigned i
= 0; i
< lhs
.num_pairs (); ++i
)
3456 int_range_max
chunk (lhs
.type (),
3457 lhs
.lower_bound (i
),
3458 lhs
.upper_bound (i
));
3460 simple_op1_range_solver (res
, type
, chunk
, op2
);
3463 if (r
.undefined_p ())
3464 set_nonzero_range_from_mask (r
, type
, lhs
);
3466 // For MASK == op1 & MASK, all the bits in MASK must be set in op1.
3468 if (lhs
== op2
&& lhs
.singleton_p (mask
))
3470 r
.update_bitmask (irange_bitmask (mask
, ~mask
));
3474 // For 0 = op1 & MASK, op1 is ~MASK.
3475 if (lhs
.zero_p () && op2
.singleton_p ())
3477 wide_int nz
= wi::bit_not (op2
.get_nonzero_bits ());
3478 int_range
<2> tmp (type
);
3479 tmp
.set_nonzero_bits (nz
);
3486 operator_bitwise_and::op2_range (irange
&r
, tree type
,
3489 relation_trio
) const
3491 return operator_bitwise_and::op1_range (r
, type
, lhs
, op1
);
3495 class operator_logical_or
: public range_operator
3497 using range_operator::fold_range
;
3498 using range_operator::op1_range
;
3499 using range_operator::op2_range
;
3501 virtual bool fold_range (irange
&r
, tree type
,
3504 relation_trio rel
= TRIO_VARYING
) const;
3505 virtual bool op1_range (irange
&r
, tree type
,
3508 relation_trio rel
= TRIO_VARYING
) const;
3509 virtual bool op2_range (irange
&r
, tree type
,
3512 relation_trio rel
= TRIO_VARYING
) const;
3516 operator_logical_or::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3519 relation_trio
) const
3521 if (empty_range_varying (r
, type
, lh
, rh
))
3530 operator_logical_or::op1_range (irange
&r
, tree type
,
3532 const irange
&op2 ATTRIBUTE_UNUSED
,
3533 relation_trio
) const
3535 switch (get_bool_state (r
, lhs
, type
))
3538 // A false result means both sides of the OR must be false.
3539 r
= range_false (type
);
3542 // Any other result means only one side has to be true, the
3543 // other side can be anything. so we can't be sure of any result
3545 r
= range_true_and_false (type
);
3552 operator_logical_or::op2_range (irange
&r
, tree type
,
3555 relation_trio
) const
3557 return operator_logical_or::op1_range (r
, type
, lhs
, op1
);
3562 operator_bitwise_or::update_bitmask (irange
&r
, const irange
&lh
,
3563 const irange
&rh
) const
3565 update_known_bitmask (r
, BIT_IOR_EXPR
, lh
, rh
);
3569 operator_bitwise_or::wi_fold (irange
&r
, tree type
,
3570 const wide_int
&lh_lb
,
3571 const wide_int
&lh_ub
,
3572 const wide_int
&rh_lb
,
3573 const wide_int
&rh_ub
) const
3575 if (wi_optimize_and_or (r
, BIT_IOR_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3578 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3579 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3580 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3581 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3582 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3583 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3584 wide_int new_lb
= mustbe_nonzero_lh
| mustbe_nonzero_rh
;
3585 wide_int new_ub
= maybe_nonzero_lh
| maybe_nonzero_rh
;
3586 signop sign
= TYPE_SIGN (type
);
3587 // If the input ranges contain only positive values we can
3588 // truncate the minimum of the result range to the maximum
3589 // of the input range minima.
3590 if (wi::ge_p (lh_lb
, 0, sign
)
3591 && wi::ge_p (rh_lb
, 0, sign
))
3593 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3594 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3596 // If either input range contains only negative values
3597 // we can truncate the minimum of the result range to the
3598 // respective minimum range.
3599 if (wi::lt_p (lh_ub
, 0, sign
))
3600 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3601 if (wi::lt_p (rh_ub
, 0, sign
))
3602 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3603 // If the limits got swapped around, return a conservative range.
3604 if (wi::gt_p (new_lb
, new_ub
, sign
))
3606 // Make sure that nonzero|X is nonzero.
3607 if (wi::gt_p (lh_lb
, 0, sign
)
3608 || wi::gt_p (rh_lb
, 0, sign
)
3609 || wi::lt_p (lh_ub
, 0, sign
)
3610 || wi::lt_p (rh_ub
, 0, sign
))
3611 r
.set_nonzero (type
);
3612 else if (sign
== SIGNED
3613 && wi_optimize_signed_bitwise_op (r
, type
,
3618 r
.set_varying (type
);
3621 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3625 operator_bitwise_or::op1_range (irange
&r
, tree type
,
3628 relation_trio
) const
3630 if (lhs
.undefined_p ())
3632 // If this is really a logical wi_fold, call that.
3633 if (types_compatible_p (type
, boolean_type_node
))
3634 return op_logical_or
.op1_range (r
, type
, lhs
, op2
);
3641 r
.set_varying (type
);
3646 operator_bitwise_or::op2_range (irange
&r
, tree type
,
3649 relation_trio
) const
3651 return operator_bitwise_or::op1_range (r
, type
, lhs
, op1
);
3655 operator_bitwise_xor::update_bitmask (irange
&r
, const irange
&lh
,
3656 const irange
&rh
) const
3658 update_known_bitmask (r
, BIT_XOR_EXPR
, lh
, rh
);
3662 operator_bitwise_xor::wi_fold (irange
&r
, tree type
,
3663 const wide_int
&lh_lb
,
3664 const wide_int
&lh_ub
,
3665 const wide_int
&rh_lb
,
3666 const wide_int
&rh_ub
) const
3668 signop sign
= TYPE_SIGN (type
);
3669 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3670 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3671 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3672 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3673 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3674 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3676 wide_int result_zero_bits
= ((mustbe_nonzero_lh
& mustbe_nonzero_rh
)
3677 | ~(maybe_nonzero_lh
| maybe_nonzero_rh
));
3678 wide_int result_one_bits
3679 = (wi::bit_and_not (mustbe_nonzero_lh
, maybe_nonzero_rh
)
3680 | wi::bit_and_not (mustbe_nonzero_rh
, maybe_nonzero_lh
));
3681 wide_int new_ub
= ~result_zero_bits
;
3682 wide_int new_lb
= result_one_bits
;
3684 // If the range has all positive or all negative values, the result
3685 // is better than VARYING.
3686 if (wi::lt_p (new_lb
, 0, sign
) || wi::ge_p (new_ub
, 0, sign
))
3687 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3688 else if (sign
== SIGNED
3689 && wi_optimize_signed_bitwise_op (r
, type
,
3694 r
.set_varying (type
);
3696 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
3697 if (wi::lt_p (lh_ub
, rh_lb
, sign
)
3698 || wi::lt_p (rh_ub
, lh_lb
, sign
)
3699 || wi::ne_p (result_one_bits
, 0))
3702 tmp
.set_nonzero (type
);
3708 operator_bitwise_xor::op1_op2_relation_effect (irange
&lhs_range
,
3712 relation_kind rel
) const
3714 if (rel
== VREL_VARYING
)
3717 int_range
<2> rel_range
;
3722 rel_range
.set_zero (type
);
3725 rel_range
.set_nonzero (type
);
3731 lhs_range
.intersect (rel_range
);
3736 operator_bitwise_xor::op1_range (irange
&r
, tree type
,
3739 relation_trio
) const
3741 if (lhs
.undefined_p () || lhs
.varying_p ())
3746 if (types_compatible_p (type
, boolean_type_node
))
3748 switch (get_bool_state (r
, lhs
, type
))
3751 if (op2
.varying_p ())
3752 r
.set_varying (type
);
3753 else if (op2
.zero_p ())
3754 r
= range_true (type
);
3755 // See get_bool_state for the rationale
3756 else if (contains_zero_p (op2
))
3757 r
= range_true_and_false (type
);
3759 r
= range_false (type
);
3769 r
.set_varying (type
);
3774 operator_bitwise_xor::op2_range (irange
&r
, tree type
,
3777 relation_trio
) const
3779 return operator_bitwise_xor::op1_range (r
, type
, lhs
, op1
);
3782 class operator_trunc_mod
: public range_operator
3784 using range_operator::op1_range
;
3785 using range_operator::op2_range
;
3787 virtual void wi_fold (irange
&r
, tree type
,
3788 const wide_int
&lh_lb
,
3789 const wide_int
&lh_ub
,
3790 const wide_int
&rh_lb
,
3791 const wide_int
&rh_ub
) const;
3792 virtual bool op1_range (irange
&r
, tree type
,
3795 relation_trio
) const;
3796 virtual bool op2_range (irange
&r
, tree type
,
3799 relation_trio
) const;
3800 void update_bitmask (irange
&r
, const irange
&lh
, const irange
&rh
) const
3801 { update_known_bitmask (r
, TRUNC_MOD_EXPR
, lh
, rh
); }
3805 operator_trunc_mod::wi_fold (irange
&r
, tree type
,
3806 const wide_int
&lh_lb
,
3807 const wide_int
&lh_ub
,
3808 const wide_int
&rh_lb
,
3809 const wide_int
&rh_ub
) const
3811 wide_int new_lb
, new_ub
, tmp
;
3812 signop sign
= TYPE_SIGN (type
);
3813 unsigned prec
= TYPE_PRECISION (type
);
3815 // Mod 0 is undefined.
3816 if (wi_zero_p (type
, rh_lb
, rh_ub
))
3822 // Check for constant and try to fold.
3823 if (lh_lb
== lh_ub
&& rh_lb
== rh_ub
)
3825 wi::overflow_type ov
= wi::OVF_NONE
;
3826 tmp
= wi::mod_trunc (lh_lb
, rh_lb
, sign
, &ov
);
3827 if (ov
== wi::OVF_NONE
)
3829 r
= int_range
<2> (type
, tmp
, tmp
);
3834 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
3839 new_ub
= wi::smax (new_ub
, tmp
);
3842 if (sign
== UNSIGNED
)
3843 new_lb
= wi::zero (prec
);
3848 if (wi::gts_p (tmp
, 0))
3849 tmp
= wi::zero (prec
);
3850 new_lb
= wi::smax (new_lb
, tmp
);
3853 if (sign
== SIGNED
&& wi::neg_p (tmp
))
3854 tmp
= wi::zero (prec
);
3855 new_ub
= wi::min (new_ub
, tmp
, sign
);
3857 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3861 operator_trunc_mod::op1_range (irange
&r
, tree type
,
3864 relation_trio
) const
3866 if (lhs
.undefined_p ())
3869 signop sign
= TYPE_SIGN (type
);
3870 unsigned prec
= TYPE_PRECISION (type
);
3871 // (a % b) >= x && x > 0 , then a >= x.
3872 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
3874 r
= value_range (type
, lhs
.lower_bound (), wi::max_value (prec
, sign
));
3877 // (a % b) <= x && x < 0 , then a <= x.
3878 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
3880 r
= value_range (type
, wi::min_value (prec
, sign
), lhs
.upper_bound ());
3887 operator_trunc_mod::op2_range (irange
&r
, tree type
,
3890 relation_trio
) const
3892 if (lhs
.undefined_p ())
3895 signop sign
= TYPE_SIGN (type
);
3896 unsigned prec
= TYPE_PRECISION (type
);
3897 // (a % b) >= x && x > 0 , then b is in ~[-x, x] for signed
3898 // or b > x for unsigned.
3899 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
3902 r
= value_range (type
, wi::neg (lhs
.lower_bound ()),
3903 lhs
.lower_bound (), VR_ANTI_RANGE
);
3904 else if (wi::lt_p (lhs
.lower_bound (), wi::max_value (prec
, sign
),
3906 r
= value_range (type
, lhs
.lower_bound () + 1,
3907 wi::max_value (prec
, sign
));
3912 // (a % b) <= x && x < 0 , then b is in ~[x, -x].
3913 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
3915 if (wi::gt_p (lhs
.upper_bound (), wi::min_value (prec
, sign
), sign
))
3916 r
= value_range (type
, lhs
.upper_bound (),
3917 wi::neg (lhs
.upper_bound ()), VR_ANTI_RANGE
);
3926 class operator_logical_not
: public range_operator
3928 using range_operator::fold_range
;
3929 using range_operator::op1_range
;
3931 virtual bool fold_range (irange
&r
, tree type
,
3934 relation_trio rel
= TRIO_VARYING
) const;
3935 virtual bool op1_range (irange
&r
, tree type
,
3938 relation_trio rel
= TRIO_VARYING
) const;
3941 // Folding a logical NOT, oddly enough, involves doing nothing on the
3942 // forward pass through. During the initial walk backwards, the
3943 // logical NOT reversed the desired outcome on the way back, so on the
3944 // way forward all we do is pass the range forward.
3949 // to determine the TRUE branch, walking backward
3950 // if (b_3) if ([1,1])
3951 // b_3 = !b_2 [1,1] = ![0,0]
3952 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
3953 // which is the result we are looking for.. so.. pass it through.
3956 operator_logical_not::fold_range (irange
&r
, tree type
,
3958 const irange
&rh ATTRIBUTE_UNUSED
,
3959 relation_trio
) const
3961 if (empty_range_varying (r
, type
, lh
, rh
))
3965 if (!lh
.varying_p () && !lh
.undefined_p ())
3972 operator_logical_not::op1_range (irange
&r
,
3976 relation_trio
) const
3978 // Logical NOT is involutary...do it again.
3979 return fold_range (r
, type
, lhs
, op2
);
3984 operator_bitwise_not::fold_range (irange
&r
, tree type
,
3987 relation_trio
) const
3989 if (empty_range_varying (r
, type
, lh
, rh
))
3992 if (types_compatible_p (type
, boolean_type_node
))
3993 return op_logical_not
.fold_range (r
, type
, lh
, rh
);
3995 // ~X is simply -1 - X.
3996 int_range
<1> minusone (type
, wi::minus_one (TYPE_PRECISION (type
)),
3997 wi::minus_one (TYPE_PRECISION (type
)));
3998 return range_op_handler (MINUS_EXPR
).fold_range (r
, type
, minusone
, lh
);
4002 operator_bitwise_not::op1_range (irange
&r
, tree type
,
4005 relation_trio
) const
4007 if (lhs
.undefined_p ())
4009 if (types_compatible_p (type
, boolean_type_node
))
4010 return op_logical_not
.op1_range (r
, type
, lhs
, op2
);
4012 // ~X is -1 - X and since bitwise NOT is involutary...do it again.
4013 return fold_range (r
, type
, lhs
, op2
);
4018 operator_cst::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4020 const irange
&rh ATTRIBUTE_UNUSED
,
4021 relation_trio
) const
4028 // Determine if there is a relationship between LHS and OP1.
4031 operator_identity::lhs_op1_relation (const irange
&lhs
,
4032 const irange
&op1 ATTRIBUTE_UNUSED
,
4033 const irange
&op2 ATTRIBUTE_UNUSED
,
4034 relation_kind
) const
4036 if (lhs
.undefined_p ())
4037 return VREL_VARYING
;
4038 // Simply a copy, so they are equivalent.
4043 operator_identity::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4045 const irange
&rh ATTRIBUTE_UNUSED
,
4046 relation_trio
) const
4053 operator_identity::op1_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4055 const irange
&op2 ATTRIBUTE_UNUSED
,
4056 relation_trio
) const
4063 class operator_unknown
: public range_operator
4065 using range_operator::fold_range
;
4067 virtual bool fold_range (irange
&r
, tree type
,
4070 relation_trio rel
= TRIO_VARYING
) const;
4074 operator_unknown::fold_range (irange
&r
, tree type
,
4075 const irange
&lh ATTRIBUTE_UNUSED
,
4076 const irange
&rh ATTRIBUTE_UNUSED
,
4077 relation_trio
) const
4079 r
.set_varying (type
);
4085 operator_abs::wi_fold (irange
&r
, tree type
,
4086 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4087 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4088 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4091 signop sign
= TYPE_SIGN (type
);
4092 unsigned prec
= TYPE_PRECISION (type
);
4094 // Pass through LH for the easy cases.
4095 if (sign
== UNSIGNED
|| wi::ge_p (lh_lb
, 0, sign
))
4097 r
= int_range
<1> (type
, lh_lb
, lh_ub
);
4101 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
4103 wide_int min_value
= wi::min_value (prec
, sign
);
4104 wide_int max_value
= wi::max_value (prec
, sign
);
4105 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lh_lb
, min_value
))
4107 r
.set_varying (type
);
4111 // ABS_EXPR may flip the range around, if the original range
4112 // included negative values.
4113 if (wi::eq_p (lh_lb
, min_value
))
4115 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
4116 // returned [-MIN,-MIN] so this preserves that behavior. PR37078
4117 if (wi::eq_p (lh_ub
, min_value
))
4119 r
= int_range
<1> (type
, min_value
, min_value
);
4125 min
= wi::abs (lh_lb
);
4127 if (wi::eq_p (lh_ub
, min_value
))
4130 max
= wi::abs (lh_ub
);
4132 // If the range contains zero then we know that the minimum value in the
4133 // range will be zero.
4134 if (wi::le_p (lh_lb
, 0, sign
) && wi::ge_p (lh_ub
, 0, sign
))
4136 if (wi::gt_p (min
, max
, sign
))
4138 min
= wi::zero (prec
);
4142 // If the range was reversed, swap MIN and MAX.
4143 if (wi::gt_p (min
, max
, sign
))
4144 std::swap (min
, max
);
4147 // If the new range has its limits swapped around (MIN > MAX), then
4148 // the operation caused one of them to wrap around. The only thing
4149 // we know is that the result is positive.
4150 if (wi::gt_p (min
, max
, sign
))
4152 min
= wi::zero (prec
);
4155 r
= int_range
<1> (type
, min
, max
);
4159 operator_abs::op1_range (irange
&r
, tree type
,
4162 relation_trio
) const
4164 if (empty_range_varying (r
, type
, lhs
, op2
))
4166 if (TYPE_UNSIGNED (type
))
4171 // Start with the positives because negatives are an impossible result.
4172 int_range_max positives
= range_positives (type
);
4173 positives
.intersect (lhs
);
4175 // Then add the negative of each pair:
4176 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4177 for (unsigned i
= 0; i
< positives
.num_pairs (); ++i
)
4178 r
.union_ (int_range
<1> (type
,
4179 -positives
.upper_bound (i
),
4180 -positives
.lower_bound (i
)));
4181 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
4182 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
4183 wide_int min_value
= wi::min_value (TYPE_PRECISION (type
), TYPE_SIGN (type
));
4184 wide_int lb
= lhs
.lower_bound ();
4185 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lb
, min_value
))
4186 r
.union_ (int_range
<2> (type
, lb
, lb
));
4191 class operator_absu
: public range_operator
4194 virtual void wi_fold (irange
&r
, tree type
,
4195 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4196 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4200 operator_absu::wi_fold (irange
&r
, tree type
,
4201 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4202 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4203 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4205 wide_int new_lb
, new_ub
;
4207 // Pass through VR0 the easy cases.
4208 if (wi::ges_p (lh_lb
, 0))
4215 new_lb
= wi::abs (lh_lb
);
4216 new_ub
= wi::abs (lh_ub
);
4218 // If the range contains zero then we know that the minimum
4219 // value in the range will be zero.
4220 if (wi::ges_p (lh_ub
, 0))
4222 if (wi::gtu_p (new_lb
, new_ub
))
4224 new_lb
= wi::zero (TYPE_PRECISION (type
));
4227 std::swap (new_lb
, new_ub
);
4230 gcc_checking_assert (TYPE_UNSIGNED (type
));
4231 r
= int_range
<1> (type
, new_lb
, new_ub
);
4236 operator_negate::fold_range (irange
&r
, tree type
,
4239 relation_trio
) const
4241 if (empty_range_varying (r
, type
, lh
, rh
))
4243 // -X is simply 0 - X.
4244 return range_op_handler (MINUS_EXPR
).fold_range (r
, type
,
4245 range_zero (type
), lh
);
4249 operator_negate::op1_range (irange
&r
, tree type
,
4252 relation_trio
) const
4254 // NEGATE is involutory.
4255 return fold_range (r
, type
, lhs
, op2
);
4260 operator_addr_expr::fold_range (irange
&r
, tree type
,
4263 relation_trio
) const
4265 if (empty_range_varying (r
, type
, lh
, rh
))
4268 // Return a non-null pointer of the LHS type (passed in op2).
4270 r
= range_zero (type
);
4271 else if (!contains_zero_p (lh
))
4272 r
= range_nonzero (type
);
4274 r
.set_varying (type
);
4279 operator_addr_expr::op1_range (irange
&r
, tree type
,
4282 relation_trio
) const
4284 return operator_addr_expr::fold_range (r
, type
, lhs
, op2
);
4287 // Initialize any integral operators to the primary table
4290 range_op_table::initialize_integral_ops ()
4292 set (TRUNC_DIV_EXPR
, op_trunc_div
);
4293 set (FLOOR_DIV_EXPR
, op_floor_div
);
4294 set (ROUND_DIV_EXPR
, op_round_div
);
4295 set (CEIL_DIV_EXPR
, op_ceil_div
);
4296 set (EXACT_DIV_EXPR
, op_exact_div
);
4297 set (LSHIFT_EXPR
, op_lshift
);
4298 set (RSHIFT_EXPR
, op_rshift
);
4299 set (TRUTH_AND_EXPR
, op_logical_and
);
4300 set (TRUTH_OR_EXPR
, op_logical_or
);
4301 set (TRUNC_MOD_EXPR
, op_trunc_mod
);
4302 set (TRUTH_NOT_EXPR
, op_logical_not
);
4303 set (IMAGPART_EXPR
, op_unknown
);
4304 set (REALPART_EXPR
, op_unknown
);
4305 set (ABSU_EXPR
, op_absu
);
4306 set (OP_WIDEN_MULT_SIGNED
, op_widen_mult_signed
);
4307 set (OP_WIDEN_MULT_UNSIGNED
, op_widen_mult_unsigned
);
4308 set (OP_WIDEN_PLUS_SIGNED
, op_widen_plus_signed
);
4309 set (OP_WIDEN_PLUS_UNSIGNED
, op_widen_plus_unsigned
);
4314 #include "selftest.h"
4318 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
4319 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
4320 #define INT16(x) wi::shwi ((x), TYPE_PRECISION (short_integer_type_node))
4321 #define UINT16(x) wi::uhwi ((x), TYPE_PRECISION (short_unsigned_type_node))
4322 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
4323 #define UCHAR(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_char_type_node))
4326 range_op_cast_tests ()
4328 int_range
<2> r0
, r1
, r2
, rold
;
4329 r0
.set_varying (integer_type_node
);
4330 wide_int maxint
= r0
.upper_bound ();
4332 // If a range is in any way outside of the range for the converted
4333 // to range, default to the range for the new type.
4334 r0
.set_varying (short_integer_type_node
);
4335 wide_int minshort
= r0
.lower_bound ();
4336 wide_int maxshort
= r0
.upper_bound ();
4337 if (TYPE_PRECISION (integer_type_node
)
4338 > TYPE_PRECISION (short_integer_type_node
))
4340 r1
= int_range
<1> (integer_type_node
,
4341 wi::zero (TYPE_PRECISION (integer_type_node
)),
4343 range_cast (r1
, short_integer_type_node
);
4344 ASSERT_TRUE (r1
.lower_bound () == minshort
4345 && r1
.upper_bound() == maxshort
);
4348 // (unsigned char)[-5,-1] => [251,255].
4349 r0
= rold
= int_range
<1> (signed_char_type_node
, SCHAR (-5), SCHAR (-1));
4350 range_cast (r0
, unsigned_char_type_node
);
4351 ASSERT_TRUE (r0
== int_range
<1> (unsigned_char_type_node
,
4352 UCHAR (251), UCHAR (255)));
4353 range_cast (r0
, signed_char_type_node
);
4354 ASSERT_TRUE (r0
== rold
);
4356 // (signed char)[15, 150] => [-128,-106][15,127].
4357 r0
= rold
= int_range
<1> (unsigned_char_type_node
, UCHAR (15), UCHAR (150));
4358 range_cast (r0
, signed_char_type_node
);
4359 r1
= int_range
<1> (signed_char_type_node
, SCHAR (15), SCHAR (127));
4360 r2
= int_range
<1> (signed_char_type_node
, SCHAR (-128), SCHAR (-106));
4362 ASSERT_TRUE (r1
== r0
);
4363 range_cast (r0
, unsigned_char_type_node
);
4364 ASSERT_TRUE (r0
== rold
);
4366 // (unsigned char)[-5, 5] => [0,5][251,255].
4367 r0
= rold
= int_range
<1> (signed_char_type_node
, SCHAR (-5), SCHAR (5));
4368 range_cast (r0
, unsigned_char_type_node
);
4369 r1
= int_range
<1> (unsigned_char_type_node
, UCHAR (251), UCHAR (255));
4370 r2
= int_range
<1> (unsigned_char_type_node
, UCHAR (0), UCHAR (5));
4372 ASSERT_TRUE (r0
== r1
);
4373 range_cast (r0
, signed_char_type_node
);
4374 ASSERT_TRUE (r0
== rold
);
4376 // (unsigned char)[-5,5] => [0,5][251,255].
4377 r0
= int_range
<1> (integer_type_node
, INT (-5), INT (5));
4378 range_cast (r0
, unsigned_char_type_node
);
4379 r1
= int_range
<1> (unsigned_char_type_node
, UCHAR (0), UCHAR (5));
4380 r1
.union_ (int_range
<1> (unsigned_char_type_node
, UCHAR (251), UCHAR (255)));
4381 ASSERT_TRUE (r0
== r1
);
4383 // (unsigned char)[5U,1974U] => [0,255].
4384 r0
= int_range
<1> (unsigned_type_node
, UINT (5), UINT (1974));
4385 range_cast (r0
, unsigned_char_type_node
);
4386 ASSERT_TRUE (r0
== int_range
<1> (unsigned_char_type_node
, UCHAR (0), UCHAR (255)));
4387 range_cast (r0
, integer_type_node
);
4388 // Going to a wider range should not sign extend.
4389 ASSERT_TRUE (r0
== int_range
<1> (integer_type_node
, INT (0), INT (255)));
4391 // (unsigned char)[-350,15] => [0,255].
4392 r0
= int_range
<1> (integer_type_node
, INT (-350), INT (15));
4393 range_cast (r0
, unsigned_char_type_node
);
4394 ASSERT_TRUE (r0
== (int_range
<1>
4395 (unsigned_char_type_node
,
4396 min_limit (unsigned_char_type_node
),
4397 max_limit (unsigned_char_type_node
))));
4399 // Casting [-120,20] from signed char to unsigned short.
4400 // => [0, 20][0xff88, 0xffff].
4401 r0
= int_range
<1> (signed_char_type_node
, SCHAR (-120), SCHAR (20));
4402 range_cast (r0
, short_unsigned_type_node
);
4403 r1
= int_range
<1> (short_unsigned_type_node
, UINT16 (0), UINT16 (20));
4404 r2
= int_range
<1> (short_unsigned_type_node
,
4405 UINT16 (0xff88), UINT16 (0xffff));
4407 ASSERT_TRUE (r0
== r1
);
4408 // A truncating cast back to signed char will work because [-120, 20]
4409 // is representable in signed char.
4410 range_cast (r0
, signed_char_type_node
);
4411 ASSERT_TRUE (r0
== int_range
<1> (signed_char_type_node
,
4412 SCHAR (-120), SCHAR (20)));
4414 // unsigned char -> signed short
4415 // (signed short)[(unsigned char)25, (unsigned char)250]
4416 // => [(signed short)25, (signed short)250]
4417 r0
= rold
= int_range
<1> (unsigned_char_type_node
, UCHAR (25), UCHAR (250));
4418 range_cast (r0
, short_integer_type_node
);
4419 r1
= int_range
<1> (short_integer_type_node
, INT16 (25), INT16 (250));
4420 ASSERT_TRUE (r0
== r1
);
4421 range_cast (r0
, unsigned_char_type_node
);
4422 ASSERT_TRUE (r0
== rold
);
4424 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
4425 r0
= int_range
<1> (long_long_integer_type_node
,
4426 min_limit (long_long_integer_type_node
),
4427 max_limit (long_long_integer_type_node
));
4428 range_cast (r0
, short_unsigned_type_node
);
4429 r1
= int_range
<1> (short_unsigned_type_node
,
4430 min_limit (short_unsigned_type_node
),
4431 max_limit (short_unsigned_type_node
));
4432 ASSERT_TRUE (r0
== r1
);
4434 // Casting NONZERO to a narrower type will wrap/overflow so
4435 // it's just the entire range for the narrower type.
4437 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
4438 // is outside of the range of a smaller range, return the full
4440 if (TYPE_PRECISION (integer_type_node
)
4441 > TYPE_PRECISION (short_integer_type_node
))
4443 r0
= range_nonzero (integer_type_node
);
4444 range_cast (r0
, short_integer_type_node
);
4445 r1
= int_range
<1> (short_integer_type_node
,
4446 min_limit (short_integer_type_node
),
4447 max_limit (short_integer_type_node
));
4448 ASSERT_TRUE (r0
== r1
);
4451 // Casting NONZERO from a narrower signed to a wider signed.
4453 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
4454 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
4455 r0
= range_nonzero (short_integer_type_node
);
4456 range_cast (r0
, integer_type_node
);
4457 r1
= int_range
<1> (integer_type_node
, INT (-32768), INT (-1));
4458 r2
= int_range
<1> (integer_type_node
, INT (1), INT (32767));
4460 ASSERT_TRUE (r0
== r1
);
4464 range_op_lshift_tests ()
4466 // Test that 0x808.... & 0x8.... still contains 0x8....
4467 // for a large set of numbers.
4470 tree big_type
= long_long_unsigned_type_node
;
4471 unsigned big_prec
= TYPE_PRECISION (big_type
);
4472 // big_num = 0x808,0000,0000,0000
4473 wide_int big_num
= wi::lshift (wi::uhwi (0x808, big_prec
),
4474 wi::uhwi (48, big_prec
));
4475 op_bitwise_and
.fold_range (res
, big_type
,
4476 int_range
<1> (big_type
),
4477 int_range
<1> (big_type
, big_num
, big_num
));
4478 // val = 0x8,0000,0000,0000
4479 wide_int val
= wi::lshift (wi::uhwi (8, big_prec
),
4480 wi::uhwi (48, big_prec
));
4481 ASSERT_TRUE (res
.contains_p (val
));
4484 if (TYPE_PRECISION (unsigned_type_node
) > 31)
4486 // unsigned VARYING = op1 << 1 should be VARYING.
4487 int_range
<2> lhs (unsigned_type_node
);
4488 int_range
<2> shift (unsigned_type_node
, INT (1), INT (1));
4490 op_lshift
.op1_range (op1
, unsigned_type_node
, lhs
, shift
);
4491 ASSERT_TRUE (op1
.varying_p ());
4493 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4494 int_range
<2> zero (unsigned_type_node
, UINT (0), UINT (0));
4495 op_lshift
.op1_range (op1
, unsigned_type_node
, zero
, shift
);
4496 ASSERT_TRUE (op1
.num_pairs () == 2);
4497 // Remove the [0,0] range.
4498 op1
.intersect (zero
);
4499 ASSERT_TRUE (op1
.num_pairs () == 1);
4500 // op1 << 1 should be [0x8000,0x8000] << 1,
4501 // which should result in [0,0].
4502 int_range_max result
;
4503 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
4504 ASSERT_TRUE (result
== zero
);
4506 // signed VARYING = op1 << 1 should be VARYING.
4507 if (TYPE_PRECISION (integer_type_node
) > 31)
4509 // unsigned VARYING = op1 << 1 should be VARYING.
4510 int_range
<2> lhs (integer_type_node
);
4511 int_range
<2> shift (integer_type_node
, INT (1), INT (1));
4513 op_lshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
4514 ASSERT_TRUE (op1
.varying_p ());
4516 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4517 int_range
<2> zero (integer_type_node
, INT (0), INT (0));
4518 op_lshift
.op1_range (op1
, integer_type_node
, zero
, shift
);
4519 ASSERT_TRUE (op1
.num_pairs () == 2);
4520 // Remove the [0,0] range.
4521 op1
.intersect (zero
);
4522 ASSERT_TRUE (op1
.num_pairs () == 1);
4523 // op1 << 1 should be [0x8000,0x8000] << 1,
4524 // which should result in [0,0].
4525 int_range_max result
;
4526 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
4527 ASSERT_TRUE (result
== zero
);
4532 range_op_rshift_tests ()
4534 // unsigned: [3, MAX] = OP1 >> 1
4536 int_range_max
lhs (unsigned_type_node
,
4537 UINT (3), max_limit (unsigned_type_node
));
4538 int_range_max
one (unsigned_type_node
,
4539 wi::one (TYPE_PRECISION (unsigned_type_node
)),
4540 wi::one (TYPE_PRECISION (unsigned_type_node
)));
4542 op_rshift
.op1_range (op1
, unsigned_type_node
, lhs
, one
);
4543 ASSERT_FALSE (op1
.contains_p (UINT (3)));
4546 // signed: [3, MAX] = OP1 >> 1
4548 int_range_max
lhs (integer_type_node
,
4549 INT (3), max_limit (integer_type_node
));
4550 int_range_max
one (integer_type_node
, INT (1), INT (1));
4552 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
4553 ASSERT_FALSE (op1
.contains_p (INT (-2)));
4556 // This is impossible, so OP1 should be [].
4557 // signed: [MIN, MIN] = OP1 >> 1
4559 int_range_max
lhs (integer_type_node
,
4560 min_limit (integer_type_node
),
4561 min_limit (integer_type_node
));
4562 int_range_max
one (integer_type_node
, INT (1), INT (1));
4564 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
4565 ASSERT_TRUE (op1
.undefined_p ());
4568 // signed: ~[-1] = OP1 >> 31
4569 if (TYPE_PRECISION (integer_type_node
) > 31)
4571 int_range_max
lhs (integer_type_node
, INT (-1), INT (-1), VR_ANTI_RANGE
);
4572 int_range_max
shift (integer_type_node
, INT (31), INT (31));
4574 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
4575 int_range_max negatives
= range_negatives (integer_type_node
);
4576 negatives
.intersect (op1
);
4577 ASSERT_TRUE (negatives
.undefined_p ());
4582 range_op_bitwise_and_tests ()
4585 wide_int min
= min_limit (integer_type_node
);
4586 wide_int max
= max_limit (integer_type_node
);
4587 wide_int tiny
= wi::add (min
, wi::one (TYPE_PRECISION (integer_type_node
)));
4588 int_range_max
i1 (integer_type_node
, tiny
, max
);
4589 int_range_max
i2 (integer_type_node
, INT (255), INT (255));
4591 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
4592 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
4593 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
4595 // VARYING = OP1 & 255: OP1 is VARYING
4596 i1
= int_range
<1> (integer_type_node
);
4597 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
4598 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
4600 // For 0 = x & MASK, x is ~MASK.
4602 int_range
<2> zero (integer_type_node
, INT (0), INT (0));
4603 int_range
<2> mask
= int_range
<2> (integer_type_node
, INT (7), INT (7));
4604 op_bitwise_and
.op1_range (res
, integer_type_node
, zero
, mask
);
4605 wide_int inv
= wi::shwi (~7U, TYPE_PRECISION (integer_type_node
));
4606 ASSERT_TRUE (res
.get_nonzero_bits () == inv
);
4609 // (NONZERO | X) is nonzero.
4610 i1
.set_nonzero (integer_type_node
);
4611 i2
.set_varying (integer_type_node
);
4612 op_bitwise_or
.fold_range (res
, integer_type_node
, i1
, i2
);
4613 ASSERT_TRUE (res
.nonzero_p ());
4615 // (NEGATIVE | X) is nonzero.
4616 i1
= int_range
<1> (integer_type_node
, INT (-5), INT (-3));
4617 i2
.set_varying (integer_type_node
);
4618 op_bitwise_or
.fold_range (res
, integer_type_node
, i1
, i2
);
4619 ASSERT_FALSE (res
.contains_p (INT (0)));
4623 range_relational_tests ()
4625 int_range
<2> lhs (unsigned_char_type_node
);
4626 int_range
<2> op1 (unsigned_char_type_node
, UCHAR (8), UCHAR (10));
4627 int_range
<2> op2 (unsigned_char_type_node
, UCHAR (20), UCHAR (20));
4629 // Never wrapping additions mean LHS > OP1.
4630 relation_kind code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4631 ASSERT_TRUE (code
== VREL_GT
);
4633 // Most wrapping additions mean nothing...
4634 op1
= int_range
<2> (unsigned_char_type_node
, UCHAR (8), UCHAR (10));
4635 op2
= int_range
<2> (unsigned_char_type_node
, UCHAR (0), UCHAR (255));
4636 code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4637 ASSERT_TRUE (code
== VREL_VARYING
);
4639 // However, always wrapping additions mean LHS < OP1.
4640 op1
= int_range
<2> (unsigned_char_type_node
, UCHAR (1), UCHAR (255));
4641 op2
= int_range
<2> (unsigned_char_type_node
, UCHAR (255), UCHAR (255));
4642 code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4643 ASSERT_TRUE (code
== VREL_LT
);
4649 range_op_rshift_tests ();
4650 range_op_lshift_tests ();
4651 range_op_bitwise_and_tests ();
4652 range_op_cast_tests ();
4653 range_relational_tests ();
4655 extern void range_op_float_tests ();
4656 range_op_float_tests ();
4659 } // namespace selftest
4661 #endif // CHECKING_P