1 /* Code for range operators.
2 Copyright (C) 2017-2023 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
26 #include "insn-codes.h"
31 #include "tree-pass.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
44 #include "gimple-walk.h"
47 #include "value-relation.h"
49 #include "tree-ssa-ccp.h"
50 #include "range-op-mixed.h"
52 // Instantiate the operators which apply to multiple types here.
54 operator_equal op_equal
;
55 operator_not_equal op_not_equal
;
60 operator_identity op_ident
;
62 operator_cast op_cast
;
63 operator_plus op_plus
;
65 operator_minus op_minus
;
66 operator_negate op_negate
;
67 operator_mult op_mult
;
68 operator_addr_expr op_addr
;
69 operator_bitwise_not op_bitwise_not
;
70 operator_bitwise_xor op_bitwise_xor
;
71 operator_bitwise_and op_bitwise_and
;
72 operator_bitwise_or op_bitwise_or
;
76 // Instantaite a range operator table.
77 range_op_table operator_table
;
79 // Invoke the initialization routines for each class of range.
81 range_op_table::range_op_table ()
83 initialize_integral_ops ();
84 initialize_pointer_ops ();
85 initialize_float_ops ();
87 set (EQ_EXPR
, op_equal
);
88 set (NE_EXPR
, op_not_equal
);
93 set (SSA_NAME
, op_ident
);
94 set (PAREN_EXPR
, op_ident
);
95 set (OBJ_TYPE_REF
, op_ident
);
96 set (REAL_CST
, op_cst
);
97 set (INTEGER_CST
, op_cst
);
98 set (NOP_EXPR
, op_cast
);
99 set (CONVERT_EXPR
, op_cast
);
100 set (PLUS_EXPR
, op_plus
);
101 set (ABS_EXPR
, op_abs
);
102 set (MINUS_EXPR
, op_minus
);
103 set (NEGATE_EXPR
, op_negate
);
104 set (MULT_EXPR
, op_mult
);
106 // Occur in both integer and pointer tables, but currently share
107 // integral implementation.
108 set (ADDR_EXPR
, op_addr
);
109 set (BIT_NOT_EXPR
, op_bitwise_not
);
110 set (BIT_XOR_EXPR
, op_bitwise_xor
);
112 // These are in both integer and pointer tables, but pointer has a different
114 // If commented out, there is a hybrid version in range-op-ptr.cc which
115 // is used until there is a pointer range class. Then we can simply
116 // uncomment the operator here and use the unified version.
118 // set (BIT_AND_EXPR, op_bitwise_and);
119 // set (BIT_IOR_EXPR, op_bitwise_or);
120 // set (MIN_EXPR, op_min);
121 // set (MAX_EXPR, op_max);
124 // Instantiate a default range operator for opcodes with no entry.
126 range_operator default_operator
;
128 // Create a default range_op_handler.
130 range_op_handler::range_op_handler ()
132 m_operator
= &default_operator
;
135 // Create a range_op_handler for CODE. Use a default operatoer if CODE
136 // does not have an entry.
138 range_op_handler::range_op_handler (unsigned code
)
140 m_operator
= operator_table
[code
];
142 m_operator
= &default_operator
;
145 // Return TRUE if this handler has a non-default operator.
147 range_op_handler::operator bool () const
149 return m_operator
!= &default_operator
;
152 // Return a pointer to the range operator assocaited with this handler.
153 // If it is a default operator, return NULL.
154 // This is the equivalent of indexing the range table.
157 range_op_handler::range_op () const
159 if (m_operator
!= &default_operator
)
// Create a dispatch pattern for value range discriminators LHS, OP1, and OP2.
// This is used to produce a unique value for each dispatch pattern.  Shift
// values are based on the size of the m_discriminator field in value_range.h.

constexpr unsigned
dispatch_trio (unsigned lhs, unsigned op1, unsigned op2)
{
  return ((lhs << 8) + (op1 << 4) + (op2));
}
174 // These are the supported dispatch patterns. These map to the parameter list
175 // of the routines in range_operator. Note the last 3 characters are
176 // shorthand for the LHS, OP1, and OP2 range discriminator class.
178 const unsigned RO_III
= dispatch_trio (VR_IRANGE
, VR_IRANGE
, VR_IRANGE
);
179 const unsigned RO_IFI
= dispatch_trio (VR_IRANGE
, VR_FRANGE
, VR_IRANGE
);
180 const unsigned RO_IFF
= dispatch_trio (VR_IRANGE
, VR_FRANGE
, VR_FRANGE
);
181 const unsigned RO_FFF
= dispatch_trio (VR_FRANGE
, VR_FRANGE
, VR_FRANGE
);
182 const unsigned RO_FIF
= dispatch_trio (VR_FRANGE
, VR_IRANGE
, VR_FRANGE
);
183 const unsigned RO_FII
= dispatch_trio (VR_FRANGE
, VR_IRANGE
, VR_IRANGE
);
185 // Return a dispatch value for parameter types LHS, OP1 and OP2.
188 range_op_handler::dispatch_kind (const vrange
&lhs
, const vrange
&op1
,
189 const vrange
& op2
) const
191 return dispatch_trio (lhs
.m_discriminator
, op1
.m_discriminator
,
192 op2
.m_discriminator
);
195 // Dispatch a call to fold_range based on the types of R, LH and RH.
198 range_op_handler::fold_range (vrange
&r
, tree type
,
201 relation_trio rel
) const
203 gcc_checking_assert (m_operator
);
204 switch (dispatch_kind (r
, lh
, rh
))
207 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
209 as_a
<irange
> (rh
), rel
);
211 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
213 as_a
<irange
> (rh
), rel
);
215 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
217 as_a
<frange
> (rh
), rel
);
219 return m_operator
->fold_range (as_a
<frange
> (r
), type
,
221 as_a
<frange
> (rh
), rel
);
223 return m_operator
->fold_range (as_a
<frange
> (r
), type
,
225 as_a
<irange
> (rh
), rel
);
231 // Dispatch a call to op1_range based on the types of R, LHS and OP2.
234 range_op_handler::op1_range (vrange
&r
, tree type
,
237 relation_trio rel
) const
239 gcc_checking_assert (m_operator
);
241 if (lhs
.undefined_p ())
243 switch (dispatch_kind (r
, lhs
, op2
))
246 return m_operator
->op1_range (as_a
<irange
> (r
), type
,
248 as_a
<irange
> (op2
), rel
);
250 return m_operator
->op1_range (as_a
<frange
> (r
), type
,
252 as_a
<frange
> (op2
), rel
);
254 return m_operator
->op1_range (as_a
<frange
> (r
), type
,
256 as_a
<frange
> (op2
), rel
);
262 // Dispatch a call to op2_range based on the types of R, LHS and OP1.
265 range_op_handler::op2_range (vrange
&r
, tree type
,
268 relation_trio rel
) const
270 gcc_checking_assert (m_operator
);
271 if (lhs
.undefined_p ())
274 switch (dispatch_kind (r
, lhs
, op1
))
277 return m_operator
->op2_range (as_a
<irange
> (r
), type
,
279 as_a
<irange
> (op1
), rel
);
281 return m_operator
->op2_range (as_a
<frange
> (r
), type
,
283 as_a
<frange
> (op1
), rel
);
285 return m_operator
->op2_range (as_a
<frange
> (r
), type
,
287 as_a
<frange
> (op1
), rel
);
293 // Dispatch a call to lhs_op1_relation based on the types of LHS, OP1 and OP2.
296 range_op_handler::lhs_op1_relation (const vrange
&lhs
,
299 relation_kind rel
) const
301 gcc_checking_assert (m_operator
);
303 switch (dispatch_kind (lhs
, op1
, op2
))
306 return m_operator
->lhs_op1_relation (as_a
<irange
> (lhs
),
308 as_a
<irange
> (op2
), rel
);
310 return m_operator
->lhs_op1_relation (as_a
<irange
> (lhs
),
312 as_a
<frange
> (op2
), rel
);
314 return m_operator
->lhs_op1_relation (as_a
<frange
> (lhs
),
316 as_a
<frange
> (op2
), rel
);
322 // Dispatch a call to lhs_op2_relation based on the types of LHS, OP1 and OP2.
325 range_op_handler::lhs_op2_relation (const vrange
&lhs
,
328 relation_kind rel
) const
330 gcc_checking_assert (m_operator
);
331 switch (dispatch_kind (lhs
, op1
, op2
))
334 return m_operator
->lhs_op2_relation (as_a
<irange
> (lhs
),
336 as_a
<irange
> (op2
), rel
);
338 return m_operator
->lhs_op2_relation (as_a
<irange
> (lhs
),
340 as_a
<frange
> (op2
), rel
);
342 return m_operator
->lhs_op2_relation (as_a
<frange
> (lhs
),
344 as_a
<frange
> (op2
), rel
);
350 // Dispatch a call to op1_op2_relation based on the type of LHS.
353 range_op_handler::op1_op2_relation (const vrange
&lhs
) const
355 gcc_checking_assert (m_operator
);
356 switch (dispatch_kind (lhs
, lhs
, lhs
))
359 return m_operator
->op1_op2_relation (as_a
<irange
> (lhs
));
362 return m_operator
->op1_op2_relation (as_a
<frange
> (lhs
));
370 // Convert irange bitmasks into a VALUE MASK pair suitable for calling CCP.
373 irange_to_masked_value (const irange
&r
, widest_int
&value
, widest_int
&mask
)
375 if (r
.singleton_p ())
378 value
= widest_int::from (r
.lower_bound (), TYPE_SIGN (r
.type ()));
382 mask
= widest_int::from (r
.get_nonzero_bits (), TYPE_SIGN (r
.type ()));
387 // Update the known bitmasks in R when applying the operation CODE to
391 update_known_bitmask (irange
&r
, tree_code code
,
392 const irange
&lh
, const irange
&rh
)
394 if (r
.undefined_p () || lh
.undefined_p () || rh
.undefined_p ())
397 widest_int value
, mask
, lh_mask
, rh_mask
, lh_value
, rh_value
;
398 tree type
= r
.type ();
399 signop sign
= TYPE_SIGN (type
);
400 int prec
= TYPE_PRECISION (type
);
401 signop lh_sign
= TYPE_SIGN (lh
.type ());
402 signop rh_sign
= TYPE_SIGN (rh
.type ());
403 int lh_prec
= TYPE_PRECISION (lh
.type ());
404 int rh_prec
= TYPE_PRECISION (rh
.type ());
406 irange_to_masked_value (lh
, lh_value
, lh_mask
);
407 irange_to_masked_value (rh
, rh_value
, rh_mask
);
408 bit_value_binop (code
, sign
, prec
, &value
, &mask
,
409 lh_sign
, lh_prec
, lh_value
, lh_mask
,
410 rh_sign
, rh_prec
, rh_value
, rh_mask
);
411 wide_int tmp
= wide_int::from (value
| mask
, prec
, sign
);
412 r
.set_nonzero_bits (tmp
);
415 // Return the upper limit for a type.
417 static inline wide_int
418 max_limit (const_tree type
)
420 return irange_val_max (type
);
423 // Return the lower limit for a type.
425 static inline wide_int
426 min_limit (const_tree type
)
428 return irange_val_min (type
);
431 // Return false if shifting by OP is undefined behavior. Otherwise, return
432 // true and the range it is to be shifted by. This allows trimming out of
433 // undefined ranges, leaving only valid ranges if there are any.
436 get_shift_range (irange
&r
, tree type
, const irange
&op
)
438 if (op
.undefined_p ())
441 // Build valid range and intersect it with the shift range.
442 r
= value_range (op
.type (),
443 wi::shwi (0, TYPE_PRECISION (op
.type ())),
444 wi::shwi (TYPE_PRECISION (type
) - 1, TYPE_PRECISION (op
.type ())));
447 // If there are no valid ranges in the shift range, returned false.
448 if (r
.undefined_p ())
453 // Default wide_int fold operation returns [MIN, MAX].
456 range_operator::wi_fold (irange
&r
, tree type
,
457 const wide_int
&lh_lb ATTRIBUTE_UNUSED
,
458 const wide_int
&lh_ub ATTRIBUTE_UNUSED
,
459 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
460 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
462 gcc_checking_assert (r
.supports_type_p (type
));
463 r
.set_varying (type
);
466 // Call wi_fold when both op1 and op2 are equivalent. Further split small
467 // subranges into constants. This can provide better precision.
468 // For x + y, when x == y with a range of [0,4] instead of [0, 8] produce
469 // [0,0][2, 2][4,4][6, 6][8, 8]
470 // LIMIT is the maximum number of elements in range allowed before we
471 // do not process them individually.
474 range_operator::wi_fold_in_parts_equiv (irange
&r
, tree type
,
475 const wide_int
&lh_lb
,
476 const wide_int
&lh_ub
,
477 unsigned limit
) const
480 widest_int lh_range
= wi::sub (widest_int::from (lh_ub
, TYPE_SIGN (type
)),
481 widest_int::from (lh_lb
, TYPE_SIGN (type
)));
482 // if there are 1 to 8 values in the LH range, split them up.
484 if (lh_range
>= 0 && lh_range
< limit
)
486 for (unsigned x
= 0; x
<= lh_range
; x
++)
488 wide_int val
= lh_lb
+ x
;
489 wi_fold (tmp
, type
, val
, val
, val
, val
);
493 // Otherwise just call wi_fold.
495 wi_fold (r
, type
, lh_lb
, lh_ub
, lh_lb
, lh_ub
);
498 // Call wi_fold, except further split small subranges into constants.
499 // This can provide better precision. For something 8 >> [0,1]
500 // Instead of [8, 16], we will produce [8,8][16,16]
503 range_operator::wi_fold_in_parts (irange
&r
, tree type
,
504 const wide_int
&lh_lb
,
505 const wide_int
&lh_ub
,
506 const wide_int
&rh_lb
,
507 const wide_int
&rh_ub
) const
510 widest_int rh_range
= wi::sub (widest_int::from (rh_ub
, TYPE_SIGN (type
)),
511 widest_int::from (rh_lb
, TYPE_SIGN (type
)));
512 widest_int lh_range
= wi::sub (widest_int::from (lh_ub
, TYPE_SIGN (type
)),
513 widest_int::from (lh_lb
, TYPE_SIGN (type
)));
514 // If there are 2, 3, or 4 values in the RH range, do them separately.
515 // Call wi_fold_in_parts to check the RH side.
516 if (rh_range
> 0 && rh_range
< 4)
518 wi_fold_in_parts (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_lb
);
521 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
+ 1, rh_lb
+ 1);
525 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
+ 2, rh_lb
+ 2);
529 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_ub
, rh_ub
);
532 // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
533 // The RH side has been checked, so no recursion needed.
534 else if (lh_range
> 0 && lh_range
< 4)
536 wi_fold (r
, type
, lh_lb
, lh_lb
, rh_lb
, rh_ub
);
539 wi_fold (tmp
, type
, lh_lb
+ 1, lh_lb
+ 1, rh_lb
, rh_ub
);
543 wi_fold (tmp
, type
, lh_lb
+ 2, lh_lb
+ 2, rh_lb
, rh_ub
);
547 wi_fold (tmp
, type
, lh_ub
, lh_ub
, rh_lb
, rh_ub
);
550 // Otherwise just call wi_fold.
552 wi_fold (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
555 // The default for fold is to break all ranges into sub-ranges and
556 // invoke the wi_fold method on each sub-range pair.
559 range_operator::fold_range (irange
&r
, tree type
,
562 relation_trio trio
) const
564 gcc_checking_assert (r
.supports_type_p (type
));
565 if (empty_range_varying (r
, type
, lh
, rh
))
568 relation_kind rel
= trio
.op1_op2 ();
569 unsigned num_lh
= lh
.num_pairs ();
570 unsigned num_rh
= rh
.num_pairs ();
572 // If op1 and op2 are equivalences, then we don't need a complete cross
573 // product, just pairs of matching elements.
574 if (relation_equiv_p (rel
) && lh
== rh
)
578 for (unsigned x
= 0; x
< num_lh
; ++x
)
580 // If the number of subranges is too high, limit subrange creation.
581 unsigned limit
= (r
.num_pairs () > 32) ? 0 : 8;
582 wide_int lh_lb
= lh
.lower_bound (x
);
583 wide_int lh_ub
= lh
.upper_bound (x
);
584 wi_fold_in_parts_equiv (tmp
, type
, lh_lb
, lh_ub
, limit
);
589 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
590 update_bitmask (r
, lh
, rh
);
594 // If both ranges are single pairs, fold directly into the result range.
595 // If the number of subranges grows too high, produce a summary result as the
596 // loop becomes exponential with little benefit. See PR 103821.
597 if ((num_lh
== 1 && num_rh
== 1) || num_lh
* num_rh
> 12)
599 wi_fold_in_parts (r
, type
, lh
.lower_bound (), lh
.upper_bound (),
600 rh
.lower_bound (), rh
.upper_bound ());
601 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
602 update_bitmask (r
, lh
, rh
);
608 for (unsigned x
= 0; x
< num_lh
; ++x
)
609 for (unsigned y
= 0; y
< num_rh
; ++y
)
611 wide_int lh_lb
= lh
.lower_bound (x
);
612 wide_int lh_ub
= lh
.upper_bound (x
);
613 wide_int rh_lb
= rh
.lower_bound (y
);
614 wide_int rh_ub
= rh
.upper_bound (y
);
615 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
619 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
620 update_bitmask (r
, lh
, rh
);
624 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
625 update_bitmask (r
, lh
, rh
);
629 // The default for op1_range is to return false.
632 range_operator::op1_range (irange
&r ATTRIBUTE_UNUSED
,
633 tree type ATTRIBUTE_UNUSED
,
634 const irange
&lhs ATTRIBUTE_UNUSED
,
635 const irange
&op2 ATTRIBUTE_UNUSED
,
641 // The default for op2_range is to return false.
644 range_operator::op2_range (irange
&r ATTRIBUTE_UNUSED
,
645 tree type ATTRIBUTE_UNUSED
,
646 const irange
&lhs ATTRIBUTE_UNUSED
,
647 const irange
&op1 ATTRIBUTE_UNUSED
,
653 // The default relation routines return VREL_VARYING.
656 range_operator::lhs_op1_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
657 const irange
&op1 ATTRIBUTE_UNUSED
,
658 const irange
&op2 ATTRIBUTE_UNUSED
,
659 relation_kind rel ATTRIBUTE_UNUSED
) const
665 range_operator::lhs_op2_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
666 const irange
&op1 ATTRIBUTE_UNUSED
,
667 const irange
&op2 ATTRIBUTE_UNUSED
,
668 relation_kind rel ATTRIBUTE_UNUSED
) const
674 range_operator::op1_op2_relation (const irange
&lhs ATTRIBUTE_UNUSED
) const
679 // Default is no relation affects the LHS.
682 range_operator::op1_op2_relation_effect (irange
&lhs_range ATTRIBUTE_UNUSED
,
683 tree type ATTRIBUTE_UNUSED
,
684 const irange
&op1_range ATTRIBUTE_UNUSED
,
685 const irange
&op2_range ATTRIBUTE_UNUSED
,
686 relation_kind rel ATTRIBUTE_UNUSED
) const
691 // Apply any known bitmask updates based on this operator.
694 range_operator::update_bitmask (irange
&, const irange
&,
695 const irange
&) const
699 // Create and return a range from a pair of wide-ints that are known
700 // to have overflowed (or underflowed).
703 value_range_from_overflowed_bounds (irange
&r
, tree type
,
704 const wide_int
&wmin
,
705 const wide_int
&wmax
)
707 const signop sgn
= TYPE_SIGN (type
);
708 const unsigned int prec
= TYPE_PRECISION (type
);
710 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
711 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
716 if (wi::cmp (tmin
, tmax
, sgn
) < 0)
719 if (wi::cmp (tmax
, tem
, sgn
) > 0)
722 // If the anti-range would cover nothing, drop to varying.
723 // Likewise if the anti-range bounds are outside of the types
725 if (covers
|| wi::cmp (tmin
, tmax
, sgn
) > 0)
726 r
.set_varying (type
);
728 r
.set (type
, tmin
, tmax
, VR_ANTI_RANGE
);
731 // Create and return a range from a pair of wide-ints. MIN_OVF and
732 // MAX_OVF describe any overflow that might have occurred while
733 // calculating WMIN and WMAX respectively.
736 value_range_with_overflow (irange
&r
, tree type
,
737 const wide_int
&wmin
, const wide_int
&wmax
,
738 wi::overflow_type min_ovf
= wi::OVF_NONE
,
739 wi::overflow_type max_ovf
= wi::OVF_NONE
)
741 const signop sgn
= TYPE_SIGN (type
);
742 const unsigned int prec
= TYPE_PRECISION (type
);
743 const bool overflow_wraps
= TYPE_OVERFLOW_WRAPS (type
);
745 // For one bit precision if max != min, then the range covers all
747 if (prec
== 1 && wi::ne_p (wmax
, wmin
))
749 r
.set_varying (type
);
755 // If overflow wraps, truncate the values and adjust the range,
756 // kind, and bounds appropriately.
757 if ((min_ovf
!= wi::OVF_NONE
) == (max_ovf
!= wi::OVF_NONE
))
759 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
760 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
761 // If the limits are swapped, we wrapped around and cover
763 if (wi::gt_p (tmin
, tmax
, sgn
))
764 r
.set_varying (type
);
766 // No overflow or both overflow or underflow. The range
767 // kind stays normal.
768 r
.set (type
, tmin
, tmax
);
772 if ((min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_NONE
)
773 || (max_ovf
== wi::OVF_OVERFLOW
&& min_ovf
== wi::OVF_NONE
))
774 value_range_from_overflowed_bounds (r
, type
, wmin
, wmax
);
776 // Other underflow and/or overflow, drop to VR_VARYING.
777 r
.set_varying (type
);
781 // If both bounds either underflowed or overflowed, then the result
783 if ((min_ovf
== wi::OVF_OVERFLOW
&& max_ovf
== wi::OVF_OVERFLOW
)
784 || (min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_UNDERFLOW
))
790 // If overflow does not wrap, saturate to [MIN, MAX].
791 wide_int new_lb
, new_ub
;
792 if (min_ovf
== wi::OVF_UNDERFLOW
)
793 new_lb
= wi::min_value (prec
, sgn
);
794 else if (min_ovf
== wi::OVF_OVERFLOW
)
795 new_lb
= wi::max_value (prec
, sgn
);
799 if (max_ovf
== wi::OVF_UNDERFLOW
)
800 new_ub
= wi::min_value (prec
, sgn
);
801 else if (max_ovf
== wi::OVF_OVERFLOW
)
802 new_ub
= wi::max_value (prec
, sgn
);
806 r
.set (type
, new_lb
, new_ub
);
810 // Create and return a range from a pair of wide-ints. Canonicalize
811 // the case where the bounds are swapped. In which case, we transform
812 // [10,5] into [MIN,5][10,MAX].
815 create_possibly_reversed_range (irange
&r
, tree type
,
816 const wide_int
&new_lb
, const wide_int
&new_ub
)
818 signop s
= TYPE_SIGN (type
);
819 // If the bounds are swapped, treat the result as if an overflow occurred.
820 if (wi::gt_p (new_lb
, new_ub
, s
))
821 value_range_from_overflowed_bounds (r
, type
, new_lb
, new_ub
);
823 // Otherwise it's just a normal range.
824 r
.set (type
, new_lb
, new_ub
);
827 // Return the summary information about boolean range LHS. If EMPTY/FULL,
828 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
831 get_bool_state (vrange
&r
, const vrange
&lhs
, tree val_type
)
833 // If there is no result, then this is unexecutable.
834 if (lhs
.undefined_p ())
843 // For TRUE, we can't just test for [1,1] because Ada can have
844 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
845 if (lhs
.contains_p (build_zero_cst (lhs
.type ())))
847 r
.set_varying (val_type
);
854 // ------------------------------------------------------------------------
857 operator_equal::update_bitmask (irange
&r
, const irange
&lh
,
858 const irange
&rh
) const
860 update_known_bitmask (r
, EQ_EXPR
, lh
, rh
);
863 // Check if the LHS range indicates a relation between OP1 and OP2.
866 operator_equal::op1_op2_relation (const irange
&lhs
) const
868 if (lhs
.undefined_p ())
869 return VREL_UNDEFINED
;
871 // FALSE = op1 == op2 indicates NE_EXPR.
875 // TRUE = op1 == op2 indicates EQ_EXPR.
876 if (lhs
.undefined_p () || !contains_zero_p (lhs
))
882 operator_equal::fold_range (irange
&r
, tree type
,
885 relation_trio rel
) const
887 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_EQ
))
890 // We can be sure the values are always equal or not if both ranges
891 // consist of a single value, and then compare them.
892 if (wi::eq_p (op1
.lower_bound (), op1
.upper_bound ())
893 && wi::eq_p (op2
.lower_bound (), op2
.upper_bound ()))
895 if (wi::eq_p (op1
.lower_bound (), op2
.upper_bound()))
896 r
= range_true (type
);
898 r
= range_false (type
);
902 // If ranges do not intersect, we know the range is not equal,
903 // otherwise we don't know anything for sure.
904 int_range_max tmp
= op1
;
906 if (tmp
.undefined_p ())
907 r
= range_false (type
);
909 r
= range_true_and_false (type
);
915 operator_equal::op1_range (irange
&r
, tree type
,
920 switch (get_bool_state (r
, lhs
, type
))
923 // If it's true, the result is the same as OP2.
928 // If the result is false, the only time we know anything is
929 // if OP2 is a constant.
930 if (!op2
.undefined_p ()
931 && wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
937 r
.set_varying (type
);
947 operator_equal::op2_range (irange
&r
, tree type
,
950 relation_trio rel
) const
952 return operator_equal::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
955 // -------------------------------------------------------------------------
958 operator_not_equal::update_bitmask (irange
&r
, const irange
&lh
,
959 const irange
&rh
) const
961 update_known_bitmask (r
, NE_EXPR
, lh
, rh
);
964 // Check if the LHS range indicates a relation between OP1 and OP2.
967 operator_not_equal::op1_op2_relation (const irange
&lhs
) const
969 if (lhs
.undefined_p ())
970 return VREL_UNDEFINED
;
972 // FALSE = op1 != op2 indicates EQ_EXPR.
976 // TRUE = op1 != op2 indicates NE_EXPR.
977 if (lhs
.undefined_p () || !contains_zero_p (lhs
))
983 operator_not_equal::fold_range (irange
&r
, tree type
,
986 relation_trio rel
) const
988 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_NE
))
991 // We can be sure the values are always equal or not if both ranges
992 // consist of a single value, and then compare them.
993 if (wi::eq_p (op1
.lower_bound (), op1
.upper_bound ())
994 && wi::eq_p (op2
.lower_bound (), op2
.upper_bound ()))
996 if (wi::ne_p (op1
.lower_bound (), op2
.upper_bound()))
997 r
= range_true (type
);
999 r
= range_false (type
);
1003 // If ranges do not intersect, we know the range is not equal,
1004 // otherwise we don't know anything for sure.
1005 int_range_max tmp
= op1
;
1006 tmp
.intersect (op2
);
1007 if (tmp
.undefined_p ())
1008 r
= range_true (type
);
1010 r
= range_true_and_false (type
);
1016 operator_not_equal::op1_range (irange
&r
, tree type
,
1019 relation_trio
) const
1021 switch (get_bool_state (r
, lhs
, type
))
1024 // If the result is true, the only time we know anything is if
1025 // OP2 is a constant.
1026 if (!op2
.undefined_p ()
1027 && wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
1033 r
.set_varying (type
);
1037 // If it's false, the result is the same as OP2.
1049 operator_not_equal::op2_range (irange
&r
, tree type
,
1052 relation_trio rel
) const
1054 return operator_not_equal::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
1057 // (X < VAL) produces the range of [MIN, VAL - 1].
1060 build_lt (irange
&r
, tree type
, const wide_int
&val
)
1062 wi::overflow_type ov
;
1064 signop sgn
= TYPE_SIGN (type
);
1066 // Signed 1 bit cannot represent 1 for subtraction.
1068 lim
= wi::add (val
, -1, sgn
, &ov
);
1070 lim
= wi::sub (val
, 1, sgn
, &ov
);
1072 // If val - 1 underflows, check if X < MIN, which is an empty range.
1076 r
= int_range
<1> (type
, min_limit (type
), lim
);
1079 // (X <= VAL) produces the range of [MIN, VAL].
1082 build_le (irange
&r
, tree type
, const wide_int
&val
)
1084 r
= int_range
<1> (type
, min_limit (type
), val
);
1087 // (X > VAL) produces the range of [VAL + 1, MAX].
1090 build_gt (irange
&r
, tree type
, const wide_int
&val
)
1092 wi::overflow_type ov
;
1094 signop sgn
= TYPE_SIGN (type
);
1096 // Signed 1 bit cannot represent 1 for addition.
1098 lim
= wi::sub (val
, -1, sgn
, &ov
);
1100 lim
= wi::add (val
, 1, sgn
, &ov
);
1101 // If val + 1 overflows, check is for X > MAX, which is an empty range.
1105 r
= int_range
<1> (type
, lim
, max_limit (type
));
1108 // (X >= val) produces the range of [VAL, MAX].
1111 build_ge (irange
&r
, tree type
, const wide_int
&val
)
1113 r
= int_range
<1> (type
, val
, max_limit (type
));
1118 operator_lt::update_bitmask (irange
&r
, const irange
&lh
,
1119 const irange
&rh
) const
1121 update_known_bitmask (r
, LT_EXPR
, lh
, rh
);
1124 // Check if the LHS range indicates a relation between OP1 and OP2.
1127 operator_lt::op1_op2_relation (const irange
&lhs
) const
1129 if (lhs
.undefined_p ())
1130 return VREL_UNDEFINED
;
1132 // FALSE = op1 < op2 indicates GE_EXPR.
1136 // TRUE = op1 < op2 indicates LT_EXPR.
1137 if (lhs
.undefined_p () || !contains_zero_p (lhs
))
1139 return VREL_VARYING
;
1143 operator_lt::fold_range (irange
&r
, tree type
,
1146 relation_trio rel
) const
1148 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_LT
))
1151 signop sign
= TYPE_SIGN (op1
.type ());
1152 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1154 if (wi::lt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1155 r
= range_true (type
);
1156 else if (!wi::lt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1157 r
= range_false (type
);
1158 // Use nonzero bits to determine if < 0 is false.
1159 else if (op2
.zero_p () && !wi::neg_p (op1
.get_nonzero_bits (), sign
))
1160 r
= range_false (type
);
1162 r
= range_true_and_false (type
);
1167 operator_lt::op1_range (irange
&r
, tree type
,
1170 relation_trio
) const
1172 if (op2
.undefined_p ())
1175 switch (get_bool_state (r
, lhs
, type
))
1178 build_lt (r
, type
, op2
.upper_bound ());
1182 build_ge (r
, type
, op2
.lower_bound ());
1192 operator_lt::op2_range (irange
&r
, tree type
,
1195 relation_trio
) const
1197 if (op1
.undefined_p ())
1200 switch (get_bool_state (r
, lhs
, type
))
1203 build_gt (r
, type
, op1
.lower_bound ());
1207 build_le (r
, type
, op1
.upper_bound ());
1218 operator_le::update_bitmask (irange
&r
, const irange
&lh
,
1219 const irange
&rh
) const
1221 update_known_bitmask (r
, LE_EXPR
, lh
, rh
);
1224 // Check if the LHS range indicates a relation between OP1 and OP2.
1227 operator_le::op1_op2_relation (const irange
&lhs
) const
1229 if (lhs
.undefined_p ())
1230 return VREL_UNDEFINED
;
1232 // FALSE = op1 <= op2 indicates GT_EXPR.
1236 // TRUE = op1 <= op2 indicates LE_EXPR.
1237 if (lhs
.undefined_p () || !contains_zero_p (lhs
))
1239 return VREL_VARYING
;
1243 operator_le::fold_range (irange
&r
, tree type
,
1246 relation_trio rel
) const
1248 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_LE
))
1251 signop sign
= TYPE_SIGN (op1
.type ());
1252 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1254 if (wi::le_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1255 r
= range_true (type
);
1256 else if (!wi::le_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1257 r
= range_false (type
);
1259 r
= range_true_and_false (type
);
1264 operator_le::op1_range (irange
&r
, tree type
,
1267 relation_trio
) const
1269 if (op2
.undefined_p ())
1272 switch (get_bool_state (r
, lhs
, type
))
1275 build_le (r
, type
, op2
.upper_bound ());
1279 build_gt (r
, type
, op2
.lower_bound ());
1289 operator_le::op2_range (irange
&r
, tree type
,
1292 relation_trio
) const
1294 if (op1
.undefined_p ())
1297 switch (get_bool_state (r
, lhs
, type
))
1300 build_ge (r
, type
, op1
.lower_bound ());
1304 build_lt (r
, type
, op1
.upper_bound ());
1315 operator_gt::update_bitmask (irange
&r
, const irange
&lh
,
1316 const irange
&rh
) const
1318 update_known_bitmask (r
, GT_EXPR
, lh
, rh
);
1321 // Check if the LHS range indicates a relation between OP1 and OP2.
1324 operator_gt::op1_op2_relation (const irange
&lhs
) const
1326 if (lhs
.undefined_p ())
1327 return VREL_UNDEFINED
;
1329 // FALSE = op1 > op2 indicates LE_EXPR.
1333 // TRUE = op1 > op2 indicates GT_EXPR.
1334 if (!contains_zero_p (lhs
))
1336 return VREL_VARYING
;
1340 operator_gt::fold_range (irange
&r
, tree type
,
1341 const irange
&op1
, const irange
&op2
,
1342 relation_trio rel
) const
1344 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_GT
))
1347 signop sign
= TYPE_SIGN (op1
.type ());
1348 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1350 if (wi::gt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1351 r
= range_true (type
);
1352 else if (!wi::gt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1353 r
= range_false (type
);
1355 r
= range_true_and_false (type
);
1360 operator_gt::op1_range (irange
&r
, tree type
,
1361 const irange
&lhs
, const irange
&op2
,
1362 relation_trio
) const
1364 if (op2
.undefined_p ())
1367 switch (get_bool_state (r
, lhs
, type
))
1370 build_gt (r
, type
, op2
.lower_bound ());
1374 build_le (r
, type
, op2
.upper_bound ());
1384 operator_gt::op2_range (irange
&r
, tree type
,
1387 relation_trio
) const
1389 if (op1
.undefined_p ())
1392 switch (get_bool_state (r
, lhs
, type
))
1395 build_lt (r
, type
, op1
.upper_bound ());
1399 build_ge (r
, type
, op1
.lower_bound ());
1410 operator_ge::update_bitmask (irange
&r
, const irange
&lh
,
1411 const irange
&rh
) const
1413 update_known_bitmask (r
, GE_EXPR
, lh
, rh
);
1416 // Check if the LHS range indicates a relation between OP1 and OP2.
1419 operator_ge::op1_op2_relation (const irange
&lhs
) const
1421 if (lhs
.undefined_p ())
1422 return VREL_UNDEFINED
;
1424 // FALSE = op1 >= op2 indicates LT_EXPR.
1428 // TRUE = op1 >= op2 indicates GE_EXPR.
1429 if (!contains_zero_p (lhs
))
1431 return VREL_VARYING
;
1435 operator_ge::fold_range (irange
&r
, tree type
,
1438 relation_trio rel
) const
1440 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_GE
))
1443 signop sign
= TYPE_SIGN (op1
.type ());
1444 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1446 if (wi::ge_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1447 r
= range_true (type
);
1448 else if (!wi::ge_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1449 r
= range_false (type
);
1451 r
= range_true_and_false (type
);
1456 operator_ge::op1_range (irange
&r
, tree type
,
1459 relation_trio
) const
1461 if (op2
.undefined_p ())
1464 switch (get_bool_state (r
, lhs
, type
))
1467 build_ge (r
, type
, op2
.lower_bound ());
1471 build_lt (r
, type
, op2
.upper_bound ());
1481 operator_ge::op2_range (irange
&r
, tree type
,
1484 relation_trio
) const
1486 if (op1
.undefined_p ())
1489 switch (get_bool_state (r
, lhs
, type
))
1492 build_le (r
, type
, op1
.upper_bound ());
1496 build_gt (r
, type
, op1
.lower_bound ());
1507 operator_plus::update_bitmask (irange
&r
, const irange
&lh
,
1508 const irange
&rh
) const
1510 update_known_bitmask (r
, PLUS_EXPR
, lh
, rh
);
1513 // Check to see if the range of OP2 indicates anything about the relation
1514 // between LHS and OP1.
1517 operator_plus::lhs_op1_relation (const irange
&lhs
,
1520 relation_kind
) const
1522 if (lhs
.undefined_p () || op1
.undefined_p () || op2
.undefined_p ())
1523 return VREL_VARYING
;
1525 tree type
= lhs
.type ();
1526 unsigned prec
= TYPE_PRECISION (type
);
1527 wi::overflow_type ovf1
, ovf2
;
1528 signop sign
= TYPE_SIGN (type
);
1530 // LHS = OP1 + 0 indicates LHS == OP1.
1534 if (TYPE_OVERFLOW_WRAPS (type
))
1536 wi::add (op1
.lower_bound (), op2
.lower_bound (), sign
, &ovf1
);
1537 wi::add (op1
.upper_bound (), op2
.upper_bound (), sign
, &ovf2
);
1540 ovf1
= ovf2
= wi::OVF_NONE
;
1542 // Never wrapping additions.
1545 // Positive op2 means lhs > op1.
1546 if (wi::gt_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1548 if (wi::ge_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1551 // Negative op2 means lhs < op1.
1552 if (wi::lt_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1554 if (wi::le_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1557 // Always wrapping additions.
1558 else if (ovf1
&& ovf1
== ovf2
)
1560 // Positive op2 means lhs < op1.
1561 if (wi::gt_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1563 if (wi::ge_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1566 // Negative op2 means lhs > op1.
1567 if (wi::lt_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1569 if (wi::le_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1573 // If op2 does not contain 0, then LHS and OP1 can never be equal.
1574 if (!range_includes_zero_p (&op2
))
1577 return VREL_VARYING
;
1580 // PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
1584 operator_plus::lhs_op2_relation (const irange
&lhs
, const irange
&op1
,
1585 const irange
&op2
, relation_kind rel
) const
1587 return lhs_op1_relation (lhs
, op2
, op1
, rel
);
1591 operator_plus::wi_fold (irange
&r
, tree type
,
1592 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1593 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1595 wi::overflow_type ov_lb
, ov_ub
;
1596 signop s
= TYPE_SIGN (type
);
1597 wide_int new_lb
= wi::add (lh_lb
, rh_lb
, s
, &ov_lb
);
1598 wide_int new_ub
= wi::add (lh_ub
, rh_ub
, s
, &ov_ub
);
1599 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
1602 // Given addition or subtraction, determine the possible NORMAL ranges and
1603 // OVERFLOW ranges given an OFFSET range. ADD_P is true for addition.
1604 // Return the relation that exists between the LHS and OP1 in order for the
1605 // NORMAL range to apply.
1606 // a return value of VREL_VARYING means no ranges were applicable.
1608 static relation_kind
1609 plus_minus_ranges (irange
&r_ov
, irange
&r_normal
, const irange
&offset
,
1612 relation_kind kind
= VREL_VARYING
;
1613 // For now, only deal with constant adds. This could be extended to ranges
1614 // when someone is so motivated.
1615 if (!offset
.singleton_p () || offset
.zero_p ())
1618 // Always work with a positive offset. ie a+ -2 -> a-2 and a- -2 > a+2
1619 wide_int off
= offset
.lower_bound ();
1620 if (wi::neg_p (off
, SIGNED
))
1623 off
= wi::neg (off
);
1626 wi::overflow_type ov
;
1627 tree type
= offset
.type ();
1628 unsigned prec
= TYPE_PRECISION (type
);
1631 // calculate the normal range and relation for the operation.
1635 lb
= wi::zero (prec
);
1636 ub
= wi::sub (irange_val_max (type
), off
, UNSIGNED
, &ov
);
1643 ub
= irange_val_max (type
);
1646 int_range
<2> normal_range (type
, lb
, ub
);
1647 int_range
<2> ov_range (type
, lb
, ub
, VR_ANTI_RANGE
);
1650 r_normal
= normal_range
;
1654 // Once op1 has been calculated by operator_plus or operator_minus, check
1655 // to see if the relation passed causes any part of the calculation to
1656 // be not possible. ie
1657 // a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
1658 // and that further refines a_2 to [0, 0].
1659 // R is the value of op1, OP2 is the offset being added/subtracted, REL is the
1660 // relation between LHS relation OP1 and ADD_P is true for PLUS, false for
1661 // MINUS. IF any adjustment can be made, R will reflect it.
1664 adjust_op1_for_overflow (irange
&r
, const irange
&op2
, relation_kind rel
,
1667 if (r
.undefined_p ())
1669 tree type
= r
.type ();
1670 // Check for unsigned overflow and calculate the overflow part.
1671 signop s
= TYPE_SIGN (type
);
1672 if (!TYPE_OVERFLOW_WRAPS (type
) || s
== SIGNED
)
1675 // Only work with <, <=, >, >= relations.
1676 if (!relation_lt_le_gt_ge_p (rel
))
1679 // Get the ranges for this offset.
1680 int_range_max normal
, overflow
;
1681 relation_kind k
= plus_minus_ranges (overflow
, normal
, op2
, add_p
);
1683 // VREL_VARYING means there are no adjustments.
1684 if (k
== VREL_VARYING
)
1687 // If the relations match use the normal range, otherwise use overflow range.
1688 if (relation_intersect (k
, rel
) == k
)
1689 r
.intersect (normal
);
1691 r
.intersect (overflow
);
1696 operator_plus::op1_range (irange
&r
, tree type
,
1699 relation_trio trio
) const
1701 if (lhs
.undefined_p ())
1703 // Start with the default operation.
1704 range_op_handler
minus (MINUS_EXPR
);
1707 bool res
= minus
.fold_range (r
, type
, lhs
, op2
);
1708 relation_kind rel
= trio
.lhs_op1 ();
1709 // Check for a relation refinement.
1711 adjust_op1_for_overflow (r
, op2
, rel
, true /* PLUS_EXPR */);
1716 operator_plus::op2_range (irange
&r
, tree type
,
1719 relation_trio rel
) const
1721 return op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
1724 class operator_widen_plus_signed
: public range_operator
1727 virtual void wi_fold (irange
&r
, tree type
,
1728 const wide_int
&lh_lb
,
1729 const wide_int
&lh_ub
,
1730 const wide_int
&rh_lb
,
1731 const wide_int
&rh_ub
) const;
1732 } op_widen_plus_signed
;
1735 operator_widen_plus_signed::wi_fold (irange
&r
, tree type
,
1736 const wide_int
&lh_lb
,
1737 const wide_int
&lh_ub
,
1738 const wide_int
&rh_lb
,
1739 const wide_int
&rh_ub
) const
1741 wi::overflow_type ov_lb
, ov_ub
;
1742 signop s
= TYPE_SIGN (type
);
1745 = wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, SIGNED
);
1747 = wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, SIGNED
);
1748 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
1749 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
1751 wide_int new_lb
= wi::add (lh_wlb
, rh_wlb
, s
, &ov_lb
);
1752 wide_int new_ub
= wi::add (lh_wub
, rh_wub
, s
, &ov_ub
);
1754 r
= int_range
<2> (type
, new_lb
, new_ub
);
1757 class operator_widen_plus_unsigned
: public range_operator
1760 virtual void wi_fold (irange
&r
, tree type
,
1761 const wide_int
&lh_lb
,
1762 const wide_int
&lh_ub
,
1763 const wide_int
&rh_lb
,
1764 const wide_int
&rh_ub
) const;
1765 } op_widen_plus_unsigned
;
1768 operator_widen_plus_unsigned::wi_fold (irange
&r
, tree type
,
1769 const wide_int
&lh_lb
,
1770 const wide_int
&lh_ub
,
1771 const wide_int
&rh_lb
,
1772 const wide_int
&rh_ub
) const
1774 wi::overflow_type ov_lb
, ov_ub
;
1775 signop s
= TYPE_SIGN (type
);
1778 = wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, UNSIGNED
);
1780 = wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, UNSIGNED
);
1781 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
1782 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
1784 wide_int new_lb
= wi::add (lh_wlb
, rh_wlb
, s
, &ov_lb
);
1785 wide_int new_ub
= wi::add (lh_wub
, rh_wub
, s
, &ov_ub
);
1787 r
= int_range
<2> (type
, new_lb
, new_ub
);
1791 operator_minus::update_bitmask (irange
&r
, const irange
&lh
,
1792 const irange
&rh
) const
1794 update_known_bitmask (r
, MINUS_EXPR
, lh
, rh
);
1798 operator_minus::wi_fold (irange
&r
, tree type
,
1799 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1800 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1802 wi::overflow_type ov_lb
, ov_ub
;
1803 signop s
= TYPE_SIGN (type
);
1804 wide_int new_lb
= wi::sub (lh_lb
, rh_ub
, s
, &ov_lb
);
1805 wide_int new_ub
= wi::sub (lh_ub
, rh_lb
, s
, &ov_ub
);
1806 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
1810 // Return the relation between LHS and OP1 based on the relation between
1814 operator_minus::lhs_op1_relation (const irange
&, const irange
&op1
,
1815 const irange
&, relation_kind rel
) const
1817 if (!op1
.undefined_p () && TYPE_SIGN (op1
.type ()) == UNSIGNED
)
1826 return VREL_VARYING
;
1829 // Check to see if the relation REL between OP1 and OP2 has any effect on the
1830 // LHS of the expression. If so, apply it to LHS_RANGE. This is a helper
1831 // function for both MINUS_EXPR and POINTER_DIFF_EXPR.
1834 minus_op1_op2_relation_effect (irange
&lhs_range
, tree type
,
1835 const irange
&op1_range ATTRIBUTE_UNUSED
,
1836 const irange
&op2_range ATTRIBUTE_UNUSED
,
1839 if (rel
== VREL_VARYING
)
1842 int_range
<2> rel_range
;
1843 unsigned prec
= TYPE_PRECISION (type
);
1844 signop sgn
= TYPE_SIGN (type
);
1846 // == and != produce [0,0] and ~[0,0] regardless of wrapping.
1848 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
));
1849 else if (rel
== VREL_NE
)
1850 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
),
1852 else if (TYPE_OVERFLOW_WRAPS (type
))
1856 // For wrapping signed values and unsigned, if op1 > op2 or
1857 // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
1860 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
),
1871 // op1 > op2, op1 - op2 can be restricted to [1, +INF]
1873 rel_range
= int_range
<2> (type
, wi::one (prec
),
1874 wi::max_value (prec
, sgn
));
1876 // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
1878 rel_range
= int_range
<2> (type
, wi::zero (prec
),
1879 wi::max_value (prec
, sgn
));
1881 // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
1883 rel_range
= int_range
<2> (type
, wi::min_value (prec
, sgn
),
1884 wi::minus_one (prec
));
1886 // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
1888 rel_range
= int_range
<2> (type
, wi::min_value (prec
, sgn
),
1895 lhs_range
.intersect (rel_range
);
1900 operator_minus::op1_op2_relation_effect (irange
&lhs_range
, tree type
,
1901 const irange
&op1_range
,
1902 const irange
&op2_range
,
1903 relation_kind rel
) const
1905 return minus_op1_op2_relation_effect (lhs_range
, type
, op1_range
, op2_range
,
1910 operator_minus::op1_range (irange
&r
, tree type
,
1913 relation_trio trio
) const
1915 if (lhs
.undefined_p ())
1917 // Start with the default operation.
1918 range_op_handler
minus (PLUS_EXPR
);
1921 bool res
= minus
.fold_range (r
, type
, lhs
, op2
);
1922 relation_kind rel
= trio
.lhs_op1 ();
1924 adjust_op1_for_overflow (r
, op2
, rel
, false /* PLUS_EXPR */);
1930 operator_minus::op2_range (irange
&r
, tree type
,
1933 relation_trio
) const
1935 if (lhs
.undefined_p ())
1937 return fold_range (r
, type
, op1
, lhs
);
1941 operator_min::update_bitmask (irange
&r
, const irange
&lh
,
1942 const irange
&rh
) const
1944 update_known_bitmask (r
, MIN_EXPR
, lh
, rh
);
1948 operator_min::wi_fold (irange
&r
, tree type
,
1949 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1950 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1952 signop s
= TYPE_SIGN (type
);
1953 wide_int new_lb
= wi::min (lh_lb
, rh_lb
, s
);
1954 wide_int new_ub
= wi::min (lh_ub
, rh_ub
, s
);
1955 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
1960 operator_max::update_bitmask (irange
&r
, const irange
&lh
,
1961 const irange
&rh
) const
1963 update_known_bitmask (r
, MAX_EXPR
, lh
, rh
);
1967 operator_max::wi_fold (irange
&r
, tree type
,
1968 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1969 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1971 signop s
= TYPE_SIGN (type
);
1972 wide_int new_lb
= wi::max (lh_lb
, rh_lb
, s
);
1973 wide_int new_ub
= wi::max (lh_ub
, rh_ub
, s
);
1974 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
1978 // Calculate the cross product of two sets of ranges and return it.
1980 // Multiplications, divisions and shifts are a bit tricky to handle,
1981 // depending on the mix of signs we have in the two ranges, we need to
1982 // operate on different values to get the minimum and maximum values
1983 // for the new range. One approach is to figure out all the
1984 // variations of range combinations and do the operations.
1986 // However, this involves several calls to compare_values and it is
1987 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
1988 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP MAX1) and then
1989 // figure the smallest and largest values to form the new range.
1992 cross_product_operator::wi_cross_product (irange
&r
, tree type
,
1993 const wide_int
&lh_lb
,
1994 const wide_int
&lh_ub
,
1995 const wide_int
&rh_lb
,
1996 const wide_int
&rh_ub
) const
1998 wide_int cp1
, cp2
, cp3
, cp4
;
1999 // Default to varying.
2000 r
.set_varying (type
);
2002 // Compute the 4 cross operations, bailing if we get an overflow we
2004 if (wi_op_overflows (cp1
, type
, lh_lb
, rh_lb
))
2006 if (wi::eq_p (lh_lb
, lh_ub
))
2008 else if (wi_op_overflows (cp3
, type
, lh_ub
, rh_lb
))
2010 if (wi::eq_p (rh_lb
, rh_ub
))
2012 else if (wi_op_overflows (cp2
, type
, lh_lb
, rh_ub
))
2014 if (wi::eq_p (lh_lb
, lh_ub
))
2016 else if (wi_op_overflows (cp4
, type
, lh_ub
, rh_ub
))
2020 signop sign
= TYPE_SIGN (type
);
2021 if (wi::gt_p (cp1
, cp2
, sign
))
2022 std::swap (cp1
, cp2
);
2023 if (wi::gt_p (cp3
, cp4
, sign
))
2024 std::swap (cp3
, cp4
);
2026 // Choose min and max from the ordered pairs.
2027 wide_int res_lb
= wi::min (cp1
, cp3
, sign
);
2028 wide_int res_ub
= wi::max (cp2
, cp4
, sign
);
2029 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
2034 operator_mult::update_bitmask (irange
&r
, const irange
&lh
,
2035 const irange
&rh
) const
2037 update_known_bitmask (r
, MULT_EXPR
, lh
, rh
);
2041 operator_mult::op1_range (irange
&r
, tree type
,
2042 const irange
&lhs
, const irange
&op2
,
2043 relation_trio
) const
2045 if (lhs
.undefined_p ())
2048 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
2049 // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
2050 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
2051 if (TYPE_OVERFLOW_WRAPS (type
))
2055 if (op2
.singleton_p (offset
) && offset
!= 0)
2056 return range_op_handler (TRUNC_DIV_EXPR
).fold_range (r
, type
, lhs
, op2
);
2061 operator_mult::op2_range (irange
&r
, tree type
,
2062 const irange
&lhs
, const irange
&op1
,
2063 relation_trio rel
) const
2065 return operator_mult::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
2069 operator_mult::wi_op_overflows (wide_int
&res
, tree type
,
2070 const wide_int
&w0
, const wide_int
&w1
) const
2072 wi::overflow_type overflow
= wi::OVF_NONE
;
2073 signop sign
= TYPE_SIGN (type
);
2074 res
= wi::mul (w0
, w1
, sign
, &overflow
);
2075 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
2077 // For multiplication, the sign of the overflow is given
2078 // by the comparison of the signs of the operands.
2079 if (sign
== UNSIGNED
|| w0
.sign_mask () == w1
.sign_mask ())
2080 res
= wi::max_value (w0
.get_precision (), sign
);
2082 res
= wi::min_value (w0
.get_precision (), sign
);
2089 operator_mult::wi_fold (irange
&r
, tree type
,
2090 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2091 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2093 if (TYPE_OVERFLOW_UNDEFINED (type
))
2095 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2099 // Multiply the ranges when overflow wraps. This is basically fancy
2100 // code so we don't drop to varying with an unsigned
2103 // This test requires 2*prec bits if both operands are signed and
2104 // 2*prec + 2 bits if either is not. Therefore, extend the values
2105 // using the sign of the result to PREC2. From here on out,
2106 // everything is just signed math no matter what the input types
2109 signop sign
= TYPE_SIGN (type
);
2110 unsigned prec
= TYPE_PRECISION (type
);
2111 widest2_int min0
= widest2_int::from (lh_lb
, sign
);
2112 widest2_int max0
= widest2_int::from (lh_ub
, sign
);
2113 widest2_int min1
= widest2_int::from (rh_lb
, sign
);
2114 widest2_int max1
= widest2_int::from (rh_ub
, sign
);
2115 widest2_int sizem1
= wi::mask
<widest2_int
> (prec
, false);
2116 widest2_int size
= sizem1
+ 1;
2118 // Canonicalize the intervals.
2119 if (sign
== UNSIGNED
)
2121 if (wi::ltu_p (size
, min0
+ max0
))
2126 if (wi::ltu_p (size
, min1
+ max1
))
2133 // Sort the 4 products so that min is in prod0 and max is in
2135 widest2_int prod0
= min0
* min1
;
2136 widest2_int prod1
= min0
* max1
;
2137 widest2_int prod2
= max0
* min1
;
2138 widest2_int prod3
= max0
* max1
;
2140 // min0min1 > max0max1
2142 std::swap (prod0
, prod3
);
2144 // min0max1 > max0min1
2146 std::swap (prod1
, prod2
);
2149 std::swap (prod0
, prod1
);
2152 std::swap (prod2
, prod3
);
2155 prod2
= prod3
- prod0
;
2156 if (wi::geu_p (prod2
, sizem1
))
2158 // Multiplying by X, where X is a power of 2 is [0,0][X,+INF].
2159 if (TYPE_UNSIGNED (type
) && rh_lb
== rh_ub
2160 && wi::exact_log2 (rh_lb
) != -1 && prec
> 1)
2162 r
.set (type
, rh_lb
, wi::max_value (prec
, sign
));
2164 zero
.set_zero (type
);
2168 // The range covers all values.
2169 r
.set_varying (type
);
2173 wide_int new_lb
= wide_int::from (prod0
, prec
, sign
);
2174 wide_int new_ub
= wide_int::from (prod3
, prec
, sign
);
2175 create_possibly_reversed_range (r
, type
, new_lb
, new_ub
);
2179 class operator_widen_mult_signed
: public range_operator
2182 virtual void wi_fold (irange
&r
, tree type
,
2183 const wide_int
&lh_lb
,
2184 const wide_int
&lh_ub
,
2185 const wide_int
&rh_lb
,
2186 const wide_int
&rh_ub
)
2188 } op_widen_mult_signed
;
2191 operator_widen_mult_signed::wi_fold (irange
&r
, tree type
,
2192 const wide_int
&lh_lb
,
2193 const wide_int
&lh_ub
,
2194 const wide_int
&rh_lb
,
2195 const wide_int
&rh_ub
) const
2197 signop s
= TYPE_SIGN (type
);
2199 wide_int lh_wlb
= wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, SIGNED
);
2200 wide_int lh_wub
= wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, SIGNED
);
2201 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
2202 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
2204 /* We don't expect a widening multiplication to be able to overflow but range
2205 calculations for multiplications are complicated. After widening the
2206 operands lets call the base class. */
2207 return op_mult
.wi_fold (r
, type
, lh_wlb
, lh_wub
, rh_wlb
, rh_wub
);
2211 class operator_widen_mult_unsigned
: public range_operator
2214 virtual void wi_fold (irange
&r
, tree type
,
2215 const wide_int
&lh_lb
,
2216 const wide_int
&lh_ub
,
2217 const wide_int
&rh_lb
,
2218 const wide_int
&rh_ub
)
2220 } op_widen_mult_unsigned
;
2223 operator_widen_mult_unsigned::wi_fold (irange
&r
, tree type
,
2224 const wide_int
&lh_lb
,
2225 const wide_int
&lh_ub
,
2226 const wide_int
&rh_lb
,
2227 const wide_int
&rh_ub
) const
2229 signop s
= TYPE_SIGN (type
);
2231 wide_int lh_wlb
= wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, UNSIGNED
);
2232 wide_int lh_wub
= wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, UNSIGNED
);
2233 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
2234 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
2236 /* We don't expect a widening multiplication to be able to overflow but range
2237 calculations for multiplications are complicated. After widening the
2238 operands lets call the base class. */
2239 return op_mult
.wi_fold (r
, type
, lh_wlb
, lh_wub
, rh_wlb
, rh_wub
);
2242 class operator_div
: public cross_product_operator
2245 operator_div (tree_code div_kind
) { m_code
= div_kind
; }
2246 virtual void wi_fold (irange
&r
, tree type
,
2247 const wide_int
&lh_lb
,
2248 const wide_int
&lh_ub
,
2249 const wide_int
&rh_lb
,
2250 const wide_int
&rh_ub
) const final override
;
2251 virtual bool wi_op_overflows (wide_int
&res
, tree type
,
2252 const wide_int
&, const wide_int
&)
2253 const final override
;
2254 void update_bitmask (irange
&r
, const irange
&lh
, const irange
&rh
) const
2255 { update_known_bitmask (r
, m_code
, lh
, rh
); }
2260 static operator_div
op_trunc_div (TRUNC_DIV_EXPR
);
2261 static operator_div
op_floor_div (FLOOR_DIV_EXPR
);
2262 static operator_div
op_round_div (ROUND_DIV_EXPR
);
2263 static operator_div
op_ceil_div (CEIL_DIV_EXPR
);
2266 operator_div::wi_op_overflows (wide_int
&res
, tree type
,
2267 const wide_int
&w0
, const wide_int
&w1
) const
2272 wi::overflow_type overflow
= wi::OVF_NONE
;
2273 signop sign
= TYPE_SIGN (type
);
2277 case EXACT_DIV_EXPR
:
2278 case TRUNC_DIV_EXPR
:
2279 res
= wi::div_trunc (w0
, w1
, sign
, &overflow
);
2281 case FLOOR_DIV_EXPR
:
2282 res
= wi::div_floor (w0
, w1
, sign
, &overflow
);
2284 case ROUND_DIV_EXPR
:
2285 res
= wi::div_round (w0
, w1
, sign
, &overflow
);
2288 res
= wi::div_ceil (w0
, w1
, sign
, &overflow
);
2294 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
2296 // For division, the only case is -INF / -1 = +INF.
2297 res
= wi::max_value (w0
.get_precision (), sign
);
2304 operator_div::wi_fold (irange
&r
, tree type
,
2305 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2306 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2308 const wide_int dividend_min
= lh_lb
;
2309 const wide_int dividend_max
= lh_ub
;
2310 const wide_int divisor_min
= rh_lb
;
2311 const wide_int divisor_max
= rh_ub
;
2312 signop sign
= TYPE_SIGN (type
);
2313 unsigned prec
= TYPE_PRECISION (type
);
2314 wide_int extra_min
, extra_max
;
2316 // If we know we won't divide by zero, just do the division.
2317 if (!wi_includes_zero_p (type
, divisor_min
, divisor_max
))
2319 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
2320 divisor_min
, divisor_max
);
2324 // If we're definitely dividing by zero, there's nothing to do.
2325 if (wi_zero_p (type
, divisor_min
, divisor_max
))
2331 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2332 // skip any division by zero.
2334 // First divide by the negative numbers, if any.
2335 if (wi::neg_p (divisor_min
, sign
))
2336 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
2337 divisor_min
, wi::minus_one (prec
));
2341 // Then divide by the non-zero positive numbers, if any.
2342 if (wi::gt_p (divisor_max
, wi::zero (prec
), sign
))
2345 wi_cross_product (tmp
, type
, dividend_min
, dividend_max
,
2346 wi::one (prec
), divisor_max
);
2349 // We shouldn't still have undefined here.
2350 gcc_checking_assert (!r
.undefined_p ());
2354 class operator_exact_divide
: public operator_div
2356 using range_operator::op1_range
;
2358 operator_exact_divide () : operator_div (EXACT_DIV_EXPR
) { }
2359 virtual bool op1_range (irange
&r
, tree type
,
2362 relation_trio
) const;
2367 operator_exact_divide::op1_range (irange
&r
, tree type
,
2370 relation_trio
) const
2372 if (lhs
.undefined_p ())
2375 // [2, 4] = op1 / [3,3] since its exact divide, no need to worry about
2376 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2377 // We wont bother trying to enumerate all the in between stuff :-P
2378 // TRUE accuracy is [6,6][9,9][12,12]. This is unlikely to matter most of
2379 // the time however.
2380 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
2381 if (op2
.singleton_p (offset
) && offset
!= 0)
2382 return range_op_handler (MULT_EXPR
).fold_range (r
, type
, lhs
, op2
);
2387 class operator_lshift
: public cross_product_operator
2389 using range_operator::fold_range
;
2390 using range_operator::op1_range
;
2392 virtual bool op1_range (irange
&r
, tree type
,
2395 relation_trio rel
= TRIO_VARYING
) const;
2396 virtual bool fold_range (irange
&r
, tree type
,
2399 relation_trio rel
= TRIO_VARYING
) const;
2401 virtual void wi_fold (irange
&r
, tree type
,
2402 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2403 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
2404 virtual bool wi_op_overflows (wide_int
&res
,
2407 const wide_int
&) const;
2408 void update_bitmask (irange
&r
, const irange
&lh
,
2409 const irange
&rh
) const final override
2410 { update_known_bitmask (r
, LSHIFT_EXPR
, lh
, rh
); }
2413 class operator_rshift
: public cross_product_operator
2415 using range_operator::fold_range
;
2416 using range_operator::op1_range
;
2417 using range_operator::lhs_op1_relation
;
2419 virtual bool fold_range (irange
&r
, tree type
,
2422 relation_trio rel
= TRIO_VARYING
) const;
2423 virtual void wi_fold (irange
&r
, tree type
,
2424 const wide_int
&lh_lb
,
2425 const wide_int
&lh_ub
,
2426 const wide_int
&rh_lb
,
2427 const wide_int
&rh_ub
) const;
2428 virtual bool wi_op_overflows (wide_int
&res
,
2431 const wide_int
&w1
) const;
2432 virtual bool op1_range (irange
&, tree type
,
2435 relation_trio rel
= TRIO_VARYING
) const;
2436 virtual relation_kind
lhs_op1_relation (const irange
&lhs
,
2439 relation_kind rel
) const;
2440 void update_bitmask (irange
&r
, const irange
&lh
,
2441 const irange
&rh
) const final override
2442 { update_known_bitmask (r
, RSHIFT_EXPR
, lh
, rh
); }
2447 operator_rshift::lhs_op1_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
2450 relation_kind
) const
2452 // If both operands range are >= 0, then the LHS <= op1.
2453 if (!op1
.undefined_p () && !op2
.undefined_p ()
2454 && wi::ge_p (op1
.lower_bound (), 0, TYPE_SIGN (op1
.type ()))
2455 && wi::ge_p (op2
.lower_bound (), 0, TYPE_SIGN (op2
.type ())))
2457 return VREL_VARYING
;
2461 operator_lshift::fold_range (irange
&r
, tree type
,
2464 relation_trio rel
) const
2466 int_range_max shift_range
;
2467 if (!get_shift_range (shift_range
, type
, op2
))
2469 if (op2
.undefined_p ())
2476 // Transform left shifts by constants into multiplies.
2477 if (shift_range
.singleton_p ())
2479 unsigned shift
= shift_range
.lower_bound ().to_uhwi ();
2480 wide_int tmp
= wi::set_bit_in_zero (shift
, TYPE_PRECISION (type
));
2481 int_range
<1> mult (type
, tmp
, tmp
);
2483 // Force wrapping multiplication.
2484 bool saved_flag_wrapv
= flag_wrapv
;
2485 bool saved_flag_wrapv_pointer
= flag_wrapv_pointer
;
2487 flag_wrapv_pointer
= 1;
2488 bool b
= op_mult
.fold_range (r
, type
, op1
, mult
);
2489 flag_wrapv
= saved_flag_wrapv
;
2490 flag_wrapv_pointer
= saved_flag_wrapv_pointer
;
2494 // Otherwise, invoke the generic fold routine.
2495 return range_operator::fold_range (r
, type
, op1
, shift_range
, rel
);
2499 operator_lshift::wi_fold (irange
&r
, tree type
,
2500 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2501 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2503 signop sign
= TYPE_SIGN (type
);
2504 unsigned prec
= TYPE_PRECISION (type
);
2505 int overflow_pos
= sign
== SIGNED
? prec
- 1 : prec
;
2506 int bound_shift
= overflow_pos
- rh_ub
.to_shwi ();
2507 // If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2508 // overflow. However, for that to happen, rh.max needs to be zero,
2509 // which means rh is a singleton range of zero, which means we simply return
2510 // [lh_lb, lh_ub] as the range.
2511 if (wi::eq_p (rh_ub
, rh_lb
) && wi::eq_p (rh_ub
, 0))
2513 r
= int_range
<2> (type
, lh_lb
, lh_ub
);
2517 wide_int bound
= wi::set_bit_in_zero (bound_shift
, prec
);
2518 wide_int complement
= ~(bound
- 1);
2519 wide_int low_bound
, high_bound
;
2520 bool in_bounds
= false;
2522 if (sign
== UNSIGNED
)
2525 high_bound
= complement
;
2526 if (wi::ltu_p (lh_ub
, low_bound
))
2528 // [5, 6] << [1, 2] == [10, 24].
2529 // We're shifting out only zeroes, the value increases
2533 else if (wi::ltu_p (high_bound
, lh_lb
))
2535 // [0xffffff00, 0xffffffff] << [1, 2]
2536 // == [0xfffffc00, 0xfffffffe].
2537 // We're shifting out only ones, the value decreases
2544 // [-1, 1] << [1, 2] == [-4, 4]
2545 low_bound
= complement
;
2547 if (wi::lts_p (lh_ub
, high_bound
)
2548 && wi::lts_p (low_bound
, lh_lb
))
2550 // For non-negative numbers, we're shifting out only zeroes,
2551 // the value increases monotonically. For negative numbers,
2552 // we're shifting out only ones, the value decreases
2559 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2561 r
.set_varying (type
);
2565 operator_lshift::wi_op_overflows (wide_int
&res
, tree type
,
2566 const wide_int
&w0
, const wide_int
&w1
) const
2568 signop sign
= TYPE_SIGN (type
);
2571 // It's unclear from the C standard whether shifts can overflow.
2572 // The following code ignores overflow; perhaps a C standard
2573 // interpretation ruling is needed.
2574 res
= wi::rshift (w0
, -w1
, sign
);
2577 res
= wi::lshift (w0
, w1
);
2582 operator_lshift::op1_range (irange
&r
,
2586 relation_trio
) const
2588 if (lhs
.undefined_p ())
2591 if (!contains_zero_p (lhs
))
2592 r
.set_nonzero (type
);
2594 r
.set_varying (type
);
2597 if (op2
.singleton_p (shift
))
2599 if (wi::lt_p (shift
, 0, SIGNED
))
2601 if (wi::ge_p (shift
, wi::uhwi (TYPE_PRECISION (type
),
2602 TYPE_PRECISION (op2
.type ())),
2611 // Work completely in unsigned mode to start.
2613 int_range_max tmp_range
;
2614 if (TYPE_SIGN (type
) == SIGNED
)
2616 int_range_max tmp
= lhs
;
2617 utype
= unsigned_type_for (type
);
2618 range_cast (tmp
, utype
);
2619 op_rshift
.fold_range (tmp_range
, utype
, tmp
, op2
);
2622 op_rshift
.fold_range (tmp_range
, utype
, lhs
, op2
);
2624 // Start with ranges which can produce the LHS by right shifting the
2625 // result by the shift amount.
2626 // ie [0x08, 0xF0] = op1 << 2 will start with
2627 // [00001000, 11110000] = op1 << 2
2628 // [0x02, 0x4C] aka [00000010, 00111100]
2630 // Then create a range from the LB with the least significant upper bit
2631 // set, to the upper bound with all the bits set.
2632 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2634 // Ideally we do this for each subrange, but just lump them all for now.
2635 unsigned low_bits
= TYPE_PRECISION (utype
) - shift
.to_uhwi ();
2636 wide_int up_mask
= wi::mask (low_bits
, true, TYPE_PRECISION (utype
));
2637 wide_int new_ub
= wi::bit_or (up_mask
, tmp_range
.upper_bound ());
2638 wide_int new_lb
= wi::set_bit (tmp_range
.lower_bound (), low_bits
);
2639 int_range
<2> fill_range (utype
, new_lb
, new_ub
);
2640 tmp_range
.union_ (fill_range
);
2643 range_cast (tmp_range
, type
);
2645 r
.intersect (tmp_range
);
2649 return !r
.varying_p ();
2653 operator_rshift::op1_range (irange
&r
,
2657 relation_trio
) const
2659 if (lhs
.undefined_p ())
2662 if (op2
.singleton_p (shift
))
2664 // Ignore nonsensical shifts.
2665 unsigned prec
= TYPE_PRECISION (type
);
2666 if (wi::ge_p (shift
,
2667 wi::uhwi (prec
, TYPE_PRECISION (op2
.type ())),
2676 // Folding the original operation may discard some impossible
2677 // ranges from the LHS.
2678 int_range_max lhs_refined
;
2679 op_rshift
.fold_range (lhs_refined
, type
, int_range
<1> (type
), op2
);
2680 lhs_refined
.intersect (lhs
);
2681 if (lhs_refined
.undefined_p ())
2686 int_range_max
shift_range (op2
.type (), shift
, shift
);
2687 int_range_max lb
, ub
;
2688 op_lshift
.fold_range (lb
, type
, lhs_refined
, shift_range
);
2690 // 0000 0111 = OP1 >> 3
2692 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2693 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2694 // right hand side (0x07).
2695 wide_int mask
= wi::bit_not (wi::lshift (wi::minus_one (prec
), shift
));
2696 int_range_max
mask_range (type
,
2697 wi::zero (TYPE_PRECISION (type
)),
2699 op_plus
.fold_range (ub
, type
, lb
, mask_range
);
2702 if (!contains_zero_p (lhs_refined
))
2704 mask_range
.invert ();
2705 r
.intersect (mask_range
);
2713 operator_rshift::wi_op_overflows (wide_int
&res
,
2716 const wide_int
&w1
) const
2718 signop sign
= TYPE_SIGN (type
);
2720 res
= wi::lshift (w0
, -w1
);
2723 // It's unclear from the C standard whether shifts can overflow.
2724 // The following code ignores overflow; perhaps a C standard
2725 // interpretation ruling is needed.
2726 res
= wi::rshift (w0
, w1
, sign
);
2732 operator_rshift::fold_range (irange
&r
, tree type
,
2735 relation_trio rel
) const
2737 int_range_max shift
;
2738 if (!get_shift_range (shift
, type
, op2
))
2740 if (op2
.undefined_p ())
2747 return range_operator::fold_range (r
, type
, op1
, shift
, rel
);
2751 operator_rshift::wi_fold (irange
&r
, tree type
,
2752 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2753 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2755 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2759 // Add a partial equivalence between the LHS and op1 for casts.
2762 operator_cast::lhs_op1_relation (const irange
&lhs
,
2764 const irange
&op2 ATTRIBUTE_UNUSED
,
2765 relation_kind
) const
2767 if (lhs
.undefined_p () || op1
.undefined_p ())
2768 return VREL_VARYING
;
2769 unsigned lhs_prec
= TYPE_PRECISION (lhs
.type ());
2770 unsigned op1_prec
= TYPE_PRECISION (op1
.type ());
2771 // If the result gets sign extended into a larger type check first if this
2772 // qualifies as a partial equivalence.
2773 if (TYPE_SIGN (op1
.type ()) == SIGNED
&& lhs_prec
> op1_prec
)
2775 // If the result is sign extended, and the LHS is larger than op1,
2776 // check if op1's range can be negative as the sign extension will
2777 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2778 int_range
<3> negs
= range_negatives (op1
.type ());
2779 negs
.intersect (op1
);
2780 if (!negs
.undefined_p ())
2781 return VREL_VARYING
;
2784 unsigned prec
= MIN (lhs_prec
, op1_prec
);
2785 return bits_to_pe (prec
);
2788 // Return TRUE if casting from INNER to OUTER is a truncating cast.
2791 operator_cast::truncating_cast_p (const irange
&inner
,
2792 const irange
&outer
) const
2794 return TYPE_PRECISION (outer
.type ()) < TYPE_PRECISION (inner
.type ());
2797 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
2800 operator_cast::inside_domain_p (const wide_int
&min
,
2801 const wide_int
&max
,
2802 const irange
&range
) const
2804 wide_int domain_min
= irange_val_min (range
.type ());
2805 wide_int domain_max
= irange_val_max (range
.type ());
2806 signop domain_sign
= TYPE_SIGN (range
.type ());
2807 return (wi::le_p (min
, domain_max
, domain_sign
)
2808 && wi::le_p (max
, domain_max
, domain_sign
)
2809 && wi::ge_p (min
, domain_min
, domain_sign
)
2810 && wi::ge_p (max
, domain_min
, domain_sign
));
2814 // Helper for fold_range which work on a pair at a time.
2817 operator_cast::fold_pair (irange
&r
, unsigned index
,
2818 const irange
&inner
,
2819 const irange
&outer
) const
2821 tree inner_type
= inner
.type ();
2822 tree outer_type
= outer
.type ();
2823 signop inner_sign
= TYPE_SIGN (inner_type
);
2824 unsigned outer_prec
= TYPE_PRECISION (outer_type
);
2826 // check to see if casting from INNER to OUTER is a conversion that
2827 // fits in the resulting OUTER type.
2828 wide_int inner_lb
= inner
.lower_bound (index
);
2829 wide_int inner_ub
= inner
.upper_bound (index
);
2830 if (truncating_cast_p (inner
, outer
))
2832 // We may be able to accommodate a truncating cast if the
2833 // resulting range can be represented in the target type...
2834 if (wi::rshift (wi::sub (inner_ub
, inner_lb
),
2835 wi::uhwi (outer_prec
, TYPE_PRECISION (inner
.type ())),
2838 r
.set_varying (outer_type
);
2842 // ...but we must still verify that the final range fits in the
2843 // domain. This catches -fstrict-enum restrictions where the domain
2844 // range is smaller than what fits in the underlying type.
2845 wide_int min
= wide_int::from (inner_lb
, outer_prec
, inner_sign
);
2846 wide_int max
= wide_int::from (inner_ub
, outer_prec
, inner_sign
);
2847 if (inside_domain_p (min
, max
, outer
))
2848 create_possibly_reversed_range (r
, outer_type
, min
, max
);
2850 r
.set_varying (outer_type
);
2855 operator_cast::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
2856 const irange
&inner
,
2857 const irange
&outer
,
2858 relation_trio
) const
2860 if (empty_range_varying (r
, type
, inner
, outer
))
2863 gcc_checking_assert (outer
.varying_p ());
2864 gcc_checking_assert (inner
.num_pairs () > 0);
2866 // Avoid a temporary by folding the first pair directly into the result.
2867 fold_pair (r
, 0, inner
, outer
);
2869 // Then process any additional pairs by unioning with their results.
2870 for (unsigned x
= 1; x
< inner
.num_pairs (); ++x
)
2873 fold_pair (tmp
, x
, inner
, outer
);
2879 // Update the nonzero mask. Truncating casts are problematic unless
2880 // the conversion fits in the resulting outer type.
2881 wide_int nz
= inner
.get_nonzero_bits ();
2882 if (truncating_cast_p (inner
, outer
)
2883 && wi::rshift (nz
, wi::uhwi (TYPE_PRECISION (outer
.type ()),
2884 TYPE_PRECISION (inner
.type ())),
2885 TYPE_SIGN (inner
.type ())) != 0)
2887 nz
= wide_int::from (nz
, TYPE_PRECISION (type
), TYPE_SIGN (inner
.type ()));
2888 r
.set_nonzero_bits (nz
);
2894 operator_cast::op1_range (irange
&r
, tree type
,
2897 relation_trio
) const
2899 if (lhs
.undefined_p ())
2901 tree lhs_type
= lhs
.type ();
2902 gcc_checking_assert (types_compatible_p (op2
.type(), type
));
2904 // If we are calculating a pointer, shortcut to what we really care about.
2905 if (POINTER_TYPE_P (type
))
2907 // Conversion from other pointers or a constant (including 0/NULL)
2908 // are straightforward.
2909 if (POINTER_TYPE_P (lhs
.type ())
2910 || (lhs
.singleton_p ()
2911 && TYPE_PRECISION (lhs
.type ()) >= TYPE_PRECISION (type
)))
2914 range_cast (r
, type
);
2918 // If the LHS is not a pointer nor a singleton, then it is
2919 // either VARYING or non-zero.
2920 if (!contains_zero_p (lhs
))
2921 r
.set_nonzero (type
);
2923 r
.set_varying (type
);
2929 if (truncating_cast_p (op2
, lhs
))
2931 if (lhs
.varying_p ())
2932 r
.set_varying (type
);
2935 // We want to insert the LHS as an unsigned value since it
2936 // would not trigger the signed bit of the larger type.
2937 int_range_max converted_lhs
= lhs
;
2938 range_cast (converted_lhs
, unsigned_type_for (lhs_type
));
2939 range_cast (converted_lhs
, type
);
2940 // Start by building the positive signed outer range for the type.
2941 wide_int lim
= wi::set_bit_in_zero (TYPE_PRECISION (lhs_type
),
2942 TYPE_PRECISION (type
));
2943 create_possibly_reversed_range (r
, type
, lim
,
2944 wi::max_value (TYPE_PRECISION (type
),
2946 // For the signed part, we need to simply union the 2 ranges now.
2947 r
.union_ (converted_lhs
);
2949 // Create maximal negative number outside of LHS bits.
2950 lim
= wi::mask (TYPE_PRECISION (lhs_type
), true,
2951 TYPE_PRECISION (type
));
2952 // Add this to the unsigned LHS range(s).
2953 int_range_max
lim_range (type
, lim
, lim
);
2954 int_range_max lhs_neg
;
2955 range_op_handler (PLUS_EXPR
).fold_range (lhs_neg
, type
,
2956 converted_lhs
, lim_range
);
2957 // lhs_neg now has all the negative versions of the LHS.
2958 // Now union in all the values from SIGNED MIN (0x80000) to
2959 // lim-1 in order to fill in all the ranges with the upper
2962 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
2963 // we don't need to create a range from min to lim-1
2964 // calculate neg range traps trying to create [lim, lim - 1].
2965 wide_int min_val
= wi::min_value (TYPE_PRECISION (type
), SIGNED
);
2968 int_range_max
neg (type
,
2969 wi::min_value (TYPE_PRECISION (type
),
2972 lhs_neg
.union_ (neg
);
2974 // And finally, munge the signed and unsigned portions.
2977 // And intersect with any known value passed in the extra operand.
2983 if (TYPE_PRECISION (lhs_type
) == TYPE_PRECISION (type
))
2987 // The cast is not truncating, and the range is restricted to
2988 // the range of the RHS by this assignment.
2990 // Cast the range of the RHS to the type of the LHS.
2991 fold_range (tmp
, lhs_type
, int_range
<1> (type
), int_range
<1> (lhs_type
));
2992 // Intersect this with the LHS range will produce the range,
2993 // which will be cast to the RHS type before returning.
2994 tmp
.intersect (lhs
);
2997 // Cast the calculated range to the type of the RHS.
2998 fold_range (r
, type
, tmp
, int_range
<1> (type
));
3003 class operator_logical_and
: public range_operator
3005 using range_operator::fold_range
;
3006 using range_operator::op1_range
;
3007 using range_operator::op2_range
;
3009 virtual bool fold_range (irange
&r
, tree type
,
3012 relation_trio rel
= TRIO_VARYING
) const;
3013 virtual bool op1_range (irange
&r
, tree type
,
3016 relation_trio rel
= TRIO_VARYING
) const;
3017 virtual bool op2_range (irange
&r
, tree type
,
3020 relation_trio rel
= TRIO_VARYING
) const;
3025 operator_logical_and::fold_range (irange
&r
, tree type
,
3028 relation_trio
) const
3030 if (empty_range_varying (r
, type
, lh
, rh
))
3033 // 0 && anything is 0.
3034 if ((wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (lh
.upper_bound (), 0))
3035 || (wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (rh
.upper_bound (), 0)))
3036 r
= range_false (type
);
3037 else if (contains_zero_p (lh
) || contains_zero_p (rh
))
3038 // To reach this point, there must be a logical 1 on each side, and
3039 // the only remaining question is whether there is a zero or not.
3040 r
= range_true_and_false (type
);
3042 r
= range_true (type
);
3047 operator_logical_and::op1_range (irange
&r
, tree type
,
3049 const irange
&op2 ATTRIBUTE_UNUSED
,
3050 relation_trio
) const
3052 switch (get_bool_state (r
, lhs
, type
))
3055 // A true result means both sides of the AND must be true.
3056 r
= range_true (type
);
3059 // Any other result means only one side has to be false, the
3060 // other side can be anything. So we cannot be sure of any
3062 r
= range_true_and_false (type
);
3069 operator_logical_and::op2_range (irange
&r
, tree type
,
3072 relation_trio
) const
3074 return operator_logical_and::op1_range (r
, type
, lhs
, op1
);
3079 operator_bitwise_and::update_bitmask (irange
&r
, const irange
&lh
,
3080 const irange
&rh
) const
3082 update_known_bitmask (r
, BIT_AND_EXPR
, lh
, rh
);
3085 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
3086 // by considering the number of leading redundant sign bit copies.
3087 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
3088 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
3090 wi_optimize_signed_bitwise_op (irange
&r
, tree type
,
3091 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3092 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
3094 int lh_clrsb
= MIN (wi::clrsb (lh_lb
), wi::clrsb (lh_ub
));
3095 int rh_clrsb
= MIN (wi::clrsb (rh_lb
), wi::clrsb (rh_ub
));
3096 int new_clrsb
= MIN (lh_clrsb
, rh_clrsb
);
3099 int type_prec
= TYPE_PRECISION (type
);
3100 int rprec
= (type_prec
- new_clrsb
) - 1;
3101 value_range_with_overflow (r
, type
,
3102 wi::mask (rprec
, true, type_prec
),
3103 wi::mask (rprec
, false, type_prec
));
3107 // An AND of 8,16, 32 or 64 bits can produce a partial equivalence between
3111 operator_bitwise_and::lhs_op1_relation (const irange
&lhs
,
3114 relation_kind
) const
3116 if (lhs
.undefined_p () || op1
.undefined_p () || op2
.undefined_p ())
3117 return VREL_VARYING
;
3118 if (!op2
.singleton_p ())
3119 return VREL_VARYING
;
3120 // if val == 0xff or 0xFFFF OR 0Xffffffff OR 0Xffffffffffffffff, return TRUE
3121 int prec1
= TYPE_PRECISION (op1
.type ());
3122 int prec2
= TYPE_PRECISION (op2
.type ());
3124 wide_int mask
= op2
.lower_bound ();
3125 if (wi::eq_p (mask
, wi::mask (8, false, prec2
)))
3127 else if (wi::eq_p (mask
, wi::mask (16, false, prec2
)))
3129 else if (wi::eq_p (mask
, wi::mask (32, false, prec2
)))
3131 else if (wi::eq_p (mask
, wi::mask (64, false, prec2
)))
3133 return bits_to_pe (MIN (prec1
, mask_prec
));
3136 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
3137 // possible. Basically, see if we can optimize:
3141 // [LB op Z, UB op Z]
3143 // If the optimization was successful, accumulate the range in R and
3147 wi_optimize_and_or (irange
&r
,
3148 enum tree_code code
,
3150 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3151 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
3153 // Calculate the singleton mask among the ranges, if any.
3154 wide_int lower_bound
, upper_bound
, mask
;
3155 if (wi::eq_p (rh_lb
, rh_ub
))
3158 lower_bound
= lh_lb
;
3159 upper_bound
= lh_ub
;
3161 else if (wi::eq_p (lh_lb
, lh_ub
))
3164 lower_bound
= rh_lb
;
3165 upper_bound
= rh_ub
;
3170 // If Z is a constant which (for op | its bitwise not) has n
3171 // consecutive least significant bits cleared followed by m 1
3172 // consecutive bits set immediately above it and either
3173 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
3175 // The least significant n bits of all the values in the range are
3176 // cleared or set, the m bits above it are preserved and any bits
3177 // above these are required to be the same for all values in the
3181 if (code
== BIT_IOR_EXPR
)
3183 if (wi::eq_p (w
, 0))
3184 n
= w
.get_precision ();
3188 w
= ~(w
| wi::mask (n
, false, w
.get_precision ()));
3189 if (wi::eq_p (w
, 0))
3190 m
= w
.get_precision () - n
;
3192 m
= wi::ctz (w
) - n
;
3194 wide_int new_mask
= wi::mask (m
+ n
, true, w
.get_precision ());
3195 if ((new_mask
& lower_bound
) != (new_mask
& upper_bound
))
3198 wide_int res_lb
, res_ub
;
3199 if (code
== BIT_AND_EXPR
)
3201 res_lb
= wi::bit_and (lower_bound
, mask
);
3202 res_ub
= wi::bit_and (upper_bound
, mask
);
3204 else if (code
== BIT_IOR_EXPR
)
3206 res_lb
= wi::bit_or (lower_bound
, mask
);
3207 res_ub
= wi::bit_or (upper_bound
, mask
);
3211 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
3213 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
3214 if (code
== BIT_IOR_EXPR
&& wi::ne_p (mask
, 0))
3217 tmp
.set_nonzero (type
);
3223 // For range [LB, UB] compute two wide_int bit masks.
3225 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3226 // for all numbers in the range the bit is 0, otherwise it might be 0
3229 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3230 // for all numbers in the range the bit is 1, otherwise it might be 0
3234 wi_set_zero_nonzero_bits (tree type
,
3235 const wide_int
&lb
, const wide_int
&ub
,
3236 wide_int
&maybe_nonzero
,
3237 wide_int
&mustbe_nonzero
)
3239 signop sign
= TYPE_SIGN (type
);
3241 if (wi::eq_p (lb
, ub
))
3242 maybe_nonzero
= mustbe_nonzero
= lb
;
3243 else if (wi::ge_p (lb
, 0, sign
) || wi::lt_p (ub
, 0, sign
))
3245 wide_int xor_mask
= lb
^ ub
;
3246 maybe_nonzero
= lb
| ub
;
3247 mustbe_nonzero
= lb
& ub
;
3250 wide_int mask
= wi::mask (wi::floor_log2 (xor_mask
), false,
3251 maybe_nonzero
.get_precision ());
3252 maybe_nonzero
= maybe_nonzero
| mask
;
3253 mustbe_nonzero
= wi::bit_and_not (mustbe_nonzero
, mask
);
3258 maybe_nonzero
= wi::minus_one (lb
.get_precision ());
3259 mustbe_nonzero
= wi::zero (lb
.get_precision ());
3264 operator_bitwise_and::wi_fold (irange
&r
, tree type
,
3265 const wide_int
&lh_lb
,
3266 const wide_int
&lh_ub
,
3267 const wide_int
&rh_lb
,
3268 const wide_int
&rh_ub
) const
3270 if (wi_optimize_and_or (r
, BIT_AND_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3273 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3274 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3275 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3276 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3277 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3278 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3280 wide_int new_lb
= mustbe_nonzero_lh
& mustbe_nonzero_rh
;
3281 wide_int new_ub
= maybe_nonzero_lh
& maybe_nonzero_rh
;
3282 signop sign
= TYPE_SIGN (type
);
3283 unsigned prec
= TYPE_PRECISION (type
);
3284 // If both input ranges contain only negative values, we can
3285 // truncate the result range maximum to the minimum of the
3286 // input range maxima.
3287 if (wi::lt_p (lh_ub
, 0, sign
) && wi::lt_p (rh_ub
, 0, sign
))
3289 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3290 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3292 // If either input range contains only non-negative values
3293 // we can truncate the result range maximum to the respective
3294 // maximum of the input range.
3295 if (wi::ge_p (lh_lb
, 0, sign
))
3296 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3297 if (wi::ge_p (rh_lb
, 0, sign
))
3298 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3299 // PR68217: In case of signed & sign-bit-CST should
3300 // result in [-INF, 0] instead of [-INF, INF].
3301 if (wi::gt_p (new_lb
, new_ub
, sign
))
3303 wide_int sign_bit
= wi::set_bit_in_zero (prec
- 1, prec
);
3305 && ((wi::eq_p (lh_lb
, lh_ub
)
3306 && !wi::cmps (lh_lb
, sign_bit
))
3307 || (wi::eq_p (rh_lb
, rh_ub
)
3308 && !wi::cmps (rh_lb
, sign_bit
))))
3310 new_lb
= wi::min_value (prec
, sign
);
3311 new_ub
= wi::zero (prec
);
3314 // If the limits got swapped around, return varying.
3315 if (wi::gt_p (new_lb
, new_ub
,sign
))
3318 && wi_optimize_signed_bitwise_op (r
, type
,
3322 r
.set_varying (type
);
3325 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3329 set_nonzero_range_from_mask (irange
&r
, tree type
, const irange
&lhs
)
3331 if (!contains_zero_p (lhs
))
3332 r
= range_nonzero (type
);
3334 r
.set_varying (type
);
3337 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
3338 (otherwise return VAL). VAL and MASK must be zero-extended for
3339 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3340 (to transform signed values into unsigned) and at the end xor
3344 masked_increment (const wide_int
&val_in
, const wide_int
&mask
,
3345 const wide_int
&sgnbit
, unsigned int prec
)
3347 wide_int bit
= wi::one (prec
), res
;
3350 wide_int val
= val_in
^ sgnbit
;
3351 for (i
= 0; i
< prec
; i
++, bit
+= bit
)
3354 if ((res
& bit
) == 0)
3357 res
= wi::bit_and_not (val
+ bit
, res
);
3359 if (wi::gtu_p (res
, val
))
3360 return res
^ sgnbit
;
3362 return val
^ sgnbit
;
3365 // This was shamelessly stolen from register_edge_assert_for_2 and
3366 // adjusted to work with iranges.
3369 operator_bitwise_and::simple_op1_range_solver (irange
&r
, tree type
,
3371 const irange
&op2
) const
3373 if (!op2
.singleton_p ())
3375 set_nonzero_range_from_mask (r
, type
, lhs
);
3378 unsigned int nprec
= TYPE_PRECISION (type
);
3379 wide_int cst2v
= op2
.lower_bound ();
3380 bool cst2n
= wi::neg_p (cst2v
, TYPE_SIGN (type
));
3383 sgnbit
= wi::set_bit_in_zero (nprec
- 1, nprec
);
3385 sgnbit
= wi::zero (nprec
);
3387 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3389 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3390 // maximum unsigned value is ~0. For signed comparison, if CST2
3391 // doesn't have the most significant bit set, handle it similarly. If
3392 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
3393 wide_int valv
= lhs
.lower_bound ();
3394 wide_int minv
= valv
& cst2v
, maxv
;
3395 bool we_know_nothing
= false;
3398 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3399 minv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3402 // If we can't determine anything on this bound, fall
3403 // through and conservatively solve for the other end point.
3404 we_know_nothing
= true;
3407 maxv
= wi::mask (nprec
- (cst2n
? 1 : 0), false, nprec
);
3408 if (we_know_nothing
)
3409 r
.set_varying (type
);
3411 create_possibly_reversed_range (r
, type
, minv
, maxv
);
3413 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3415 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3416 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3418 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3420 // For signed comparison, if CST2 doesn't have most significant bit
3421 // set, handle it similarly. If CST2 has MSB set, the maximum is
3422 // the same and minimum is INT_MIN.
3423 valv
= lhs
.upper_bound ();
3424 minv
= valv
& cst2v
;
3429 maxv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3432 // If we couldn't determine anything on either bound, return
3434 if (we_know_nothing
)
3442 int_range
<2> upper_bits
;
3443 create_possibly_reversed_range (upper_bits
, type
, minv
, maxv
);
3444 r
.intersect (upper_bits
);
3448 operator_bitwise_and::op1_range (irange
&r
, tree type
,
3451 relation_trio
) const
3453 if (lhs
.undefined_p ())
3455 if (types_compatible_p (type
, boolean_type_node
))
3456 return op_logical_and
.op1_range (r
, type
, lhs
, op2
);
3459 for (unsigned i
= 0; i
< lhs
.num_pairs (); ++i
)
3461 int_range_max
chunk (lhs
.type (),
3462 lhs
.lower_bound (i
),
3463 lhs
.upper_bound (i
));
3465 simple_op1_range_solver (res
, type
, chunk
, op2
);
3468 if (r
.undefined_p ())
3469 set_nonzero_range_from_mask (r
, type
, lhs
);
3471 // For 0 = op1 & MASK, op1 is ~MASK.
3472 if (lhs
.zero_p () && op2
.singleton_p ())
3474 wide_int nz
= wi::bit_not (op2
.get_nonzero_bits ());
3475 int_range
<2> tmp (type
);
3476 tmp
.set_nonzero_bits (nz
);
3483 operator_bitwise_and::op2_range (irange
&r
, tree type
,
3486 relation_trio
) const
3488 return operator_bitwise_and::op1_range (r
, type
, lhs
, op1
);
3492 class operator_logical_or
: public range_operator
3494 using range_operator::fold_range
;
3495 using range_operator::op1_range
;
3496 using range_operator::op2_range
;
3498 virtual bool fold_range (irange
&r
, tree type
,
3501 relation_trio rel
= TRIO_VARYING
) const;
3502 virtual bool op1_range (irange
&r
, tree type
,
3505 relation_trio rel
= TRIO_VARYING
) const;
3506 virtual bool op2_range (irange
&r
, tree type
,
3509 relation_trio rel
= TRIO_VARYING
) const;
3513 operator_logical_or::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3516 relation_trio
) const
3518 if (empty_range_varying (r
, type
, lh
, rh
))
3527 operator_logical_or::op1_range (irange
&r
, tree type
,
3529 const irange
&op2 ATTRIBUTE_UNUSED
,
3530 relation_trio
) const
3532 switch (get_bool_state (r
, lhs
, type
))
3535 // A false result means both sides of the OR must be false.
3536 r
= range_false (type
);
3539 // Any other result means only one side has to be true, the
3540 // other side can be anything. so we can't be sure of any result
3542 r
= range_true_and_false (type
);
3549 operator_logical_or::op2_range (irange
&r
, tree type
,
3552 relation_trio
) const
3554 return operator_logical_or::op1_range (r
, type
, lhs
, op1
);
3559 operator_bitwise_or::update_bitmask (irange
&r
, const irange
&lh
,
3560 const irange
&rh
) const
3562 update_known_bitmask (r
, BIT_IOR_EXPR
, lh
, rh
);
3566 operator_bitwise_or::wi_fold (irange
&r
, tree type
,
3567 const wide_int
&lh_lb
,
3568 const wide_int
&lh_ub
,
3569 const wide_int
&rh_lb
,
3570 const wide_int
&rh_ub
) const
3572 if (wi_optimize_and_or (r
, BIT_IOR_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3575 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3576 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3577 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3578 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3579 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3580 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3581 wide_int new_lb
= mustbe_nonzero_lh
| mustbe_nonzero_rh
;
3582 wide_int new_ub
= maybe_nonzero_lh
| maybe_nonzero_rh
;
3583 signop sign
= TYPE_SIGN (type
);
3584 // If the input ranges contain only positive values we can
3585 // truncate the minimum of the result range to the maximum
3586 // of the input range minima.
3587 if (wi::ge_p (lh_lb
, 0, sign
)
3588 && wi::ge_p (rh_lb
, 0, sign
))
3590 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3591 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3593 // If either input range contains only negative values
3594 // we can truncate the minimum of the result range to the
3595 // respective minimum range.
3596 if (wi::lt_p (lh_ub
, 0, sign
))
3597 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3598 if (wi::lt_p (rh_ub
, 0, sign
))
3599 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3600 // If the limits got swapped around, return a conservative range.
3601 if (wi::gt_p (new_lb
, new_ub
, sign
))
3603 // Make sure that nonzero|X is nonzero.
3604 if (wi::gt_p (lh_lb
, 0, sign
)
3605 || wi::gt_p (rh_lb
, 0, sign
)
3606 || wi::lt_p (lh_ub
, 0, sign
)
3607 || wi::lt_p (rh_ub
, 0, sign
))
3608 r
.set_nonzero (type
);
3609 else if (sign
== SIGNED
3610 && wi_optimize_signed_bitwise_op (r
, type
,
3615 r
.set_varying (type
);
3618 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3622 operator_bitwise_or::op1_range (irange
&r
, tree type
,
3625 relation_trio
) const
3627 if (lhs
.undefined_p ())
3629 // If this is really a logical wi_fold, call that.
3630 if (types_compatible_p (type
, boolean_type_node
))
3631 return op_logical_or
.op1_range (r
, type
, lhs
, op2
);
3638 r
.set_varying (type
);
3643 operator_bitwise_or::op2_range (irange
&r
, tree type
,
3646 relation_trio
) const
3648 return operator_bitwise_or::op1_range (r
, type
, lhs
, op1
);
3652 operator_bitwise_xor::update_bitmask (irange
&r
, const irange
&lh
,
3653 const irange
&rh
) const
3655 update_known_bitmask (r
, BIT_XOR_EXPR
, lh
, rh
);
3659 operator_bitwise_xor::wi_fold (irange
&r
, tree type
,
3660 const wide_int
&lh_lb
,
3661 const wide_int
&lh_ub
,
3662 const wide_int
&rh_lb
,
3663 const wide_int
&rh_ub
) const
3665 signop sign
= TYPE_SIGN (type
);
3666 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3667 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3668 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3669 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3670 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3671 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3673 wide_int result_zero_bits
= ((mustbe_nonzero_lh
& mustbe_nonzero_rh
)
3674 | ~(maybe_nonzero_lh
| maybe_nonzero_rh
));
3675 wide_int result_one_bits
3676 = (wi::bit_and_not (mustbe_nonzero_lh
, maybe_nonzero_rh
)
3677 | wi::bit_and_not (mustbe_nonzero_rh
, maybe_nonzero_lh
));
3678 wide_int new_ub
= ~result_zero_bits
;
3679 wide_int new_lb
= result_one_bits
;
3681 // If the range has all positive or all negative values, the result
3682 // is better than VARYING.
3683 if (wi::lt_p (new_lb
, 0, sign
) || wi::ge_p (new_ub
, 0, sign
))
3684 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3685 else if (sign
== SIGNED
3686 && wi_optimize_signed_bitwise_op (r
, type
,
3691 r
.set_varying (type
);
3693 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
3694 if (wi::lt_p (lh_ub
, rh_lb
, sign
)
3695 || wi::lt_p (rh_ub
, lh_lb
, sign
)
3696 || wi::ne_p (result_one_bits
, 0))
3699 tmp
.set_nonzero (type
);
3705 operator_bitwise_xor::op1_op2_relation_effect (irange
&lhs_range
,
3709 relation_kind rel
) const
3711 if (rel
== VREL_VARYING
)
3714 int_range
<2> rel_range
;
3719 rel_range
.set_zero (type
);
3722 rel_range
.set_nonzero (type
);
3728 lhs_range
.intersect (rel_range
);
3733 operator_bitwise_xor::op1_range (irange
&r
, tree type
,
3736 relation_trio
) const
3738 if (lhs
.undefined_p () || lhs
.varying_p ())
3743 if (types_compatible_p (type
, boolean_type_node
))
3745 switch (get_bool_state (r
, lhs
, type
))
3748 if (op2
.varying_p ())
3749 r
.set_varying (type
);
3750 else if (op2
.zero_p ())
3751 r
= range_true (type
);
3752 // See get_bool_state for the rationale
3753 else if (contains_zero_p (op2
))
3754 r
= range_true_and_false (type
);
3756 r
= range_false (type
);
3766 r
.set_varying (type
);
3771 operator_bitwise_xor::op2_range (irange
&r
, tree type
,
3774 relation_trio
) const
3776 return operator_bitwise_xor::op1_range (r
, type
, lhs
, op1
);
3779 class operator_trunc_mod
: public range_operator
3781 using range_operator::op1_range
;
3782 using range_operator::op2_range
;
3784 virtual void wi_fold (irange
&r
, tree type
,
3785 const wide_int
&lh_lb
,
3786 const wide_int
&lh_ub
,
3787 const wide_int
&rh_lb
,
3788 const wide_int
&rh_ub
) const;
3789 virtual bool op1_range (irange
&r
, tree type
,
3792 relation_trio
) const;
3793 virtual bool op2_range (irange
&r
, tree type
,
3796 relation_trio
) const;
3797 void update_bitmask (irange
&r
, const irange
&lh
, const irange
&rh
) const
3798 { update_known_bitmask (r
, TRUNC_MOD_EXPR
, lh
, rh
); }
3802 operator_trunc_mod::wi_fold (irange
&r
, tree type
,
3803 const wide_int
&lh_lb
,
3804 const wide_int
&lh_ub
,
3805 const wide_int
&rh_lb
,
3806 const wide_int
&rh_ub
) const
3808 wide_int new_lb
, new_ub
, tmp
;
3809 signop sign
= TYPE_SIGN (type
);
3810 unsigned prec
= TYPE_PRECISION (type
);
3812 // Mod 0 is undefined.
3813 if (wi_zero_p (type
, rh_lb
, rh_ub
))
3819 // Check for constant and try to fold.
3820 if (lh_lb
== lh_ub
&& rh_lb
== rh_ub
)
3822 wi::overflow_type ov
= wi::OVF_NONE
;
3823 tmp
= wi::mod_trunc (lh_lb
, rh_lb
, sign
, &ov
);
3824 if (ov
== wi::OVF_NONE
)
3826 r
= int_range
<2> (type
, tmp
, tmp
);
3831 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
3836 new_ub
= wi::smax (new_ub
, tmp
);
3839 if (sign
== UNSIGNED
)
3840 new_lb
= wi::zero (prec
);
3845 if (wi::gts_p (tmp
, 0))
3846 tmp
= wi::zero (prec
);
3847 new_lb
= wi::smax (new_lb
, tmp
);
3850 if (sign
== SIGNED
&& wi::neg_p (tmp
))
3851 tmp
= wi::zero (prec
);
3852 new_ub
= wi::min (new_ub
, tmp
, sign
);
3854 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3858 operator_trunc_mod::op1_range (irange
&r
, tree type
,
3861 relation_trio
) const
3863 if (lhs
.undefined_p ())
3866 signop sign
= TYPE_SIGN (type
);
3867 unsigned prec
= TYPE_PRECISION (type
);
3868 // (a % b) >= x && x > 0 , then a >= x.
3869 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
3871 r
= value_range (type
, lhs
.lower_bound (), wi::max_value (prec
, sign
));
3874 // (a % b) <= x && x < 0 , then a <= x.
3875 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
3877 r
= value_range (type
, wi::min_value (prec
, sign
), lhs
.upper_bound ());
3884 operator_trunc_mod::op2_range (irange
&r
, tree type
,
3887 relation_trio
) const
3889 if (lhs
.undefined_p ())
3892 signop sign
= TYPE_SIGN (type
);
3893 unsigned prec
= TYPE_PRECISION (type
);
3894 // (a % b) >= x && x > 0 , then b is in ~[-x, x] for signed
3895 // or b > x for unsigned.
3896 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
3899 r
= value_range (type
, wi::neg (lhs
.lower_bound ()),
3900 lhs
.lower_bound (), VR_ANTI_RANGE
);
3901 else if (wi::lt_p (lhs
.lower_bound (), wi::max_value (prec
, sign
),
3903 r
= value_range (type
, lhs
.lower_bound () + 1,
3904 wi::max_value (prec
, sign
));
3909 // (a % b) <= x && x < 0 , then b is in ~[x, -x].
3910 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
3912 if (wi::gt_p (lhs
.upper_bound (), wi::min_value (prec
, sign
), sign
))
3913 r
= value_range (type
, lhs
.upper_bound (),
3914 wi::neg (lhs
.upper_bound ()), VR_ANTI_RANGE
);
3923 class operator_logical_not
: public range_operator
3925 using range_operator::fold_range
;
3926 using range_operator::op1_range
;
3928 virtual bool fold_range (irange
&r
, tree type
,
3931 relation_trio rel
= TRIO_VARYING
) const;
3932 virtual bool op1_range (irange
&r
, tree type
,
3935 relation_trio rel
= TRIO_VARYING
) const;
// Folding a logical NOT, oddly enough, involves doing nothing on the
// forward pass through.  During the initial walk backwards, the
// logical NOT reversed the desired outcome on the way back, so on the
// way forward all we do is pass the range forward.
//
//	b_2 = x_1 < 20
//	b_3 = !b_2
//	if (b_3)
// to determine the TRUE branch, walking backward
//	 if (b_3)		if ([1,1])
//	 b_3 = !b_2		[1,1] = ![0,0]
//	 b_2 = x_1 < 20		[0,0] = x_1 < 20, false, so x_1 == [20, 255]
// which is the result we are looking for.. so.. pass it through.
3953 operator_logical_not::fold_range (irange
&r
, tree type
,
3955 const irange
&rh ATTRIBUTE_UNUSED
,
3956 relation_trio
) const
3958 if (empty_range_varying (r
, type
, lh
, rh
))
3962 if (!lh
.varying_p () && !lh
.undefined_p ())
3969 operator_logical_not::op1_range (irange
&r
,
3973 relation_trio
) const
3975 // Logical NOT is involutary...do it again.
3976 return fold_range (r
, type
, lhs
, op2
);
3981 operator_bitwise_not::fold_range (irange
&r
, tree type
,
3984 relation_trio
) const
3986 if (empty_range_varying (r
, type
, lh
, rh
))
3989 if (types_compatible_p (type
, boolean_type_node
))
3990 return op_logical_not
.fold_range (r
, type
, lh
, rh
);
3992 // ~X is simply -1 - X.
3993 int_range
<1> minusone (type
, wi::minus_one (TYPE_PRECISION (type
)),
3994 wi::minus_one (TYPE_PRECISION (type
)));
3995 return range_op_handler (MINUS_EXPR
).fold_range (r
, type
, minusone
, lh
);
3999 operator_bitwise_not::op1_range (irange
&r
, tree type
,
4002 relation_trio
) const
4004 if (lhs
.undefined_p ())
4006 if (types_compatible_p (type
, boolean_type_node
))
4007 return op_logical_not
.op1_range (r
, type
, lhs
, op2
);
4009 // ~X is -1 - X and since bitwise NOT is involutary...do it again.
4010 return fold_range (r
, type
, lhs
, op2
);
4015 operator_cst::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4017 const irange
&rh ATTRIBUTE_UNUSED
,
4018 relation_trio
) const
4025 // Determine if there is a relationship between LHS and OP1.
4028 operator_identity::lhs_op1_relation (const irange
&lhs
,
4029 const irange
&op1 ATTRIBUTE_UNUSED
,
4030 const irange
&op2 ATTRIBUTE_UNUSED
,
4031 relation_kind
) const
4033 if (lhs
.undefined_p ())
4034 return VREL_VARYING
;
4035 // Simply a copy, so they are equivalent.
4040 operator_identity::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4042 const irange
&rh ATTRIBUTE_UNUSED
,
4043 relation_trio
) const
4050 operator_identity::op1_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4052 const irange
&op2 ATTRIBUTE_UNUSED
,
4053 relation_trio
) const
4060 class operator_unknown
: public range_operator
4062 using range_operator::fold_range
;
4064 virtual bool fold_range (irange
&r
, tree type
,
4067 relation_trio rel
= TRIO_VARYING
) const;
4071 operator_unknown::fold_range (irange
&r
, tree type
,
4072 const irange
&lh ATTRIBUTE_UNUSED
,
4073 const irange
&rh ATTRIBUTE_UNUSED
,
4074 relation_trio
) const
4076 r
.set_varying (type
);
4082 operator_abs::wi_fold (irange
&r
, tree type
,
4083 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4084 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4085 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4088 signop sign
= TYPE_SIGN (type
);
4089 unsigned prec
= TYPE_PRECISION (type
);
4091 // Pass through LH for the easy cases.
4092 if (sign
== UNSIGNED
|| wi::ge_p (lh_lb
, 0, sign
))
4094 r
= int_range
<1> (type
, lh_lb
, lh_ub
);
4098 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
4100 wide_int min_value
= wi::min_value (prec
, sign
);
4101 wide_int max_value
= wi::max_value (prec
, sign
);
4102 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lh_lb
, min_value
))
4104 r
.set_varying (type
);
4108 // ABS_EXPR may flip the range around, if the original range
4109 // included negative values.
4110 if (wi::eq_p (lh_lb
, min_value
))
4112 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
4113 // returned [-MIN,-MIN] so this preserves that behavior. PR37078
4114 if (wi::eq_p (lh_ub
, min_value
))
4116 r
= int_range
<1> (type
, min_value
, min_value
);
4122 min
= wi::abs (lh_lb
);
4124 if (wi::eq_p (lh_ub
, min_value
))
4127 max
= wi::abs (lh_ub
);
4129 // If the range contains zero then we know that the minimum value in the
4130 // range will be zero.
4131 if (wi::le_p (lh_lb
, 0, sign
) && wi::ge_p (lh_ub
, 0, sign
))
4133 if (wi::gt_p (min
, max
, sign
))
4135 min
= wi::zero (prec
);
4139 // If the range was reversed, swap MIN and MAX.
4140 if (wi::gt_p (min
, max
, sign
))
4141 std::swap (min
, max
);
4144 // If the new range has its limits swapped around (MIN > MAX), then
4145 // the operation caused one of them to wrap around. The only thing
4146 // we know is that the result is positive.
4147 if (wi::gt_p (min
, max
, sign
))
4149 min
= wi::zero (prec
);
4152 r
= int_range
<1> (type
, min
, max
);
4156 operator_abs::op1_range (irange
&r
, tree type
,
4159 relation_trio
) const
4161 if (empty_range_varying (r
, type
, lhs
, op2
))
4163 if (TYPE_UNSIGNED (type
))
4168 // Start with the positives because negatives are an impossible result.
4169 int_range_max positives
= range_positives (type
);
4170 positives
.intersect (lhs
);
4172 // Then add the negative of each pair:
4173 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4174 for (unsigned i
= 0; i
< positives
.num_pairs (); ++i
)
4175 r
.union_ (int_range
<1> (type
,
4176 -positives
.upper_bound (i
),
4177 -positives
.lower_bound (i
)));
4178 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
4179 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
4180 wide_int min_value
= wi::min_value (TYPE_PRECISION (type
), TYPE_SIGN (type
));
4181 wide_int lb
= lhs
.lower_bound ();
4182 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lb
, min_value
))
4183 r
.union_ (int_range
<2> (type
, lb
, lb
));
4188 class operator_absu
: public range_operator
4191 virtual void wi_fold (irange
&r
, tree type
,
4192 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4193 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4197 operator_absu::wi_fold (irange
&r
, tree type
,
4198 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4199 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4200 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4202 wide_int new_lb
, new_ub
;
4204 // Pass through VR0 the easy cases.
4205 if (wi::ges_p (lh_lb
, 0))
4212 new_lb
= wi::abs (lh_lb
);
4213 new_ub
= wi::abs (lh_ub
);
4215 // If the range contains zero then we know that the minimum
4216 // value in the range will be zero.
4217 if (wi::ges_p (lh_ub
, 0))
4219 if (wi::gtu_p (new_lb
, new_ub
))
4221 new_lb
= wi::zero (TYPE_PRECISION (type
));
4224 std::swap (new_lb
, new_ub
);
4227 gcc_checking_assert (TYPE_UNSIGNED (type
));
4228 r
= int_range
<1> (type
, new_lb
, new_ub
);
4233 operator_negate::fold_range (irange
&r
, tree type
,
4236 relation_trio
) const
4238 if (empty_range_varying (r
, type
, lh
, rh
))
4240 // -X is simply 0 - X.
4241 return range_op_handler (MINUS_EXPR
).fold_range (r
, type
,
4242 range_zero (type
), lh
);
4246 operator_negate::op1_range (irange
&r
, tree type
,
4249 relation_trio
) const
4251 // NEGATE is involutory.
4252 return fold_range (r
, type
, lhs
, op2
);
4257 operator_addr_expr::fold_range (irange
&r
, tree type
,
4260 relation_trio
) const
4262 if (empty_range_varying (r
, type
, lh
, rh
))
4265 // Return a non-null pointer of the LHS type (passed in op2).
4267 r
= range_zero (type
);
4268 else if (!contains_zero_p (lh
))
4269 r
= range_nonzero (type
);
4271 r
.set_varying (type
);
4276 operator_addr_expr::op1_range (irange
&r
, tree type
,
4279 relation_trio
) const
4281 return operator_addr_expr::fold_range (r
, type
, lhs
, op2
);
4284 // Initialize any integral operators to the primary table
4287 range_op_table::initialize_integral_ops ()
4289 set (TRUNC_DIV_EXPR
, op_trunc_div
);
4290 set (FLOOR_DIV_EXPR
, op_floor_div
);
4291 set (ROUND_DIV_EXPR
, op_round_div
);
4292 set (CEIL_DIV_EXPR
, op_ceil_div
);
4293 set (EXACT_DIV_EXPR
, op_exact_div
);
4294 set (LSHIFT_EXPR
, op_lshift
);
4295 set (RSHIFT_EXPR
, op_rshift
);
4296 set (TRUTH_AND_EXPR
, op_logical_and
);
4297 set (TRUTH_OR_EXPR
, op_logical_or
);
4298 set (TRUNC_MOD_EXPR
, op_trunc_mod
);
4299 set (TRUTH_NOT_EXPR
, op_logical_not
);
4300 set (IMAGPART_EXPR
, op_unknown
);
4301 set (REALPART_EXPR
, op_unknown
);
4302 set (ABSU_EXPR
, op_absu
);
4303 set (OP_WIDEN_MULT_SIGNED
, op_widen_mult_signed
);
4304 set (OP_WIDEN_MULT_UNSIGNED
, op_widen_mult_unsigned
);
4305 set (OP_WIDEN_PLUS_SIGNED
, op_widen_plus_signed
);
4306 set (OP_WIDEN_PLUS_UNSIGNED
, op_widen_plus_unsigned
);
4311 #include "selftest.h"
4315 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
4316 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
4317 #define INT16(x) wi::shwi ((x), TYPE_PRECISION (short_integer_type_node))
4318 #define UINT16(x) wi::uhwi ((x), TYPE_PRECISION (short_unsigned_type_node))
4319 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
4320 #define UCHAR(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_char_type_node))
4323 range_op_cast_tests ()
4325 int_range
<2> r0
, r1
, r2
, rold
;
4326 r0
.set_varying (integer_type_node
);
4327 wide_int maxint
= r0
.upper_bound ();
4329 // If a range is in any way outside of the range for the converted
4330 // to range, default to the range for the new type.
4331 r0
.set_varying (short_integer_type_node
);
4332 wide_int minshort
= r0
.lower_bound ();
4333 wide_int maxshort
= r0
.upper_bound ();
4334 if (TYPE_PRECISION (integer_type_node
)
4335 > TYPE_PRECISION (short_integer_type_node
))
4337 r1
= int_range
<1> (integer_type_node
,
4338 wi::zero (TYPE_PRECISION (integer_type_node
)),
4340 range_cast (r1
, short_integer_type_node
);
4341 ASSERT_TRUE (r1
.lower_bound () == minshort
4342 && r1
.upper_bound() == maxshort
);
4345 // (unsigned char)[-5,-1] => [251,255].
4346 r0
= rold
= int_range
<1> (signed_char_type_node
, SCHAR (-5), SCHAR (-1));
4347 range_cast (r0
, unsigned_char_type_node
);
4348 ASSERT_TRUE (r0
== int_range
<1> (unsigned_char_type_node
,
4349 UCHAR (251), UCHAR (255)));
4350 range_cast (r0
, signed_char_type_node
);
4351 ASSERT_TRUE (r0
== rold
);
4353 // (signed char)[15, 150] => [-128,-106][15,127].
4354 r0
= rold
= int_range
<1> (unsigned_char_type_node
, UCHAR (15), UCHAR (150));
4355 range_cast (r0
, signed_char_type_node
);
4356 r1
= int_range
<1> (signed_char_type_node
, SCHAR (15), SCHAR (127));
4357 r2
= int_range
<1> (signed_char_type_node
, SCHAR (-128), SCHAR (-106));
4359 ASSERT_TRUE (r1
== r0
);
4360 range_cast (r0
, unsigned_char_type_node
);
4361 ASSERT_TRUE (r0
== rold
);
4363 // (unsigned char)[-5, 5] => [0,5][251,255].
4364 r0
= rold
= int_range
<1> (signed_char_type_node
, SCHAR (-5), SCHAR (5));
4365 range_cast (r0
, unsigned_char_type_node
);
4366 r1
= int_range
<1> (unsigned_char_type_node
, UCHAR (251), UCHAR (255));
4367 r2
= int_range
<1> (unsigned_char_type_node
, UCHAR (0), UCHAR (5));
4369 ASSERT_TRUE (r0
== r1
);
4370 range_cast (r0
, signed_char_type_node
);
4371 ASSERT_TRUE (r0
== rold
);
4373 // (unsigned char)[-5,5] => [0,5][251,255].
4374 r0
= int_range
<1> (integer_type_node
, INT (-5), INT (5));
4375 range_cast (r0
, unsigned_char_type_node
);
4376 r1
= int_range
<1> (unsigned_char_type_node
, UCHAR (0), UCHAR (5));
4377 r1
.union_ (int_range
<1> (unsigned_char_type_node
, UCHAR (251), UCHAR (255)));
4378 ASSERT_TRUE (r0
== r1
);
4380 // (unsigned char)[5U,1974U] => [0,255].
4381 r0
= int_range
<1> (unsigned_type_node
, UINT (5), UINT (1974));
4382 range_cast (r0
, unsigned_char_type_node
);
4383 ASSERT_TRUE (r0
== int_range
<1> (unsigned_char_type_node
, UCHAR (0), UCHAR (255)));
4384 range_cast (r0
, integer_type_node
);
4385 // Going to a wider range should not sign extend.
4386 ASSERT_TRUE (r0
== int_range
<1> (integer_type_node
, INT (0), INT (255)));
4388 // (unsigned char)[-350,15] => [0,255].
4389 r0
= int_range
<1> (integer_type_node
, INT (-350), INT (15));
4390 range_cast (r0
, unsigned_char_type_node
);
4391 ASSERT_TRUE (r0
== (int_range
<1>
4392 (unsigned_char_type_node
,
4393 min_limit (unsigned_char_type_node
),
4394 max_limit (unsigned_char_type_node
))));
4396 // Casting [-120,20] from signed char to unsigned short.
4397 // => [0, 20][0xff88, 0xffff].
4398 r0
= int_range
<1> (signed_char_type_node
, SCHAR (-120), SCHAR (20));
4399 range_cast (r0
, short_unsigned_type_node
);
4400 r1
= int_range
<1> (short_unsigned_type_node
, UINT16 (0), UINT16 (20));
4401 r2
= int_range
<1> (short_unsigned_type_node
,
4402 UINT16 (0xff88), UINT16 (0xffff));
4404 ASSERT_TRUE (r0
== r1
);
4405 // A truncating cast back to signed char will work because [-120, 20]
4406 // is representable in signed char.
4407 range_cast (r0
, signed_char_type_node
);
4408 ASSERT_TRUE (r0
== int_range
<1> (signed_char_type_node
,
4409 SCHAR (-120), SCHAR (20)));
4411 // unsigned char -> signed short
4412 // (signed short)[(unsigned char)25, (unsigned char)250]
4413 // => [(signed short)25, (signed short)250]
4414 r0
= rold
= int_range
<1> (unsigned_char_type_node
, UCHAR (25), UCHAR (250));
4415 range_cast (r0
, short_integer_type_node
);
4416 r1
= int_range
<1> (short_integer_type_node
, INT16 (25), INT16 (250));
4417 ASSERT_TRUE (r0
== r1
);
4418 range_cast (r0
, unsigned_char_type_node
);
4419 ASSERT_TRUE (r0
== rold
);
4421 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
4422 r0
= int_range
<1> (long_long_integer_type_node
,
4423 min_limit (long_long_integer_type_node
),
4424 max_limit (long_long_integer_type_node
));
4425 range_cast (r0
, short_unsigned_type_node
);
4426 r1
= int_range
<1> (short_unsigned_type_node
,
4427 min_limit (short_unsigned_type_node
),
4428 max_limit (short_unsigned_type_node
));
4429 ASSERT_TRUE (r0
== r1
);
4431 // Casting NONZERO to a narrower type will wrap/overflow so
4432 // it's just the entire range for the narrower type.
4434 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
4435 // is outside of the range of a smaller range, return the full
4437 if (TYPE_PRECISION (integer_type_node
)
4438 > TYPE_PRECISION (short_integer_type_node
))
4440 r0
= range_nonzero (integer_type_node
);
4441 range_cast (r0
, short_integer_type_node
);
4442 r1
= int_range
<1> (short_integer_type_node
,
4443 min_limit (short_integer_type_node
),
4444 max_limit (short_integer_type_node
));
4445 ASSERT_TRUE (r0
== r1
);
4448 // Casting NONZERO from a narrower signed to a wider signed.
4450 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
4451 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
4452 r0
= range_nonzero (short_integer_type_node
);
4453 range_cast (r0
, integer_type_node
);
4454 r1
= int_range
<1> (integer_type_node
, INT (-32768), INT (-1));
4455 r2
= int_range
<1> (integer_type_node
, INT (1), INT (32767));
4457 ASSERT_TRUE (r0
== r1
);
4461 range_op_lshift_tests ()
4463 // Test that 0x808.... & 0x8.... still contains 0x8....
4464 // for a large set of numbers.
4467 tree big_type
= long_long_unsigned_type_node
;
4468 unsigned big_prec
= TYPE_PRECISION (big_type
);
4469 // big_num = 0x808,0000,0000,0000
4470 wide_int big_num
= wi::lshift (wi::uhwi (0x808, big_prec
),
4471 wi::uhwi (48, big_prec
));
4472 op_bitwise_and
.fold_range (res
, big_type
,
4473 int_range
<1> (big_type
),
4474 int_range
<1> (big_type
, big_num
, big_num
));
4475 // val = 0x8,0000,0000,0000
4476 wide_int val
= wi::lshift (wi::uhwi (8, big_prec
),
4477 wi::uhwi (48, big_prec
));
4478 ASSERT_TRUE (res
.contains_p (val
));
4481 if (TYPE_PRECISION (unsigned_type_node
) > 31)
4483 // unsigned VARYING = op1 << 1 should be VARYING.
4484 int_range
<2> lhs (unsigned_type_node
);
4485 int_range
<2> shift (unsigned_type_node
, INT (1), INT (1));
4487 op_lshift
.op1_range (op1
, unsigned_type_node
, lhs
, shift
);
4488 ASSERT_TRUE (op1
.varying_p ());
4490 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4491 int_range
<2> zero (unsigned_type_node
, UINT (0), UINT (0));
4492 op_lshift
.op1_range (op1
, unsigned_type_node
, zero
, shift
);
4493 ASSERT_TRUE (op1
.num_pairs () == 2);
4494 // Remove the [0,0] range.
4495 op1
.intersect (zero
);
4496 ASSERT_TRUE (op1
.num_pairs () == 1);
4497 // op1 << 1 should be [0x8000,0x8000] << 1,
4498 // which should result in [0,0].
4499 int_range_max result
;
4500 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
4501 ASSERT_TRUE (result
== zero
);
4503 // signed VARYING = op1 << 1 should be VARYING.
4504 if (TYPE_PRECISION (integer_type_node
) > 31)
4506 // unsigned VARYING = op1 << 1 should be VARYING.
4507 int_range
<2> lhs (integer_type_node
);
4508 int_range
<2> shift (integer_type_node
, INT (1), INT (1));
4510 op_lshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
4511 ASSERT_TRUE (op1
.varying_p ());
4513 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4514 int_range
<2> zero (integer_type_node
, INT (0), INT (0));
4515 op_lshift
.op1_range (op1
, integer_type_node
, zero
, shift
);
4516 ASSERT_TRUE (op1
.num_pairs () == 2);
4517 // Remove the [0,0] range.
4518 op1
.intersect (zero
);
4519 ASSERT_TRUE (op1
.num_pairs () == 1);
4520 // op1 << 1 should be [0x8000,0x8000] << 1,
4521 // which should result in [0,0].
4522 int_range_max result
;
4523 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
4524 ASSERT_TRUE (result
== zero
);
4529 range_op_rshift_tests ()
4531 // unsigned: [3, MAX] = OP1 >> 1
4533 int_range_max
lhs (unsigned_type_node
,
4534 UINT (3), max_limit (unsigned_type_node
));
4535 int_range_max
one (unsigned_type_node
,
4536 wi::one (TYPE_PRECISION (unsigned_type_node
)),
4537 wi::one (TYPE_PRECISION (unsigned_type_node
)));
4539 op_rshift
.op1_range (op1
, unsigned_type_node
, lhs
, one
);
4540 ASSERT_FALSE (op1
.contains_p (UINT (3)));
4543 // signed: [3, MAX] = OP1 >> 1
4545 int_range_max
lhs (integer_type_node
,
4546 INT (3), max_limit (integer_type_node
));
4547 int_range_max
one (integer_type_node
, INT (1), INT (1));
4549 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
4550 ASSERT_FALSE (op1
.contains_p (INT (-2)));
4553 // This is impossible, so OP1 should be [].
4554 // signed: [MIN, MIN] = OP1 >> 1
4556 int_range_max
lhs (integer_type_node
,
4557 min_limit (integer_type_node
),
4558 min_limit (integer_type_node
));
4559 int_range_max
one (integer_type_node
, INT (1), INT (1));
4561 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
4562 ASSERT_TRUE (op1
.undefined_p ());
4565 // signed: ~[-1] = OP1 >> 31
4566 if (TYPE_PRECISION (integer_type_node
) > 31)
4568 int_range_max
lhs (integer_type_node
, INT (-1), INT (-1), VR_ANTI_RANGE
);
4569 int_range_max
shift (integer_type_node
, INT (31), INT (31));
4571 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
4572 int_range_max negatives
= range_negatives (integer_type_node
);
4573 negatives
.intersect (op1
);
4574 ASSERT_TRUE (negatives
.undefined_p ());
4579 range_op_bitwise_and_tests ()
4582 wide_int min
= min_limit (integer_type_node
);
4583 wide_int max
= max_limit (integer_type_node
);
4584 wide_int tiny
= wi::add (min
, wi::one (TYPE_PRECISION (integer_type_node
)));
4585 int_range_max
i1 (integer_type_node
, tiny
, max
);
4586 int_range_max
i2 (integer_type_node
, INT (255), INT (255));
4588 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
4589 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
4590 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
4592 // VARYING = OP1 & 255: OP1 is VARYING
4593 i1
= int_range
<1> (integer_type_node
);
4594 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
4595 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
4597 // For 0 = x & MASK, x is ~MASK.
4599 int_range
<2> zero (integer_type_node
, INT (0), INT (0));
4600 int_range
<2> mask
= int_range
<2> (integer_type_node
, INT (7), INT (7));
4601 op_bitwise_and
.op1_range (res
, integer_type_node
, zero
, mask
);
4602 wide_int inv
= wi::shwi (~7U, TYPE_PRECISION (integer_type_node
));
4603 ASSERT_TRUE (res
.get_nonzero_bits () == inv
);
4606 // (NONZERO | X) is nonzero.
4607 i1
.set_nonzero (integer_type_node
);
4608 i2
.set_varying (integer_type_node
);
4609 op_bitwise_or
.fold_range (res
, integer_type_node
, i1
, i2
);
4610 ASSERT_TRUE (res
.nonzero_p ());
4612 // (NEGATIVE | X) is nonzero.
4613 i1
= int_range
<1> (integer_type_node
, INT (-5), INT (-3));
4614 i2
.set_varying (integer_type_node
);
4615 op_bitwise_or
.fold_range (res
, integer_type_node
, i1
, i2
);
4616 ASSERT_FALSE (res
.contains_p (INT (0)));
4620 range_relational_tests ()
4622 int_range
<2> lhs (unsigned_char_type_node
);
4623 int_range
<2> op1 (unsigned_char_type_node
, UCHAR (8), UCHAR (10));
4624 int_range
<2> op2 (unsigned_char_type_node
, UCHAR (20), UCHAR (20));
4626 // Never wrapping additions mean LHS > OP1.
4627 relation_kind code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4628 ASSERT_TRUE (code
== VREL_GT
);
4630 // Most wrapping additions mean nothing...
4631 op1
= int_range
<2> (unsigned_char_type_node
, UCHAR (8), UCHAR (10));
4632 op2
= int_range
<2> (unsigned_char_type_node
, UCHAR (0), UCHAR (255));
4633 code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4634 ASSERT_TRUE (code
== VREL_VARYING
);
4636 // However, always wrapping additions mean LHS < OP1.
4637 op1
= int_range
<2> (unsigned_char_type_node
, UCHAR (1), UCHAR (255));
4638 op2
= int_range
<2> (unsigned_char_type_node
, UCHAR (255), UCHAR (255));
4639 code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4640 ASSERT_TRUE (code
== VREL_LT
);
4646 range_op_rshift_tests ();
4647 range_op_lshift_tests ();
4648 range_op_bitwise_and_tests ();
4649 range_op_cast_tests ();
4650 range_relational_tests ();
4652 extern void range_op_float_tests ();
4653 range_op_float_tests ();
4656 } // namespace selftest
4658 #endif // CHECKING_P