gcc/range-op.cc
1 /* Code for range operators.
2 Copyright (C) 2017-2020 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "insn-codes.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
36 #include "flags.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "calls.h"
40 #include "cfganal.h"
41 #include "gimple-fold.h"
42 #include "tree-eh.h"
43 #include "gimple-iterator.h"
44 #include "gimple-walk.h"
45 #include "tree-cfg.h"
46 #include "wide-int.h"
47 #include "range-op.h"
49 // Return the upper limit for a type.
51 static inline wide_int
52 max_limit (const_tree type)
54 return wi::max_value (TYPE_PRECISION (type) , TYPE_SIGN (type));
57 // Return the lower limit for a type.
59 static inline wide_int
60 min_limit (const_tree type)
62 return wi::min_value (TYPE_PRECISION (type) , TYPE_SIGN (type));
65 // If the range of either op1 or op2 is undefined, set the result to
66 // varying and return TRUE. If the caller truly cares about a result,
67 // it should pass in a varying range for any operand whose undefined
68 // range it wants treated as a varying.
70 inline bool
71 empty_range_varying (irange &r, tree type,
72 const irange &op1, const irange & op2)
74 if (op1.undefined_p () || op2.undefined_p ())
76 r.set_varying (type);
77 return true;
79 else
80 return false;
83 // Return TRUE if shifting by OP is undefined behavior, and set R to
84 // the appropriate range.
86 static inline bool
87 undefined_shift_range_check (irange &r, tree type, const irange &op)
89 if (op.undefined_p ())
91 r.set_undefined ();
92 return true;
95 // Shifting by any value outside [0..prec-1] gets undefined
96 // behavior from the shift operation. We cannot even trust
97 // SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
98 // shifts, and the operation at the tree level may be widened.
99 if (wi::lt_p (op.lower_bound (), 0, TYPE_SIGN (op.type ()))
100 || wi::ge_p (op.upper_bound (),
101 TYPE_PRECISION (type), TYPE_SIGN (op.type ())))
103 r.set_varying (type);
104 return true;
106 return false;
109 // Return TRUE if 0 is within [WMIN, WMAX].
111 static inline bool
112 wi_includes_zero_p (tree type, const wide_int &wmin, const wide_int &wmax)
114 signop sign = TYPE_SIGN (type);
115 return wi::le_p (wmin, 0, sign) && wi::ge_p (wmax, 0, sign);
118 // Return TRUE if [WMIN, WMAX] is the singleton 0.
120 static inline bool
121 wi_zero_p (tree type, const wide_int &wmin, const wide_int &wmax)
123 unsigned prec = TYPE_PRECISION (type);
124 return wmin == wmax && wi::eq_p (wmin, wi::zero (prec));
127 // Default wide_int fold operation returns [MIN, MAX].
129 void
130 range_operator::wi_fold (irange &r, tree type,
131 const wide_int &lh_lb ATTRIBUTE_UNUSED,
132 const wide_int &lh_ub ATTRIBUTE_UNUSED,
133 const wide_int &rh_lb ATTRIBUTE_UNUSED,
134 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
136 gcc_checking_assert (irange::supports_type_p (type));
137 r.set_varying (type);
140 // The default for fold is to break all ranges into sub-ranges and
141 // invoke the wi_fold method on each sub-range pair.
143 bool
144 range_operator::fold_range (irange &r, tree type,
145 const irange &lh,
146 const irange &rh) const
148 gcc_checking_assert (irange::supports_type_p (type));
149 if (empty_range_varying (r, type, lh, rh))
150 return true;
152 unsigned num_lh = lh.num_pairs ();
153 unsigned num_rh = rh.num_pairs ();
155 // If both ranges are single pairs, fold directly into the result range.
156 if (num_lh == 1 && num_rh == 1)
158 wi_fold (r, type, lh.lower_bound (0), lh.upper_bound (0),
159 rh.lower_bound (0), rh.upper_bound (0));
160 return true;
163 int_range_max tmp;
164 r.set_undefined ();
165 for (unsigned x = 0; x < num_lh; ++x)
166 for (unsigned y = 0; y < num_rh; ++y)
168 wide_int lh_lb = lh.lower_bound (x);
169 wide_int lh_ub = lh.upper_bound (x);
170 wide_int rh_lb = rh.lower_bound (y);
171 wide_int rh_ub = rh.upper_bound (y);
172 wi_fold (tmp, type, lh_lb, lh_ub, rh_lb, rh_ub);
173 r.union_ (tmp);
174 if (r.varying_p ())
175 return true;
177 return true;
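// For example, folding a PLUS_EXPR of [1,2][8,9] with [10,10] calls
// wi_fold on each sub-range pair ([1,2]+[10,10] and [8,9]+[10,10]) and
// unions the results, yielding [11,12][18,19].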
180 // The default for op1_range is to return false.
182 bool
183 range_operator::op1_range (irange &r ATTRIBUTE_UNUSED,
184 tree type ATTRIBUTE_UNUSED,
185 const irange &lhs ATTRIBUTE_UNUSED,
186 const irange &op2 ATTRIBUTE_UNUSED) const
188 return false;
191 // The default for op2_range is to return false.
193 bool
194 range_operator::op2_range (irange &r ATTRIBUTE_UNUSED,
195 tree type ATTRIBUTE_UNUSED,
196 const irange &lhs ATTRIBUTE_UNUSED,
197 const irange &op1 ATTRIBUTE_UNUSED) const
199 return false;
203 // Create and return a range from a pair of wide-ints that are known
204 // to have overflowed (or underflowed).
206 static void
207 value_range_from_overflowed_bounds (irange &r, tree type,
208 const wide_int &wmin,
209 const wide_int &wmax)
211 const signop sgn = TYPE_SIGN (type);
212 const unsigned int prec = TYPE_PRECISION (type);
214 wide_int tmin = wide_int::from (wmin, prec, sgn);
215 wide_int tmax = wide_int::from (wmax, prec, sgn);
217 bool covers = false;
218 wide_int tem = tmin;
219 tmin = tmax + 1;
220 if (wi::cmp (tmin, tmax, sgn) < 0)
221 covers = true;
222 tmax = tem - 1;
223 if (wi::cmp (tmax, tem, sgn) > 0)
224 covers = true;
226 // If the anti-range would cover nothing, drop to varying.
227 // Likewise if the anti-range bounds are outside of the type's
228 // values.
229 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
230 r.set_varying (type);
231 else
233 tree tree_min = wide_int_to_tree (type, tmin);
234 tree tree_max = wide_int_to_tree (type, tmax);
235 r.set (tree_min, tree_max, VR_ANTI_RANGE);
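// For example, if an unsigned char addition produces WMIN = 250 and an
// upper bound that wrapped to 4 (true value 260), the code above yields
// the anti-range ~[5, 249], which represents [250,255] and [0,4].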
239 // Create and return a range from a pair of wide-ints. MIN_OVF and
240 // MAX_OVF describe any overflow that might have occurred while
241 // calculating WMIN and WMAX respectively.
243 static void
244 value_range_with_overflow (irange &r, tree type,
245 const wide_int &wmin, const wide_int &wmax,
246 wi::overflow_type min_ovf = wi::OVF_NONE,
247 wi::overflow_type max_ovf = wi::OVF_NONE)
249 const signop sgn = TYPE_SIGN (type);
250 const unsigned int prec = TYPE_PRECISION (type);
251 const bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
253 // For one bit precision if max != min, then the range covers all
254 // values.
255 if (prec == 1 && wi::ne_p (wmax, wmin))
257 r.set_varying (type);
258 return;
261 if (overflow_wraps)
263 // If overflow wraps, truncate the values and adjust the range,
264 // kind, and bounds appropriately.
265 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
267 wide_int tmin = wide_int::from (wmin, prec, sgn);
268 wide_int tmax = wide_int::from (wmax, prec, sgn);
269 // If the limits are swapped, we wrapped around and cover
270 // the entire range.
271 if (wi::gt_p (tmin, tmax, sgn))
272 r.set_varying (type);
273 else
274 // No overflow or both overflow or underflow. The range
275 // kind stays normal.
276 r.set (wide_int_to_tree (type, tmin),
277 wide_int_to_tree (type, tmax));
278 return;
281 if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
282 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
283 value_range_from_overflowed_bounds (r, type, wmin, wmax);
284 else
285 // Other underflow and/or overflow, drop to VR_VARYING.
286 r.set_varying (type);
288 else
290 // If both bounds either underflowed or overflowed, then the result
291 // is undefined.
292 if ((min_ovf == wi::OVF_OVERFLOW && max_ovf == wi::OVF_OVERFLOW)
293 || (min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_UNDERFLOW))
295 r.set_undefined ();
296 return;
299 // If overflow does not wrap, saturate to [MIN, MAX].
300 wide_int new_lb, new_ub;
301 if (min_ovf == wi::OVF_UNDERFLOW)
302 new_lb = wi::min_value (prec, sgn);
303 else if (min_ovf == wi::OVF_OVERFLOW)
304 new_lb = wi::max_value (prec, sgn);
305 else
306 new_lb = wmin;
308 if (max_ovf == wi::OVF_UNDERFLOW)
309 new_ub = wi::min_value (prec, sgn);
310 else if (max_ovf == wi::OVF_OVERFLOW)
311 new_ub = wi::max_value (prec, sgn);
312 else
313 new_ub = wmax;
315 r.set (wide_int_to_tree (type, new_lb),
316 wide_int_to_tree (type, new_ub));
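// For example, for a signed char whose overflow does not wrap, adding
// [100,120] + [10,20] overflows the upper bound (true value 140), so that
// bound saturates to 127 and the result is [110,127].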
320 // Create and return a range from a pair of wide-ints. Canonicalize
321 // the case where the bounds are swapped, in which case we transform
322 // [10,5] into [MIN,5][10,MAX].
324 static inline void
325 create_possibly_reversed_range (irange &r, tree type,
326 const wide_int &new_lb, const wide_int &new_ub)
328 signop s = TYPE_SIGN (type);
329 // If the bounds are swapped, treat the result as if an overflow occurred.
330 if (wi::gt_p (new_lb, new_ub, s))
331 value_range_from_overflowed_bounds (r, type, new_lb, new_ub);
332 else
333 // Otherwise it's just a normal range.
334 r.set (wide_int_to_tree (type, new_lb), wide_int_to_tree (type, new_ub));
337 // Return an irange instance that is a boolean TRUE.
339 static inline int_range<1>
340 range_true (tree type)
342 unsigned prec = TYPE_PRECISION (type);
343 return int_range<1> (type, wi::one (prec), wi::one (prec));
346 // Return an irange instance that is a boolean FALSE.
348 static inline int_range<1>
349 range_false (tree type)
351 unsigned prec = TYPE_PRECISION (type);
352 return int_range<1> (type, wi::zero (prec), wi::zero (prec));
355 // Return an irange that covers both true and false.
357 static inline int_range<1>
358 range_true_and_false (tree type)
360 unsigned prec = TYPE_PRECISION (type);
361 return int_range<1> (type, wi::zero (prec), wi::one (prec));
364 enum bool_range_state { BRS_FALSE, BRS_TRUE, BRS_EMPTY, BRS_FULL };
366 // Return the summary information about boolean range LHS. Return an
367 // "interesting" range in R. For EMPTY or FULL, return the equivalent
368 // range for TYPE; for BRS_TRUE and BRS_FALSE, return the negation of
369 // the bool range.
371 static bool_range_state
372 get_bool_state (irange &r, const irange &lhs, tree val_type)
374 // If there is no result, then this is unexecutable.
375 if (lhs.undefined_p ())
377 r.set_undefined ();
378 return BRS_EMPTY;
381 if (lhs.zero_p ())
382 return BRS_FALSE;
384 // For TRUE, we can't just test for [1,1] because Ada can have
385 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
386 if (lhs.contains_p (build_zero_cst (lhs.type ())))
388 r.set_varying (val_type);
389 return BRS_FULL;
391 return BRS_TRUE;
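// For example, an LHS of [0,0] yields BRS_FALSE, [0,1] yields BRS_FULL
// (with R set to varying), and a range that excludes zero, such as ~[0]
// or [1,1], yields BRS_TRUE.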
395 class operator_equal : public range_operator
397 public:
398 virtual bool fold_range (irange &r, tree type,
399 const irange &op1,
400 const irange &op2) const;
401 virtual bool op1_range (irange &r, tree type,
402 const irange &lhs,
403 const irange &val) const;
404 virtual bool op2_range (irange &r, tree type,
405 const irange &lhs,
406 const irange &val) const;
407 } op_equal;
409 bool
410 operator_equal::fold_range (irange &r, tree type,
411 const irange &op1,
412 const irange &op2) const
414 if (empty_range_varying (r, type, op1, op2))
415 return true;
417 // If both ranges consist of a single value, we can compare those
418 // values directly and know for certain whether they are equal.
419 if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
420 && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
422 if (wi::eq_p (op1.lower_bound (), op2.upper_bound()))
423 r = range_true (type);
424 else
425 r = range_false (type);
427 else
429 // If ranges do not intersect, we know the range is not equal,
430 // otherwise we don't know anything for sure.
431 r = op1;
432 r.intersect (op2);
433 if (r.undefined_p ())
434 r = range_false (type);
435 else
436 r = range_true_and_false (type);
438 return true;
441 bool
442 operator_equal::op1_range (irange &r, tree type,
443 const irange &lhs,
444 const irange &op2) const
446 switch (get_bool_state (r, lhs, type))
448 case BRS_FALSE:
449 // If the result is false, the only time we know anything is
450 // if OP2 is a constant.
451 if (wi::eq_p (op2.lower_bound(), op2.upper_bound()))
453 r = op2;
454 r.invert ();
456 else
457 r.set_varying (type);
458 break;
460 case BRS_TRUE:
461 // If it's true, the result is the same as OP2.
462 r = op2;
463 break;
465 default:
466 break;
468 return true;
471 bool
472 operator_equal::op2_range (irange &r, tree type,
473 const irange &lhs,
474 const irange &op1) const
476 return operator_equal::op1_range (r, type, lhs, op1);
480 class operator_not_equal : public range_operator
482 public:
483 virtual bool fold_range (irange &r, tree type,
484 const irange &op1,
485 const irange &op2) const;
486 virtual bool op1_range (irange &r, tree type,
487 const irange &lhs,
488 const irange &op2) const;
489 virtual bool op2_range (irange &r, tree type,
490 const irange &lhs,
491 const irange &op1) const;
492 } op_not_equal;
494 bool
495 operator_not_equal::fold_range (irange &r, tree type,
496 const irange &op1,
497 const irange &op2) const
499 if (empty_range_varying (r, type, op1, op2))
500 return true;
502 // If both ranges consist of a single value, we can compare those
503 // values directly and know for certain whether they are equal.
504 if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
505 && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
507 if (wi::ne_p (op1.lower_bound (), op2.upper_bound()))
508 r = range_true (type);
509 else
510 r = range_false (type);
512 else
514 // If ranges do not intersect, we know the range is not equal,
515 // otherwise we don't know anything for sure.
516 r = op1;
517 r.intersect (op2);
518 if (r.undefined_p ())
519 r = range_true (type);
520 else
521 r = range_true_and_false (type);
523 return true;
526 bool
527 operator_not_equal::op1_range (irange &r, tree type,
528 const irange &lhs,
529 const irange &op2) const
531 switch (get_bool_state (r, lhs, type))
533 case BRS_TRUE:
534 // If the result is true, the only time we know anything is if
535 // OP2 is a constant.
536 if (wi::eq_p (op2.lower_bound(), op2.upper_bound()))
538 r = op2;
539 r.invert ();
541 else
542 r.set_varying (type);
543 break;
545 case BRS_FALSE:
546 // If it's false, the result is the same as OP2.
547 r = op2;
548 break;
550 default:
551 break;
553 return true;
557 bool
558 operator_not_equal::op2_range (irange &r, tree type,
559 const irange &lhs,
560 const irange &op1) const
562 return operator_not_equal::op1_range (r, type, lhs, op1);
565 // (X < VAL) produces the range of [MIN, VAL - 1].
567 static void
568 build_lt (irange &r, tree type, const wide_int &val)
570 wi::overflow_type ov;
571 wide_int lim = wi::sub (val, 1, TYPE_SIGN (type), &ov);
573 // If val - 1 underflows, the condition is X < MIN, which is an empty range.
574 if (ov)
575 r.set_undefined ();
576 else
577 r = int_range<1> (type, min_limit (type), lim);
580 // (X <= VAL) produces the range of [MIN, VAL].
582 static void
583 build_le (irange &r, tree type, const wide_int &val)
585 r = int_range<1> (type, min_limit (type), val);
588 // (X > VAL) produces the range of [VAL + 1, MAX].
590 static void
591 build_gt (irange &r, tree type, const wide_int &val)
593 wi::overflow_type ov;
594 wide_int lim = wi::add (val, 1, TYPE_SIGN (type), &ov);
595 // If val + 1 overflows, the condition is X > MAX, which is an empty range.
596 if (ov)
597 r.set_undefined ();
598 else
599 r = int_range<1> (type, lim, max_limit (type));
602 // (X >= val) produces the range of [VAL, MAX].
604 static void
605 build_ge (irange &r, tree type, const wide_int &val)
607 r = int_range<1> (type, val, max_limit (type));
611 class operator_lt : public range_operator
613 public:
614 virtual bool fold_range (irange &r, tree type,
615 const irange &op1,
616 const irange &op2) const;
617 virtual bool op1_range (irange &r, tree type,
618 const irange &lhs,
619 const irange &op2) const;
620 virtual bool op2_range (irange &r, tree type,
621 const irange &lhs,
622 const irange &op1) const;
623 } op_lt;
625 bool
626 operator_lt::fold_range (irange &r, tree type,
627 const irange &op1,
628 const irange &op2) const
630 if (empty_range_varying (r, type, op1, op2))
631 return true;
633 signop sign = TYPE_SIGN (op1.type ());
634 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
636 if (wi::lt_p (op1.upper_bound (), op2.lower_bound (), sign))
637 r = range_true (type);
638 else if (!wi::lt_p (op1.lower_bound (), op2.upper_bound (), sign))
639 r = range_false (type);
640 else
641 r = range_true_and_false (type);
642 return true;
645 bool
646 operator_lt::op1_range (irange &r, tree type,
647 const irange &lhs,
648 const irange &op2) const
650 switch (get_bool_state (r, lhs, type))
652 case BRS_TRUE:
653 build_lt (r, type, op2.upper_bound ());
654 break;
656 case BRS_FALSE:
657 build_ge (r, type, op2.lower_bound ());
658 break;
660 default:
661 break;
663 return true;
666 bool
667 operator_lt::op2_range (irange &r, tree type,
668 const irange &lhs,
669 const irange &op1) const
671 switch (get_bool_state (r, lhs, type))
673 case BRS_FALSE:
674 build_le (r, type, op1.upper_bound ());
675 break;
677 case BRS_TRUE:
678 build_gt (r, type, op1.lower_bound ());
679 break;
681 default:
682 break;
684 return true;
688 class operator_le : public range_operator
690 public:
691 virtual bool fold_range (irange &r, tree type,
692 const irange &op1,
693 const irange &op2) const;
694 virtual bool op1_range (irange &r, tree type,
695 const irange &lhs,
696 const irange &op2) const;
697 virtual bool op2_range (irange &r, tree type,
698 const irange &lhs,
699 const irange &op1) const;
700 } op_le;
702 bool
703 operator_le::fold_range (irange &r, tree type,
704 const irange &op1,
705 const irange &op2) const
707 if (empty_range_varying (r, type, op1, op2))
708 return true;
710 signop sign = TYPE_SIGN (op1.type ());
711 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
713 if (wi::le_p (op1.upper_bound (), op2.lower_bound (), sign))
714 r = range_true (type);
715 else if (!wi::le_p (op1.lower_bound (), op2.upper_bound (), sign))
716 r = range_false (type);
717 else
718 r = range_true_and_false (type);
719 return true;
722 bool
723 operator_le::op1_range (irange &r, tree type,
724 const irange &lhs,
725 const irange &op2) const
727 switch (get_bool_state (r, lhs, type))
729 case BRS_TRUE:
730 build_le (r, type, op2.upper_bound ());
731 break;
733 case BRS_FALSE:
734 build_gt (r, type, op2.lower_bound ());
735 break;
737 default:
738 break;
740 return true;
743 bool
744 operator_le::op2_range (irange &r, tree type,
745 const irange &lhs,
746 const irange &op1) const
748 switch (get_bool_state (r, lhs, type))
750 case BRS_FALSE:
751 build_lt (r, type, op1.upper_bound ());
752 break;
754 case BRS_TRUE:
755 build_ge (r, type, op1.lower_bound ());
756 break;
758 default:
759 break;
761 return true;
765 class operator_gt : public range_operator
767 public:
768 virtual bool fold_range (irange &r, tree type,
769 const irange &op1,
770 const irange &op2) const;
771 virtual bool op1_range (irange &r, tree type,
772 const irange &lhs,
773 const irange &op2) const;
774 virtual bool op2_range (irange &r, tree type,
775 const irange &lhs,
776 const irange &op1) const;
777 } op_gt;
779 bool
780 operator_gt::fold_range (irange &r, tree type,
781 const irange &op1, const irange &op2) const
783 if (empty_range_varying (r, type, op1, op2))
784 return true;
786 signop sign = TYPE_SIGN (op1.type ());
787 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
789 if (wi::gt_p (op1.lower_bound (), op2.upper_bound (), sign))
790 r = range_true (type);
791 else if (!wi::gt_p (op1.upper_bound (), op2.lower_bound (), sign))
792 r = range_false (type);
793 else
794 r = range_true_and_false (type);
795 return true;
798 bool
799 operator_gt::op1_range (irange &r, tree type,
800 const irange &lhs, const irange &op2) const
802 switch (get_bool_state (r, lhs, type))
804 case BRS_TRUE:
805 build_gt (r, type, op2.lower_bound ());
806 break;
808 case BRS_FALSE:
809 build_le (r, type, op2.upper_bound ());
810 break;
812 default:
813 break;
815 return true;
818 bool
819 operator_gt::op2_range (irange &r, tree type,
820 const irange &lhs,
821 const irange &op1) const
823 switch (get_bool_state (r, lhs, type))
825 case BRS_FALSE:
826 build_ge (r, type, op1.lower_bound ());
827 break;
829 case BRS_TRUE:
830 build_lt (r, type, op1.upper_bound ());
831 break;
833 default:
834 break;
836 return true;
840 class operator_ge : public range_operator
842 public:
843 virtual bool fold_range (irange &r, tree type,
844 const irange &op1,
845 const irange &op2) const;
846 virtual bool op1_range (irange &r, tree type,
847 const irange &lhs,
848 const irange &op2) const;
849 virtual bool op2_range (irange &r, tree type,
850 const irange &lhs,
851 const irange &op1) const;
852 } op_ge;
854 bool
855 operator_ge::fold_range (irange &r, tree type,
856 const irange &op1,
857 const irange &op2) const
859 if (empty_range_varying (r, type, op1, op2))
860 return true;
862 signop sign = TYPE_SIGN (op1.type ());
863 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
865 if (wi::ge_p (op1.lower_bound (), op2.upper_bound (), sign))
866 r = range_true (type);
867 else if (!wi::ge_p (op1.upper_bound (), op2.lower_bound (), sign))
868 r = range_false (type);
869 else
870 r = range_true_and_false (type);
871 return true;
874 bool
875 operator_ge::op1_range (irange &r, tree type,
876 const irange &lhs,
877 const irange &op2) const
879 switch (get_bool_state (r, lhs, type))
881 case BRS_TRUE:
882 build_ge (r, type, op2.lower_bound ());
883 break;
885 case BRS_FALSE:
886 build_lt (r, type, op2.upper_bound ());
887 break;
889 default:
890 break;
892 return true;
895 bool
896 operator_ge::op2_range (irange &r, tree type,
897 const irange &lhs,
898 const irange &op1) const
900 switch (get_bool_state (r, lhs, type))
902 case BRS_FALSE:
903 build_gt (r, type, op1.lower_bound ());
904 break;
906 case BRS_TRUE:
907 build_le (r, type, op1.upper_bound ());
908 break;
910 default:
911 break;
913 return true;
917 class operator_plus : public range_operator
919 public:
920 virtual bool op1_range (irange &r, tree type,
921 const irange &lhs,
922 const irange &op2) const;
923 virtual bool op2_range (irange &r, tree type,
924 const irange &lhs,
925 const irange &op1) const;
926 virtual void wi_fold (irange &r, tree type,
927 const wide_int &lh_lb,
928 const wide_int &lh_ub,
929 const wide_int &rh_lb,
930 const wide_int &rh_ub) const;
931 } op_plus;
933 void
934 operator_plus::wi_fold (irange &r, tree type,
935 const wide_int &lh_lb, const wide_int &lh_ub,
936 const wide_int &rh_lb, const wide_int &rh_ub) const
938 wi::overflow_type ov_lb, ov_ub;
939 signop s = TYPE_SIGN (type);
940 wide_int new_lb = wi::add (lh_lb, rh_lb, s, &ov_lb);
941 wide_int new_ub = wi::add (lh_ub, rh_ub, s, &ov_ub);
942 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
945 bool
946 operator_plus::op1_range (irange &r, tree type,
947 const irange &lhs,
948 const irange &op2) const
950 return range_op_handler (MINUS_EXPR, type)->fold_range (r, type, lhs, op2);
953 bool
954 operator_plus::op2_range (irange &r, tree type,
955 const irange &lhs,
956 const irange &op1) const
958 return range_op_handler (MINUS_EXPR, type)->fold_range (r, type, lhs, op1);
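// A minimal usage sketch (illustrative only, not part of the original
// sources): folding [1,5] + [10,20] for a 32-bit int through the generic
// handler lookup.
//
//   int_range<1> op1 (build_int_cst (integer_type_node, 1),
//                     build_int_cst (integer_type_node, 5));
//   int_range<1> op2 (build_int_cst (integer_type_node, 10),
//                     build_int_cst (integer_type_node, 20));
//   int_range_max res;
//   range_op_handler (PLUS_EXPR, integer_type_node)->fold_range
//     (res, integer_type_node, op1, op2);
//   // res is now [11, 25].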
962 class operator_minus : public range_operator
964 public:
965 virtual bool op1_range (irange &r, tree type,
966 const irange &lhs,
967 const irange &op2) const;
968 virtual bool op2_range (irange &r, tree type,
969 const irange &lhs,
970 const irange &op1) const;
971 virtual void wi_fold (irange &r, tree type,
972 const wide_int &lh_lb,
973 const wide_int &lh_ub,
974 const wide_int &rh_lb,
975 const wide_int &rh_ub) const;
976 } op_minus;
978 void
979 operator_minus::wi_fold (irange &r, tree type,
980 const wide_int &lh_lb, const wide_int &lh_ub,
981 const wide_int &rh_lb, const wide_int &rh_ub) const
983 wi::overflow_type ov_lb, ov_ub;
984 signop s = TYPE_SIGN (type);
985 wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb);
986 wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub);
987 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
990 bool
991 operator_minus::op1_range (irange &r, tree type,
992 const irange &lhs,
993 const irange &op2) const
995 return range_op_handler (PLUS_EXPR, type)->fold_range (r, type, lhs, op2);
998 bool
999 operator_minus::op2_range (irange &r, tree type,
1000 const irange &lhs,
1001 const irange &op1) const
1003 return fold_range (r, type, op1, lhs);
1007 class operator_min : public range_operator
1009 public:
1010 virtual void wi_fold (irange &r, tree type,
1011 const wide_int &lh_lb,
1012 const wide_int &lh_ub,
1013 const wide_int &rh_lb,
1014 const wide_int &rh_ub) const;
1015 } op_min;
1017 void
1018 operator_min::wi_fold (irange &r, tree type,
1019 const wide_int &lh_lb, const wide_int &lh_ub,
1020 const wide_int &rh_lb, const wide_int &rh_ub) const
1022 signop s = TYPE_SIGN (type);
1023 wide_int new_lb = wi::min (lh_lb, rh_lb, s);
1024 wide_int new_ub = wi::min (lh_ub, rh_ub, s);
1025 value_range_with_overflow (r, type, new_lb, new_ub);
1029 class operator_max : public range_operator
1031 public:
1032 virtual void wi_fold (irange &r, tree type,
1033 const wide_int &lh_lb,
1034 const wide_int &lh_ub,
1035 const wide_int &rh_lb,
1036 const wide_int &rh_ub) const;
1037 } op_max;
1039 void
1040 operator_max::wi_fold (irange &r, tree type,
1041 const wide_int &lh_lb, const wide_int &lh_ub,
1042 const wide_int &rh_lb, const wide_int &rh_ub) const
1044 signop s = TYPE_SIGN (type);
1045 wide_int new_lb = wi::max (lh_lb, rh_lb, s);
1046 wide_int new_ub = wi::max (lh_ub, rh_ub, s);
1047 value_range_with_overflow (r, type, new_lb, new_ub);
1051 class cross_product_operator : public range_operator
1053 public:
1054 // Perform an operation between two wide-ints and place the result
1055 // in R. Return true if the operation overflowed.
1056 virtual bool wi_op_overflows (wide_int &r,
1057 tree type,
1058 const wide_int &,
1059 const wide_int &) const = 0;
1061 // Calculate the cross product of two sets of sub-ranges and return it.
1062 void wi_cross_product (irange &r, tree type,
1063 const wide_int &lh_lb,
1064 const wide_int &lh_ub,
1065 const wide_int &rh_lb,
1066 const wide_int &rh_ub) const;
1069 // Calculate the cross product of two sets of ranges and return it.
1071 // Multiplications, divisions and shifts are a bit tricky to handle,
1072 // depending on the mix of signs we have in the two ranges, we need to
1073 // operate on different values to get the minimum and maximum values
1074 // for the new range. One approach is to figure out all the
1075 // variations of range combinations and do the operations.
1077 // However, this involves several calls to compare_values and it is
1078 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
1079 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1) and then
1080 // figure the smallest and largest values to form the new range.
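// For example, a signed multiplication of [2,3] by [-1,4] computes the
// four products -2, 8, -3 and 12; after sorting, the result is [-3, 12].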
1082 void
1083 cross_product_operator::wi_cross_product (irange &r, tree type,
1084 const wide_int &lh_lb,
1085 const wide_int &lh_ub,
1086 const wide_int &rh_lb,
1087 const wide_int &rh_ub) const
1089 wide_int cp1, cp2, cp3, cp4;
1090 // Default to varying.
1091 r.set_varying (type);
1093 // Compute the 4 cross operations, bailing if we get an overflow we
1094 // can't handle.
1095 if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
1096 return;
1097 if (wi::eq_p (lh_lb, lh_ub))
1098 cp3 = cp1;
1099 else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
1100 return;
1101 if (wi::eq_p (rh_lb, rh_ub))
1102 cp2 = cp1;
1103 else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
1104 return;
1105 if (wi::eq_p (lh_lb, lh_ub))
1106 cp4 = cp2;
1107 else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
1108 return;
1110 // Order pairs.
1111 signop sign = TYPE_SIGN (type);
1112 if (wi::gt_p (cp1, cp2, sign))
1113 std::swap (cp1, cp2);
1114 if (wi::gt_p (cp3, cp4, sign))
1115 std::swap (cp3, cp4);
1117 // Choose min and max from the ordered pairs.
1118 wide_int res_lb = wi::min (cp1, cp3, sign);
1119 wide_int res_ub = wi::max (cp2, cp4, sign);
1120 value_range_with_overflow (r, type, res_lb, res_ub);
1124 class operator_mult : public cross_product_operator
1126 public:
1127 virtual void wi_fold (irange &r, tree type,
1128 const wide_int &lh_lb,
1129 const wide_int &lh_ub,
1130 const wide_int &rh_lb,
1131 const wide_int &rh_ub) const;
1132 virtual bool wi_op_overflows (wide_int &res, tree type,
1133 const wide_int &w0, const wide_int &w1) const;
1134 virtual bool op1_range (irange &r, tree type,
1135 const irange &lhs,
1136 const irange &op2) const;
1137 virtual bool op2_range (irange &r, tree type,
1138 const irange &lhs,
1139 const irange &op1) const;
1140 } op_mult;
1142 bool
1143 operator_mult::op1_range (irange &r, tree type,
1144 const irange &lhs, const irange &op2) const
1146 tree offset;
1148 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
1149 // For example: for 0 = OP1 * 2, OP1 could be 0 or 0x80, whereas
1150 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit).
1151 if (TYPE_OVERFLOW_WRAPS (type))
1152 return false;
1154 if (op2.singleton_p (&offset) && !integer_zerop (offset))
1155 return range_op_handler (TRUNC_DIV_EXPR, type)->fold_range (r, type,
1156 lhs, op2);
1157 return false;
1160 bool
1161 operator_mult::op2_range (irange &r, tree type,
1162 const irange &lhs, const irange &op1) const
1164 return operator_mult::op1_range (r, type, lhs, op1);
1167 bool
1168 operator_mult::wi_op_overflows (wide_int &res, tree type,
1169 const wide_int &w0, const wide_int &w1) const
1171 wi::overflow_type overflow = wi::OVF_NONE;
1172 signop sign = TYPE_SIGN (type);
1173 res = wi::mul (w0, w1, sign, &overflow);
1174 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
1176 // For multiplication, the sign of the overflow is given
1177 // by the comparison of the signs of the operands.
1178 if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
1179 res = wi::max_value (w0.get_precision (), sign);
1180 else
1181 res = wi::min_value (w0.get_precision (), sign);
1182 return false;
1184 return overflow;
1187 void
1188 operator_mult::wi_fold (irange &r, tree type,
1189 const wide_int &lh_lb, const wide_int &lh_ub,
1190 const wide_int &rh_lb, const wide_int &rh_ub) const
1192 if (TYPE_OVERFLOW_UNDEFINED (type))
1194 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
1195 return;
1198 // Multiply the ranges when overflow wraps. This is basically fancy
1199 // code so we don't drop to varying with an unsigned
1200 // [-3,-1]*[-3,-1].
1202 // This test requires 2*prec bits if both operands are signed and
1203 // 2*prec + 2 bits if either is not. Therefore, extend the values
1204 // using the sign of the result to PREC2. From here on out,
1205 // everything is just signed math no matter what the input types
1206 // were.
1208 signop sign = TYPE_SIGN (type);
1209 unsigned prec = TYPE_PRECISION (type);
1210 widest2_int min0 = widest2_int::from (lh_lb, sign);
1211 widest2_int max0 = widest2_int::from (lh_ub, sign);
1212 widest2_int min1 = widest2_int::from (rh_lb, sign);
1213 widest2_int max1 = widest2_int::from (rh_ub, sign);
1214 widest2_int sizem1 = wi::mask <widest2_int> (prec, false);
1215 widest2_int size = sizem1 + 1;
1217 // Canonicalize the intervals.
1218 if (sign == UNSIGNED)
1220 if (wi::ltu_p (size, min0 + max0))
1222 min0 -= size;
1223 max0 -= size;
1225 if (wi::ltu_p (size, min1 + max1))
1227 min1 -= size;
1228 max1 -= size;
1232 // Sort the 4 products so that min is in prod0 and max is in
1233 // prod3.
1234 widest2_int prod0 = min0 * min1;
1235 widest2_int prod1 = min0 * max1;
1236 widest2_int prod2 = max0 * min1;
1237 widest2_int prod3 = max0 * max1;
1239 // min0min1 > max0max1
1240 if (prod0 > prod3)
1241 std::swap (prod0, prod3);
1243 // min0max1 > max0min1
1244 if (prod1 > prod2)
1245 std::swap (prod1, prod2);
1247 if (prod0 > prod1)
1248 std::swap (prod0, prod1);
1250 if (prod2 > prod3)
1251 std::swap (prod2, prod3);
1253 // diff = max - min
1254 prod2 = prod3 - prod0;
1255 if (wi::geu_p (prod2, sizem1))
1256 // The range covers all values.
1257 r.set_varying (type);
1258 else
1260 wide_int new_lb = wide_int::from (prod0, prec, sign);
1261 wide_int new_ub = wide_int::from (prod3, prec, sign);
1262 create_possibly_reversed_range (r, type, new_lb, new_ub);
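// For example, the unsigned 8-bit multiplication [0xFD,0xFF] * [0xFD,0xFF]
// (i.e. [-3,-1] * [-3,-1] after canonicalization) folds to [1, 9] instead
// of dropping to varying.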
1267 class operator_div : public cross_product_operator
1269 public:
1270 operator_div (enum tree_code c) { code = c; }
1271 virtual void wi_fold (irange &r, tree type,
1272 const wide_int &lh_lb,
1273 const wide_int &lh_ub,
1274 const wide_int &rh_lb,
1275 const wide_int &rh_ub) const;
1276 virtual bool wi_op_overflows (wide_int &res, tree type,
1277 const wide_int &, const wide_int &) const;
1278 private:
1279 enum tree_code code;
1282 bool
1283 operator_div::wi_op_overflows (wide_int &res, tree type,
1284 const wide_int &w0, const wide_int &w1) const
1286 if (w1 == 0)
1287 return true;
1289 wi::overflow_type overflow = wi::OVF_NONE;
1290 signop sign = TYPE_SIGN (type);
1292 switch (code)
1294 case EXACT_DIV_EXPR:
1295 // EXACT_DIV_EXPR is implemented as TRUNC_DIV_EXPR in
1296 // operator_exact_divide. No need to handle it here.
1297 gcc_unreachable ();
1298 break;
1299 case TRUNC_DIV_EXPR:
1300 res = wi::div_trunc (w0, w1, sign, &overflow);
1301 break;
1302 case FLOOR_DIV_EXPR:
1303 res = wi::div_floor (w0, w1, sign, &overflow);
1304 break;
1305 case ROUND_DIV_EXPR:
1306 res = wi::div_round (w0, w1, sign, &overflow);
1307 break;
1308 case CEIL_DIV_EXPR:
1309 res = wi::div_ceil (w0, w1, sign, &overflow);
1310 break;
1311 default:
1312 gcc_unreachable ();
1315 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
1317 // For division, the only case is -INF / -1 = +INF.
1318 res = wi::max_value (w0.get_precision (), sign);
1319 return false;
1321 return overflow;
1324 void
1325 operator_div::wi_fold (irange &r, tree type,
1326 const wide_int &lh_lb, const wide_int &lh_ub,
1327 const wide_int &rh_lb, const wide_int &rh_ub) const
1329 // If we know we will divide by zero...
1330 if (rh_lb == 0 && rh_ub == 0)
1332 r.set_varying (type);
1333 return;
1336 const wide_int dividend_min = lh_lb;
1337 const wide_int dividend_max = lh_ub;
1338 const wide_int divisor_min = rh_lb;
1339 const wide_int divisor_max = rh_ub;
1340 signop sign = TYPE_SIGN (type);
1341 unsigned prec = TYPE_PRECISION (type);
1342 wide_int extra_min, extra_max;
1344 // If we know we won't divide by zero, just do the division.
1345 if (!wi_includes_zero_p (type, divisor_min, divisor_max))
1347 wi_cross_product (r, type, dividend_min, dividend_max,
1348 divisor_min, divisor_max);
1349 return;
1352 // If flag_non_call_exceptions, we must not eliminate a division by zero.
1353 if (cfun->can_throw_non_call_exceptions)
1355 r.set_varying (type);
1356 return;
1359 // If we're definitely dividing by zero, there's nothing to do.
1360 if (wi_zero_p (type, divisor_min, divisor_max))
1362 r.set_varying (type);
1363 return;
1366 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
1367 // skip any division by zero.
1369 // First divide by the negative numbers, if any.
1370 if (wi::neg_p (divisor_min, sign))
1371 wi_cross_product (r, type, dividend_min, dividend_max,
1372 divisor_min, wi::minus_one (prec));
1373 else
1374 r.set_undefined ();
1376 // Then divide by the non-zero positive numbers, if any.
1377 if (wi::gt_p (divisor_max, wi::zero (prec), sign))
1379 int_range_max tmp;
1380 wi_cross_product (tmp, type, dividend_min, dividend_max,
1381 wi::one (prec), divisor_max);
1382 r.union_ (tmp);
1384 // We shouldn't still have undefined here.
1385 gcc_checking_assert (!r.undefined_p ());
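// For example, a signed TRUNC_DIV_EXPR of [10,20] by [-2,2] is computed
// as [10,20] / [-2,-1] = [-20,-5] unioned with [10,20] / [1,2] = [5,20],
// giving [-20,-5][5,20].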
1388 operator_div op_trunc_div (TRUNC_DIV_EXPR);
1389 operator_div op_floor_div (FLOOR_DIV_EXPR);
1390 operator_div op_round_div (ROUND_DIV_EXPR);
1391 operator_div op_ceil_div (CEIL_DIV_EXPR);
1394 class operator_exact_divide : public operator_div
1396 public:
1397 operator_exact_divide () : operator_div (TRUNC_DIV_EXPR) { }
1398 virtual bool op1_range (irange &r, tree type,
1399 const irange &lhs,
1400 const irange &op2) const;
1402 } op_exact_div;
1404 bool
1405 operator_exact_divide::op1_range (irange &r, tree type,
1406 const irange &lhs,
1407 const irange &op2) const
1409 tree offset;
1410 // [2, 4] = op1 / [3,3] since it's an exact divide, no need to worry about
1411 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
1412 // We won't bother trying to enumerate all the in-between values :-P
1413 // The true accuracy is [6,6][9,9][12,12]. This is unlikely to matter most of
1414 // the time however.
1415 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
1416 if (op2.singleton_p (&offset)
1417 && !integer_zerop (offset))
1418 return range_op_handler (MULT_EXPR, type)->fold_range (r, type, lhs, op2);
1419 return false;
1423 class operator_lshift : public cross_product_operator
1425 public:
1426 virtual bool op1_range (irange &r, tree type,
1427 const irange &lhs,
1428 const irange &op2) const;
1429 virtual bool fold_range (irange &r, tree type,
1430 const irange &op1,
1431 const irange &op2) const;
1433 virtual void wi_fold (irange &r, tree type,
1434 const wide_int &lh_lb, const wide_int &lh_ub,
1435 const wide_int &rh_lb, const wide_int &rh_ub) const;
1436 virtual bool wi_op_overflows (wide_int &res,
1437 tree type,
1438 const wide_int &,
1439 const wide_int &) const;
1440 } op_lshift;
1442 class operator_rshift : public cross_product_operator
1444 public:
1445 virtual bool fold_range (irange &r, tree type,
1446 const irange &op1,
1447 const irange &op2) const;
1448 virtual void wi_fold (irange &r, tree type,
1449 const wide_int &lh_lb,
1450 const wide_int &lh_ub,
1451 const wide_int &rh_lb,
1452 const wide_int &rh_ub) const;
1453 virtual bool wi_op_overflows (wide_int &res,
1454 tree type,
1455 const wide_int &w0,
1456 const wide_int &w1) const;
1457 virtual bool op1_range (irange &, tree type,
1458 const irange &lhs,
1459 const irange &op2) const;
1460 } op_rshift;
1463 bool
1464 operator_lshift::fold_range (irange &r, tree type,
1465 const irange &op1,
1466 const irange &op2) const
1468 if (undefined_shift_range_check (r, type, op2))
1469 return true;
1471 // Transform left shifts by constants into multiplies.
1472 if (op2.singleton_p ())
1474 unsigned shift = op2.lower_bound ().to_uhwi ();
1475 wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type));
1476 int_range<1> mult (type, tmp, tmp);
1478 // Force wrapping multiplication.
1479 bool saved_flag_wrapv = flag_wrapv;
1480 bool saved_flag_wrapv_pointer = flag_wrapv_pointer;
1481 flag_wrapv = 1;
1482 flag_wrapv_pointer = 1;
1483 bool b = op_mult.fold_range (r, type, op1, mult);
1484 flag_wrapv = saved_flag_wrapv;
1485 flag_wrapv_pointer = saved_flag_wrapv_pointer;
1486 return b;
1488 else
1489 // Otherwise, invoke the generic fold routine.
1490 return range_operator::fold_range (r, type, op1, op2);
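// For example, [1,3] << [2,2] is folded as the wrapping multiplication
// [1,3] * [4,4], giving [4,12].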
1493 void
1494 operator_lshift::wi_fold (irange &r, tree type,
1495 const wide_int &lh_lb, const wide_int &lh_ub,
1496 const wide_int &rh_lb, const wide_int &rh_ub) const
1498 signop sign = TYPE_SIGN (type);
1499 unsigned prec = TYPE_PRECISION (type);
1500 int overflow_pos = sign == SIGNED ? prec - 1 : prec;
1501 int bound_shift = overflow_pos - rh_ub.to_shwi ();
1502 // If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
1503 // overflow. However, for that to happen, rh.max needs to be zero,
1504 // which means rh is a singleton range of zero, which means it
1505 // should be handled by the lshift fold_range above.
1506 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
1507 wide_int complement = ~(bound - 1);
1508 wide_int low_bound, high_bound;
1509 bool in_bounds = false;
1511 if (sign == UNSIGNED)
1513 low_bound = bound;
1514 high_bound = complement;
1515 if (wi::ltu_p (lh_ub, low_bound))
1517 // [5, 6] << [1, 2] == [10, 24].
1518 // We're shifting out only zeroes, the value increases
1519 // monotonically.
1520 in_bounds = true;
1522 else if (wi::ltu_p (high_bound, lh_lb))
1524 // [0xffffff00, 0xffffffff] << [1, 2]
1525 // == [0xfffffc00, 0xfffffffe].
1526 // We're shifting out only ones, the value decreases
1527 // monotonically.
1528 in_bounds = true;
1531 else
1533 // [-1, 1] << [1, 2] == [-4, 4]
1534 low_bound = complement;
1535 high_bound = bound;
1536 if (wi::lts_p (lh_ub, high_bound)
1537 && wi::lts_p (low_bound, lh_lb))
1539 // For non-negative numbers, we're shifting out only zeroes,
1540 // the value increases monotonically. For negative numbers,
1541 // we're shifting out only ones, the value decreases
1542 // monotonically.
1543 in_bounds = true;
1547 if (in_bounds)
1548 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
1549 else
1550 r.set_varying (type);
1553 bool
1554 operator_lshift::wi_op_overflows (wide_int &res, tree type,
1555 const wide_int &w0, const wide_int &w1) const
1557 signop sign = TYPE_SIGN (type);
1558 if (wi::neg_p (w1))
1560 // It's unclear from the C standard whether shifts can overflow.
1561 // The following code ignores overflow; perhaps a C standard
1562 // interpretation ruling is needed.
1563 res = wi::rshift (w0, -w1, sign);
1565 else
1566 res = wi::lshift (w0, w1);
1567 return false;
1570 bool
1571 operator_lshift::op1_range (irange &r,
1572 tree type,
1573 const irange &lhs,
1574 const irange &op2) const
1576 tree shift_amount;
1577 if (op2.singleton_p (&shift_amount))
1579 wide_int shift = wi::to_wide (shift_amount);
1580 if (wi::lt_p (shift, 0, SIGNED))
1581 return false;
1582 if (wi::ge_p (shift, wi::uhwi (TYPE_PRECISION (type),
1583 TYPE_PRECISION (op2.type ())),
1584 UNSIGNED))
1585 return false;
1586 if (shift == 0)
1588 r = lhs;
1589 return true;
1592 // Work completely in unsigned mode to start.
1593 tree utype = type;
1594 if (TYPE_SIGN (type) == SIGNED)
1596 int_range_max tmp = lhs;
1597 utype = unsigned_type_for (type);
1598 range_cast (tmp, utype);
1599 op_rshift.fold_range (r, utype, tmp, op2);
1601 else
1602 op_rshift.fold_range (r, utype, lhs, op2);
1604 // Start with ranges which can produce the LHS by right shifting the
1605 // result by the shift amount.
1606 // ie [0x08, 0xF0] = op1 << 2 will start with
1607 // [00001000, 11110000] = op1 << 2
1608 // [0x02, 0x3C] aka [00000010, 00111100]
1610 // Then create a range from the LB with the least significant upper bit
1611 // set, to the upper bound with all the bits set.
1612 // This would be [0x42, 0xFC] aka [01000010, 11111100].
1614 // Ideally we do this for each subrange, but just lump them all for now.
1615 unsigned low_bits = TYPE_PRECISION (utype)
1616 - TREE_INT_CST_LOW (shift_amount);
1617 wide_int up_mask = wi::mask (low_bits, true, TYPE_PRECISION (utype));
1618 wide_int new_ub = wi::bit_or (up_mask, r.upper_bound ());
1619 wide_int new_lb = wi::set_bit (r.lower_bound (), low_bits);
1620 int_range<2> fill_range (utype, new_lb, new_ub);
1621 r.union_ (fill_range);
1623 if (utype != type)
1624 range_cast (r, type);
1625 return true;
1627 return false;
1630 bool
1631 operator_rshift::op1_range (irange &r,
1632 tree type,
1633 const irange &lhs,
1634 const irange &op2) const
1636 tree shift;
1637 if (op2.singleton_p (&shift))
1639 // Ignore nonsensical shifts.
1640 unsigned prec = TYPE_PRECISION (type);
1641 if (wi::ge_p (wi::to_wide (shift),
1642 wi::uhwi (prec, TYPE_PRECISION (TREE_TYPE (shift))),
1643 UNSIGNED))
1644 return false;
1645 if (wi::to_wide (shift) == 0)
1647 r = lhs;
1648 return true;
1651 // Folding the original operation may discard some impossible
1652 // ranges from the LHS.
1653 int_range_max lhs_refined;
1654 op_rshift.fold_range (lhs_refined, type, int_range<1> (type), op2);
1655 lhs_refined.intersect (lhs);
1656 if (lhs_refined.undefined_p ())
1658 r.set_undefined ();
1659 return true;
1661 int_range_max shift_range (shift, shift);
1662 int_range_max lb, ub;
1663 op_lshift.fold_range (lb, type, lhs_refined, shift_range);
1664 // LHS
1665 // 0000 0111 = OP1 >> 3
1667 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
1668 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
1669 // right hand side (0x07).
1670 tree mask = fold_build1 (BIT_NOT_EXPR, type,
1671 fold_build2 (LSHIFT_EXPR, type,
1672 build_minus_one_cst (type),
1673 shift));
1674 int_range_max mask_range (build_zero_cst (type), mask);
1675 op_plus.fold_range (ub, type, lb, mask_range);
1676 r = lb;
1677 r.union_ (ub);
1678 if (!lhs_refined.contains_p (build_zero_cst (type)))
1680 mask_range.invert ();
1681 r.intersect (mask_range);
1683 return true;
1685 return false;
1688 bool
1689 operator_rshift::wi_op_overflows (wide_int &res,
1690 tree type,
1691 const wide_int &w0,
1692 const wide_int &w1) const
1694 signop sign = TYPE_SIGN (type);
1695 if (wi::neg_p (w1))
1696 res = wi::lshift (w0, -w1);
1697 else
1699 // It's unclear from the C standard whether shifts can overflow.
1700 // The following code ignores overflow; perhaps a C standard
1701 // interpretation ruling is needed.
1702 res = wi::rshift (w0, w1, sign);
1704 return false;
1707 bool
1708 operator_rshift::fold_range (irange &r, tree type,
1709 const irange &op1,
1710 const irange &op2) const
1712 // Invoke the generic fold routine if not undefined.
1713 if (undefined_shift_range_check (r, type, op2))
1714 return true;
1716 return range_operator::fold_range (r, type, op1, op2);
1719 void
1720 operator_rshift::wi_fold (irange &r, tree type,
1721 const wide_int &lh_lb, const wide_int &lh_ub,
1722 const wide_int &rh_lb, const wide_int &rh_ub) const
1724 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
1728 class operator_cast: public range_operator
1730 public:
1731 virtual bool fold_range (irange &r, tree type,
1732 const irange &op1,
1733 const irange &op2) const;
1734 virtual bool op1_range (irange &r, tree type,
1735 const irange &lhs,
1736 const irange &op2) const;
1737 private:
1738 bool truncating_cast_p (const irange &inner, const irange &outer) const;
1739 bool inside_domain_p (const wide_int &min, const wide_int &max,
1740 const irange &outer) const;
1741 void fold_pair (irange &r, unsigned index, const irange &inner,
1742 const irange &outer) const;
1743 } op_convert;
1745 // Return TRUE if casting from INNER to OUTER is a truncating cast.
1747 inline bool
1748 operator_cast::truncating_cast_p (const irange &inner,
1749 const irange &outer) const
1751 return TYPE_PRECISION (outer.type ()) < TYPE_PRECISION (inner.type ());
1754 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
1756 bool
1757 operator_cast::inside_domain_p (const wide_int &min,
1758 const wide_int &max,
1759 const irange &range) const
1761 wide_int domain_min = wi::to_wide (vrp_val_min (range.type ()));
1762 wide_int domain_max = wi::to_wide (vrp_val_max (range.type ()));
1763 signop domain_sign = TYPE_SIGN (range.type ());
1764 return (wi::le_p (min, domain_max, domain_sign)
1765 && wi::le_p (max, domain_max, domain_sign)
1766 && wi::ge_p (min, domain_min, domain_sign)
1767 && wi::ge_p (max, domain_min, domain_sign));
1771 // Helper for fold_range that works on one pair at a time.
1773 void
1774 operator_cast::fold_pair (irange &r, unsigned index,
1775 const irange &inner,
1776 const irange &outer) const
1778 tree inner_type = inner.type ();
1779 tree outer_type = outer.type ();
1780 signop inner_sign = TYPE_SIGN (inner_type);
1781 unsigned outer_prec = TYPE_PRECISION (outer_type);
1783 // Check to see if casting from INNER to OUTER is a conversion that
1784 // fits in the resulting OUTER type.
1785 wide_int inner_lb = inner.lower_bound (index);
1786 wide_int inner_ub = inner.upper_bound (index);
1787 if (truncating_cast_p (inner, outer))
1789 // We may be able to accommodate a truncating cast if the
1790 // resulting range can be represented in the target type...
1791 if (wi::rshift (wi::sub (inner_ub, inner_lb),
1792 wi::uhwi (outer_prec, TYPE_PRECISION (inner.type ())),
1793 inner_sign) != 0)
1795 r.set_varying (outer_type);
1796 return;
1799 // ...but we must still verify that the final range fits in the
1800 // domain. This catches -fstrict-enum restrictions where the domain
1801 // range is smaller than what fits in the underlying type.
1802 wide_int min = wide_int::from (inner_lb, outer_prec, inner_sign);
1803 wide_int max = wide_int::from (inner_ub, outer_prec, inner_sign);
1804 if (inside_domain_p (min, max, outer))
1805 create_possibly_reversed_range (r, outer_type, min, max);
1806 else
1807 r.set_varying (outer_type);
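// For example, truncating a 32-bit int range of [260, 270] to unsigned
// char keeps the pair because its span (10) fits in 8 bits; the bounds
// are rewritten modulo 256, giving [4, 14].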
1811 bool
1812 operator_cast::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
1813 const irange &inner,
1814 const irange &outer) const
1816 if (empty_range_varying (r, type, inner, outer))
1817 return true;
1819 gcc_checking_assert (outer.varying_p ());
1820 gcc_checking_assert (inner.num_pairs () > 0);
1822 // Avoid a temporary by folding the first pair directly into the result.
1823 fold_pair (r, 0, inner, outer);
1825 // Then process any additional pairs by unioning with their results.
1826 for (unsigned x = 1; x < inner.num_pairs (); ++x)
1828 int_range_max tmp;
1829 fold_pair (tmp, x, inner, outer);
1830 r.union_ (tmp);
1831 if (r.varying_p ())
1832 return true;
1834 return true;
1837 bool
1838 operator_cast::op1_range (irange &r, tree type,
1839 const irange &lhs,
1840 const irange &op2) const
1842 tree lhs_type = lhs.type ();
1843 gcc_checking_assert (types_compatible_p (op2.type(), type));
1845 if (truncating_cast_p (op2, lhs))
1847 if (lhs.varying_p ())
1848 r.set_varying (type);
1849 else
1851 // We want to insert the LHS as an unsigned value since it
1852 // would not trigger the signed bit of the larger type.
1853 int_range_max converted_lhs = lhs;
1854 range_cast (converted_lhs, unsigned_type_for (lhs_type));
1855 range_cast (converted_lhs, type);
1856 // Start by building the positive signed outer range for the type.
1857 wide_int lim = wi::set_bit_in_zero (TYPE_PRECISION (lhs_type),
1858 TYPE_PRECISION (type));
1859 r = int_range<1> (type, lim, wi::max_value (TYPE_PRECISION (type),
1860 SIGNED));
1861 // For the signed part, we need to simply union the 2 ranges now.
1862 r.union_ (converted_lhs);
1864 // Create maximal negative number outside of LHS bits.
1865 lim = wi::mask (TYPE_PRECISION (lhs_type), true,
1866 TYPE_PRECISION (type));
1867 // Add this to the unsigned LHS range(s).
1868 int_range_max lim_range (type, lim, lim);
1869 int_range_max lhs_neg;
1870 range_op_handler (PLUS_EXPR, type)->fold_range (lhs_neg,
1871 type,
1872 converted_lhs,
1873 lim_range);
1874 // lhs_neg now has all the negative versions of the LHS.
1875 // Now union in all the values from SIGNED MIN (0x80000) to
1876 // lim-1 in order to fill in all the ranges with the upper
1877 // bits set.
1879 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
1880 // we don't need to create a range from min to lim-1; calculating the
1881 // negative range would trap trying to create [lim, lim - 1].
1882 wide_int min_val = wi::min_value (TYPE_PRECISION (type), SIGNED);
1883 if (lim != min_val)
1885 int_range_max neg (type,
1886 wi::min_value (TYPE_PRECISION (type),
1887 SIGNED),
1888 lim - 1);
1889 lhs_neg.union_ (neg);
1891 // And finally, munge the signed and unsigned portions.
1892 r.union_ (lhs_neg);
1894 // And intersect with any known value passed in the extra operand.
1895 r.intersect (op2);
1896 return true;
1899 int_range_max tmp;
1900 if (TYPE_PRECISION (lhs_type) == TYPE_PRECISION (type))
1901 tmp = lhs;
1902 else
1904 // The cast is not truncating, and the range is restricted to
1905 // the range of the RHS by this assignment.
1907 // Cast the range of the RHS to the type of the LHS.
1908 fold_range (tmp, lhs_type, int_range<1> (type), int_range<1> (lhs_type));
1909 // Intersecting this with the LHS range produces the result range,
1910 // which will be cast to the RHS type before returning.
1911 tmp.intersect (lhs);
1914 // Cast the calculated range to the type of the RHS.
1915 fold_range (r, type, tmp, int_range<1> (type));
1916 return true;
1920 class operator_logical_and : public range_operator
1922 public:
1923 virtual bool fold_range (irange &r, tree type,
1924 const irange &lh,
1925 const irange &rh) const;
1926 virtual bool op1_range (irange &r, tree type,
1927 const irange &lhs,
1928 const irange &op2) const;
1929 virtual bool op2_range (irange &r, tree type,
1930 const irange &lhs,
1931 const irange &op1) const;
1932 } op_logical_and;
1935 bool
1936 operator_logical_and::fold_range (irange &r, tree type,
1937 const irange &lh,
1938 const irange &rh) const
1940 if (empty_range_varying (r, type, lh, rh))
1941 return true;
1943 // 0 && anything is 0.
1944 if ((wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (lh.upper_bound (), 0))
1945 || (wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (rh.upper_bound (), 0)))
1946 r = range_false (type);
1947 else if (lh.contains_p (build_zero_cst (lh.type ()))
1948 || rh.contains_p (build_zero_cst (rh.type ())))
1949 // To reach this point, there must be a logical 1 on each side, and
1950 // the only remaining question is whether there is a zero or not.
1951 r = range_true_and_false (type);
1952 else
1953 r = range_true (type);
1954 return true;
1957 bool
1958 operator_logical_and::op1_range (irange &r, tree type,
1959 const irange &lhs,
1960 const irange &op2 ATTRIBUTE_UNUSED) const
1962 switch (get_bool_state (r, lhs, type))
1964 case BRS_TRUE:
1965 // A true result means both sides of the AND must be true.
1966 r = range_true (type);
1967 break;
1968 default:
1969 // Any other result means only one side has to be false; the
1970 // other side can be anything. So we cannot be sure of any
1971 // result here.
1972 r = range_true_and_false (type);
1973 break;
1975 return true;
1978 bool
1979 operator_logical_and::op2_range (irange &r, tree type,
1980 const irange &lhs,
1981 const irange &op1) const
1983 return operator_logical_and::op1_range (r, type, lhs, op1);
1987 class operator_bitwise_and : public range_operator
1989 public:
1990 virtual bool fold_range (irange &r, tree type,
1991 const irange &lh,
1992 const irange &rh) const;
1993 virtual bool op1_range (irange &r, tree type,
1994 const irange &lhs,
1995 const irange &op2) const;
1996 virtual bool op2_range (irange &r, tree type,
1997 const irange &lhs,
1998 const irange &op1) const;
1999 virtual void wi_fold (irange &r, tree type,
2000 const wide_int &lh_lb,
2001 const wide_int &lh_ub,
2002 const wide_int &rh_lb,
2003 const wide_int &rh_ub) const;
2004 private:
2005 void simple_op1_range_solver (irange &r, tree type,
2006 const irange &lhs,
2007 const irange &op2) const;
2008 void remove_impossible_ranges (irange &r, const irange &rh) const;
2009 } op_bitwise_and;
2011 static bool
2012 unsigned_singleton_p (const irange &op)
2014 tree mask;
2015 if (op.singleton_p (&mask))
2017 wide_int x = wi::to_wide (mask);
2018 return wi::ge_p (x, 0, TYPE_SIGN (op.type ()));
2020 return false;
2023 // Remove any ranges from R that are known to be impossible when a
2024 // range is ANDed with MASK.
2026 void
2027 operator_bitwise_and::remove_impossible_ranges (irange &r,
2028 const irange &rmask) const
2030 if (r.undefined_p () || !unsigned_singleton_p (rmask))
2031 return;
2033 wide_int mask = rmask.lower_bound ();
2034 tree type = r.type ();
2035 int prec = TYPE_PRECISION (type);
2036 int leading_zeros = wi::clz (mask);
2037 int_range_max impossible_ranges;
2039 /* We know that starting at the most significant bit, any 0 in the
2040 mask means the resulting range cannot contain a 1 in that same
2041 position. This means the following ranges are impossible:
2043 x & 0b1001 1010
2044 IMPOSSIBLE RANGES
2045 01xx xxxx [0100 0000, 0111 1111]
2046 001x xxxx [0010 0000, 0011 1111]
2047 0000 01xx [0000 0100, 0000 0111]
2048 0000 0001 [0000 0001, 0000 0001]
2050 wide_int one = wi::one (prec);
2051 for (int i = 0; i < prec - leading_zeros - 1; ++i)
2052 if (wi::bit_and (mask, wi::lshift (one, wi::uhwi (i, prec))) == 0)
2054 tree lb = fold_build2 (LSHIFT_EXPR, type,
2055 build_one_cst (type),
2056 build_int_cst (type, i));
2057 tree ub_left = fold_build1 (BIT_NOT_EXPR, type,
2058 fold_build2 (LSHIFT_EXPR, type,
2059 build_minus_one_cst (type),
2060 build_int_cst (type, i)));
2061 tree ub_right = fold_build2 (LSHIFT_EXPR, type,
2062 build_one_cst (type),
2063 build_int_cst (type, i));
2064 tree ub = fold_build2 (BIT_IOR_EXPR, type, ub_left, ub_right);
2065 impossible_ranges.union_ (int_range<1> (lb, ub));
2067 if (!impossible_ranges.undefined_p ())
2069 impossible_ranges.invert ();
2070 r.intersect (impossible_ranges);
2074 bool
2075 operator_bitwise_and::fold_range (irange &r, tree type,
2076 const irange &lh,
2077 const irange &rh) const
2079 if (range_operator::fold_range (r, type, lh, rh))
2081 // FIXME: This is temporarily disabled because, though it
2082 // generates better ranges, it's noticeably slower for evrp.
2083 // remove_impossible_ranges (r, rh);
2084 return true;
2086 return false;
2090 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
2091 // possible. Basically, see if we can optimize:
2093 // [LB, UB] op Z
2094 // into:
2095 // [LB op Z, UB op Z]
2097 // If the optimization was successful, accumulate the range in R and
2098 // return TRUE.
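// For example, [0x12, 0x34] & 0xf0 folds to [0x10, 0x30], and
// [0x12, 0x34] | 0x0f folds to [0x1f, 0x3f].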
2100 static bool
2101 wi_optimize_and_or (irange &r,
2102 enum tree_code code,
2103 tree type,
2104 const wide_int &lh_lb, const wide_int &lh_ub,
2105 const wide_int &rh_lb, const wide_int &rh_ub)
2107 // Calculate the singleton mask among the ranges, if any.
2108 wide_int lower_bound, upper_bound, mask;
2109 if (wi::eq_p (rh_lb, rh_ub))
2111 mask = rh_lb;
2112 lower_bound = lh_lb;
2113 upper_bound = lh_ub;
2115 else if (wi::eq_p (lh_lb, lh_ub))
2117 mask = lh_lb;
2118 lower_bound = rh_lb;
2119 upper_bound = rh_ub;
2121 else
2122 return false;
2124 // If Z is a constant which (for op &) or whose bitwise not (for
2125 // op |) has n consecutive least significant bits cleared followed
2126 // by m consecutive 1 bits set immediately above them, and either
2127 // m + n == precision or (LB >> (m + n)) == (UB >> (m + n)), then
2129 // the least significant n bits of every value in the result are
2130 // cleared or set, the m bits above them are preserved, and any
2131 // bits above these are the same for all values in the range, so
2132 // the result can be computed as [LB op Z, UB op Z].
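// For example, with op == BIT_AND_EXPR and Z == 0xf0, n == 4 and
// m == 4, so the check below only requires the bits above bit 7 to
// be identical in LB and UB.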
2133 wide_int w = mask;
2134 int m = 0, n = 0;
2135 if (code == BIT_IOR_EXPR)
2136 w = ~w;
2137 if (wi::eq_p (w, 0))
2138 n = w.get_precision ();
2139 else
2141 n = wi::ctz (w);
2142 w = ~(w | wi::mask (n, false, w.get_precision ()));
2143 if (wi::eq_p (w, 0))
2144 m = w.get_precision () - n;
2145 else
2146 m = wi::ctz (w) - n;
2148 wide_int new_mask = wi::mask (m + n, true, w.get_precision ());
2149 if ((new_mask & lower_bound) != (new_mask & upper_bound))
2150 return false;
2152 wide_int res_lb, res_ub;
2153 if (code == BIT_AND_EXPR)
2155 res_lb = wi::bit_and (lower_bound, mask);
2156 res_ub = wi::bit_and (upper_bound, mask);
2158 else if (code == BIT_IOR_EXPR)
2160 res_lb = wi::bit_or (lower_bound, mask);
2161 res_ub = wi::bit_or (upper_bound, mask);
2163 else
2164 gcc_unreachable ();
2165 value_range_with_overflow (r, type, res_lb, res_ub);
2166 return true;
2169 // For range [LB, UB] compute two wide_int bit masks.
2171 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
2172 // for all numbers in the range the bit is 0, otherwise it might be 0
2173 // or 1.
2175 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
2176 // for all numbers in the range the bit is 1, otherwise it might be 0
2177 // or 1.
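// For example, for the range [4, 7] these work out to
// MAYBE_NONZERO == 0b111 and MUSTBE_NONZERO == 0b100: bit 2 is set
// in every value, while bits 0 and 1 may be either 0 or 1.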
2179 void
2180 wi_set_zero_nonzero_bits (tree type,
2181 const wide_int &lb, const wide_int &ub,
2182 wide_int &maybe_nonzero,
2183 wide_int &mustbe_nonzero)
2185 signop sign = TYPE_SIGN (type);
2187 if (wi::eq_p (lb, ub))
2188 maybe_nonzero = mustbe_nonzero = lb;
2189 else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
2191 wide_int xor_mask = lb ^ ub;
2192 maybe_nonzero = lb | ub;
2193 mustbe_nonzero = lb & ub;
2194 if (xor_mask != 0)
2196 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
2197 maybe_nonzero.get_precision ());
2198 maybe_nonzero = maybe_nonzero | mask;
2199 mustbe_nonzero = wi::bit_and_not (mustbe_nonzero, mask);
2202 else
2204 maybe_nonzero = wi::minus_one (lb.get_precision ());
2205 mustbe_nonzero = wi::zero (lb.get_precision ());
2209 void
2210 operator_bitwise_and::wi_fold (irange &r, tree type,
2211 const wide_int &lh_lb,
2212 const wide_int &lh_ub,
2213 const wide_int &rh_lb,
2214 const wide_int &rh_ub) const
2216 if (wi_optimize_and_or (r, BIT_AND_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
2217 return;
2219 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
2220 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
2221 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
2222 maybe_nonzero_lh, mustbe_nonzero_lh);
2223 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
2224 maybe_nonzero_rh, mustbe_nonzero_rh);
2226 wide_int new_lb = mustbe_nonzero_lh & mustbe_nonzero_rh;
2227 wide_int new_ub = maybe_nonzero_lh & maybe_nonzero_rh;
2228 signop sign = TYPE_SIGN (type);
2229 unsigned prec = TYPE_PRECISION (type);
2230 // If both input ranges contain only negative values, we can
2231 // truncate the result range maximum to the minimum of the
2232 // input range maxima.
2233 if (wi::lt_p (lh_ub, 0, sign) && wi::lt_p (rh_ub, 0, sign))
2235 new_ub = wi::min (new_ub, lh_ub, sign);
2236 new_ub = wi::min (new_ub, rh_ub, sign);
2238 // If either input range contains only non-negative values
2239 // we can truncate the result range maximum to the respective
2240 // maximum of the input range.
2241 if (wi::ge_p (lh_lb, 0, sign))
2242 new_ub = wi::min (new_ub, lh_ub, sign);
2243 if (wi::ge_p (rh_lb, 0, sign))
2244 new_ub = wi::min (new_ub, rh_ub, sign);
2245 // PR68217: The AND of a signed value and a sign-bit constant should
2246 // result in [-INF, 0] instead of [-INF, INF].
2247 if (wi::gt_p (new_lb, new_ub, sign))
2249 wide_int sign_bit = wi::set_bit_in_zero (prec - 1, prec);
2250 if (sign == SIGNED
2251 && ((wi::eq_p (lh_lb, lh_ub)
2252 && !wi::cmps (lh_lb, sign_bit))
2253 || (wi::eq_p (rh_lb, rh_ub)
2254 && !wi::cmps (rh_lb, sign_bit))))
2256 new_lb = wi::min_value (prec, sign);
2257 new_ub = wi::zero (prec);
2260 // If the limits got swapped around, return varying.
2261 if (wi::gt_p (new_lb, new_ub, sign))
2262 r.set_varying (type);
2263 else
2264 value_range_with_overflow (r, type, new_lb, new_ub);
2267 static void
2268 set_nonzero_range_from_mask (irange &r, tree type, const irange &lhs)
2270 if (!lhs.contains_p (build_zero_cst (type)))
2271 r = range_nonzero (type);
2272 else
2273 r.set_varying (type);
2276 // This was shamelessly stolen from register_edge_assert_for_2 and
2277 // adjusted to work with iranges.
2279 void
2280 operator_bitwise_and::simple_op1_range_solver (irange &r, tree type,
2281 const irange &lhs,
2282 const irange &op2) const
2284 if (!op2.singleton_p ())
2286 set_nonzero_range_from_mask (r, type, lhs);
2287 return;
2289 unsigned int nprec = TYPE_PRECISION (type);
2290 wide_int cst2v = op2.lower_bound ();
2291 bool cst2n = wi::neg_p (cst2v, TYPE_SIGN (type));
2292 wide_int sgnbit;
2293 if (cst2n)
2294 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2295 else
2296 sgnbit = wi::zero (nprec);
2298 // Solve [lhs.lower_bound (), +INF] = x & MASK.
2300 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
2301 // maximum unsigned value is ~0. For signed comparison, if CST2
2302 // doesn't have the most significant bit set, handle it similarly. If
2303 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
2304 wide_int valv = lhs.lower_bound ();
2305 wide_int minv = valv & cst2v, maxv;
2306 bool we_know_nothing = false;
2307 if (minv != valv)
2309 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
2310 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2311 if (minv == valv)
2313 // If we can't determine anything on this bound, fall
2314 // through and conservatively solve for the other end point.
2315 we_know_nothing = true;
2318 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2319 if (we_know_nothing)
2320 r.set_varying (type);
2321 else
2322 r = int_range<1> (type, minv, maxv);
2324 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
2326 // Minimum unsigned value for <= is 0 and maximum unsigned value is
2327 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
2328 // VAL2 where
2329 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2330 // as maximum.
2331 // For signed comparison, if CST2 doesn't have most significant bit
2332 // set, handle it similarly. If CST2 has MSB set, the maximum is
2333 // the same and minimum is INT_MIN.
2334 valv = lhs.upper_bound ();
2335 minv = valv & cst2v;
2336 if (minv == valv)
2337 maxv = valv;
2338 else
2340 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2341 if (maxv == valv)
2343 // If we couldn't determine anything on either bound, return
2344 // undefined.
2345 if (we_know_nothing)
2346 r.set_undefined ();
2347 return;
2349 maxv -= 1;
2351 maxv |= ~cst2v;
2352 minv = sgnbit;
2353 int_range<1> upper_bits (type, minv, maxv);
2354 r.intersect (upper_bits);
2357 bool
2358 operator_bitwise_and::op1_range (irange &r, tree type,
2359 const irange &lhs,
2360 const irange &op2) const
2362 if (types_compatible_p (type, boolean_type_node))
2363 return op_logical_and.op1_range (r, type, lhs, op2);
2365 r.set_undefined ();
2366 for (unsigned i = 0; i < lhs.num_pairs (); ++i)
2368 int_range_max chunk (lhs.type (),
2369 lhs.lower_bound (i),
2370 lhs.upper_bound (i));
2371 int_range_max res;
2372 simple_op1_range_solver (res, type, chunk, op2);
2373 r.union_ (res);
2375 if (r.undefined_p ())
2376 set_nonzero_range_from_mask (r, type, lhs);
2377 return true;
2380 bool
2381 operator_bitwise_and::op2_range (irange &r, tree type,
2382 const irange &lhs,
2383 const irange &op1) const
2385 return operator_bitwise_and::op1_range (r, type, lhs, op1);
2389 class operator_logical_or : public range_operator
2391 public:
2392 virtual bool fold_range (irange &r, tree type,
2393 const irange &lh,
2394 const irange &rh) const;
2395 virtual bool op1_range (irange &r, tree type,
2396 const irange &lhs,
2397 const irange &op2) const;
2398 virtual bool op2_range (irange &r, tree type,
2399 const irange &lhs,
2400 const irange &op1) const;
2401 } op_logical_or;
2403 bool
2404 operator_logical_or::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
2405 const irange &lh,
2406 const irange &rh) const
2408 if (empty_range_varying (r, type, lh, rh))
2409 return true;
2411 r = lh;
2412 r.union_ (rh);
2413 return true;
2416 bool
2417 operator_logical_or::op1_range (irange &r, tree type,
2418 const irange &lhs,
2419 const irange &op2 ATTRIBUTE_UNUSED) const
2421 switch (get_bool_state (r, lhs, type))
2423 case BRS_FALSE:
2424 // A false result means both sides of the OR must be false.
2425 r = range_false (type);
2426 break;
2427 default:
2428 // Any other result means only one side has to be true, the
2429 // other side can be anything. So we can't be sure of any result
2430 // here.
2431 r = range_true_and_false (type);
2432 break;
2434 return true;
2437 bool
2438 operator_logical_or::op2_range (irange &r, tree type,
2439 const irange &lhs,
2440 const irange &op1) const
2442 return operator_logical_or::op1_range (r, type, lhs, op1);
2446 class operator_bitwise_or : public range_operator
2448 public:
2449 virtual bool op1_range (irange &r, tree type,
2450 const irange &lhs,
2451 const irange &op2) const;
2452 virtual bool op2_range (irange &r, tree type,
2453 const irange &lhs,
2454 const irange &op1) const;
2455 virtual void wi_fold (irange &r, tree type,
2456 const wide_int &lh_lb,
2457 const wide_int &lh_ub,
2458 const wide_int &rh_lb,
2459 const wide_int &rh_ub) const;
2460 } op_bitwise_or;
2462 void
2463 operator_bitwise_or::wi_fold (irange &r, tree type,
2464 const wide_int &lh_lb,
2465 const wide_int &lh_ub,
2466 const wide_int &rh_lb,
2467 const wide_int &rh_ub) const
2469 if (wi_optimize_and_or (r, BIT_IOR_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
2470 return;
2472 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
2473 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
2474 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
2475 maybe_nonzero_lh, mustbe_nonzero_lh);
2476 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
2477 maybe_nonzero_rh, mustbe_nonzero_rh);
2478 wide_int new_lb = mustbe_nonzero_lh | mustbe_nonzero_rh;
2479 wide_int new_ub = maybe_nonzero_lh | maybe_nonzero_rh;
2480 signop sign = TYPE_SIGN (type);
2481 // If the input ranges contain only positive values we can
2482 // truncate the minimum of the result range to the maximum
2483 // of the input range minima.
2484 if (wi::ge_p (lh_lb, 0, sign)
2485 && wi::ge_p (rh_lb, 0, sign))
2487 new_lb = wi::max (new_lb, lh_lb, sign);
2488 new_lb = wi::max (new_lb, rh_lb, sign);
2490 // If either input range contains only negative values
2491 // we can truncate the minimum of the result range to the
2492 // respective minimum range.
2493 if (wi::lt_p (lh_ub, 0, sign))
2494 new_lb = wi::max (new_lb, lh_lb, sign);
2495 if (wi::lt_p (rh_ub, 0, sign))
2496 new_lb = wi::max (new_lb, rh_lb, sign);
2497 // If the limits got swapped around, return varying.
2498 if (wi::gt_p (new_lb, new_ub, sign))
2499 r.set_varying (type);
2500 else
2501 value_range_with_overflow (r, type, new_lb, new_ub);
2504 bool
2505 operator_bitwise_or::op1_range (irange &r, tree type,
2506 const irange &lhs,
2507 const irange &op2) const
2509 // If this is really a logical OR, use the logical op1_range.
2510 if (types_compatible_p (type, boolean_type_node))
2511 return op_logical_or.op1_range (r, type, lhs, op2);
2513 if (lhs.zero_p ())
2515 tree zero = build_zero_cst (type);
2516 r = int_range<1> (zero, zero);
2517 return true;
2519 r.set_varying (type);
2520 return true;
2523 bool
2524 operator_bitwise_or::op2_range (irange &r, tree type,
2525 const irange &lhs,
2526 const irange &op1) const
2528 return operator_bitwise_or::op1_range (r, type, lhs, op1);
2532 class operator_bitwise_xor : public range_operator
2534 public:
2535 virtual void wi_fold (irange &r, tree type,
2536 const wide_int &lh_lb,
2537 const wide_int &lh_ub,
2538 const wide_int &rh_lb,
2539 const wide_int &rh_ub) const;
2540 virtual bool op1_range (irange &r, tree type,
2541 const irange &lhs,
2542 const irange &op2) const;
2543 virtual bool op2_range (irange &r, tree type,
2544 const irange &lhs,
2545 const irange &op1) const;
2546 } op_bitwise_xor;
2548 void
2549 operator_bitwise_xor::wi_fold (irange &r, tree type,
2550 const wide_int &lh_lb,
2551 const wide_int &lh_ub,
2552 const wide_int &rh_lb,
2553 const wide_int &rh_ub) const
2555 signop sign = TYPE_SIGN (type);
2556 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
2557 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
2558 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
2559 maybe_nonzero_lh, mustbe_nonzero_lh);
2560 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
2561 maybe_nonzero_rh, mustbe_nonzero_rh);
2563 wide_int result_zero_bits = ((mustbe_nonzero_lh & mustbe_nonzero_rh)
2564 | ~(maybe_nonzero_lh | maybe_nonzero_rh));
2565 wide_int result_one_bits
2566 = (wi::bit_and_not (mustbe_nonzero_lh, maybe_nonzero_rh)
2567 | wi::bit_and_not (mustbe_nonzero_rh, maybe_nonzero_lh));
2568 wide_int new_ub = ~result_zero_bits;
2569 wide_int new_lb = result_one_bits;
2571 // If the range has all positive or all negative values, the result
2572 // is better than VARYING.
2573 if (wi::lt_p (new_lb, 0, sign) || wi::ge_p (new_ub, 0, sign))
2574 value_range_with_overflow (r, type, new_lb, new_ub);
2575 else
2576 r.set_varying (type);
2579 bool
2580 operator_bitwise_xor::op1_range (irange &r, tree type,
2581 const irange &lhs,
2582 const irange &op2) const
2584 if (lhs.undefined_p () || lhs.varying_p ())
2586 r = lhs;
2587 return true;
2589 if (types_compatible_p (type, boolean_type_node))
2591 switch (get_bool_state (r, lhs, type))
2593 case BRS_TRUE:
2594 if (op2.varying_p ())
2595 r.set_varying (type);
2596 else if (op2.zero_p ())
2597 r = range_true (type);
2598 else
2599 r = range_false (type);
2600 break;
2601 case BRS_FALSE:
2602 r = op2;
2603 break;
2604 default:
2605 gcc_unreachable ();
2607 return true;
2609 r.set_varying (type);
2610 return true;
2613 bool
2614 operator_bitwise_xor::op2_range (irange &r, tree type,
2615 const irange &lhs,
2616 const irange &op1) const
2618 return operator_bitwise_xor::op1_range (r, type, lhs, op1);
2621 class operator_trunc_mod : public range_operator
2623 public:
2624 virtual void wi_fold (irange &r, tree type,
2625 const wide_int &lh_lb,
2626 const wide_int &lh_ub,
2627 const wide_int &rh_lb,
2628 const wide_int &rh_ub) const;
2629 } op_trunc_mod;
2631 void
2632 operator_trunc_mod::wi_fold (irange &r, tree type,
2633 const wide_int &lh_lb,
2634 const wide_int &lh_ub,
2635 const wide_int &rh_lb,
2636 const wide_int &rh_ub) const
2638 wide_int new_lb, new_ub, tmp;
2639 signop sign = TYPE_SIGN (type);
2640 unsigned prec = TYPE_PRECISION (type);
2642 // Mod 0 is undefined.
2643 if (wi_zero_p (type, rh_lb, rh_ub))
2645 r.set_varying (type);
2646 return;
2649 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
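// For example, an unconstrained signed A modulo B in [3, 5] works
// out to [-4, 4] below.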
2650 new_ub = rh_ub - 1;
2651 if (sign == SIGNED)
2653 tmp = -1 - rh_lb;
2654 new_ub = wi::smax (new_ub, tmp);
2657 if (sign == UNSIGNED)
2658 new_lb = wi::zero (prec);
2659 else
2661 new_lb = -new_ub;
2662 tmp = lh_lb;
2663 if (wi::gts_p (tmp, 0))
2664 tmp = wi::zero (prec);
2665 new_lb = wi::smax (new_lb, tmp);
2667 tmp = lh_ub;
2668 if (sign == SIGNED && wi::neg_p (tmp))
2669 tmp = wi::zero (prec);
2670 new_ub = wi::min (new_ub, tmp, sign);
2672 value_range_with_overflow (r, type, new_lb, new_ub);
2676 class operator_logical_not : public range_operator
2678 public:
2679 virtual bool fold_range (irange &r, tree type,
2680 const irange &lh,
2681 const irange &rh) const;
2682 virtual bool op1_range (irange &r, tree type,
2683 const irange &lhs,
2684 const irange &op2) const;
2685 } op_logical_not;
2687 // Folding a logical NOT, oddly enough, involves doing nothing on the
2688 // forward pass through. During the initial walk backwards, the
2689 // logical NOT reversed the desired outcome on the way back, so on the
2690 // way forward all we do is pass the range forward.
2692 // b_2 = x_1 < 20
2693 // b_3 = !b_2
2694 // if (b_3)
2695 // to determine the TRUE branch, walking backward
2696 // if (b_3) if ([1,1])
2697 // b_3 = !b_2 [1,1] = ![0,0]
2698 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
2699 // which is the result we are looking for.. so.. pass it through.
2701 bool
2702 operator_logical_not::fold_range (irange &r, tree type,
2703 const irange &lh,
2704 const irange &rh ATTRIBUTE_UNUSED) const
2706 if (empty_range_varying (r, type, lh, rh))
2707 return true;
2709 if (lh.varying_p () || lh.undefined_p ())
2710 r = lh;
2711 else
2713 r = lh;
2714 r.invert ();
2716 gcc_checking_assert (lh.type () == type);
2717 return true;
2720 bool
2721 operator_logical_not::op1_range (irange &r,
2722 tree type ATTRIBUTE_UNUSED,
2723 const irange &lhs,
2724 const irange &op2 ATTRIBUTE_UNUSED) const
2726 r = lhs;
2727 if (!lhs.varying_p () && !lhs.undefined_p ())
2728 r.invert ();
2729 return true;
2733 class operator_bitwise_not : public range_operator
2735 public:
2736 virtual bool fold_range (irange &r, tree type,
2737 const irange &lh,
2738 const irange &rh) const;
2739 virtual bool op1_range (irange &r, tree type,
2740 const irange &lhs,
2741 const irange &op2) const;
2742 } op_bitwise_not;
2744 bool
2745 operator_bitwise_not::fold_range (irange &r, tree type,
2746 const irange &lh,
2747 const irange &rh) const
2749 if (empty_range_varying (r, type, lh, rh))
2750 return true;
2752 // ~X is simply -1 - X.
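// For example, ~[5, 10] == [-1 - 10, -1 - 5] == [-11, -6].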
2753 int_range<1> minusone (type, wi::minus_one (TYPE_PRECISION (type)),
2754 wi::minus_one (TYPE_PRECISION (type)));
2755 return range_op_handler (MINUS_EXPR, type)->fold_range (r, type, minusone,
2756 lh);
2759 bool
2760 operator_bitwise_not::op1_range (irange &r, tree type,
2761 const irange &lhs,
2762 const irange &op2) const
2764 // ~X is -1 - X and since bitwise NOT is involutory... do it again.
2765 return fold_range (r, type, lhs, op2);
2769 class operator_cst : public range_operator
2771 public:
2772 virtual bool fold_range (irange &r, tree type,
2773 const irange &op1,
2774 const irange &op2) const;
2775 } op_integer_cst;
2777 bool
2778 operator_cst::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
2779 const irange &lh,
2780 const irange &rh ATTRIBUTE_UNUSED) const
2782 r = lh;
2783 return true;
2787 class operator_identity : public range_operator
2789 public:
2790 virtual bool fold_range (irange &r, tree type,
2791 const irange &op1,
2792 const irange &op2) const;
2793 virtual bool op1_range (irange &r, tree type,
2794 const irange &lhs,
2795 const irange &op2) const;
2796 } op_identity;
2798 bool
2799 operator_identity::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
2800 const irange &lh,
2801 const irange &rh ATTRIBUTE_UNUSED) const
2803 r = lh;
2804 return true;
2807 bool
2808 operator_identity::op1_range (irange &r, tree type ATTRIBUTE_UNUSED,
2809 const irange &lhs,
2810 const irange &op2 ATTRIBUTE_UNUSED) const
2812 r = lhs;
2813 return true;
2817 class operator_unknown : public range_operator
2819 public:
2820 virtual bool fold_range (irange &r, tree type,
2821 const irange &op1,
2822 const irange &op2) const;
2823 } op_unknown;
2825 bool
2826 operator_unknown::fold_range (irange &r, tree type,
2827 const irange &lh ATTRIBUTE_UNUSED,
2828 const irange &rh ATTRIBUTE_UNUSED) const
2830 r.set_varying (type);
2831 return true;
2835 class operator_abs : public range_operator
2837 public:
2838 virtual void wi_fold (irange &r, tree type,
2839 const wide_int &lh_lb,
2840 const wide_int &lh_ub,
2841 const wide_int &rh_lb,
2842 const wide_int &rh_ub) const;
2843 virtual bool op1_range (irange &r, tree type,
2844 const irange &lhs,
2845 const irange &op2) const;
2846 } op_abs;
2848 void
2849 operator_abs::wi_fold (irange &r, tree type,
2850 const wide_int &lh_lb, const wide_int &lh_ub,
2851 const wide_int &rh_lb ATTRIBUTE_UNUSED,
2852 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
2854 wide_int min, max;
2855 signop sign = TYPE_SIGN (type);
2856 unsigned prec = TYPE_PRECISION (type);
2858 // Pass through LH for the easy cases.
2859 if (sign == UNSIGNED || wi::ge_p (lh_lb, 0, sign))
2861 r = int_range<1> (type, lh_lb, lh_ub);
2862 return;
2865 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
2866 // a useful range.
2867 wide_int min_value = wi::min_value (prec, sign);
2868 wide_int max_value = wi::max_value (prec, sign);
2869 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lh_lb, min_value))
2871 r.set_varying (type);
2872 return;
2875 // ABS_EXPR may flip the range around, if the original range
2876 // included negative values.
2877 if (wi::eq_p (lh_lb, min_value))
2879 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
2880 // returned [-MIN,-MIN] so this preserves that behaviour. PR37078
2881 if (wi::eq_p (lh_ub, min_value))
2883 r = int_range<1> (type, min_value, min_value);
2884 return;
2886 min = max_value;
2888 else
2889 min = wi::abs (lh_lb);
2891 if (wi::eq_p (lh_ub, min_value))
2892 max = max_value;
2893 else
2894 max = wi::abs (lh_ub);
2896 // If the range contains zero then we know that the minimum value in the
2897 // range will be zero.
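// For example, ABS ([-3, 5]) == [0, 5].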
2898 if (wi::le_p (lh_lb, 0, sign) && wi::ge_p (lh_ub, 0, sign))
2900 if (wi::gt_p (min, max, sign))
2901 max = min;
2902 min = wi::zero (prec);
2904 else
2906 // If the range was reversed, swap MIN and MAX.
2907 if (wi::gt_p (min, max, sign))
2908 std::swap (min, max);
2911 // If the new range has its limits swapped around (MIN > MAX), then
2912 // the operation caused one of them to wrap around. The only thing
2913 // we know is that the result is positive.
2914 if (wi::gt_p (min, max, sign))
2916 min = wi::zero (prec);
2917 max = max_value;
2919 r = int_range<1> (type, min, max);
2922 bool
2923 operator_abs::op1_range (irange &r, tree type,
2924 const irange &lhs,
2925 const irange &op2) const
2927 if (empty_range_varying (r, type, lhs, op2))
2928 return true;
2929 if (TYPE_UNSIGNED (type))
2931 r = lhs;
2932 return true;
2934 // Start with the positives because negatives are an impossible result.
2935 int_range_max positives = range_positives (type);
2936 positives.intersect (lhs);
2937 r = positives;
2938 // Then add the negative of each pair:
2939 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
2940 for (unsigned i = 0; i < positives.num_pairs (); ++i)
2941 r.union_ (int_range<1> (type,
2942 -positives.upper_bound (i),
2943 -positives.lower_bound (i)));
2944 return true;
2948 class operator_absu : public range_operator
2950 public:
2951 virtual void wi_fold (irange &r, tree type,
2952 const wide_int &lh_lb, const wide_int &lh_ub,
2953 const wide_int &rh_lb, const wide_int &rh_ub) const;
2954 } op_absu;
2956 void
2957 operator_absu::wi_fold (irange &r, tree type,
2958 const wide_int &lh_lb, const wide_int &lh_ub,
2959 const wide_int &rh_lb ATTRIBUTE_UNUSED,
2960 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
2962 wide_int new_lb, new_ub;
2964 // Pass through VR0 for the easy cases.
2965 if (wi::ges_p (lh_lb, 0))
2967 new_lb = lh_lb;
2968 new_ub = lh_ub;
2970 else
2972 new_lb = wi::abs (lh_lb);
2973 new_ub = wi::abs (lh_ub);
2975 // If the range contains zero then we know that the minimum
2976 // value in the range will be zero.
2977 if (wi::ges_p (lh_ub, 0))
2979 if (wi::gtu_p (new_lb, new_ub))
2980 new_ub = new_lb;
2981 new_lb = wi::zero (TYPE_PRECISION (type));
2983 else
2984 std::swap (new_lb, new_ub);
2987 gcc_checking_assert (TYPE_UNSIGNED (type));
2988 r = int_range<1> (type, new_lb, new_ub);
2992 class operator_negate : public range_operator
2994 public:
2995 virtual bool fold_range (irange &r, tree type,
2996 const irange &op1,
2997 const irange &op2) const;
2998 virtual bool op1_range (irange &r, tree type,
2999 const irange &lhs,
3000 const irange &op2) const;
3001 } op_negate;
3003 bool
3004 operator_negate::fold_range (irange &r, tree type,
3005 const irange &lh,
3006 const irange &rh) const
3008 if (empty_range_varying (r, type, lh, rh))
3009 return true;
3010 // -X is simply 0 - X.
3011 return range_op_handler (MINUS_EXPR, type)->fold_range (r, type,
3012 range_zero (type),
3013 lh);
3016 bool
3017 operator_negate::op1_range (irange &r, tree type,
3018 const irange &lhs,
3019 const irange &op2) const
3021 // NEGATE is involutory.
3022 return fold_range (r, type, lhs, op2);
3026 class operator_addr_expr : public range_operator
3028 public:
3029 virtual bool fold_range (irange &r, tree type,
3030 const irange &op1,
3031 const irange &op2) const;
3032 virtual bool op1_range (irange &r, tree type,
3033 const irange &lhs,
3034 const irange &op2) const;
3035 } op_addr;
3037 bool
3038 operator_addr_expr::fold_range (irange &r, tree type,
3039 const irange &lh,
3040 const irange &rh) const
3042 if (empty_range_varying (r, type, lh, rh))
3043 return true;
3045 // Return a non-null pointer of the LHS type (passed in op2).
3046 if (lh.zero_p ())
3047 r = range_zero (type);
3048 else if (!lh.contains_p (build_zero_cst (lh.type ())))
3049 r = range_nonzero (type);
3050 else
3051 r.set_varying (type);
3052 return true;
3055 bool
3056 operator_addr_expr::op1_range (irange &r, tree type,
3057 const irange &lhs,
3058 const irange &op2) const
3060 return operator_addr_expr::fold_range (r, type, lhs, op2);
3064 class pointer_plus_operator : public range_operator
3066 public:
3067 virtual void wi_fold (irange &r, tree type,
3068 const wide_int &lh_lb,
3069 const wide_int &lh_ub,
3070 const wide_int &rh_lb,
3071 const wide_int &rh_ub) const;
3072 } op_pointer_plus;
3074 void
3075 pointer_plus_operator::wi_fold (irange &r, tree type,
3076 const wide_int &lh_lb,
3077 const wide_int &lh_ub,
3078 const wide_int &rh_lb,
3079 const wide_int &rh_ub) const
3081 // Check for [0,0] + const, and simply return the const.
3082 if (lh_lb == 0 && lh_ub == 0 && rh_lb == rh_ub)
3084 tree val = wide_int_to_tree (type, rh_lb);
3085 r.set (val, val);
3086 return;
3089 // For pointer types, we are really only interested in asserting
3090 // whether the expression evaluates to non-NULL.
3092 // With -fno-delete-null-pointer-checks we need to be more
3093 // conservative. As some object might reside at address 0,
3094 // then some offset could be added to it and the same offset
3095 // subtracted again and the result would be NULL.
3096 // E.g.
3097 // static int a[12]; where &a[0] is NULL and
3098 // ptr = &a[6];
3099 // ptr -= 6;
3100 // ptr will be NULL here, even when there is POINTER_PLUS_EXPR
3101 // where the first range doesn't include zero and the second one
3102 // doesn't either. As the second operand is sizetype (unsigned),
3103 // consider all ranges where the MSB could be set as possible
3104 // subtractions where the result might be NULL.
3105 if ((!wi_includes_zero_p (type, lh_lb, lh_ub)
3106 || !wi_includes_zero_p (type, rh_lb, rh_ub))
3107 && !TYPE_OVERFLOW_WRAPS (type)
3108 && (flag_delete_null_pointer_checks
3109 || !wi::sign_mask (rh_ub)))
3110 r = range_nonzero (type);
3111 else if (lh_lb == lh_ub && lh_lb == 0
3112 && rh_lb == rh_ub && rh_lb == 0)
3113 r = range_zero (type);
3114 else
3115 r.set_varying (type);
3119 class pointer_min_max_operator : public range_operator
3121 public:
3122 virtual void wi_fold (irange & r, tree type,
3123 const wide_int &lh_lb, const wide_int &lh_ub,
3124 const wide_int &rh_lb, const wide_int &rh_ub) const;
3125 } op_ptr_min_max;
3127 void
3128 pointer_min_max_operator::wi_fold (irange &r, tree type,
3129 const wide_int &lh_lb,
3130 const wide_int &lh_ub,
3131 const wide_int &rh_lb,
3132 const wide_int &rh_ub) const
3134 // For MIN/MAX expressions with pointers, we only care about
3135 // nullness. If both are non null, then the result is nonnull.
3136 // If both are null, then the result is null. Otherwise they
3137 // are varying.
3138 if (!wi_includes_zero_p (type, lh_lb, lh_ub)
3139 && !wi_includes_zero_p (type, rh_lb, rh_ub))
3140 r = range_nonzero (type);
3141 else if (wi_zero_p (type, lh_lb, lh_ub) && wi_zero_p (type, rh_lb, rh_ub))
3142 r = range_zero (type);
3143 else
3144 r.set_varying (type);
3148 class pointer_and_operator : public range_operator
3150 public:
3151 virtual void wi_fold (irange &r, tree type,
3152 const wide_int &lh_lb, const wide_int &lh_ub,
3153 const wide_int &rh_lb, const wide_int &rh_ub) const;
3154 } op_pointer_and;
3156 void
3157 pointer_and_operator::wi_fold (irange &r, tree type,
3158 const wide_int &lh_lb,
3159 const wide_int &lh_ub,
3160 const wide_int &rh_lb,
3161 const wide_int &rh_ub) const
3163 // For pointer types, we are really only interested in asserting
3164 // whether the expression evaluates to non-NULL.
3165 if (wi_zero_p (type, lh_lb, lh_ub) || wi_zero_p (type, rh_lb, rh_ub))
3166 r = range_zero (type);
3167 else
3168 r.set_varying (type);
3172 class pointer_or_operator : public range_operator
3174 public:
3175 virtual bool op1_range (irange &r, tree type,
3176 const irange &lhs,
3177 const irange &op2) const;
3178 virtual bool op2_range (irange &r, tree type,
3179 const irange &lhs,
3180 const irange &op1) const;
3181 virtual void wi_fold (irange &r, tree type,
3182 const wide_int &lh_lb, const wide_int &lh_ub,
3183 const wide_int &rh_lb, const wide_int &rh_ub) const;
3184 } op_pointer_or;
3186 bool
3187 pointer_or_operator::op1_range (irange &r, tree type,
3188 const irange &lhs,
3189 const irange &op2 ATTRIBUTE_UNUSED) const
3191 if (lhs.zero_p ())
3193 tree zero = build_zero_cst (type);
3194 r = int_range<1> (zero, zero);
3195 return true;
3197 r.set_varying (type);
3198 return true;
3201 bool
3202 pointer_or_operator::op2_range (irange &r, tree type,
3203 const irange &lhs,
3204 const irange &op1) const
3206 return pointer_or_operator::op1_range (r, type, lhs, op1);
3209 void
3210 pointer_or_operator::wi_fold (irange &r, tree type,
3211 const wide_int &lh_lb,
3212 const wide_int &lh_ub,
3213 const wide_int &rh_lb,
3214 const wide_int &rh_ub) const
3216 // For pointer types, we are really only interested in asserting
3217 // whether the expression evaluates to non-NULL.
3218 if (!wi_includes_zero_p (type, lh_lb, lh_ub)
3219 && !wi_includes_zero_p (type, rh_lb, rh_ub))
3220 r = range_nonzero (type);
3221 else if (wi_zero_p (type, lh_lb, lh_ub) && wi_zero_p (type, rh_lb, rh_ub))
3222 r = range_zero (type);
3223 else
3224 r.set_varying (type);
3227 // This implements the range operator tables as local objects in this file.
3229 class range_op_table
3231 public:
3232 inline range_operator *operator[] (enum tree_code code);
3233 protected:
3234 void set (enum tree_code code, range_operator &op);
3235 private:
3236 range_operator *m_range_tree[MAX_TREE_CODES];
3239 // Return a pointer to the range_operator instance, if there is one
3240 // associated with tree_code CODE.
3242 range_operator *
3243 range_op_table::operator[] (enum tree_code code)
3245 gcc_checking_assert (code > 0 && code < MAX_TREE_CODES);
3246 return m_range_tree[code];
3249 // Add OP to the handler table for CODE.
3251 void
3252 range_op_table::set (enum tree_code code, range_operator &op)
3254 gcc_checking_assert (m_range_tree[code] == NULL);
3255 m_range_tree[code] = &op;
3258 // Instantiate a range op table for integral operations.
3260 class integral_table : public range_op_table
3262 public:
3263 integral_table ();
3264 } integral_tree_table;
3266 integral_table::integral_table ()
3268 set (EQ_EXPR, op_equal);
3269 set (NE_EXPR, op_not_equal);
3270 set (LT_EXPR, op_lt);
3271 set (LE_EXPR, op_le);
3272 set (GT_EXPR, op_gt);
3273 set (GE_EXPR, op_ge);
3274 set (PLUS_EXPR, op_plus);
3275 set (MINUS_EXPR, op_minus);
3276 set (MIN_EXPR, op_min);
3277 set (MAX_EXPR, op_max);
3278 set (MULT_EXPR, op_mult);
3279 set (TRUNC_DIV_EXPR, op_trunc_div);
3280 set (FLOOR_DIV_EXPR, op_floor_div);
3281 set (ROUND_DIV_EXPR, op_round_div);
3282 set (CEIL_DIV_EXPR, op_ceil_div);
3283 set (EXACT_DIV_EXPR, op_exact_div);
3284 set (LSHIFT_EXPR, op_lshift);
3285 set (RSHIFT_EXPR, op_rshift);
3286 set (NOP_EXPR, op_convert);
3287 set (CONVERT_EXPR, op_convert);
3288 set (TRUTH_AND_EXPR, op_logical_and);
3289 set (BIT_AND_EXPR, op_bitwise_and);
3290 set (TRUTH_OR_EXPR, op_logical_or);
3291 set (BIT_IOR_EXPR, op_bitwise_or);
3292 set (BIT_XOR_EXPR, op_bitwise_xor);
3293 set (TRUNC_MOD_EXPR, op_trunc_mod);
3294 set (TRUTH_NOT_EXPR, op_logical_not);
3295 set (BIT_NOT_EXPR, op_bitwise_not);
3296 set (INTEGER_CST, op_integer_cst);
3297 set (SSA_NAME, op_identity);
3298 set (PAREN_EXPR, op_identity);
3299 set (OBJ_TYPE_REF, op_identity);
3300 set (IMAGPART_EXPR, op_unknown);
3301 set (POINTER_DIFF_EXPR, op_unknown);
3302 set (ABS_EXPR, op_abs);
3303 set (ABSU_EXPR, op_absu);
3304 set (NEGATE_EXPR, op_negate);
3305 set (ADDR_EXPR, op_addr);
3308 // Instantiate a range op table for pointer operations.
3310 class pointer_table : public range_op_table
3312 public:
3313 pointer_table ();
3314 } pointer_tree_table;
3316 pointer_table::pointer_table ()
3318 set (BIT_AND_EXPR, op_pointer_and);
3319 set (BIT_IOR_EXPR, op_pointer_or);
3320 set (MIN_EXPR, op_ptr_min_max);
3321 set (MAX_EXPR, op_ptr_min_max);
3322 set (POINTER_PLUS_EXPR, op_pointer_plus);
3324 set (EQ_EXPR, op_equal);
3325 set (NE_EXPR, op_not_equal);
3326 set (LT_EXPR, op_lt);
3327 set (LE_EXPR, op_le);
3328 set (GT_EXPR, op_gt);
3329 set (GE_EXPR, op_ge);
3330 set (SSA_NAME, op_identity);
3331 set (ADDR_EXPR, op_addr);
3332 set (NOP_EXPR, op_convert);
3333 set (CONVERT_EXPR, op_convert);
3335 set (BIT_NOT_EXPR, op_bitwise_not);
3336 set (BIT_XOR_EXPR, op_bitwise_xor);
3339 // The tables are hidden and accessed via a simple extern function.
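// For example, range_op_handler (PLUS_EXPR, integer_type_node)
// returns the PLUS_EXPR handler, whose fold_range would fold
// [1, 5] + [10, 20] into [11, 25].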
3341 range_operator *
3342 range_op_handler (enum tree_code code, tree type)
3344 // First check if there is a pointer specialization.
3345 if (POINTER_TYPE_P (type))
3346 return pointer_tree_table[code];
3347 return integral_tree_table[code];
3350 // Cast the range in R to TYPE.
3352 void
3353 range_cast (irange &r, tree type)
3355 int_range_max tmp = r;
3356 range_operator *op = range_op_handler (CONVERT_EXPR, type);
3357 // Call op_convert. If it fails, the result is varying.
3358 if (!op->fold_range (r, type, tmp, int_range<1> (type)))
3359 r.set_varying (type);
3362 #if CHECKING_P
3363 #include "selftest.h"
3364 #include "stor-layout.h"
3366 namespace selftest
3368 #define INT(N) build_int_cst (integer_type_node, (N))
3369 #define UINT(N) build_int_cstu (unsigned_type_node, (N))
3370 #define INT16(N) build_int_cst (short_integer_type_node, (N))
3371 #define UINT16(N) build_int_cstu (short_unsigned_type_node, (N))
3372 #define INT64(N) build_int_cstu (long_long_integer_type_node, (N))
3373 #define UINT64(N) build_int_cstu (long_long_unsigned_type_node, (N))
3374 #define UINT128(N) build_int_cstu (u128_type, (N))
3375 #define UCHAR(N) build_int_cstu (unsigned_char_type_node, (N))
3376 #define SCHAR(N) build_int_cst (signed_char_type_node, (N))
3378 static int_range<3>
3379 build_range3 (int a, int b, int c, int d, int e, int f)
3381 int_range<3> i1 (INT (a), INT (b));
3382 int_range<3> i2 (INT (c), INT (d));
3383 int_range<3> i3 (INT (e), INT (f));
3384 i1.union_ (i2);
3385 i1.union_ (i3);
3386 return i1;
3389 static void
3390 range3_tests ()
3392 typedef int_range<3> int_range3;
3393 int_range3 r0, r1, r2;
3394 int_range3 i1, i2, i3;
3396 // ([10,20] U [5,8]) U [1,3] ==> [1,3][5,8][10,20].
3397 r0 = int_range3 (INT (10), INT (20));
3398 r1 = int_range3 (INT (5), INT (8));
3399 r0.union_ (r1);
3400 r1 = int_range3 (INT (1), INT (3));
3401 r0.union_ (r1);
3402 ASSERT_TRUE (r0 == build_range3 (1, 3, 5, 8, 10, 20));
3404 // [1,3][5,8][10,20] U [-5,0] => [-5,3][5,8][10,20].
3405 r1 = int_range3 (INT (-5), INT (0));
3406 r0.union_ (r1);
3407 ASSERT_TRUE (r0 == build_range3 (-5, 3, 5, 8, 10, 20));
3409 // [10,20][30,40] U [50,60] ==> [10,20][30,40][50,60].
3410 r1 = int_range3 (INT (50), INT (60));
3411 r0 = int_range3 (INT (10), INT (20));
3412 r0.union_ (int_range3 (INT (30), INT (40)));
3413 r0.union_ (r1);
3414 ASSERT_TRUE (r0 == build_range3 (10, 20, 30, 40, 50, 60));
3415 // [10,20][30,40][50,60] U [70, 80] ==> [10,20][30,40][50,60][70,80].
3416 r1 = int_range3 (INT (70), INT (80));
3417 r0.union_ (r1);
3419 r2 = build_range3 (10, 20, 30, 40, 50, 60);
3420 r2.union_ (int_range3 (INT (70), INT (80)));
3421 ASSERT_TRUE (r0 == r2);
3423 // [10,20][30,40][50,60] U [6,35] => [6,40][50,60].
3424 r0 = build_range3 (10, 20, 30, 40, 50, 60);
3425 r1 = int_range3 (INT (6), INT (35));
3426 r0.union_ (r1);
3427 r1 = int_range3 (INT (6), INT (40));
3428 r1.union_ (int_range3 (INT (50), INT (60)));
3429 ASSERT_TRUE (r0 == r1);
3431 // [10,20][30,40][50,60] U [6,60] => [6,60].
3432 r0 = build_range3 (10, 20, 30, 40, 50, 60);
3433 r1 = int_range3 (INT (6), INT (60));
3434 r0.union_ (r1);
3435 ASSERT_TRUE (r0 == int_range3 (INT (6), INT (60)));
3437 // [10,20][30,40][50,60] U [6,70] => [6,70].
3438 r0 = build_range3 (10, 20, 30, 40, 50, 60);
3439 r1 = int_range3 (INT (6), INT (70));
3440 r0.union_ (r1);
3441 ASSERT_TRUE (r0 == int_range3 (INT (6), INT (70)));
3443 // [10,20][30,40][50,60] U [35,70] => [10,20][30,70].
3444 r0 = build_range3 (10, 20, 30, 40, 50, 60);
3445 r1 = int_range3 (INT (35), INT (70));
3446 r0.union_ (r1);
3447 r1 = int_range3 (INT (10), INT (20));
3448 r1.union_ (int_range3 (INT (30), INT (70)));
3449 ASSERT_TRUE (r0 == r1);
3451 // [10,20][30,40][50,60] U [15,35] => [10,40][50,60].
3452 r0 = build_range3 (10, 20, 30, 40, 50, 60);
3453 r1 = int_range3 (INT (15), INT (35));
3454 r0.union_ (r1);
3455 r1 = int_range3 (INT (10), INT (40));
3456 r1.union_ (int_range3 (INT (50), INT (60)));
3457 ASSERT_TRUE (r0 == r1);
3459 // [10,20][30,40][50,60] U [35,35] => [10,20][30,40][50,60].
3460 r0 = build_range3 (10, 20, 30, 40, 50, 60);
3461 r1 = int_range3 (INT (35), INT (35));
3462 r0.union_ (r1);
3463 ASSERT_TRUE (r0 == build_range3 (10, 20, 30, 40, 50, 60));
3466 static void
3467 int_range_max_tests ()
3469 int_range_max big;
3470 unsigned int nrange;
3472 // Build a huge multi-range range.
3473 for (nrange = 0; nrange < 50; ++nrange)
3475 int_range<1> tmp (INT (nrange*10), INT (nrange*10 + 5));
3476 big.union_ (tmp);
3478 ASSERT_TRUE (big.num_pairs () == nrange);
3480 // Verify that we can copy it without losing precision.
3481 int_range_max copy (big);
3482 ASSERT_TRUE (copy.num_pairs () == nrange);
3484 // Inverting it should produce one more sub-range.
3485 big.invert ();
3486 ASSERT_TRUE (big.num_pairs () == nrange + 1);
3488 int_range<1> tmp (INT (5), INT (37));
3489 big.intersect (tmp);
3490 ASSERT_TRUE (big.num_pairs () == 4);
3492 // Test that [10,10][20,20] does NOT contain 15.
3494 int_range_max i1 (build_int_cst (integer_type_node, 10),
3495 build_int_cst (integer_type_node, 10));
3496 int_range_max i2 (build_int_cst (integer_type_node, 20),
3497 build_int_cst (integer_type_node, 20));
3498 i1.union_ (i2);
3499 ASSERT_FALSE (i1.contains_p (build_int_cst (integer_type_node, 15)));
3504 static void
3505 multi_precision_range_tests ()
3507 // Test truncating copy to int_range<1>.
3508 int_range<3> big = build_range3 (10, 20, 30, 40, 50, 60);
3509 int_range<1> small = big;
3510 ASSERT_TRUE (small == int_range<1> (INT (10), INT (60)));
3512 // Test truncating copy to int_range<2>.
3513 int_range<2> medium = big;
3514 ASSERT_TRUE (!medium.undefined_p ());
3516 // Test that a truncating copy of [MIN,20][22,40][80,MAX]
3517 // ends up as a conservative anti-range of ~[21,21].
3518 big = int_range<3> (vrp_val_min (integer_type_node), INT (20));
3519 big.union_ (int_range<1> (INT (22), INT (40)));
3520 big.union_ (int_range<1> (INT (80), vrp_val_max (integer_type_node)));
3521 small = big;
3522 ASSERT_TRUE (small == int_range<1> (INT (21), INT (21), VR_ANTI_RANGE));
3524 // Copying a legacy symbolic to an int_range should normalize the
3525 // symbolic at copy time.
3527 tree ssa = make_ssa_name (integer_type_node);
3528 value_range legacy_range (ssa, INT (25));
3529 int_range<2> copy = legacy_range;
3530 ASSERT_TRUE (copy == int_range<2> (vrp_val_min (integer_type_node),
3531 INT (25)));
3533 // Test that copying ~[abc_23, abc_23] to a multi-range yields varying.
3534 legacy_range = value_range (ssa, ssa, VR_ANTI_RANGE);
3535 copy = legacy_range;
3536 ASSERT_TRUE (copy.varying_p ());
3539 range3_tests ();
3542 static void
3543 operator_tests ()
3545 tree min = vrp_val_min (integer_type_node);
3546 tree max = vrp_val_max (integer_type_node);
3547 tree tiny = fold_build2 (PLUS_EXPR, integer_type_node, min,
3548 build_one_cst (integer_type_node));
3549 int_range_max res;
3550 int_range_max i1 (tiny, max);
3551 int_range_max i2 (build_int_cst (integer_type_node, 255),
3552 build_int_cst (integer_type_node, 255));
3554 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
3555 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
3556 ASSERT_TRUE (res == int_range<1> (integer_type_node));
3558 // VARYING = OP1 & 255: OP1 is VARYING
3559 i1 = int_range<1> (integer_type_node);
3560 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
3561 ASSERT_TRUE (res == int_range<1> (integer_type_node));
3563 // Test that 0x808.... & 0x8.... still contains 0x8....
3564 // for a large set of numbers.
3566 tree big_type = long_long_unsigned_type_node;
3567 // big_num = 0x808,0000,0000,0000
3568 tree big_num = fold_build2 (LSHIFT_EXPR, big_type,
3569 build_int_cst (big_type, 0x808),
3570 build_int_cst (big_type, 48));
3571 op_bitwise_and.fold_range (res, big_type,
3572 int_range <1> (big_type),
3573 int_range <1> (big_num, big_num));
3574 // val = 0x8,0000,0000,0000
3575 tree val = fold_build2 (LSHIFT_EXPR, big_type,
3576 build_int_cst (big_type, 0x8),
3577 build_int_cst (big_type, 48));
3578 ASSERT_TRUE (res.contains_p (val));
3581 // unsigned: [3, MAX] = OP1 >> 1
3583 int_range_max lhs (build_int_cst (unsigned_type_node, 3),
3584 TYPE_MAX_VALUE (unsigned_type_node));
3585 int_range_max one (build_one_cst (unsigned_type_node),
3586 build_one_cst (unsigned_type_node));
3587 int_range_max op1;
3588 op_rshift.op1_range (op1, unsigned_type_node, lhs, one);
3589 ASSERT_FALSE (op1.contains_p (UINT (3)));
3592 // signed: [3, MAX] = OP1 >> 1
3594 int_range_max lhs (INT (3), TYPE_MAX_VALUE (integer_type_node));
3595 int_range_max one (INT (1), INT (1));
3596 int_range_max op1;
3597 op_rshift.op1_range (op1, integer_type_node, lhs, one);
3598 ASSERT_FALSE (op1.contains_p (INT (-2)));
3601 // This is impossible, so OP1 should be [].
3602 // signed: [MIN, MIN] = OP1 >> 1
3604 int_range_max lhs (TYPE_MIN_VALUE (integer_type_node),
3605 TYPE_MIN_VALUE (integer_type_node));
3606 int_range_max one (INT (1), INT (1));
3607 int_range_max op1;
3608 op_rshift.op1_range (op1, integer_type_node, lhs, one);
3609 ASSERT_TRUE (op1.undefined_p ());
3612 // signed: ~[-1] = OP1 >> 31
3613 if (TYPE_PRECISION (integer_type_node) > 31)
3615 int_range_max lhs (INT (-1), INT (-1), VR_ANTI_RANGE);
3616 int_range_max shift (INT (31), INT (31));
3617 int_range_max op1;
3618 op_rshift.op1_range (op1, integer_type_node, lhs, shift);
3619 int_range_max negatives = range_negatives (integer_type_node);
3620 negatives.intersect (op1);
3621 ASSERT_TRUE (negatives.undefined_p ());
3624 if (TYPE_PRECISION (unsigned_type_node) > 31)
3626 // unsigned VARYING = op1 << 1 should be VARYING.
3627 int_range<2> lhs (unsigned_type_node);
3628 int_range<2> shift (INT (1), INT (1));
3629 int_range_max op1;
3630 op_lshift.op1_range (op1, unsigned_type_node, lhs, shift);
3631 ASSERT_TRUE (op1.varying_p ());
3633 // 0 = op1 << 1 should be [0,0], [0x80000000, 0x80000000].
3634 int_range<2> zero (UINT (0), UINT (0));
3635 op_lshift.op1_range (op1, unsigned_type_node, zero, shift);
3636 ASSERT_TRUE (op1.num_pairs () == 2);
3637 // Remove the [0,0] range.
3638 op1.intersect (zero);
3639 ASSERT_TRUE (op1.num_pairs () == 1);
3640 // op1 << 1 should be [0x8000,0x8000] << 1,
3641 // which should result in [0,0].
3642 int_range_max result;
3643 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
3644 ASSERT_TRUE (result == zero);
3646 // signed VARYING = op1 << 1 should be VARYING.
3647 if (TYPE_PRECISION (integer_type_node) > 31)
3649 // signed VARYING = op1 << 1 should be VARYING.
3650 int_range<2> lhs (integer_type_node);
3651 int_range<2> shift (INT (1), INT (1));
3652 int_range_max op1;
3653 op_lshift.op1_range (op1, integer_type_node, lhs, shift);
3654 ASSERT_TRUE (op1.varying_p ());
3656 // 0 = op1 << 1 should be [0,0], [0x80000000, 0x80000000].
3657 int_range<2> zero (INT (0), INT (0));
3658 op_lshift.op1_range (op1, integer_type_node, zero, shift);
3659 ASSERT_TRUE (op1.num_pairs () == 2);
3660 // Remove the [0,0] range.
3661 op1.intersect (zero);
3662 ASSERT_TRUE (op1.num_pairs () == 1);
3663 // op1 << 1 should be [0x8000,0x8000] << 1,
3664 // which should result in [0,0].
3665 int_range_max result;
3666 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
3667 ASSERT_TRUE (result == zero);
3671 // Run all of the selftests within this file.
3673 void
3674 range_tests ()
3676 tree u128_type = build_nonstandard_integer_type (128, /*unsigned=*/1);
3677 int_range<1> i1, i2, i3;
3678 int_range<1> r0, r1, rold;
3680 // Test 1-bit signed integer union.
3681 // [-1,-1] U [0,0] = VARYING.
3682 tree one_bit_type = build_nonstandard_integer_type (1, 0);
3683 tree one_bit_min = vrp_val_min (one_bit_type);
3684 tree one_bit_max = vrp_val_max (one_bit_type);
3686 int_range<2> min (one_bit_min, one_bit_min);
3687 int_range<2> max (one_bit_max, one_bit_max);
3688 max.union_ (min);
3689 ASSERT_TRUE (max.varying_p ());
3692 // Test inversion of 1-bit signed integers.
3694 int_range<2> min (one_bit_min, one_bit_min);
3695 int_range<2> max (one_bit_max, one_bit_max);
3696 int_range<2> t;
3697 t = min;
3698 t.invert ();
3699 ASSERT_TRUE (t == max);
3700 t = max;
3701 t.invert ();
3702 ASSERT_TRUE (t == min);
3705 // Test that NOT(255) is [0..254] in 8-bit land.
3706 int_range<1> not_255 (UCHAR (255), UCHAR (255), VR_ANTI_RANGE);
3707 ASSERT_TRUE (not_255 == int_range<1> (UCHAR (0), UCHAR (254)));
3709 // Test that NOT(0) is [1..255] in 8-bit land.
3710 int_range<1> not_zero = range_nonzero (unsigned_char_type_node);
3711 ASSERT_TRUE (not_zero == int_range<1> (UCHAR (1), UCHAR (255)));
3713 // Check that [0,127][0x..ffffff80,0x..ffffff]
3714 // => ~[128, 0x..ffffff7f].
3715 r0 = int_range<1> (UINT128 (0), UINT128 (127));
3716 tree high = build_minus_one_cst (u128_type);
3717 // low = -1 - 127 => 0x..ffffff80.
3718 tree low = fold_build2 (MINUS_EXPR, u128_type, high, UINT128(127));
3719 r1 = int_range<1> (low, high); // [0x..ffffff80, 0x..ffffffff]
3720 // r0 = [0,127][0x..ffffff80,0x..fffffff].
3721 r0.union_ (r1);
3722 // r1 = [128, 0x..ffffff7f].
3723 r1 = int_range<1> (UINT128(128),
3724 fold_build2 (MINUS_EXPR, u128_type,
3725 build_minus_one_cst (u128_type),
3726 UINT128(128)));
3727 r0.invert ();
3728 ASSERT_TRUE (r0 == r1);
3730 r0.set_varying (integer_type_node);
3731 tree minint = wide_int_to_tree (integer_type_node, r0.lower_bound ());
3732 tree maxint = wide_int_to_tree (integer_type_node, r0.upper_bound ());
3734 r0.set_varying (short_integer_type_node);
3735 tree minshort = wide_int_to_tree (short_integer_type_node, r0.lower_bound ());
3736 tree maxshort = wide_int_to_tree (short_integer_type_node, r0.upper_bound ());
3738 r0.set_varying (unsigned_type_node);
3739 tree maxuint = wide_int_to_tree (unsigned_type_node, r0.upper_bound ());
3741 // Check that ~[0,5] => [6,MAX] for unsigned int.
3742 r0 = int_range<1> (UINT (0), UINT (5));
3743 r0.invert ();
3744 ASSERT_TRUE (r0 == int_range<1> (UINT(6), maxuint));
3746 // Check that ~[10,MAX] => [0,9] for unsigned int.
3747 r0 = int_range<1> (UINT(10), maxuint);
3748 r0.invert ();
3749 ASSERT_TRUE (r0 == int_range<1> (UINT (0), UINT (9)));
3751 // Check that ~[0,5] => [6,MAX] for unsigned 128-bit numbers.
3752 r0 = int_range<1> (UINT128 (0), UINT128 (5), VR_ANTI_RANGE);
3753 r1 = int_range<1> (UINT128(6), build_minus_one_cst (u128_type));
3754 ASSERT_TRUE (r0 == r1);
3756 // Check that [~5] is really [-MIN,4][6,MAX].
3757 r0 = int_range<1> (INT (5), INT (5), VR_ANTI_RANGE);
3758 r1 = int_range<1> (minint, INT (4));
3759 r1.union_ (int_range<1> (INT (6), maxint));
3760 ASSERT_FALSE (r1.undefined_p ());
3761 ASSERT_TRUE (r0 == r1);
3763 r1 = int_range<1> (INT (5), INT (5));
3764 int_range<1> r2 (r1);
3765 ASSERT_TRUE (r1 == r2);
3767 r1 = int_range<1> (INT (5), INT (10));
3769 r1 = int_range<1> (integer_type_node,
3770 wi::to_wide (INT (5)), wi::to_wide (INT (10)));
3771 ASSERT_TRUE (r1.contains_p (INT (7)));
3773 r1 = int_range<1> (SCHAR (0), SCHAR (20));
3774 ASSERT_TRUE (r1.contains_p (SCHAR(15)));
3775 ASSERT_FALSE (r1.contains_p (SCHAR(300)));
3777 // If a range is in any way outside of the range of the type being
3778 // converted to, default to the range for the new type.
3779 if (TYPE_PRECISION (TREE_TYPE (maxint))
3780 > TYPE_PRECISION (short_integer_type_node))
3782 r1 = int_range<1> (integer_zero_node, maxint);
3783 range_cast (r1, short_integer_type_node);
3784 ASSERT_TRUE (r1.lower_bound () == wi::to_wide (minshort)
3785 && r1.upper_bound() == wi::to_wide (maxshort));
3788 // (unsigned char)[-5,-1] => [251,255].
3789 r0 = rold = int_range<1> (SCHAR (-5), SCHAR (-1));
3790 range_cast (r0, unsigned_char_type_node);
3791 ASSERT_TRUE (r0 == int_range<1> (UCHAR (251), UCHAR (255)));
3792 range_cast (r0, signed_char_type_node);
3793 ASSERT_TRUE (r0 == rold);
3795 // (signed char)[15, 150] => [-128,-106][15,127].
3796 r0 = rold = int_range<1> (UCHAR (15), UCHAR (150));
3797 range_cast (r0, signed_char_type_node);
3798 r1 = int_range<1> (SCHAR (15), SCHAR (127));
3799 r2 = int_range<1> (SCHAR (-128), SCHAR (-106));
3800 r1.union_ (r2);
3801 ASSERT_TRUE (r1 == r0);
3802 range_cast (r0, unsigned_char_type_node);
3803 ASSERT_TRUE (r0 == rold);
3805 // (unsigned char)[-5, 5] => [0,5][251,255].
3806 r0 = rold = int_range<1> (SCHAR (-5), SCHAR (5));
3807 range_cast (r0, unsigned_char_type_node);
3808 r1 = int_range<1> (UCHAR (251), UCHAR (255));
3809 r2 = int_range<1> (UCHAR (0), UCHAR (5));
3810 r1.union_ (r2);
3811 ASSERT_TRUE (r0 == r1);
3812 range_cast (r0, signed_char_type_node);
3813 ASSERT_TRUE (r0 == rold);
3815 // (unsigned char)[-5,5] => [0,5][251,255].
3816 r0 = int_range<1> (INT (-5), INT (5));
3817 range_cast (r0, unsigned_char_type_node);
3818 r1 = int_range<1> (UCHAR (0), UCHAR (5));
3819 r1.union_ (int_range<1> (UCHAR (251), UCHAR (255)));
3820 ASSERT_TRUE (r0 == r1);
3822 // (unsigned char)[5U,1974U] => [0,255].
3823 r0 = int_range<1> (UINT (5), UINT (1974));
3824 range_cast (r0, unsigned_char_type_node);
3825 ASSERT_TRUE (r0 == int_range<1> (UCHAR (0), UCHAR (255)));
3826 range_cast (r0, integer_type_node);
3827 // Going to a wider range should not sign extend.
3828 ASSERT_TRUE (r0 == int_range<1> (INT (0), INT (255)));
3830 // (unsigned char)[-350,15] => [0,255].
3831 r0 = int_range<1> (INT (-350), INT (15));
3832 range_cast (r0, unsigned_char_type_node);
3833 ASSERT_TRUE (r0 == (int_range<1>
3834 (TYPE_MIN_VALUE (unsigned_char_type_node),
3835 TYPE_MAX_VALUE (unsigned_char_type_node))));
3837 // Casting [-120,20] from signed char to unsigned short.
3838 // => [0, 20][0xff88, 0xffff].
3839 r0 = int_range<1> (SCHAR (-120), SCHAR (20));
3840 range_cast (r0, short_unsigned_type_node);
3841 r1 = int_range<1> (UINT16 (0), UINT16 (20));
3842 r2 = int_range<1> (UINT16 (0xff88), UINT16 (0xffff));
3843 r1.union_ (r2);
3844 ASSERT_TRUE (r0 == r1);
3845 // A truncating cast back to signed char will work because [-120, 20]
3846 // is representable in signed char.
3847 range_cast (r0, signed_char_type_node);
3848 ASSERT_TRUE (r0 == int_range<1> (SCHAR (-120), SCHAR (20)));
3850 // unsigned char -> signed short
3851 // (signed short)[(unsigned char)25, (unsigned char)250]
3852 // => [(signed short)25, (signed short)250]
3853 r0 = rold = int_range<1> (UCHAR (25), UCHAR (250));
3854 range_cast (r0, short_integer_type_node);
3855 r1 = int_range<1> (INT16 (25), INT16 (250));
3856 ASSERT_TRUE (r0 == r1);
3857 range_cast (r0, unsigned_char_type_node);
3858 ASSERT_TRUE (r0 == rold);
3860 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
3861 r0 = int_range<1> (TYPE_MIN_VALUE (long_long_integer_type_node),
3862 TYPE_MAX_VALUE (long_long_integer_type_node));
3863 range_cast (r0, short_unsigned_type_node);
3864 r1 = int_range<1> (TYPE_MIN_VALUE (short_unsigned_type_node),
3865 TYPE_MAX_VALUE (short_unsigned_type_node));
3866 ASSERT_TRUE (r0 == r1);
3868 // NOT([10,20]) ==> [-MIN,9][21,MAX].
3869 r0 = r1 = int_range<1> (INT (10), INT (20));
3870 r2 = int_range<1> (minint, INT(9));
3871 r2.union_ (int_range<1> (INT(21), maxint));
3872 ASSERT_FALSE (r2.undefined_p ());
3873 r1.invert ();
3874 ASSERT_TRUE (r1 == r2);
3875 // Test that NOT(NOT(x)) == x.
3876 r2.invert ();
3877 ASSERT_TRUE (r0 == r2);
3879 // Test that booleans and their inverse work as expected.
3880 r0 = range_zero (boolean_type_node);
3881 ASSERT_TRUE (r0 == int_range<1> (build_zero_cst (boolean_type_node),
3882 build_zero_cst (boolean_type_node)));
3883 r0.invert ();
3884 ASSERT_TRUE (r0 == int_range<1> (build_one_cst (boolean_type_node),
3885 build_one_cst (boolean_type_node)));
3887 // Casting NONZERO to a narrower type will wrap/overflow so
3888 // it's just the entire range for the narrower type.
3890 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
3891 // is outside of the range of a smaller range, return the full
3892 // smaller range.
3893 if (TYPE_PRECISION (integer_type_node)
3894 > TYPE_PRECISION (short_integer_type_node))
3896 r0 = range_nonzero (integer_type_node);
3897 range_cast (r0, short_integer_type_node);
3898 r1 = int_range<1> (TYPE_MIN_VALUE (short_integer_type_node),
3899 TYPE_MAX_VALUE (short_integer_type_node));
3900 ASSERT_TRUE (r0 == r1);
3903 // Casting NONZERO from a narrower signed to a wider signed.
3905 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
3906 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
3907 r0 = range_nonzero (short_integer_type_node);
3908 range_cast (r0, integer_type_node);
3909 r1 = int_range<1> (INT (-32768), INT (-1));
3910 r2 = int_range<1> (INT (1), INT (32767));
3911 r1.union_ (r2);
3912 ASSERT_TRUE (r0 == r1);
3914 // Make sure NULL and non-NULL of pointer types work, and that
3915 // inverses of them are consistent.
3916 tree voidp = build_pointer_type (void_type_node);
3917 r0 = range_zero (voidp);
3918 r1 = r0;
3919 r0.invert ();
3920 r0.invert ();
3921 ASSERT_TRUE (r0 == r1);
3923 // [10,20] U [15, 30] => [10, 30].
3924 r0 = int_range<1> (INT (10), INT (20));
3925 r1 = int_range<1> (INT (15), INT (30));
3926 r0.union_ (r1);
3927 ASSERT_TRUE (r0 == int_range<1> (INT (10), INT (30)));
3929 // [15,40] U [] => [15,40].
3930 r0 = int_range<1> (INT (15), INT (40));
3931 r1.set_undefined ();
3932 r0.union_ (r1);
3933 ASSERT_TRUE (r0 == int_range<1> (INT (15), INT (40)));
3935 // [10,20] U [10,10] => [10,20].
3936 r0 = int_range<1> (INT (10), INT (20));
3937 r1 = int_range<1> (INT (10), INT (10));
3938 r0.union_ (r1);
3939 ASSERT_TRUE (r0 == int_range<1> (INT (10), INT (20)));
3941 // [10,20] U [9,9] => [9,20].
3942 r0 = int_range<1> (INT (10), INT (20));
3943 r1 = int_range<1> (INT (9), INT (9));
3944 r0.union_ (r1);
3945 ASSERT_TRUE (r0 == int_range<1> (INT (9), INT (20)));
3947 // [10,20] ^ [15,30] => [15,20].
3948 r0 = int_range<1> (INT (10), INT (20));
3949 r1 = int_range<1> (INT (15), INT (30));
3950 r0.intersect (r1);
3951 ASSERT_TRUE (r0 == int_range<1> (INT (15), INT (20)));
3953 // Test the internal sanity of wide_int's wrt HWIs.
3954 ASSERT_TRUE (wi::max_value (TYPE_PRECISION (boolean_type_node),
3955 TYPE_SIGN (boolean_type_node))
3956 == wi::uhwi (1, TYPE_PRECISION (boolean_type_node)));
3958 // Test zero_p().
3959 r0 = int_range<1> (INT (0), INT (0));
3960 ASSERT_TRUE (r0.zero_p ());
3962 // Test nonzero_p().
3963 r0 = int_range<1> (INT (0), INT (0));
3964 r0.invert ();
3965 ASSERT_TRUE (r0.nonzero_p ());
3967 // Test legacy interaction.
3968 // r0 = ~[1,1]
3969 r0 = int_range<1> (UINT (1), UINT (1), VR_ANTI_RANGE);
3970 // r1 = ~[3,3]
3971 r1 = int_range<1> (UINT (3), UINT (3), VR_ANTI_RANGE);
3973 // vv = [0,0][2,2][4, MAX]
3974 int_range<3> vv = r0;
3975 vv.intersect (r1);
3977 ASSERT_TRUE (vv.contains_p (UINT (2)));
3978 ASSERT_TRUE (vv.num_pairs () == 3);
3980 // create r0 as legacy [1,1]
3981 r0 = int_range<1> (UINT (1), UINT (1));
3982 // And union it with [0,0][2,2][4,MAX] multi range
3983 r0.union_ (vv);
3984 // The result should be [0,2][4,MAX], or ~[3,3] but it must contain 2
3985 ASSERT_TRUE (r0.contains_p (UINT (2)));
3988 multi_precision_range_tests ();
3989 int_range_max_tests ();
3990 operator_tests ();
3993 } // namespace selftest
3995 #endif // CHECKING_P