RISC-V: Make stack_save_restore tests more robust
[official-gcc.git] / gcc / range-op.cc
blob268f6b6f02508456c7eba0236cf3a4cc0c14dece
1 /* Code for range operators.
2 Copyright (C) 2017-2023 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "insn-codes.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
36 #include "flags.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "calls.h"
40 #include "cfganal.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
43 #include "tree-eh.h"
44 #include "gimple-walk.h"
45 #include "tree-cfg.h"
46 #include "wide-int.h"
47 #include "value-relation.h"
48 #include "range-op.h"
49 #include "tree-ssa-ccp.h"
50 #include "range-op-mixed.h"
52 // Instantiate the operators which apply to multiple types here.
54 operator_equal op_equal;
55 operator_not_equal op_not_equal;
56 operator_lt op_lt;
57 operator_le op_le;
58 operator_gt op_gt;
59 operator_ge op_ge;
60 operator_identity op_ident;
61 operator_cst op_cst;
62 operator_cast op_cast;
63 operator_plus op_plus;
64 operator_abs op_abs;
65 operator_minus op_minus;
66 operator_negate op_negate;
67 operator_mult op_mult;
68 operator_addr_expr op_addr;
69 operator_bitwise_not op_bitwise_not;
70 operator_bitwise_xor op_bitwise_xor;
71 operator_bitwise_and op_bitwise_and;
72 operator_bitwise_or op_bitwise_or;
73 operator_min op_min;
74 operator_max op_max;
76 // Instantaite a range operator table.
77 range_op_table operator_table;
79 // Invoke the initialization routines for each class of range.
81 range_op_table::range_op_table ()
83 initialize_integral_ops ();
84 initialize_pointer_ops ();
85 initialize_float_ops ();
87 set (EQ_EXPR, op_equal);
88 set (NE_EXPR, op_not_equal);
89 set (LT_EXPR, op_lt);
90 set (LE_EXPR, op_le);
91 set (GT_EXPR, op_gt);
92 set (GE_EXPR, op_ge);
93 set (SSA_NAME, op_ident);
94 set (PAREN_EXPR, op_ident);
95 set (OBJ_TYPE_REF, op_ident);
96 set (REAL_CST, op_cst);
97 set (INTEGER_CST, op_cst);
98 set (NOP_EXPR, op_cast);
99 set (CONVERT_EXPR, op_cast);
100 set (PLUS_EXPR, op_plus);
101 set (ABS_EXPR, op_abs);
102 set (MINUS_EXPR, op_minus);
103 set (NEGATE_EXPR, op_negate);
104 set (MULT_EXPR, op_mult);
106 // Occur in both integer and pointer tables, but currently share
107 // integral implementation.
108 set (ADDR_EXPR, op_addr);
109 set (BIT_NOT_EXPR, op_bitwise_not);
110 set (BIT_XOR_EXPR, op_bitwise_xor);
112 // These are in both integer and pointer tables, but pointer has a different
113 // implementation.
114 // If commented out, there is a hybrid version in range-op-ptr.cc which
115 // is used until there is a pointer range class. Then we can simply
116 // uncomment the operator here and use the unified version.
118 // set (BIT_AND_EXPR, op_bitwise_and);
119 // set (BIT_IOR_EXPR, op_bitwise_or);
120 // set (MIN_EXPR, op_min);
121 // set (MAX_EXPR, op_max);
124 // Instantiate a default range operator for opcodes with no entry.
126 range_operator default_operator;
128 // Create a default range_op_handler.
130 range_op_handler::range_op_handler ()
132 m_operator = &default_operator;
135 // Create a range_op_handler for CODE. Use a default operatoer if CODE
136 // does not have an entry.
138 range_op_handler::range_op_handler (unsigned code)
140 m_operator = operator_table[code];
141 if (!m_operator)
142 m_operator = &default_operator;
145 // Return TRUE if this handler has a non-default operator.
147 range_op_handler::operator bool () const
149 return m_operator != &default_operator;
152 // Return a pointer to the range operator assocaited with this handler.
153 // If it is a default operator, return NULL.
154 // This is the equivalent of indexing the range table.
156 range_operator *
157 range_op_handler::range_op () const
159 if (m_operator != &default_operator)
160 return m_operator;
161 return NULL;
// Create a dispatch pattern for value range discriminators LHS, OP1, and OP2.
// This is used to produce a unique value for each dispatch pattern.  Shift
// values are based on the size of the m_discriminator field in value_range.h.

constexpr unsigned
dispatch_trio (unsigned lhs, unsigned op1, unsigned op2)
{
  // Pack the three discriminators into separate nibble groups.
  return (lhs << 8) + (op1 << 4) + op2;
}
174 // These are the supported dispatch patterns. These map to the parameter list
175 // of the routines in range_operator. Note the last 3 characters are
176 // shorthand for the LHS, OP1, and OP2 range discriminator class.
178 const unsigned RO_III = dispatch_trio (VR_IRANGE, VR_IRANGE, VR_IRANGE);
179 const unsigned RO_IFI = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_IRANGE);
180 const unsigned RO_IFF = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_FRANGE);
181 const unsigned RO_FFF = dispatch_trio (VR_FRANGE, VR_FRANGE, VR_FRANGE);
182 const unsigned RO_FIF = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_FRANGE);
183 const unsigned RO_FII = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_IRANGE);
185 // Return a dispatch value for parameter types LHS, OP1 and OP2.
187 unsigned
188 range_op_handler::dispatch_kind (const vrange &lhs, const vrange &op1,
189 const vrange& op2) const
191 return dispatch_trio (lhs.m_discriminator, op1.m_discriminator,
192 op2.m_discriminator);
195 // Dispatch a call to fold_range based on the types of R, LH and RH.
197 bool
198 range_op_handler::fold_range (vrange &r, tree type,
199 const vrange &lh,
200 const vrange &rh,
201 relation_trio rel) const
203 gcc_checking_assert (m_operator);
204 switch (dispatch_kind (r, lh, rh))
206 case RO_III:
207 return m_operator->fold_range (as_a <irange> (r), type,
208 as_a <irange> (lh),
209 as_a <irange> (rh), rel);
210 case RO_IFI:
211 return m_operator->fold_range (as_a <irange> (r), type,
212 as_a <frange> (lh),
213 as_a <irange> (rh), rel);
214 case RO_IFF:
215 return m_operator->fold_range (as_a <irange> (r), type,
216 as_a <frange> (lh),
217 as_a <frange> (rh), rel);
218 case RO_FFF:
219 return m_operator->fold_range (as_a <frange> (r), type,
220 as_a <frange> (lh),
221 as_a <frange> (rh), rel);
222 case RO_FII:
223 return m_operator->fold_range (as_a <frange> (r), type,
224 as_a <irange> (lh),
225 as_a <irange> (rh), rel);
226 default:
227 return false;
231 // Dispatch a call to op1_range based on the types of R, LHS and OP2.
233 bool
234 range_op_handler::op1_range (vrange &r, tree type,
235 const vrange &lhs,
236 const vrange &op2,
237 relation_trio rel) const
239 gcc_checking_assert (m_operator);
241 if (lhs.undefined_p ())
242 return false;
243 switch (dispatch_kind (r, lhs, op2))
245 case RO_III:
246 return m_operator->op1_range (as_a <irange> (r), type,
247 as_a <irange> (lhs),
248 as_a <irange> (op2), rel);
249 case RO_FIF:
250 return m_operator->op1_range (as_a <frange> (r), type,
251 as_a <irange> (lhs),
252 as_a <frange> (op2), rel);
253 case RO_FFF:
254 return m_operator->op1_range (as_a <frange> (r), type,
255 as_a <frange> (lhs),
256 as_a <frange> (op2), rel);
257 default:
258 return false;
262 // Dispatch a call to op2_range based on the types of R, LHS and OP1.
264 bool
265 range_op_handler::op2_range (vrange &r, tree type,
266 const vrange &lhs,
267 const vrange &op1,
268 relation_trio rel) const
270 gcc_checking_assert (m_operator);
271 if (lhs.undefined_p ())
272 return false;
274 switch (dispatch_kind (r, lhs, op1))
276 case RO_III:
277 return m_operator->op2_range (as_a <irange> (r), type,
278 as_a <irange> (lhs),
279 as_a <irange> (op1), rel);
280 case RO_FIF:
281 return m_operator->op2_range (as_a <frange> (r), type,
282 as_a <irange> (lhs),
283 as_a <frange> (op1), rel);
284 case RO_FFF:
285 return m_operator->op2_range (as_a <frange> (r), type,
286 as_a <frange> (lhs),
287 as_a <frange> (op1), rel);
288 default:
289 return false;
293 // Dispatch a call to lhs_op1_relation based on the types of LHS, OP1 and OP2.
295 relation_kind
296 range_op_handler::lhs_op1_relation (const vrange &lhs,
297 const vrange &op1,
298 const vrange &op2,
299 relation_kind rel) const
301 gcc_checking_assert (m_operator);
303 switch (dispatch_kind (lhs, op1, op2))
305 case RO_III:
306 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
307 as_a <irange> (op1),
308 as_a <irange> (op2), rel);
309 case RO_IFF:
310 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
311 as_a <frange> (op1),
312 as_a <frange> (op2), rel);
313 case RO_FFF:
314 return m_operator->lhs_op1_relation (as_a <frange> (lhs),
315 as_a <frange> (op1),
316 as_a <frange> (op2), rel);
317 default:
318 return VREL_VARYING;
322 // Dispatch a call to lhs_op2_relation based on the types of LHS, OP1 and OP2.
324 relation_kind
325 range_op_handler::lhs_op2_relation (const vrange &lhs,
326 const vrange &op1,
327 const vrange &op2,
328 relation_kind rel) const
330 gcc_checking_assert (m_operator);
331 switch (dispatch_kind (lhs, op1, op2))
333 case RO_III:
334 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
335 as_a <irange> (op1),
336 as_a <irange> (op2), rel);
337 case RO_IFF:
338 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
339 as_a <frange> (op1),
340 as_a <frange> (op2), rel);
341 case RO_FFF:
342 return m_operator->lhs_op2_relation (as_a <frange> (lhs),
343 as_a <frange> (op1),
344 as_a <frange> (op2), rel);
345 default:
346 return VREL_VARYING;
350 // Dispatch a call to op1_op2_relation based on the type of LHS.
352 relation_kind
353 range_op_handler::op1_op2_relation (const vrange &lhs,
354 const vrange &op1,
355 const vrange &op2) const
357 gcc_checking_assert (m_operator);
358 switch (dispatch_kind (lhs, op1, op2))
360 case RO_III:
361 return m_operator->op1_op2_relation (as_a <irange> (lhs),
362 as_a <irange> (op1),
363 as_a <irange> (op2));
365 case RO_IFF:
366 return m_operator->op1_op2_relation (as_a <irange> (lhs),
367 as_a <frange> (op1),
368 as_a <frange> (op2));
370 case RO_FFF:
371 return m_operator->op1_op2_relation (as_a <frange> (lhs),
372 as_a <frange> (op1),
373 as_a <frange> (op2));
375 default:
376 return VREL_VARYING;
381 // Update the known bitmasks in R when applying the operation CODE to
382 // LH and RH.
384 void
385 update_known_bitmask (irange &r, tree_code code,
386 const irange &lh, const irange &rh)
388 if (r.undefined_p () || lh.undefined_p () || rh.undefined_p ()
389 || r.singleton_p ())
390 return;
392 widest_int widest_value, widest_mask;
393 tree type = r.type ();
394 signop sign = TYPE_SIGN (type);
395 int prec = TYPE_PRECISION (type);
396 irange_bitmask lh_bits = lh.get_bitmask ();
397 irange_bitmask rh_bits = rh.get_bitmask ();
399 switch (get_gimple_rhs_class (code))
401 case GIMPLE_UNARY_RHS:
402 bit_value_unop (code, sign, prec, &widest_value, &widest_mask,
403 TYPE_SIGN (lh.type ()),
404 TYPE_PRECISION (lh.type ()),
405 widest_int::from (lh_bits.value (), sign),
406 widest_int::from (lh_bits.mask (), sign));
407 break;
408 case GIMPLE_BINARY_RHS:
409 bit_value_binop (code, sign, prec, &widest_value, &widest_mask,
410 TYPE_SIGN (lh.type ()),
411 TYPE_PRECISION (lh.type ()),
412 widest_int::from (lh_bits.value (), sign),
413 widest_int::from (lh_bits.mask (), sign),
414 TYPE_SIGN (rh.type ()),
415 TYPE_PRECISION (rh.type ()),
416 widest_int::from (rh_bits.value (), sign),
417 widest_int::from (rh_bits.mask (), sign));
418 break;
419 default:
420 gcc_unreachable ();
423 wide_int mask = wide_int::from (widest_mask, prec, sign);
424 wide_int value = wide_int::from (widest_value, prec, sign);
425 // Bitmasks must have the unknown value bits cleared.
426 value &= ~mask;
427 irange_bitmask bm (value, mask);
428 r.update_bitmask (bm);
431 // Return the upper limit for a type.
433 static inline wide_int
434 max_limit (const_tree type)
436 return irange_val_max (type);
439 // Return the lower limit for a type.
441 static inline wide_int
442 min_limit (const_tree type)
444 return irange_val_min (type);
447 // Return false if shifting by OP is undefined behavior. Otherwise, return
448 // true and the range it is to be shifted by. This allows trimming out of
449 // undefined ranges, leaving only valid ranges if there are any.
451 static inline bool
452 get_shift_range (irange &r, tree type, const irange &op)
454 if (op.undefined_p ())
455 return false;
457 // Build valid range and intersect it with the shift range.
458 r = value_range (op.type (),
459 wi::shwi (0, TYPE_PRECISION (op.type ())),
460 wi::shwi (TYPE_PRECISION (type) - 1, TYPE_PRECISION (op.type ())));
461 r.intersect (op);
463 // If there are no valid ranges in the shift range, returned false.
464 if (r.undefined_p ())
465 return false;
466 return true;
469 // Default wide_int fold operation returns [MIN, MAX].
471 void
472 range_operator::wi_fold (irange &r, tree type,
473 const wide_int &lh_lb ATTRIBUTE_UNUSED,
474 const wide_int &lh_ub ATTRIBUTE_UNUSED,
475 const wide_int &rh_lb ATTRIBUTE_UNUSED,
476 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
478 gcc_checking_assert (r.supports_type_p (type));
479 r.set_varying (type);
482 // Call wi_fold when both op1 and op2 are equivalent. Further split small
483 // subranges into constants. This can provide better precision.
484 // For x + y, when x == y with a range of [0,4] instead of [0, 8] produce
485 // [0,0][2, 2][4,4][6, 6][8, 8]
486 // LIMIT is the maximum number of elements in range allowed before we
487 // do not process them individually.
489 void
490 range_operator::wi_fold_in_parts_equiv (irange &r, tree type,
491 const wide_int &lh_lb,
492 const wide_int &lh_ub,
493 unsigned limit) const
495 int_range_max tmp;
496 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
497 widest_int::from (lh_lb, TYPE_SIGN (type)));
498 // if there are 1 to 8 values in the LH range, split them up.
499 r.set_undefined ();
500 if (lh_range >= 0 && lh_range < limit)
502 for (unsigned x = 0; x <= lh_range; x++)
504 wide_int val = lh_lb + x;
505 wi_fold (tmp, type, val, val, val, val);
506 r.union_ (tmp);
509 // Otherwise just call wi_fold.
510 else
511 wi_fold (r, type, lh_lb, lh_ub, lh_lb, lh_ub);
514 // Call wi_fold, except further split small subranges into constants.
515 // This can provide better precision. For something 8 >> [0,1]
516 // Instead of [8, 16], we will produce [8,8][16,16]
518 void
519 range_operator::wi_fold_in_parts (irange &r, tree type,
520 const wide_int &lh_lb,
521 const wide_int &lh_ub,
522 const wide_int &rh_lb,
523 const wide_int &rh_ub) const
525 int_range_max tmp;
526 widest_int rh_range = wi::sub (widest_int::from (rh_ub, TYPE_SIGN (type)),
527 widest_int::from (rh_lb, TYPE_SIGN (type)));
528 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
529 widest_int::from (lh_lb, TYPE_SIGN (type)));
530 // If there are 2, 3, or 4 values in the RH range, do them separately.
531 // Call wi_fold_in_parts to check the RH side.
532 if (rh_range > 0 && rh_range < 4)
534 wi_fold_in_parts (r, type, lh_lb, lh_ub, rh_lb, rh_lb);
535 if (rh_range > 1)
537 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 1, rh_lb + 1);
538 r.union_ (tmp);
539 if (rh_range == 3)
541 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 2, rh_lb + 2);
542 r.union_ (tmp);
545 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_ub, rh_ub);
546 r.union_ (tmp);
548 // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
549 // The RH side has been checked, so no recursion needed.
550 else if (lh_range > 0 && lh_range < 4)
552 wi_fold (r, type, lh_lb, lh_lb, rh_lb, rh_ub);
553 if (lh_range > 1)
555 wi_fold (tmp, type, lh_lb + 1, lh_lb + 1, rh_lb, rh_ub);
556 r.union_ (tmp);
557 if (lh_range == 3)
559 wi_fold (tmp, type, lh_lb + 2, lh_lb + 2, rh_lb, rh_ub);
560 r.union_ (tmp);
563 wi_fold (tmp, type, lh_ub, lh_ub, rh_lb, rh_ub);
564 r.union_ (tmp);
566 // Otherwise just call wi_fold.
567 else
568 wi_fold (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
571 // The default for fold is to break all ranges into sub-ranges and
572 // invoke the wi_fold method on each sub-range pair.
574 bool
575 range_operator::fold_range (irange &r, tree type,
576 const irange &lh,
577 const irange &rh,
578 relation_trio trio) const
580 gcc_checking_assert (r.supports_type_p (type));
581 if (empty_range_varying (r, type, lh, rh))
582 return true;
584 relation_kind rel = trio.op1_op2 ();
585 unsigned num_lh = lh.num_pairs ();
586 unsigned num_rh = rh.num_pairs ();
588 // If op1 and op2 are equivalences, then we don't need a complete cross
589 // product, just pairs of matching elements.
590 if (relation_equiv_p (rel) && lh == rh)
592 int_range_max tmp;
593 r.set_undefined ();
594 for (unsigned x = 0; x < num_lh; ++x)
596 // If the number of subranges is too high, limit subrange creation.
597 unsigned limit = (r.num_pairs () > 32) ? 0 : 8;
598 wide_int lh_lb = lh.lower_bound (x);
599 wide_int lh_ub = lh.upper_bound (x);
600 wi_fold_in_parts_equiv (tmp, type, lh_lb, lh_ub, limit);
601 r.union_ (tmp);
602 if (r.varying_p ())
603 break;
605 op1_op2_relation_effect (r, type, lh, rh, rel);
606 update_bitmask (r, lh, rh);
607 return true;
610 // If both ranges are single pairs, fold directly into the result range.
611 // If the number of subranges grows too high, produce a summary result as the
612 // loop becomes exponential with little benefit. See PR 103821.
613 if ((num_lh == 1 && num_rh == 1) || num_lh * num_rh > 12)
615 wi_fold_in_parts (r, type, lh.lower_bound (), lh.upper_bound (),
616 rh.lower_bound (), rh.upper_bound ());
617 op1_op2_relation_effect (r, type, lh, rh, rel);
618 update_bitmask (r, lh, rh);
619 return true;
622 int_range_max tmp;
623 r.set_undefined ();
624 for (unsigned x = 0; x < num_lh; ++x)
625 for (unsigned y = 0; y < num_rh; ++y)
627 wide_int lh_lb = lh.lower_bound (x);
628 wide_int lh_ub = lh.upper_bound (x);
629 wide_int rh_lb = rh.lower_bound (y);
630 wide_int rh_ub = rh.upper_bound (y);
631 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb, rh_ub);
632 r.union_ (tmp);
633 if (r.varying_p ())
635 op1_op2_relation_effect (r, type, lh, rh, rel);
636 update_bitmask (r, lh, rh);
637 return true;
640 op1_op2_relation_effect (r, type, lh, rh, rel);
641 update_bitmask (r, lh, rh);
642 return true;
645 // The default for op1_range is to return false.
647 bool
648 range_operator::op1_range (irange &r ATTRIBUTE_UNUSED,
649 tree type ATTRIBUTE_UNUSED,
650 const irange &lhs ATTRIBUTE_UNUSED,
651 const irange &op2 ATTRIBUTE_UNUSED,
652 relation_trio) const
654 return false;
657 // The default for op2_range is to return false.
659 bool
660 range_operator::op2_range (irange &r ATTRIBUTE_UNUSED,
661 tree type ATTRIBUTE_UNUSED,
662 const irange &lhs ATTRIBUTE_UNUSED,
663 const irange &op1 ATTRIBUTE_UNUSED,
664 relation_trio) const
666 return false;
669 // The default relation routines return VREL_VARYING.
671 relation_kind
672 range_operator::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
673 const irange &op1 ATTRIBUTE_UNUSED,
674 const irange &op2 ATTRIBUTE_UNUSED,
675 relation_kind rel ATTRIBUTE_UNUSED) const
677 return VREL_VARYING;
680 relation_kind
681 range_operator::lhs_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
682 const irange &op1 ATTRIBUTE_UNUSED,
683 const irange &op2 ATTRIBUTE_UNUSED,
684 relation_kind rel ATTRIBUTE_UNUSED) const
686 return VREL_VARYING;
689 relation_kind
690 range_operator::op1_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
691 const irange &op1 ATTRIBUTE_UNUSED,
692 const irange &op2 ATTRIBUTE_UNUSED) const
694 return VREL_VARYING;
697 // Default is no relation affects the LHS.
699 bool
700 range_operator::op1_op2_relation_effect (irange &lhs_range ATTRIBUTE_UNUSED,
701 tree type ATTRIBUTE_UNUSED,
702 const irange &op1_range ATTRIBUTE_UNUSED,
703 const irange &op2_range ATTRIBUTE_UNUSED,
704 relation_kind rel ATTRIBUTE_UNUSED) const
706 return false;
709 // Apply any known bitmask updates based on this operator.
711 void
712 range_operator::update_bitmask (irange &, const irange &,
713 const irange &) const
717 // Create and return a range from a pair of wide-ints that are known
718 // to have overflowed (or underflowed).
720 static void
721 value_range_from_overflowed_bounds (irange &r, tree type,
722 const wide_int &wmin,
723 const wide_int &wmax)
725 const signop sgn = TYPE_SIGN (type);
726 const unsigned int prec = TYPE_PRECISION (type);
728 wide_int tmin = wide_int::from (wmin, prec, sgn);
729 wide_int tmax = wide_int::from (wmax, prec, sgn);
731 bool covers = false;
732 wide_int tem = tmin;
733 tmin = tmax + 1;
734 if (wi::cmp (tmin, tmax, sgn) < 0)
735 covers = true;
736 tmax = tem - 1;
737 if (wi::cmp (tmax, tem, sgn) > 0)
738 covers = true;
740 // If the anti-range would cover nothing, drop to varying.
741 // Likewise if the anti-range bounds are outside of the types
742 // values.
743 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
744 r.set_varying (type);
745 else
746 r.set (type, tmin, tmax, VR_ANTI_RANGE);
749 // Create and return a range from a pair of wide-ints. MIN_OVF and
750 // MAX_OVF describe any overflow that might have occurred while
751 // calculating WMIN and WMAX respectively.
753 static void
754 value_range_with_overflow (irange &r, tree type,
755 const wide_int &wmin, const wide_int &wmax,
756 wi::overflow_type min_ovf = wi::OVF_NONE,
757 wi::overflow_type max_ovf = wi::OVF_NONE)
759 const signop sgn = TYPE_SIGN (type);
760 const unsigned int prec = TYPE_PRECISION (type);
761 const bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
763 // For one bit precision if max != min, then the range covers all
764 // values.
765 if (prec == 1 && wi::ne_p (wmax, wmin))
767 r.set_varying (type);
768 return;
771 if (overflow_wraps)
773 // If overflow wraps, truncate the values and adjust the range,
774 // kind, and bounds appropriately.
775 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
777 wide_int tmin = wide_int::from (wmin, prec, sgn);
778 wide_int tmax = wide_int::from (wmax, prec, sgn);
779 // If the limits are swapped, we wrapped around and cover
780 // the entire range.
781 if (wi::gt_p (tmin, tmax, sgn))
782 r.set_varying (type);
783 else
784 // No overflow or both overflow or underflow. The range
785 // kind stays normal.
786 r.set (type, tmin, tmax);
787 return;
790 if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
791 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
792 value_range_from_overflowed_bounds (r, type, wmin, wmax);
793 else
794 // Other underflow and/or overflow, drop to VR_VARYING.
795 r.set_varying (type);
797 else
799 // If both bounds either underflowed or overflowed, then the result
800 // is undefined.
801 if ((min_ovf == wi::OVF_OVERFLOW && max_ovf == wi::OVF_OVERFLOW)
802 || (min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_UNDERFLOW))
804 r.set_undefined ();
805 return;
808 // If overflow does not wrap, saturate to [MIN, MAX].
809 wide_int new_lb, new_ub;
810 if (min_ovf == wi::OVF_UNDERFLOW)
811 new_lb = wi::min_value (prec, sgn);
812 else if (min_ovf == wi::OVF_OVERFLOW)
813 new_lb = wi::max_value (prec, sgn);
814 else
815 new_lb = wmin;
817 if (max_ovf == wi::OVF_UNDERFLOW)
818 new_ub = wi::min_value (prec, sgn);
819 else if (max_ovf == wi::OVF_OVERFLOW)
820 new_ub = wi::max_value (prec, sgn);
821 else
822 new_ub = wmax;
824 r.set (type, new_lb, new_ub);
828 // Create and return a range from a pair of wide-ints. Canonicalize
829 // the case where the bounds are swapped. In which case, we transform
830 // [10,5] into [MIN,5][10,MAX].
832 static inline void
833 create_possibly_reversed_range (irange &r, tree type,
834 const wide_int &new_lb, const wide_int &new_ub)
836 signop s = TYPE_SIGN (type);
837 // If the bounds are swapped, treat the result as if an overflow occurred.
838 if (wi::gt_p (new_lb, new_ub, s))
839 value_range_from_overflowed_bounds (r, type, new_lb, new_ub);
840 else
841 // Otherwise it's just a normal range.
842 r.set (type, new_lb, new_ub);
845 // Return the summary information about boolean range LHS. If EMPTY/FULL,
846 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
848 bool_range_state
849 get_bool_state (vrange &r, const vrange &lhs, tree val_type)
851 // If there is no result, then this is unexecutable.
852 if (lhs.undefined_p ())
854 r.set_undefined ();
855 return BRS_EMPTY;
858 if (lhs.zero_p ())
859 return BRS_FALSE;
861 // For TRUE, we can't just test for [1,1] because Ada can have
862 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
863 if (lhs.contains_p (build_zero_cst (lhs.type ())))
865 r.set_varying (val_type);
866 return BRS_FULL;
869 return BRS_TRUE;
872 // ------------------------------------------------------------------------
874 void
875 operator_equal::update_bitmask (irange &r, const irange &lh,
876 const irange &rh) const
878 update_known_bitmask (r, EQ_EXPR, lh, rh);
881 // Check if the LHS range indicates a relation between OP1 and OP2.
883 relation_kind
884 operator_equal::op1_op2_relation (const irange &lhs, const irange &,
885 const irange &) const
887 if (lhs.undefined_p ())
888 return VREL_UNDEFINED;
890 // FALSE = op1 == op2 indicates NE_EXPR.
891 if (lhs.zero_p ())
892 return VREL_NE;
894 // TRUE = op1 == op2 indicates EQ_EXPR.
895 if (lhs.undefined_p () || !contains_zero_p (lhs))
896 return VREL_EQ;
897 return VREL_VARYING;
900 bool
901 operator_equal::fold_range (irange &r, tree type,
902 const irange &op1,
903 const irange &op2,
904 relation_trio rel) const
906 if (relop_early_resolve (r, type, op1, op2, rel, VREL_EQ))
907 return true;
909 // We can be sure the values are always equal or not if both ranges
910 // consist of a single value, and then compare them.
911 if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
912 && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
914 if (wi::eq_p (op1.lower_bound (), op2.upper_bound()))
915 r = range_true (type);
916 else
917 r = range_false (type);
919 else
921 // If ranges do not intersect, we know the range is not equal,
922 // otherwise we don't know anything for sure.
923 int_range_max tmp = op1;
924 tmp.intersect (op2);
925 if (tmp.undefined_p ())
926 r = range_false (type);
927 else
928 r = range_true_and_false (type);
930 return true;
933 bool
934 operator_equal::op1_range (irange &r, tree type,
935 const irange &lhs,
936 const irange &op2,
937 relation_trio) const
939 switch (get_bool_state (r, lhs, type))
941 case BRS_TRUE:
942 // If it's true, the result is the same as OP2.
943 r = op2;
944 break;
946 case BRS_FALSE:
947 // If the result is false, the only time we know anything is
948 // if OP2 is a constant.
949 if (!op2.undefined_p ()
950 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
952 r = op2;
953 r.invert ();
955 else
956 r.set_varying (type);
957 break;
959 default:
960 break;
962 return true;
965 bool
966 operator_equal::op2_range (irange &r, tree type,
967 const irange &lhs,
968 const irange &op1,
969 relation_trio rel) const
971 return operator_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
974 // -------------------------------------------------------------------------
976 void
977 operator_not_equal::update_bitmask (irange &r, const irange &lh,
978 const irange &rh) const
980 update_known_bitmask (r, NE_EXPR, lh, rh);
983 // Check if the LHS range indicates a relation between OP1 and OP2.
985 relation_kind
986 operator_not_equal::op1_op2_relation (const irange &lhs, const irange &,
987 const irange &) const
989 if (lhs.undefined_p ())
990 return VREL_UNDEFINED;
992 // FALSE = op1 != op2 indicates EQ_EXPR.
993 if (lhs.zero_p ())
994 return VREL_EQ;
996 // TRUE = op1 != op2 indicates NE_EXPR.
997 if (lhs.undefined_p () || !contains_zero_p (lhs))
998 return VREL_NE;
999 return VREL_VARYING;
1002 bool
1003 operator_not_equal::fold_range (irange &r, tree type,
1004 const irange &op1,
1005 const irange &op2,
1006 relation_trio rel) const
1008 if (relop_early_resolve (r, type, op1, op2, rel, VREL_NE))
1009 return true;
1011 // We can be sure the values are always equal or not if both ranges
1012 // consist of a single value, and then compare them.
1013 if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
1014 && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
1016 if (wi::ne_p (op1.lower_bound (), op2.upper_bound()))
1017 r = range_true (type);
1018 else
1019 r = range_false (type);
1021 else
1023 // If ranges do not intersect, we know the range is not equal,
1024 // otherwise we don't know anything for sure.
1025 int_range_max tmp = op1;
1026 tmp.intersect (op2);
1027 if (tmp.undefined_p ())
1028 r = range_true (type);
1029 else
1030 r = range_true_and_false (type);
1032 return true;
1035 bool
1036 operator_not_equal::op1_range (irange &r, tree type,
1037 const irange &lhs,
1038 const irange &op2,
1039 relation_trio) const
1041 switch (get_bool_state (r, lhs, type))
1043 case BRS_TRUE:
1044 // If the result is true, the only time we know anything is if
1045 // OP2 is a constant.
1046 if (!op2.undefined_p ()
1047 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
1049 r = op2;
1050 r.invert ();
1052 else
1053 r.set_varying (type);
1054 break;
1056 case BRS_FALSE:
1057 // If it's false, the result is the same as OP2.
1058 r = op2;
1059 break;
1061 default:
1062 break;
1064 return true;
1068 bool
1069 operator_not_equal::op2_range (irange &r, tree type,
1070 const irange &lhs,
1071 const irange &op1,
1072 relation_trio rel) const
1074 return operator_not_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1077 // (X < VAL) produces the range of [MIN, VAL - 1].
1079 static void
1080 build_lt (irange &r, tree type, const wide_int &val)
1082 wi::overflow_type ov;
1083 wide_int lim;
1084 signop sgn = TYPE_SIGN (type);
1086 // Signed 1 bit cannot represent 1 for subtraction.
1087 if (sgn == SIGNED)
1088 lim = wi::add (val, -1, sgn, &ov);
1089 else
1090 lim = wi::sub (val, 1, sgn, &ov);
1092 // If val - 1 underflows, check if X < MIN, which is an empty range.
1093 if (ov)
1094 r.set_undefined ();
1095 else
1096 r = int_range<1> (type, min_limit (type), lim);
1099 // (X <= VAL) produces the range of [MIN, VAL].
1101 static void
1102 build_le (irange &r, tree type, const wide_int &val)
1104 r = int_range<1> (type, min_limit (type), val);
1107 // (X > VAL) produces the range of [VAL + 1, MAX].
1109 static void
1110 build_gt (irange &r, tree type, const wide_int &val)
1112 wi::overflow_type ov;
1113 wide_int lim;
1114 signop sgn = TYPE_SIGN (type);
1116 // Signed 1 bit cannot represent 1 for addition.
1117 if (sgn == SIGNED)
1118 lim = wi::sub (val, -1, sgn, &ov);
1119 else
1120 lim = wi::add (val, 1, sgn, &ov);
1121 // If val + 1 overflows, check is for X > MAX, which is an empty range.
1122 if (ov)
1123 r.set_undefined ();
1124 else
1125 r = int_range<1> (type, lim, max_limit (type));
1128 // (X >= val) produces the range of [VAL, MAX].
1130 static void
1131 build_ge (irange &r, tree type, const wide_int &val)
1133 r = int_range<1> (type, val, max_limit (type));
1137 void
1138 operator_lt::update_bitmask (irange &r, const irange &lh,
1139 const irange &rh) const
1141 update_known_bitmask (r, LT_EXPR, lh, rh);
1144 // Check if the LHS range indicates a relation between OP1 and OP2.
1146 relation_kind
1147 operator_lt::op1_op2_relation (const irange &lhs, const irange &,
1148 const irange &) const
1150 if (lhs.undefined_p ())
1151 return VREL_UNDEFINED;
1153 // FALSE = op1 < op2 indicates GE_EXPR.
1154 if (lhs.zero_p ())
1155 return VREL_GE;
1157 // TRUE = op1 < op2 indicates LT_EXPR.
1158 if (lhs.undefined_p () || !contains_zero_p (lhs))
1159 return VREL_LT;
1160 return VREL_VARYING;
1163 bool
1164 operator_lt::fold_range (irange &r, tree type,
1165 const irange &op1,
1166 const irange &op2,
1167 relation_trio rel) const
1169 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LT))
1170 return true;
1172 signop sign = TYPE_SIGN (op1.type ());
1173 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1175 if (wi::lt_p (op1.upper_bound (), op2.lower_bound (), sign))
1176 r = range_true (type);
1177 else if (!wi::lt_p (op1.lower_bound (), op2.upper_bound (), sign))
1178 r = range_false (type);
1179 // Use nonzero bits to determine if < 0 is false.
1180 else if (op2.zero_p () && !wi::neg_p (op1.get_nonzero_bits (), sign))
1181 r = range_false (type);
1182 else
1183 r = range_true_and_false (type);
1184 return true;
1187 bool
1188 operator_lt::op1_range (irange &r, tree type,
1189 const irange &lhs,
1190 const irange &op2,
1191 relation_trio) const
1193 if (op2.undefined_p ())
1194 return false;
1196 switch (get_bool_state (r, lhs, type))
1198 case BRS_TRUE:
1199 build_lt (r, type, op2.upper_bound ());
1200 break;
1202 case BRS_FALSE:
1203 build_ge (r, type, op2.lower_bound ());
1204 break;
1206 default:
1207 break;
1209 return true;
1212 bool
1213 operator_lt::op2_range (irange &r, tree type,
1214 const irange &lhs,
1215 const irange &op1,
1216 relation_trio) const
1218 if (op1.undefined_p ())
1219 return false;
1221 switch (get_bool_state (r, lhs, type))
1223 case BRS_TRUE:
1224 build_gt (r, type, op1.lower_bound ());
1225 break;
1227 case BRS_FALSE:
1228 build_le (r, type, op1.upper_bound ());
1229 break;
1231 default:
1232 break;
1234 return true;
// Refine the known bitmask of R, the result of LH <= RH.

void
operator_le::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, LE_EXPR, lh, rh);
}
1245 // Check if the LHS range indicates a relation between OP1 and OP2.
1247 relation_kind
1248 operator_le::op1_op2_relation (const irange &lhs, const irange &,
1249 const irange &) const
1251 if (lhs.undefined_p ())
1252 return VREL_UNDEFINED;
1254 // FALSE = op1 <= op2 indicates GT_EXPR.
1255 if (lhs.zero_p ())
1256 return VREL_GT;
1258 // TRUE = op1 <= op2 indicates LE_EXPR.
1259 if (lhs.undefined_p () || !contains_zero_p (lhs))
1260 return VREL_LE;
1261 return VREL_VARYING;
1264 bool
1265 operator_le::fold_range (irange &r, tree type,
1266 const irange &op1,
1267 const irange &op2,
1268 relation_trio rel) const
1270 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LE))
1271 return true;
1273 signop sign = TYPE_SIGN (op1.type ());
1274 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1276 if (wi::le_p (op1.upper_bound (), op2.lower_bound (), sign))
1277 r = range_true (type);
1278 else if (!wi::le_p (op1.lower_bound (), op2.upper_bound (), sign))
1279 r = range_false (type);
1280 else
1281 r = range_true_and_false (type);
1282 return true;
1285 bool
1286 operator_le::op1_range (irange &r, tree type,
1287 const irange &lhs,
1288 const irange &op2,
1289 relation_trio) const
1291 if (op2.undefined_p ())
1292 return false;
1294 switch (get_bool_state (r, lhs, type))
1296 case BRS_TRUE:
1297 build_le (r, type, op2.upper_bound ());
1298 break;
1300 case BRS_FALSE:
1301 build_gt (r, type, op2.lower_bound ());
1302 break;
1304 default:
1305 break;
1307 return true;
1310 bool
1311 operator_le::op2_range (irange &r, tree type,
1312 const irange &lhs,
1313 const irange &op1,
1314 relation_trio) const
1316 if (op1.undefined_p ())
1317 return false;
1319 switch (get_bool_state (r, lhs, type))
1321 case BRS_TRUE:
1322 build_ge (r, type, op1.lower_bound ());
1323 break;
1325 case BRS_FALSE:
1326 build_lt (r, type, op1.upper_bound ());
1327 break;
1329 default:
1330 break;
1332 return true;
// Refine the known bitmask of R, the result of LH > RH.

void
operator_gt::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, GT_EXPR, lh, rh);
}
1343 // Check if the LHS range indicates a relation between OP1 and OP2.
1345 relation_kind
1346 operator_gt::op1_op2_relation (const irange &lhs, const irange &,
1347 const irange &) const
1349 if (lhs.undefined_p ())
1350 return VREL_UNDEFINED;
1352 // FALSE = op1 > op2 indicates LE_EXPR.
1353 if (lhs.zero_p ())
1354 return VREL_LE;
1356 // TRUE = op1 > op2 indicates GT_EXPR.
1357 if (!contains_zero_p (lhs))
1358 return VREL_GT;
1359 return VREL_VARYING;
1362 bool
1363 operator_gt::fold_range (irange &r, tree type,
1364 const irange &op1, const irange &op2,
1365 relation_trio rel) const
1367 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GT))
1368 return true;
1370 signop sign = TYPE_SIGN (op1.type ());
1371 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1373 if (wi::gt_p (op1.lower_bound (), op2.upper_bound (), sign))
1374 r = range_true (type);
1375 else if (!wi::gt_p (op1.upper_bound (), op2.lower_bound (), sign))
1376 r = range_false (type);
1377 else
1378 r = range_true_and_false (type);
1379 return true;
1382 bool
1383 operator_gt::op1_range (irange &r, tree type,
1384 const irange &lhs, const irange &op2,
1385 relation_trio) const
1387 if (op2.undefined_p ())
1388 return false;
1390 switch (get_bool_state (r, lhs, type))
1392 case BRS_TRUE:
1393 build_gt (r, type, op2.lower_bound ());
1394 break;
1396 case BRS_FALSE:
1397 build_le (r, type, op2.upper_bound ());
1398 break;
1400 default:
1401 break;
1403 return true;
1406 bool
1407 operator_gt::op2_range (irange &r, tree type,
1408 const irange &lhs,
1409 const irange &op1,
1410 relation_trio) const
1412 if (op1.undefined_p ())
1413 return false;
1415 switch (get_bool_state (r, lhs, type))
1417 case BRS_TRUE:
1418 build_lt (r, type, op1.upper_bound ());
1419 break;
1421 case BRS_FALSE:
1422 build_ge (r, type, op1.lower_bound ());
1423 break;
1425 default:
1426 break;
1428 return true;
// Refine the known bitmask of R, the result of LH >= RH.

void
operator_ge::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, GE_EXPR, lh, rh);
}
1439 // Check if the LHS range indicates a relation between OP1 and OP2.
1441 relation_kind
1442 operator_ge::op1_op2_relation (const irange &lhs, const irange &,
1443 const irange &) const
1445 if (lhs.undefined_p ())
1446 return VREL_UNDEFINED;
1448 // FALSE = op1 >= op2 indicates LT_EXPR.
1449 if (lhs.zero_p ())
1450 return VREL_LT;
1452 // TRUE = op1 >= op2 indicates GE_EXPR.
1453 if (!contains_zero_p (lhs))
1454 return VREL_GE;
1455 return VREL_VARYING;
1458 bool
1459 operator_ge::fold_range (irange &r, tree type,
1460 const irange &op1,
1461 const irange &op2,
1462 relation_trio rel) const
1464 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GE))
1465 return true;
1467 signop sign = TYPE_SIGN (op1.type ());
1468 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1470 if (wi::ge_p (op1.lower_bound (), op2.upper_bound (), sign))
1471 r = range_true (type);
1472 else if (!wi::ge_p (op1.upper_bound (), op2.lower_bound (), sign))
1473 r = range_false (type);
1474 else
1475 r = range_true_and_false (type);
1476 return true;
1479 bool
1480 operator_ge::op1_range (irange &r, tree type,
1481 const irange &lhs,
1482 const irange &op2,
1483 relation_trio) const
1485 if (op2.undefined_p ())
1486 return false;
1488 switch (get_bool_state (r, lhs, type))
1490 case BRS_TRUE:
1491 build_ge (r, type, op2.lower_bound ());
1492 break;
1494 case BRS_FALSE:
1495 build_lt (r, type, op2.upper_bound ());
1496 break;
1498 default:
1499 break;
1501 return true;
1504 bool
1505 operator_ge::op2_range (irange &r, tree type,
1506 const irange &lhs,
1507 const irange &op1,
1508 relation_trio) const
1510 if (op1.undefined_p ())
1511 return false;
1513 switch (get_bool_state (r, lhs, type))
1515 case BRS_TRUE:
1516 build_le (r, type, op1.upper_bound ());
1517 break;
1519 case BRS_FALSE:
1520 build_gt (r, type, op1.lower_bound ());
1521 break;
1523 default:
1524 break;
1526 return true;
// Refine the known bitmask of R, the result of LH + RH.

void
operator_plus::update_bitmask (irange &r, const irange &lh,
			       const irange &rh) const
{
  update_known_bitmask (r, PLUS_EXPR, lh, rh);
}
// Check to see if the range of OP2 indicates anything about the relation
// between LHS and OP1.

relation_kind
operator_plus::lhs_op1_relation (const irange &lhs,
				 const irange &op1,
				 const irange &op2,
				 relation_kind) const
{
  if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
    return VREL_VARYING;

  tree type = lhs.type ();
  unsigned prec = TYPE_PRECISION (type);
  wi::overflow_type ovf1, ovf2;
  signop sign = TYPE_SIGN (type);

  // LHS = OP1 + 0 indicates LHS == OP1.
  if (op2.zero_p ())
    return VREL_EQ;

  // For wrapping types, probe whether the addition overflows at either
  // end of the operand ranges; for non-wrapping types no overflow can
  // be assumed.
  if (TYPE_OVERFLOW_WRAPS (type))
    {
      wi::add (op1.lower_bound (), op2.lower_bound (), sign, &ovf1);
      wi::add (op1.upper_bound (), op2.upper_bound (), sign, &ovf2);
    }
  else
    ovf1 = ovf2 = wi::OVF_NONE;

  // Never wrapping additions.
  if (!ovf1 && !ovf2)
    {
      // Positive op2 means lhs > op1.
      if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_GT;
      if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_GE;

      // Negative op2 means lhs < op1.
      if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_LT;
      if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_LE;
    }
  // Always wrapping additions: the wrap inverts the direction of the
  // relation relative to the non-wrapping case.
  else if (ovf1 && ovf1 == ovf2)
    {
      // Positive op2 means lhs < op1.
      if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_LT;
      if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_LE;

      // Negative op2 means lhs > op1.
      if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_GT;
      if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_GE;
    }

  // If op2 does not contain 0, then LHS and OP1 can never be equal.
  if (!range_includes_zero_p (&op2))
    return VREL_NE;

  return VREL_VARYING;
}
// PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
// operands.

relation_kind
operator_plus::lhs_op2_relation (const irange &lhs, const irange &op1,
				 const irange &op2, relation_kind rel) const
{
  return lhs_op1_relation (lhs, op2, op1, rel);
}
1614 void
1615 operator_plus::wi_fold (irange &r, tree type,
1616 const wide_int &lh_lb, const wide_int &lh_ub,
1617 const wide_int &rh_lb, const wide_int &rh_ub) const
1619 wi::overflow_type ov_lb, ov_ub;
1620 signop s = TYPE_SIGN (type);
1621 wide_int new_lb = wi::add (lh_lb, rh_lb, s, &ov_lb);
1622 wide_int new_ub = wi::add (lh_ub, rh_ub, s, &ov_ub);
1623 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
// Given addition or subtraction, determine the possible NORMAL ranges and
// OVERFLOW ranges given an OFFSET range.  ADD_P is true for addition.
// Return the relation that exists between the LHS and OP1 in order for the
// NORMAL range to apply.
// A return value of VREL_VARYING means no ranges were applicable.
// NOTE: the sole caller (adjust_op1_for_overflow) only invokes this for
// unsigned wrapping types, which is why the subtraction below uses UNSIGNED.

static relation_kind
plus_minus_ranges (irange &r_ov, irange &r_normal, const irange &offset,
		   bool add_p)
{
  relation_kind kind = VREL_VARYING;
  // For now, only deal with constant adds.  This could be extended to ranges
  // when someone is so motivated.
  if (!offset.singleton_p () || offset.zero_p ())
    return kind;

  // Always work with a positive offset.  ie a+ -2 -> a-2 and a- -2 > a+2
  wide_int off = offset.lower_bound ();
  if (wi::neg_p (off, SIGNED))
    {
      add_p = !add_p;
      off = wi::neg (off);
    }

  wi::overflow_type ov;
  tree type = offset.type ();
  unsigned prec = TYPE_PRECISION (type);
  wide_int ub;
  wide_int lb;
  // Calculate the normal range and relation for the operation.
  if (add_p)
    {
      // [ 0 , INF - OFF]
      lb = wi::zero (prec);
      ub = wi::sub (irange_val_max (type), off, UNSIGNED, &ov);
      kind = VREL_GT;
    }
  else
    {
      // [ OFF, INF ]
      lb = off;
      ub = irange_val_max (type);
      kind = VREL_LT;
    }
  // The overflow range is simply the complement of the normal range.
  int_range<2> normal_range (type, lb, ub);
  int_range<2> ov_range (type, lb, ub, VR_ANTI_RANGE);

  r_ov = ov_range;
  r_normal = normal_range;
  return kind;
}
// Once op1 has been calculated by operator_plus or operator_minus, check
// to see if the relation passed causes any part of the calculation to
// be not possible.  ie
// a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
// and that further refines a_2 to [0, 0].
// R is the value of op1, OP2 is the offset being added/subtracted, REL is the
// relation between LHS relation OP1 and ADD_P is true for PLUS, false for
// MINUS.  If any adjustment can be made, R will reflect it.

static void
adjust_op1_for_overflow (irange &r, const irange &op2, relation_kind rel,
			 bool add_p)
{
  if (r.undefined_p ())
    return;
  tree type = r.type ();
  // Check for unsigned overflow and calculate the overflow part.
  // Signed or non-wrapping types are left untouched.
  signop s = TYPE_SIGN (type);
  if (!TYPE_OVERFLOW_WRAPS (type) || s == SIGNED)
    return;

  // Only work with <, <=, >, >= relations.
  if (!relation_lt_le_gt_ge_p (rel))
    return;

  // Get the ranges for this offset.
  int_range_max normal, overflow;
  relation_kind k = plus_minus_ranges (overflow, normal, op2, add_p);

  // VREL_VARYING means there are no adjustments.
  if (k == VREL_VARYING)
    return;

  // If the relations match use the normal range, otherwise use overflow
  // range.
  if (relation_intersect (k, rel) == k)
    r.intersect (normal);
  else
    r.intersect (overflow);
  return;
}
1719 bool
1720 operator_plus::op1_range (irange &r, tree type,
1721 const irange &lhs,
1722 const irange &op2,
1723 relation_trio trio) const
1725 if (lhs.undefined_p ())
1726 return false;
1727 // Start with the default operation.
1728 range_op_handler minus (MINUS_EXPR);
1729 if (!minus)
1730 return false;
1731 bool res = minus.fold_range (r, type, lhs, op2);
1732 relation_kind rel = trio.lhs_op1 ();
1733 // Check for a relation refinement.
1734 if (res)
1735 adjust_op1_for_overflow (r, op2, rel, true /* PLUS_EXPR */);
1736 return res;
// PLUS is symmetrical, so solving for OP2 is op1_range with the
// operands of the relation swapped.

bool
operator_plus::op2_range (irange &r, tree type,
			  const irange &lhs,
			  const irange &op1,
			  relation_trio rel) const
{
  return op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
}
// Widening addition where the first operand is sign-extended to twice
// its precision before the add.

class operator_widen_plus_signed : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
} op_widen_plus_signed;
1758 void
1759 operator_widen_plus_signed::wi_fold (irange &r, tree type,
1760 const wide_int &lh_lb,
1761 const wide_int &lh_ub,
1762 const wide_int &rh_lb,
1763 const wide_int &rh_ub) const
1765 wi::overflow_type ov_lb, ov_ub;
1766 signop s = TYPE_SIGN (type);
1768 wide_int lh_wlb
1769 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
1770 wide_int lh_wub
1771 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
1772 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1773 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1775 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1776 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1778 r = int_range<2> (type, new_lb, new_ub);
// Widening addition where the first operand is zero-extended to twice
// its precision before the add.

class operator_widen_plus_unsigned : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
} op_widen_plus_unsigned;
1791 void
1792 operator_widen_plus_unsigned::wi_fold (irange &r, tree type,
1793 const wide_int &lh_lb,
1794 const wide_int &lh_ub,
1795 const wide_int &rh_lb,
1796 const wide_int &rh_ub) const
1798 wi::overflow_type ov_lb, ov_ub;
1799 signop s = TYPE_SIGN (type);
1801 wide_int lh_wlb
1802 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
1803 wide_int lh_wub
1804 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
1805 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1806 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1808 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1809 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1811 r = int_range<2> (type, new_lb, new_ub);
// Refine the known bitmask of R, the result of LH - RH.

void
operator_minus::update_bitmask (irange &r, const irange &lh,
				const irange &rh) const
{
  update_known_bitmask (r, MINUS_EXPR, lh, rh);
}
1821 void
1822 operator_minus::wi_fold (irange &r, tree type,
1823 const wide_int &lh_lb, const wide_int &lh_ub,
1824 const wide_int &rh_lb, const wide_int &rh_ub) const
1826 wi::overflow_type ov_lb, ov_ub;
1827 signop s = TYPE_SIGN (type);
1828 wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb);
1829 wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub);
1830 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
1834 // Return the relation between LHS and OP1 based on the relation between
1835 // OP1 and OP2.
1837 relation_kind
1838 operator_minus::lhs_op1_relation (const irange &, const irange &op1,
1839 const irange &, relation_kind rel) const
1841 if (!op1.undefined_p () && TYPE_SIGN (op1.type ()) == UNSIGNED)
1842 switch (rel)
1844 case VREL_GT:
1845 case VREL_GE:
1846 return VREL_LE;
1847 default:
1848 break;
1850 return VREL_VARYING;
// Check to see if the relation REL between OP1 and OP2 has any effect on the
// LHS of the expression.  If so, apply it to LHS_RANGE.  This is a helper
// function for both MINUS_EXPR and POINTER_DIFF_EXPR.
// Returns true iff LHS_RANGE was narrowed.

bool
minus_op1_op2_relation_effect (irange &lhs_range, tree type,
			       const irange &op1_range ATTRIBUTE_UNUSED,
			       const irange &op2_range ATTRIBUTE_UNUSED,
			       relation_kind rel)
{
  if (rel == VREL_VARYING)
    return false;

  int_range<2> rel_range;
  unsigned prec = TYPE_PRECISION (type);
  signop sgn = TYPE_SIGN (type);

  // == and != produce [0,0] and ~[0,0] regardless of wrapping.
  if (rel == VREL_EQ)
    rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec));
  else if (rel == VREL_NE)
    rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
			      VR_ANTI_RANGE);
  else if (TYPE_OVERFLOW_WRAPS (type))
    {
      switch (rel)
	{
	  // For wrapping signed values and unsigned, if op1 > op2 or
	  // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
	  case VREL_GT:
	  case VREL_LT:
	    rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
				      VR_ANTI_RANGE);
	    break;
	  default:
	    return false;
	}
    }
  else
    {
      // Without wrapping, the sign of the difference follows the
      // ordering relation exactly.
      switch (rel)
	{
	  // op1 > op2, op1 - op2 can be restricted to [1, +INF]
	  case VREL_GT:
	    rel_range = int_range<2> (type, wi::one (prec),
				      wi::max_value (prec, sgn));
	    break;
	  // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
	  case VREL_GE:
	    rel_range = int_range<2> (type, wi::zero (prec),
				      wi::max_value (prec, sgn));
	    break;
	  // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
	  case VREL_LT:
	    rel_range = int_range<2> (type, wi::min_value (prec, sgn),
				      wi::minus_one (prec));
	    break;
	  // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
	  case VREL_LE:
	    rel_range = int_range<2> (type, wi::min_value (prec, sgn),
				      wi::zero (prec));
	    break;
	  default:
	    return false;
	}
    }
  lhs_range.intersect (rel_range);
  return true;
}
// Apply the OP1/OP2 relation REL to LHS_RANGE via the shared
// MINUS_EXPR/POINTER_DIFF_EXPR helper.

bool
operator_minus::op1_op2_relation_effect (irange &lhs_range, tree type,
					 const irange &op1_range,
					 const irange &op2_range,
					 relation_kind rel) const
{
  return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
					rel);
}
1933 bool
1934 operator_minus::op1_range (irange &r, tree type,
1935 const irange &lhs,
1936 const irange &op2,
1937 relation_trio trio) const
1939 if (lhs.undefined_p ())
1940 return false;
1941 // Start with the default operation.
1942 range_op_handler minus (PLUS_EXPR);
1943 if (!minus)
1944 return false;
1945 bool res = minus.fold_range (r, type, lhs, op2);
1946 relation_kind rel = trio.lhs_op1 ();
1947 if (res)
1948 adjust_op1_for_overflow (r, op2, rel, false /* PLUS_EXPR */);
1949 return res;
// OP2 = OP1 - LHS, which is this same operator folded with the
// operands rearranged.

bool
operator_minus::op2_range (irange &r, tree type,
			   const irange &lhs,
			   const irange &op1,
			   relation_trio) const
{
  if (lhs.undefined_p ())
    return false;
  return fold_range (r, type, op1, lhs);
}
// Refine the known bitmask of R, the result of MIN (LH, RH).

void
operator_min::update_bitmask (irange &r, const irange &lh,
			      const irange &rh) const
{
  update_known_bitmask (r, MIN_EXPR, lh, rh);
}
1971 void
1972 operator_min::wi_fold (irange &r, tree type,
1973 const wide_int &lh_lb, const wide_int &lh_ub,
1974 const wide_int &rh_lb, const wide_int &rh_ub) const
1976 signop s = TYPE_SIGN (type);
1977 wide_int new_lb = wi::min (lh_lb, rh_lb, s);
1978 wide_int new_ub = wi::min (lh_ub, rh_ub, s);
1979 value_range_with_overflow (r, type, new_lb, new_ub);
// Refine the known bitmask of R, the result of MAX (LH, RH).

void
operator_max::update_bitmask (irange &r, const irange &lh,
			      const irange &rh) const
{
  update_known_bitmask (r, MAX_EXPR, lh, rh);
}
1990 void
1991 operator_max::wi_fold (irange &r, tree type,
1992 const wide_int &lh_lb, const wide_int &lh_ub,
1993 const wide_int &rh_lb, const wide_int &rh_ub) const
1995 signop s = TYPE_SIGN (type);
1996 wide_int new_lb = wi::max (lh_lb, rh_lb, s);
1997 wide_int new_ub = wi::max (lh_ub, rh_ub, s);
1998 value_range_with_overflow (r, type, new_lb, new_ub);
// Calculate the cross product of two sets of ranges and return it.
//
// Multiplications, divisions and shifts are a bit tricky to handle,
// depending on the mix of signs we have in the two ranges, we need to
// operate on different values to get the minimum and maximum values
// for the new range.  One approach is to figure out all the
// variations of range combinations and do the operations.
//
// However, this involves several calls to compare_values and it is
// pretty convoluted.  It's simpler to do the 4 operations (MIN0 OP
// MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP MAX1) and then
// figure the smallest and largest values to form the new range.

void
cross_product_operator::wi_cross_product (irange &r, tree type,
					  const wide_int &lh_lb,
					  const wide_int &lh_ub,
					  const wide_int &rh_lb,
					  const wide_int &rh_ub) const
{
  wide_int cp1, cp2, cp3, cp4;
  // Default to varying.
  r.set_varying (type);

  // Compute the 4 cross operations, bailing if we get an overflow we
  // can't handle.  Equal bounds let us reuse an already-computed
  // product instead of recomputing it.
  if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp3 = cp1;
  else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
    return;
  if (wi::eq_p (rh_lb, rh_ub))
    cp2 = cp1;
  else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp4 = cp2;
  else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
    return;

  // Order pairs.
  signop sign = TYPE_SIGN (type);
  if (wi::gt_p (cp1, cp2, sign))
    std::swap (cp1, cp2);
  if (wi::gt_p (cp3, cp4, sign))
    std::swap (cp3, cp4);

  // Choose min and max from the ordered pairs.
  wide_int res_lb = wi::min (cp1, cp3, sign);
  wide_int res_ub = wi::max (cp2, cp4, sign);
  value_range_with_overflow (r, type, res_lb, res_ub);
}
// Refine the known bitmask of R, the result of LH * RH.

void
operator_mult::update_bitmask (irange &r, const irange &lh,
			       const irange &rh) const
{
  update_known_bitmask (r, MULT_EXPR, lh, rh);
}
2064 bool
2065 operator_mult::op1_range (irange &r, tree type,
2066 const irange &lhs, const irange &op2,
2067 relation_trio) const
2069 if (lhs.undefined_p ())
2070 return false;
2072 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
2073 // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
2074 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
2075 if (TYPE_OVERFLOW_WRAPS (type))
2076 return false;
2078 wide_int offset;
2079 if (op2.singleton_p (offset) && offset != 0)
2080 return range_op_handler (TRUNC_DIV_EXPR).fold_range (r, type, lhs, op2);
2081 return false;
// Multiplication is commutative, so solving for OP2 reuses op1_range
// with the relation operands swapped.

bool
operator_mult::op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio rel) const
{
  return operator_mult::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
}
2092 bool
2093 operator_mult::wi_op_overflows (wide_int &res, tree type,
2094 const wide_int &w0, const wide_int &w1) const
2096 wi::overflow_type overflow = wi::OVF_NONE;
2097 signop sign = TYPE_SIGN (type);
2098 res = wi::mul (w0, w1, sign, &overflow);
2099 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2101 // For multiplication, the sign of the overflow is given
2102 // by the comparison of the signs of the operands.
2103 if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
2104 res = wi::max_value (w0.get_precision (), sign);
2105 else
2106 res = wi::min_value (w0.get_precision (), sign);
2107 return false;
2109 return overflow;
// Fold [LH_LB, LH_UB] * [RH_LB, RH_UB] into R.

void
operator_mult::wi_fold (irange &r, tree type,
			const wide_int &lh_lb, const wide_int &lh_ub,
			const wide_int &rh_lb, const wide_int &rh_ub) const
{
  // With undefined overflow the cross product (which saturates on
  // overflow) is sufficient.
  if (TYPE_OVERFLOW_UNDEFINED (type))
    {
      wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
      return;
    }

  // Multiply the ranges when overflow wraps.  This is basically fancy
  // code so we don't drop to varying with an unsigned
  // [-3,-1]*[-3,-1].
  //
  // This test requires 2*prec bits if both operands are signed and
  // 2*prec + 2 bits if either is not.  Therefore, extend the values
  // using the sign of the result to PREC2.  From here on out,
  // everything is just signed math no matter what the input types
  // were.

  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);
  widest2_int min0 = widest2_int::from (lh_lb, sign);
  widest2_int max0 = widest2_int::from (lh_ub, sign);
  widest2_int min1 = widest2_int::from (rh_lb, sign);
  widest2_int max1 = widest2_int::from (rh_ub, sign);
  widest2_int sizem1 = wi::mask <widest2_int> (prec, false);
  widest2_int size = sizem1 + 1;

  // Canonicalize the intervals.
  if (sign == UNSIGNED)
    {
      if (wi::ltu_p (size, min0 + max0))
	{
	  min0 -= size;
	  max0 -= size;
	}
      if (wi::ltu_p (size, min1 + max1))
	{
	  min1 -= size;
	  max1 -= size;
	}
    }

  // Sort the 4 products so that min is in prod0 and max is in
  // prod3.
  widest2_int prod0 = min0 * min1;
  widest2_int prod1 = min0 * max1;
  widest2_int prod2 = max0 * min1;
  widest2_int prod3 = max0 * max1;

  // min0min1 > max0max1
  if (prod0 > prod3)
    std::swap (prod0, prod3);

  // min0max1 > max0min1
  if (prod1 > prod2)
    std::swap (prod1, prod2);

  if (prod0 > prod1)
    std::swap (prod0, prod1);

  if (prod2 > prod3)
    std::swap (prod2, prod3);

  // diff = max - min
  prod2 = prod3 - prod0;
  if (wi::geu_p (prod2, sizem1))
    {
      // The difference spans more than the type, so the result would
      // wrap and cover everything...
      // ...except: multiplying by X, where X is a power of 2 is
      // [0,0][X,+INF].
      if (TYPE_UNSIGNED (type) && rh_lb == rh_ub
	  && wi::exact_log2 (rh_lb) != -1 && prec > 1)
	{
	  r.set (type, rh_lb, wi::max_value (prec, sign));
	  int_range<2> zero;
	  zero.set_zero (type);
	  r.union_ (zero);
	}
      else
	// The range covers all values.
	r.set_varying (type);
    }
  else
    {
      // Truncate back to the original precision; the bounds may come
      // out reversed after the wrap canonicalization above.
      wide_int new_lb = wide_int::from (prod0, prec, sign);
      wide_int new_ub = wide_int::from (prod3, prec, sign);
      create_possibly_reversed_range (r, type, new_lb, new_ub);
    }
}
// Widening multiplication where the first operand is sign-extended to
// twice its precision before the multiply.

class operator_widen_mult_signed : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub)
    const;
} op_widen_mult_signed;
2214 void
2215 operator_widen_mult_signed::wi_fold (irange &r, tree type,
2216 const wide_int &lh_lb,
2217 const wide_int &lh_ub,
2218 const wide_int &rh_lb,
2219 const wide_int &rh_ub) const
2221 signop s = TYPE_SIGN (type);
2223 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
2224 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
2225 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2226 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2228 /* We don't expect a widening multiplication to be able to overflow but range
2229 calculations for multiplications are complicated. After widening the
2230 operands lets call the base class. */
2231 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
// Widening multiplication where the first operand is zero-extended to
// twice its precision before the multiply.

class operator_widen_mult_unsigned : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub)
    const;
} op_widen_mult_unsigned;
2246 void
2247 operator_widen_mult_unsigned::wi_fold (irange &r, tree type,
2248 const wide_int &lh_lb,
2249 const wide_int &lh_ub,
2250 const wide_int &rh_lb,
2251 const wide_int &rh_ub) const
2253 signop s = TYPE_SIGN (type);
2255 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
2256 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
2257 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2258 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2260 /* We don't expect a widening multiplication to be able to overflow but range
2261 calculations for multiplications are complicated. After widening the
2262 operands lets call the base class. */
2263 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
// Common implementation for the division operators.  The rounding
// variant (truncating, floor, round, ceiling) is selected by the tree
// code passed to the constructor and stored in m_code.

class operator_div : public cross_product_operator
{
public:
  operator_div (tree_code div_kind) { m_code = div_kind; }
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const final override;
  virtual bool wi_op_overflows (wide_int &res, tree type,
				const wide_int &, const wide_int &)
    const final override;
  void update_bitmask (irange &r, const irange &lh, const irange &rh) const
    { update_known_bitmask (r, m_code, lh, rh); }
protected:
  tree_code m_code;  // Which division variant this instance folds.
};

// One instance per supported division rounding mode.
static operator_div op_trunc_div (TRUNC_DIV_EXPR);
static operator_div op_floor_div (FLOOR_DIV_EXPR);
static operator_div op_round_div (ROUND_DIV_EXPR);
static operator_div op_ceil_div (CEIL_DIV_EXPR);
// Compute W0 / W1 into RES using this operator's rounding mode.
// Return true if the operation overflows (which the caller treats as
// "give up"), false otherwise.

bool
operator_div::wi_op_overflows (wide_int &res, tree type,
			       const wide_int &w0, const wide_int &w1) const
{
  // Division by zero: report an unhandleable overflow.
  if (w1 == 0)
    return true;

  wi::overflow_type overflow = wi::OVF_NONE;
  signop sign = TYPE_SIGN (type);

  // Select the wide-int division matching this operator's tree code.
  switch (m_code)
    {
    case EXACT_DIV_EXPR:
    case TRUNC_DIV_EXPR:
      res = wi::div_trunc (w0, w1, sign, &overflow);
      break;
    case FLOOR_DIV_EXPR:
      res = wi::div_floor (w0, w1, sign, &overflow);
      break;
    case ROUND_DIV_EXPR:
      res = wi::div_round (w0, w1, sign, &overflow);
      break;
    case CEIL_DIV_EXPR:
      res = wi::div_ceil (w0, w1, sign, &overflow);
      break;
    default:
      gcc_unreachable ();
    }

  if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
    {
      // For division, the only case is -INF / -1 = +INF.  Saturate and
      // report no overflow since the overflow is undefined anyway.
      res = wi::max_value (w0.get_precision (), sign);
      return false;
    }
  return overflow;
}
// Fold [LH_LB, LH_UB] / [RH_LB, RH_UB] into R, avoiding division by
// zero by splitting the divisor range around zero.

void
operator_div::wi_fold (irange &r, tree type,
		       const wide_int &lh_lb, const wide_int &lh_ub,
		       const wide_int &rh_lb, const wide_int &rh_ub) const
{
  const wide_int dividend_min = lh_lb;
  const wide_int dividend_max = lh_ub;
  const wide_int divisor_min = rh_lb;
  const wide_int divisor_max = rh_ub;
  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);
  wide_int extra_min, extra_max;

  // If we know we won't divide by zero, just do the division.
  if (!wi_includes_zero_p (type, divisor_min, divisor_max))
    {
      wi_cross_product (r, type, dividend_min, dividend_max,
			divisor_min, divisor_max);
      return;
    }

  // If we're definitely dividing by zero, there's nothing to do.
  if (wi_zero_p (type, divisor_min, divisor_max))
    {
      r.set_undefined ();
      return;
    }

  // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
  // skip any division by zero.

  // First divide by the negative numbers, if any.
  if (wi::neg_p (divisor_min, sign))
    wi_cross_product (r, type, dividend_min, dividend_max,
		      divisor_min, wi::minus_one (prec));
  else
    r.set_undefined ();

  // Then divide by the non-zero positive numbers, if any.
  if (wi::gt_p (divisor_max, wi::zero (prec), sign))
    {
      int_range_max tmp;
      wi_cross_product (tmp, type, dividend_min, dividend_max,
			wi::one (prec), divisor_max);
      r.union_ (tmp);
    }
  // We shouldn't still have undefined here.
  gcc_checking_assert (!r.undefined_p ());
}
// EXACT_DIV_EXPR: a division known to leave no remainder.  It reuses
// all folding machinery from operator_div and adds an inverse (op1)
// solver, which is only possible because exactness guarantees
// op1 == lhs * op2 for a constant op2.
2378 class operator_exact_divide : public operator_div
2380 using range_operator::op1_range;
2381 public:
2382 operator_exact_divide () : operator_div (EXACT_DIV_EXPR) { }
2383 virtual bool op1_range (irange &r, tree type,
2384 const irange &lhs,
2385 const irange &op2,
2386 relation_trio) const;
2388 } op_exact_div;
2390 bool
2391 operator_exact_divide::op1_range (irange &r, tree type,
2392 const irange &lhs,
2393 const irange &op2,
2394 relation_trio) const
2396 if (lhs.undefined_p ())
2397 return false;
2398 wide_int offset;
2399 // [2, 4] = op1 / [3,3] since its exact divide, no need to worry about
2400 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2401 // We wont bother trying to enumerate all the in between stuff :-P
2402 // TRUE accuracy is [6,6][9,9][12,12]. This is unlikely to matter most of
2403 // the time however.
2404 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
2405 if (op2.singleton_p (offset) && offset != 0)
2406 return range_op_handler (MULT_EXPR).fold_range (r, type, lhs, op2);
2407 return false;
// Left shift.  fold_range turns constant shift counts into wrapping
// multiplies; op1_range reconstructs op1 by right-shifting the LHS and
// filling in the bits shifted out.
2411 class operator_lshift : public cross_product_operator
2413 using range_operator::fold_range;
2414 using range_operator::op1_range;
2415 public:
2416 virtual bool op1_range (irange &r, tree type, const irange &lhs,
2417 const irange &op2, relation_trio rel = TRIO_VARYING)
2418 const final override;
2419 virtual bool fold_range (irange &r, tree type, const irange &op1,
2420 const irange &op2, relation_trio rel = TRIO_VARYING)
2421 const final override;
2423 virtual void wi_fold (irange &r, tree type,
2424 const wide_int &lh_lb, const wide_int &lh_ub,
2425 const wide_int &rh_lb,
2426 const wide_int &rh_ub) const final override;
2427 virtual bool wi_op_overflows (wide_int &res,
2428 tree type,
2429 const wide_int &,
2430 const wide_int &) const final override;
2431 void update_bitmask (irange &r, const irange &lh,
2432 const irange &rh) const final override
2433 { update_known_bitmask (r, LSHIFT_EXPR, lh, rh); }
2434 } op_lshift;
// Right shift.  Also records the LHS <= op1 relation when both
// operands are known non-negative (see lhs_op1_relation).
2436 class operator_rshift : public cross_product_operator
2438 using range_operator::fold_range;
2439 using range_operator::op1_range;
2440 using range_operator::lhs_op1_relation;
2441 public:
2442 virtual bool fold_range (irange &r, tree type, const irange &op1,
2443 const irange &op2, relation_trio rel = TRIO_VARYING)
2444 const final override;
2445 virtual void wi_fold (irange &r, tree type,
2446 const wide_int &lh_lb,
2447 const wide_int &lh_ub,
2448 const wide_int &rh_lb,
2449 const wide_int &rh_ub) const final override;
2450 virtual bool wi_op_overflows (wide_int &res,
2451 tree type,
2452 const wide_int &w0,
2453 const wide_int &w1) const final override;
2454 virtual bool op1_range (irange &, tree type, const irange &lhs,
2455 const irange &op2, relation_trio rel = TRIO_VARYING)
2456 const final override;
2457 virtual relation_kind lhs_op1_relation (const irange &lhs, const irange &op1,
2458 const irange &op2, relation_kind rel)
2459 const final override;
2460 void update_bitmask (irange &r, const irange &lh,
2461 const irange &rh) const final override
2462 { update_known_bitmask (r, RSHIFT_EXPR, lh, rh); }
2463 } op_rshift;
2466 relation_kind
2467 operator_rshift::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
2468 const irange &op1,
2469 const irange &op2,
2470 relation_kind) const
2472 // If both operands range are >= 0, then the LHS <= op1.
2473 if (!op1.undefined_p () && !op2.undefined_p ()
2474 && wi::ge_p (op1.lower_bound (), 0, TYPE_SIGN (op1.type ()))
2475 && wi::ge_p (op2.lower_bound (), 0, TYPE_SIGN (op2.type ())))
2476 return VREL_LE;
2477 return VREL_VARYING;
// Fold OP1 << OP2 into R.  Shift counts that get_shift_range rejects
// fold to zero (or UNDEFINED when OP2 itself is undefined).  A
// singleton count is folded as a wrapping multiply by 1 << shift;
// anything else goes through the generic per-pair folder.
2480 bool
2481 operator_lshift::fold_range (irange &r, tree type,
2482 const irange &op1,
2483 const irange &op2,
2484 relation_trio rel) const
2486 int_range_max shift_range;
2487 if (!get_shift_range (shift_range, type, op2))
2489 if (op2.undefined_p ())
2490 r.set_undefined ();
2491 else
2492 r.set_zero (type);
2493 return true;
2496 // Transform left shifts by constants into multiplies.
2497 if (shift_range.singleton_p ())
2499 unsigned shift = shift_range.lower_bound ().to_uhwi ();
2500 wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type));
2501 int_range<1> mult (type, tmp, tmp);
2503 // Force wrapping multiplication.
// The global wrap flags are saved, forced on around the multiply, and
// restored so a shift never appears to invoke signed-overflow UB.
2504 bool saved_flag_wrapv = flag_wrapv;
2505 bool saved_flag_wrapv_pointer = flag_wrapv_pointer;
2506 flag_wrapv = 1;
2507 flag_wrapv_pointer = 1;
2508 bool b = op_mult.fold_range (r, type, op1, mult);
2509 flag_wrapv = saved_flag_wrapv;
2510 flag_wrapv_pointer = saved_flag_wrapv_pointer;
2511 return b;
2513 else
2514 // Otherwise, invoke the generic fold routine.
2515 return range_operator::fold_range (r, type, op1, shift_range, rel);
// Fold [LH_LB, LH_UB] << [RH_LB, RH_UB] into R.  When every value in
// the operand range shifts out only zeroes (or only ones) the shift is
// monotonic and the endpoint cross product is exact; otherwise the
// result is VARYING.
2518 void
2519 operator_lshift::wi_fold (irange &r, tree type,
2520 const wide_int &lh_lb, const wide_int &lh_ub,
2521 const wide_int &rh_lb, const wide_int &rh_ub) const
2523 signop sign = TYPE_SIGN (type);
2524 unsigned prec = TYPE_PRECISION (type);
2525 int overflow_pos = sign == SIGNED ? prec - 1 : prec;
2526 int bound_shift = overflow_pos - rh_ub.to_shwi ();
2527 // If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2528 // overflow. However, for that to happen, rh.max needs to be zero,
2529 // which means rh is a singleton range of zero, which means we simply return
2530 // [lh_lb, lh_ub] as the range.
2531 if (wi::eq_p (rh_ub, rh_lb) && wi::eq_p (rh_ub, 0))
2533 r = int_range<2> (type, lh_lb, lh_ub);
2534 return;
2537 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2538 wide_int complement = ~(bound - 1);
2539 wide_int low_bound, high_bound;
2540 bool in_bounds = false;
2542 if (sign == UNSIGNED)
2544 low_bound = bound;
2545 high_bound = complement;
2546 if (wi::ltu_p (lh_ub, low_bound))
2548 // [5, 6] << [1, 2] == [10, 24].
2549 // We're shifting out only zeroes, the value increases
2550 // monotonically.
2551 in_bounds = true;
2553 else if (wi::ltu_p (high_bound, lh_lb))
2555 // [0xffffff00, 0xffffffff] << [1, 2]
2556 // == [0xfffffc00, 0xfffffffe].
2557 // We're shifting out only ones, the value decreases
2558 // monotonically.
2559 in_bounds = true;
2562 else
2564 // [-1, 1] << [1, 2] == [-4, 4]
2565 low_bound = complement;
2566 high_bound = bound;
2567 if (wi::lts_p (lh_ub, high_bound)
2568 && wi::lts_p (low_bound, lh_lb))
2570 // For non-negative numbers, we're shifting out only zeroes,
2571 // the value increases monotonically. For negative numbers,
2572 // we're shifting out only ones, the value decreases
2573 // monotonically.
2574 in_bounds = true;
2578 if (in_bounds)
2579 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2580 else
2581 r.set_varying (type);
2584 bool
2585 operator_lshift::wi_op_overflows (wide_int &res, tree type,
2586 const wide_int &w0, const wide_int &w1) const
2588 signop sign = TYPE_SIGN (type);
2589 if (wi::neg_p (w1))
2591 // It's unclear from the C standard whether shifts can overflow.
2592 // The following code ignores overflow; perhaps a C standard
2593 // interpretation ruling is needed.
2594 res = wi::rshift (w0, -w1, sign);
2596 else
2597 res = wi::lshift (w0, w1);
2598 return false;
// Compute the range of op1 from LHS = op1 << OP2.  Only a singleton,
// in-range shift count is invertible: op1 is the LHS shifted back
// right, widened with every combination of the low bits that the left
// shift would have discarded.
2601 bool
2602 operator_lshift::op1_range (irange &r,
2603 tree type,
2604 const irange &lhs,
2605 const irange &op2,
2606 relation_trio) const
2608 if (lhs.undefined_p ())
2609 return false;
// Seed R conservatively; the non-singleton-shift exit below returns
// whatever precision this alone provides.
2611 if (!contains_zero_p (lhs))
2612 r.set_nonzero (type);
2613 else
2614 r.set_varying (type);
2616 wide_int shift;
2617 if (op2.singleton_p (shift))
// Negative or >= precision shift counts cannot be inverted.
2619 if (wi::lt_p (shift, 0, SIGNED))
2620 return false;
2621 if (wi::ge_p (shift, wi::uhwi (TYPE_PRECISION (type),
2622 TYPE_PRECISION (op2.type ())),
2623 UNSIGNED))
2624 return false;
2625 if (shift == 0)
2627 r.intersect (lhs);
2628 return true;
2631 // Work completely in unsigned mode to start.
2632 tree utype = type;
2633 int_range_max tmp_range;
2634 if (TYPE_SIGN (type) == SIGNED)
2636 int_range_max tmp = lhs;
2637 utype = unsigned_type_for (type);
2638 range_cast (tmp, utype);
2639 op_rshift.fold_range (tmp_range, utype, tmp, op2);
2641 else
2642 op_rshift.fold_range (tmp_range, utype, lhs, op2);
2644 // Start with ranges which can produce the LHS by right shifting the
2645 // result by the shift amount.
2646 // ie [0x08, 0xF0] = op1 << 2 will start with
2647 // [00001000, 11110000] = op1 << 2
2648 // [0x02, 0x4C] aka [00000010, 00111100]
2650 // Then create a range from the LB with the least significant upper bit
2651 // set, to the upper bound with all the bits set.
2652 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2654 // Ideally we do this for each subrange, but just lump them all for now.
2655 unsigned low_bits = TYPE_PRECISION (utype) - shift.to_uhwi ();
2656 wide_int up_mask = wi::mask (low_bits, true, TYPE_PRECISION (utype));
2657 wide_int new_ub = wi::bit_or (up_mask, tmp_range.upper_bound ());
2658 wide_int new_lb = wi::set_bit (tmp_range.lower_bound (), low_bits);
2659 int_range<2> fill_range (utype, new_lb, new_ub);
2660 tmp_range.union_ (fill_range);
2662 if (utype != type)
2663 range_cast (tmp_range, type);
2665 r.intersect (tmp_range);
2666 return true;
2669 return !r.varying_p ();
// Compute the range of op1 from LHS = op1 >> OP2.  Only a singleton,
// in-range shift count is invertible: op1 is LHS << shift plus any
// combination of the low "shift" bits that the right shift discarded.
2672 bool
2673 operator_rshift::op1_range (irange &r,
2674 tree type,
2675 const irange &lhs,
2676 const irange &op2,
2677 relation_trio) const
2679 if (lhs.undefined_p ())
2680 return false;
2681 wide_int shift;
2682 if (op2.singleton_p (shift))
2684 // Ignore nonsensical shifts.
2685 unsigned prec = TYPE_PRECISION (type);
2686 if (wi::ge_p (shift,
2687 wi::uhwi (prec, TYPE_PRECISION (op2.type ())),
2688 UNSIGNED))
2689 return false;
2690 if (shift == 0)
2692 r = lhs;
2693 return true;
2696 // Folding the original operation may discard some impossible
2697 // ranges from the LHS.
2698 int_range_max lhs_refined;
2699 op_rshift.fold_range (lhs_refined, type, int_range<1> (type), op2);
2700 lhs_refined.intersect (lhs);
2701 if (lhs_refined.undefined_p ())
2703 r.set_undefined ();
2704 return true;
2706 int_range_max shift_range (op2.type (), shift, shift);
2707 int_range_max lb, ub;
2708 op_lshift.fold_range (lb, type, lhs_refined, shift_range);
2709 // LHS
2710 // 0000 0111 = OP1 >> 3
2712 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2713 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2714 // right hand side (0x07).
2715 wide_int mask = wi::bit_not (wi::lshift (wi::minus_one (prec), shift));
2716 int_range_max mask_range (type,
2717 wi::zero (TYPE_PRECISION (type)),
2718 mask);
2719 op_plus.fold_range (ub, type, lb, mask_range);
2720 r = lb;
2721 r.union_ (ub);
// A nonzero LHS means op1 cannot consist solely of the discarded low
// bits, so exclude the pure-mask values.
2722 if (!contains_zero_p (lhs_refined))
2724 mask_range.invert ();
2725 r.intersect (mask_range);
2727 return true;
2729 return false;
2732 bool
2733 operator_rshift::wi_op_overflows (wide_int &res,
2734 tree type,
2735 const wide_int &w0,
2736 const wide_int &w1) const
2738 signop sign = TYPE_SIGN (type);
2739 if (wi::neg_p (w1))
2740 res = wi::lshift (w0, -w1);
2741 else
2743 // It's unclear from the C standard whether shifts can overflow.
2744 // The following code ignores overflow; perhaps a C standard
2745 // interpretation ruling is needed.
2746 res = wi::rshift (w0, w1, sign);
2748 return false;
2751 bool
2752 operator_rshift::fold_range (irange &r, tree type,
2753 const irange &op1,
2754 const irange &op2,
2755 relation_trio rel) const
2757 int_range_max shift;
2758 if (!get_shift_range (shift, type, op2))
2760 if (op2.undefined_p ())
2761 r.set_undefined ();
2762 else
2763 r.set_zero (type);
2764 return true;
2767 return range_operator::fold_range (r, type, op1, shift, rel);
// Fold [LH_LB, LH_UB] >> [RH_LB, RH_UB] into R.  The right shift is
// monotonic in both operands here, so the endpoint cross product
// (from cross_product_operator) suffices.
2770 void
2771 operator_rshift::wi_fold (irange &r, tree type,
2772 const wide_int &lh_lb, const wide_int &lh_ub,
2773 const wide_int &rh_lb, const wide_int &rh_ub) const
2775 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2779 // Add a partial equivalence between the LHS and op1 for casts.
// The low MIN (lhs_prec, op1_prec) bits survive the cast, which
// bits_to_pe maps to a partial-equivalence relation — except when a
// sign extension could turn the upper bits into ones.
2781 relation_kind
2782 operator_cast::lhs_op1_relation (const irange &lhs,
2783 const irange &op1,
2784 const irange &op2 ATTRIBUTE_UNUSED,
2785 relation_kind) const
2787 if (lhs.undefined_p () || op1.undefined_p ())
2788 return VREL_VARYING;
2789 unsigned lhs_prec = TYPE_PRECISION (lhs.type ());
2790 unsigned op1_prec = TYPE_PRECISION (op1.type ());
2791 // If the result gets sign extended into a larger type check first if this
2792 // qualifies as a partial equivalence.
2793 if (TYPE_SIGN (op1.type ()) == SIGNED && lhs_prec > op1_prec)
2795 // If the result is sign extended, and the LHS is larger than op1,
2796 // check if op1's range can be negative as the sign extension will
2797 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2798 int_range<3> negs = range_negatives (op1.type ());
2799 negs.intersect (op1);
2800 if (!negs.undefined_p ())
2801 return VREL_VARYING;
2804 unsigned prec = MIN (lhs_prec, op1_prec);
2805 return bits_to_pe (prec);
2808 // Return TRUE if casting from INNER to OUTER is a truncating cast.
2810 inline bool
2811 operator_cast::truncating_cast_p (const irange &inner,
2812 const irange &outer) const
2814 return TYPE_PRECISION (outer.type ()) < TYPE_PRECISION (inner.type ());
2817 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
2819 bool
2820 operator_cast::inside_domain_p (const wide_int &min,
2821 const wide_int &max,
2822 const irange &range) const
2824 wide_int domain_min = irange_val_min (range.type ());
2825 wide_int domain_max = irange_val_max (range.type ());
2826 signop domain_sign = TYPE_SIGN (range.type ());
2827 return (wi::le_p (min, domain_max, domain_sign)
2828 && wi::le_p (max, domain_max, domain_sign)
2829 && wi::ge_p (min, domain_min, domain_sign)
2830 && wi::ge_p (max, domain_min, domain_sign));
2834 // Helper for fold_range which work on a pair at a time.
// Folds subrange INDEX of INNER, cast to OUTER's type, into R.  Falls
// back to VARYING when the truncated span or the final bounds cannot
// be represented in the destination domain.
2836 void
2837 operator_cast::fold_pair (irange &r, unsigned index,
2838 const irange &inner,
2839 const irange &outer) const
2841 tree inner_type = inner.type ();
2842 tree outer_type = outer.type ();
2843 signop inner_sign = TYPE_SIGN (inner_type);
2844 unsigned outer_prec = TYPE_PRECISION (outer_type);
2846 // check to see if casting from INNER to OUTER is a conversion that
2847 // fits in the resulting OUTER type.
2848 wide_int inner_lb = inner.lower_bound (index);
2849 wide_int inner_ub = inner.upper_bound (index);
2850 if (truncating_cast_p (inner, outer))
2852 // We may be able to accommodate a truncating cast if the
2853 // resulting range can be represented in the target type...
// The span (ub - lb) must fit in outer_prec bits, else every residue
// is possible after truncation.
2854 if (wi::rshift (wi::sub (inner_ub, inner_lb),
2855 wi::uhwi (outer_prec, TYPE_PRECISION (inner.type ())),
2856 inner_sign) != 0)
2858 r.set_varying (outer_type);
2859 return;
2862 // ...but we must still verify that the final range fits in the
2863 // domain. This catches -fstrict-enum restrictions where the domain
2864 // range is smaller than what fits in the underlying type.
2865 wide_int min = wide_int::from (inner_lb, outer_prec, inner_sign);
2866 wide_int max = wide_int::from (inner_ub, outer_prec, inner_sign);
2867 if (inside_domain_p (min, max, outer))
2868 create_possibly_reversed_range (r, outer_type, min, max);
2869 else
2870 r.set_varying (outer_type);
// Fold the cast of INNER to the type carried by OUTER into R, one
// subrange pair at a time, unioning the per-pair results.
2874 bool
2875 operator_cast::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
2876 const irange &inner,
2877 const irange &outer,
2878 relation_trio) const
2880 if (empty_range_varying (r, type, inner, outer))
2881 return true;
2883 gcc_checking_assert (outer.varying_p ());
2884 gcc_checking_assert (inner.num_pairs () > 0);
2886 // Avoid a temporary by folding the first pair directly into the result.
2887 fold_pair (r, 0, inner, outer);
2889 // Then process any additional pairs by unioning with their results.
2890 for (unsigned x = 1; x < inner.num_pairs (); ++x)
2892 int_range_max tmp;
2893 fold_pair (tmp, x, inner, outer);
2894 r.union_ (tmp);
// Once VARYING, further unions cannot change the result.
2895 if (r.varying_p ())
2896 return true;
2899 update_bitmask (r, inner, outer);
2900 return true;
// Refresh the known zero/one bit mask of R for a CONVERT_EXPR of LH.
2903 void
2904 operator_cast::update_bitmask (irange &r, const irange &lh,
2905 const irange &rh) const
2907 update_known_bitmask (r, CONVERT_EXPR, lh, rh);
// Compute the range of op1 from LHS = (cast) op1, intersected with
// OP2 (the already-known range of op1).  Pointer results, truncating
// casts and non-truncating casts each take a separate path.
2910 bool
2911 operator_cast::op1_range (irange &r, tree type,
2912 const irange &lhs,
2913 const irange &op2,
2914 relation_trio) const
2916 if (lhs.undefined_p ())
2917 return false;
2918 tree lhs_type = lhs.type ();
2919 gcc_checking_assert (types_compatible_p (op2.type(), type));
2921 // If we are calculating a pointer, shortcut to what we really care about.
2922 if (POINTER_TYPE_P (type))
2924 // Conversion from other pointers or a constant (including 0/NULL)
2925 // are straightforward.
2926 if (POINTER_TYPE_P (lhs.type ())
2927 || (lhs.singleton_p ()
2928 && TYPE_PRECISION (lhs.type ()) >= TYPE_PRECISION (type)))
2930 r = lhs;
2931 range_cast (r, type);
2933 else
2935 // If the LHS is not a pointer nor a singleton, then it is
2936 // either VARYING or non-zero.
2937 if (!contains_zero_p (lhs))
2938 r.set_nonzero (type);
2939 else
2940 r.set_varying (type);
2942 r.intersect (op2);
2943 return true;
2946 if (truncating_cast_p (op2, lhs))
2948 if (lhs.varying_p ())
2949 r.set_varying (type);
2950 else
// A truncating cast discarded the upper bits of op1; rebuild every
// wider value whose low bits match the LHS: the LHS zero-extended,
// all its sign-extended (negative) images, and the filler ranges in
// between.
2952 // We want to insert the LHS as an unsigned value since it
2953 // would not trigger the signed bit of the larger type.
2954 int_range_max converted_lhs = lhs;
2955 range_cast (converted_lhs, unsigned_type_for (lhs_type));
2956 range_cast (converted_lhs, type);
2957 // Start by building the positive signed outer range for the type.
2958 wide_int lim = wi::set_bit_in_zero (TYPE_PRECISION (lhs_type),
2959 TYPE_PRECISION (type));
2960 create_possibly_reversed_range (r, type, lim,
2961 wi::max_value (TYPE_PRECISION (type),
2962 SIGNED));
2963 // For the signed part, we need to simply union the 2 ranges now.
2964 r.union_ (converted_lhs);
2966 // Create maximal negative number outside of LHS bits.
2967 lim = wi::mask (TYPE_PRECISION (lhs_type), true,
2968 TYPE_PRECISION (type));
2969 // Add this to the unsigned LHS range(s).
2970 int_range_max lim_range (type, lim, lim);
2971 int_range_max lhs_neg;
2972 range_op_handler (PLUS_EXPR).fold_range (lhs_neg, type,
2973 converted_lhs, lim_range);
2974 // lhs_neg now has all the negative versions of the LHS.
2975 // Now union in all the values from SIGNED MIN (0x80000) to
2976 // lim-1 in order to fill in all the ranges with the upper
2977 // bits set.
2979 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
2980 // we don't need to create a range from min to lim-1
2981 // calculate neg range traps trying to create [lim, lim - 1].
2982 wide_int min_val = wi::min_value (TYPE_PRECISION (type), SIGNED);
2983 if (lim != min_val)
2985 int_range_max neg (type,
2986 wi::min_value (TYPE_PRECISION (type),
2987 SIGNED),
2988 lim - 1);
2989 lhs_neg.union_ (neg);
2991 // And finally, munge the signed and unsigned portions.
2992 r.union_ (lhs_neg);
2994 // And intersect with any known value passed in the extra operand.
2995 r.intersect (op2);
2996 return true;
2999 int_range_max tmp;
3000 if (TYPE_PRECISION (lhs_type) == TYPE_PRECISION (type))
3001 tmp = lhs;
3002 else
3004 // The cast is not truncating, and the range is restricted to
3005 // the range of the RHS by this assignment.
3007 // Cast the range of the RHS to the type of the LHS.
3008 fold_range (tmp, lhs_type, int_range<1> (type), int_range<1> (lhs_type));
3009 // Intersect this with the LHS range will produce the range,
3010 // which will be cast to the RHS type before returning.
3011 tmp.intersect (lhs);
3014 // Cast the calculated range to the type of the RHS.
3015 fold_range (r, type, tmp, int_range<1> (type));
3016 return true;
// TRUTH_AND_EXPR: boolean AND of two ranges, with solvers for both
// operands (op2_range simply delegates to op1_range).
3020 class operator_logical_and : public range_operator
3022 using range_operator::fold_range;
3023 using range_operator::op1_range;
3024 using range_operator::op2_range;
3025 public:
3026 virtual bool fold_range (irange &r, tree type,
3027 const irange &lh,
3028 const irange &rh,
3029 relation_trio rel = TRIO_VARYING) const;
3030 virtual bool op1_range (irange &r, tree type,
3031 const irange &lhs,
3032 const irange &op2,
3033 relation_trio rel = TRIO_VARYING) const;
3034 virtual bool op2_range (irange &r, tree type,
3035 const irange &lhs,
3036 const irange &op1,
3037 relation_trio rel = TRIO_VARYING) const;
3038 } op_logical_and;
3041 bool
3042 operator_logical_and::fold_range (irange &r, tree type,
3043 const irange &lh,
3044 const irange &rh,
3045 relation_trio) const
3047 if (empty_range_varying (r, type, lh, rh))
3048 return true;
3050 // 0 && anything is 0.
3051 if ((wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (lh.upper_bound (), 0))
3052 || (wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (rh.upper_bound (), 0)))
3053 r = range_false (type);
3054 else if (contains_zero_p (lh) || contains_zero_p (rh))
3055 // To reach this point, there must be a logical 1 on each side, and
3056 // the only remaining question is whether there is a zero or not.
3057 r = range_true_and_false (type);
3058 else
3059 r = range_true (type);
3060 return true;
3063 bool
3064 operator_logical_and::op1_range (irange &r, tree type,
3065 const irange &lhs,
3066 const irange &op2 ATTRIBUTE_UNUSED,
3067 relation_trio) const
3069 switch (get_bool_state (r, lhs, type))
3071 case BRS_TRUE:
3072 // A true result means both sides of the AND must be true.
3073 r = range_true (type);
3074 break;
3075 default:
3076 // Any other result means only one side has to be false, the
3077 // other side can be anything. So we cannot be sure of any
3078 // result here.
3079 r = range_true_and_false (type);
3080 break;
3082 return true;
// Logical AND is symmetric in its operands, so solving for op2
// reduces to the op1 computation with the operands swapped.
3085 bool
3086 operator_logical_and::op2_range (irange &r, tree type,
3087 const irange &lhs,
3088 const irange &op1,
3089 relation_trio) const
3091 return operator_logical_and::op1_range (r, type, lhs, op1);
// Refresh the known zero/one bit mask of R for a BIT_AND_EXPR of LH
// and RH.
3095 void
3096 operator_bitwise_and::update_bitmask (irange &r, const irange &lh,
3097 const irange &rh) const
3099 update_known_bitmask (r, BIT_AND_EXPR, lh, rh);
3102 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
3103 // by considering the number of leading redundant sign bit copies.
3104 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
3105 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
// Return FALSE when there are no redundant sign bit copies to exploit
// (new_clrsb == 0); otherwise set R to the bounds implied by the
// shared sign-bit copies and return TRUE.
3106 static bool
3107 wi_optimize_signed_bitwise_op (irange &r, tree type,
3108 const wide_int &lh_lb, const wide_int &lh_ub,
3109 const wide_int &rh_lb, const wide_int &rh_ub)
3111 int lh_clrsb = MIN (wi::clrsb (lh_lb), wi::clrsb (lh_ub));
3112 int rh_clrsb = MIN (wi::clrsb (rh_lb), wi::clrsb (rh_ub));
3113 int new_clrsb = MIN (lh_clrsb, rh_clrsb);
3114 if (new_clrsb == 0)
3115 return false;
3116 int type_prec = TYPE_PRECISION (type);
3117 int rprec = (type_prec - new_clrsb) - 1;
3118 value_range_with_overflow (r, type,
3119 wi::mask (rprec, true, type_prec),
3120 wi::mask (rprec, false, type_prec));
3121 return true;
3124 // An AND of 8,16, 32 or 64 bits can produce a partial equivalence between
3125 // the LHS and op1.
3127 relation_kind
3128 operator_bitwise_and::lhs_op1_relation (const irange &lhs,
3129 const irange &op1,
3130 const irange &op2,
3131 relation_kind) const
3133 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
3134 return VREL_VARYING;
3135 if (!op2.singleton_p ())
3136 return VREL_VARYING;
3137 // if val == 0xff or 0xFFFF OR 0Xffffffff OR 0Xffffffffffffffff, return TRUE
3138 int prec1 = TYPE_PRECISION (op1.type ());
3139 int prec2 = TYPE_PRECISION (op2.type ());
3140 int mask_prec = 0;
3141 wide_int mask = op2.lower_bound ();
3142 if (wi::eq_p (mask, wi::mask (8, false, prec2)))
3143 mask_prec = 8;
3144 else if (wi::eq_p (mask, wi::mask (16, false, prec2)))
3145 mask_prec = 16;
3146 else if (wi::eq_p (mask, wi::mask (32, false, prec2)))
3147 mask_prec = 32;
3148 else if (wi::eq_p (mask, wi::mask (64, false, prec2)))
3149 mask_prec = 64;
3150 return bits_to_pe (MIN (prec1, mask_prec));
3153 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
3154 // possible. Basically, see if we can optimize:
3156 // [LB, UB] op Z
3157 // into:
3158 // [LB op Z, UB op Z]
3160 // If the optimization was successful, accumulate the range in R and
3161 // return TRUE.
3163 static bool
3164 wi_optimize_and_or (irange &r,
3165 enum tree_code code,
3166 tree type,
3167 const wide_int &lh_lb, const wide_int &lh_ub,
3168 const wide_int &rh_lb, const wide_int &rh_ub)
3170 // Calculate the singleton mask among the ranges, if any.
3171 wide_int lower_bound, upper_bound, mask;
3172 if (wi::eq_p (rh_lb, rh_ub))
3174 mask = rh_lb;
3175 lower_bound = lh_lb;
3176 upper_bound = lh_ub;
3178 else if (wi::eq_p (lh_lb, lh_ub))
3180 mask = lh_lb;
3181 lower_bound = rh_lb;
3182 upper_bound = rh_ub;
3184 else
// Neither operand is a constant; nothing to exploit here.
3185 return false;
3187 // If Z is a constant which (for op | its bitwise not) has n
3188 // consecutive least significant bits cleared followed by m 1
3189 // consecutive bits set immediately above it and either
3190 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
3192 // The least significant n bits of all the values in the range are
3193 // cleared or set, the m bits above it are preserved and any bits
3194 // above these are required to be the same for all values in the
3195 // range.
3196 wide_int w = mask;
3197 int m = 0, n = 0;
3198 if (code == BIT_IOR_EXPR)
3199 w = ~w;
3200 if (wi::eq_p (w, 0))
3201 n = w.get_precision ();
3202 else
3204 n = wi::ctz (w);
3205 w = ~(w | wi::mask (n, false, w.get_precision ()));
3206 if (wi::eq_p (w, 0))
3207 m = w.get_precision () - n;
3208 else
3209 m = wi::ctz (w) - n;
// The bits above m + n must agree across the whole range, otherwise
// the endpoint-wise result would not cover all members.
3211 wide_int new_mask = wi::mask (m + n, true, w.get_precision ());
3212 if ((new_mask & lower_bound) != (new_mask & upper_bound))
3213 return false;
3215 wide_int res_lb, res_ub;
3216 if (code == BIT_AND_EXPR)
3218 res_lb = wi::bit_and (lower_bound, mask);
3219 res_ub = wi::bit_and (upper_bound, mask);
3221 else if (code == BIT_IOR_EXPR)
3223 res_lb = wi::bit_or (lower_bound, mask);
3224 res_ub = wi::bit_or (upper_bound, mask);
3226 else
3227 gcc_unreachable ();
3228 value_range_with_overflow (r, type, res_lb, res_ub);
3230 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
3231 if (code == BIT_IOR_EXPR && wi::ne_p (mask, 0))
3233 int_range<2> tmp;
3234 tmp.set_nonzero (type);
3235 r.intersect (tmp);
3237 return true;
3240 // For range [LB, UB] compute two wide_int bit masks.
3242 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3243 // for all numbers in the range the bit is 0, otherwise it might be 0
3244 // or 1.
3246 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3247 // for all numbers in the range the bit is 1, otherwise it might be 0
3248 // or 1.
3250 void
3251 wi_set_zero_nonzero_bits (tree type,
3252 const wide_int &lb, const wide_int &ub,
3253 wide_int &maybe_nonzero,
3254 wide_int &mustbe_nonzero)
3256 signop sign = TYPE_SIGN (type);
// A singleton range determines every bit exactly.
3258 if (wi::eq_p (lb, ub))
3259 maybe_nonzero = mustbe_nonzero = lb;
// Bits are only predictable when the range does not straddle the sign
// boundary; below the highest differing bit anything is possible.
3260 else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
3262 wide_int xor_mask = lb ^ ub;
3263 maybe_nonzero = lb | ub;
3264 mustbe_nonzero = lb & ub;
3265 if (xor_mask != 0)
3267 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
3268 maybe_nonzero.get_precision ());
3269 maybe_nonzero = maybe_nonzero | mask;
3270 mustbe_nonzero = wi::bit_and_not (mustbe_nonzero, mask);
3273 else
// Range crosses zero for a signed type: nothing is known.
3275 maybe_nonzero = wi::minus_one (lb.get_precision ());
3276 mustbe_nonzero = wi::zero (lb.get_precision ());
// Fold [LH_LB, LH_UB] & [RH_LB, RH_UB] into R.  Tries the singleton
// mask shortcut first, then derives bounds from the known zero/one
// bits of both operands, refined by several sign-based tightenings.
3280 void
3281 operator_bitwise_and::wi_fold (irange &r, tree type,
3282 const wide_int &lh_lb,
3283 const wide_int &lh_ub,
3284 const wide_int &rh_lb,
3285 const wide_int &rh_ub) const
3287 if (wi_optimize_and_or (r, BIT_AND_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3288 return;
3290 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3291 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3292 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3293 maybe_nonzero_lh, mustbe_nonzero_lh);
3294 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3295 maybe_nonzero_rh, mustbe_nonzero_rh);
// The AND is bounded below by the bits set in both operands and above
// by the bits possibly set in either.
3297 wide_int new_lb = mustbe_nonzero_lh & mustbe_nonzero_rh;
3298 wide_int new_ub = maybe_nonzero_lh & maybe_nonzero_rh;
3299 signop sign = TYPE_SIGN (type);
3300 unsigned prec = TYPE_PRECISION (type);
3301 // If both input ranges contain only negative values, we can
3302 // truncate the result range maximum to the minimum of the
3303 // input range maxima.
3304 if (wi::lt_p (lh_ub, 0, sign) && wi::lt_p (rh_ub, 0, sign))
3306 new_ub = wi::min (new_ub, lh_ub, sign);
3307 new_ub = wi::min (new_ub, rh_ub, sign);
3309 // If either input range contains only non-negative values
3310 // we can truncate the result range maximum to the respective
3311 // maximum of the input range.
3312 if (wi::ge_p (lh_lb, 0, sign))
3313 new_ub = wi::min (new_ub, lh_ub, sign);
3314 if (wi::ge_p (rh_lb, 0, sign))
3315 new_ub = wi::min (new_ub, rh_ub, sign);
3316 // PR68217: In case of signed & sign-bit-CST should
3317 // result in [-INF, 0] instead of [-INF, INF].
3318 if (wi::gt_p (new_lb, new_ub, sign))
3320 wide_int sign_bit = wi::set_bit_in_zero (prec - 1, prec);
3321 if (sign == SIGNED
3322 && ((wi::eq_p (lh_lb, lh_ub)
3323 && !wi::cmps (lh_lb, sign_bit))
3324 || (wi::eq_p (rh_lb, rh_ub)
3325 && !wi::cmps (rh_lb, sign_bit))))
3327 new_lb = wi::min_value (prec, sign);
3328 new_ub = wi::zero (prec);
3331 // If the limits got swapped around, return varying.
3332 if (wi::gt_p (new_lb, new_ub,sign))
3334 if (sign == SIGNED
3335 && wi_optimize_signed_bitwise_op (r, type,
3336 lh_lb, lh_ub,
3337 rh_lb, rh_ub)
3338 return;
3339 r.set_varying (type);
3341 else
3342 value_range_with_overflow (r, type, new_lb, new_ub);
3345 static void
3346 set_nonzero_range_from_mask (irange &r, tree type, const irange &lhs)
3348 if (!contains_zero_p (lhs))
3349 r = range_nonzero (type);
3350 else
3351 r.set_varying (type);
3354 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
3355 (otherwise return VAL). VAL and MASK must be zero-extended for
3356 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3357 (to transform signed values into unsigned) and at the end xor
3358 SGNBIT back. */
3360 wide_int
3361 masked_increment (const wide_int &val_in, const wide_int &mask,
3362 const wide_int &sgnbit, unsigned int prec)
3364 wide_int bit = wi::one (prec), res;
3365 unsigned int i;
3367 wide_int val = val_in ^ sgnbit;
// Walk BIT from the LSB upward; for each bit set in MASK, try
// rounding VAL up past that bit and clearing everything below,
// keeping only mask bits.  The first candidate above VAL wins.
3368 for (i = 0; i < prec; i++, bit += bit)
3370 res = mask;
3371 if ((res & bit) == 0)
3372 continue;
3373 res = bit - 1;
3374 res = wi::bit_and_not (val + bit, res);
3375 res &= mask;
3376 if (wi::gtu_p (res, val))
3377 return res ^ sgnbit;
// No such value exists; hand back VAL unchanged (sign-restored).
3379 return val ^ sgnbit;
// This was shamelessly stolen from register_edge_assert_for_2 and
// adjusted to work with iranges.

// Solve OP1 in LHS = OP1 & OP2 for a single-pair LHS, storing the
// result in R.  Requires OP2 to be a singleton mask to do anything
// beyond the nonzero-bit deduction.

void
operator_bitwise_and::simple_op1_range_solver (irange &r, tree type,
					       const irange &lhs,
					       const irange &op2) const
{
  // Without a constant mask, all we can derive is a nonzero property.
  if (!op2.singleton_p ())
    {
      set_nonzero_range_from_mask (r, type, lhs);
      return;
    }
  unsigned int nprec = TYPE_PRECISION (type);
  wide_int cst2v = op2.lower_bound ();
  bool cst2n = wi::neg_p (cst2v, TYPE_SIGN (type));
  // SGNBIT lets masked_increment emulate signed ordering with
  // unsigned arithmetic; it is zero when the mask is non-negative.
  wide_int sgnbit;
  if (cst2n)
    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
  else
    sgnbit = wi::zero (nprec);

  // Solve [lhs.lower_bound (), +INF] = x & MASK.
  //
  // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
  // maximum unsigned value is ~0.  For signed comparison, if CST2
  // doesn't have the most significant bit set, handle it similarly.  If
  // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
  wide_int valv = lhs.lower_bound ();
  wide_int minv = valv & cst2v, maxv;
  bool we_know_nothing = false;
  if (minv != valv)
    {
      // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
      minv = masked_increment (valv, cst2v, sgnbit, nprec);
      if (minv == valv)
	{
	  // If we can't determine anything on this bound, fall
	  // through and conservatively solve for the other end point.
	  we_know_nothing = true;
	}
    }
  maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
  if (we_know_nothing)
    r.set_varying (type);
  else
    create_possibly_reversed_range (r, type, minv, maxv);

  // Solve [-INF, lhs.upper_bound ()] = x & MASK.
  //
  // Minimum unsigned value for <= is 0 and maximum unsigned value is
  // VAL | ~CST2 if (VAL & CST2) == VAL.  Otherwise, find smallest
  // VAL2 where
  // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
  // as maximum.
  // For signed comparison, if CST2 doesn't have most significant bit
  // set, handle it similarly.  If CST2 has MSB set, the maximum is
  // the same and minimum is INT_MIN.
  valv = lhs.upper_bound ();
  minv = valv & cst2v;
  if (minv == valv)
    maxv = valv;
  else
    {
      maxv = masked_increment (valv, cst2v, sgnbit, nprec);
      if (maxv == valv)
	{
	  // If we couldn't determine anything on either bound, return
	  // undefined.
	  if (we_know_nothing)
	    r.set_undefined ();
	  return;
	}
      maxv -= 1;
    }
  maxv |= ~cst2v;
  minv = sgnbit;
  // Intersect the solution for the upper bound with the one already
  // computed for the lower bound.
  int_range<2> upper_bits;
  create_possibly_reversed_range (upper_bits, type, minv, maxv);
  r.intersect (upper_bits);
}
// Compute the range of OP1 from LHS = OP1 & OP2 by solving each
// subrange of LHS independently and unioning the results.

bool
operator_bitwise_and::op1_range (irange &r, tree type,
				 const irange &lhs,
				 const irange &op2,
				 relation_trio) const
{
  if (lhs.undefined_p ())
    return false;
  // Boolean types are really a logical AND.
  if (types_compatible_p (type, boolean_type_node))
    return op_logical_and.op1_range (r, type, lhs, op2);
  r.set_undefined ();
  // Solve each subrange pair of the LHS separately and accumulate.
  for (unsigned i = 0; i < lhs.num_pairs (); ++i)
    {
      int_range_max chunk (lhs.type (),
			   lhs.lower_bound (i),
			   lhs.upper_bound (i));
      int_range_max res;
      simple_op1_range_solver (res, type, chunk, op2);
      r.union_ (res);
    }
  // If no subrange produced anything, fall back to the nonzero-bit
  // deduction from the LHS.
  if (r.undefined_p ())
    set_nonzero_range_from_mask (r, type, lhs);

  // For MASK == op1 & MASK, all the bits in MASK must be set in op1.
  wide_int mask;
  if (lhs == op2 && lhs.singleton_p (mask))
    {
      r.update_bitmask (irange_bitmask (mask, ~mask));
      return true;
    }

  // For 0 = op1 & MASK, op1 is ~MASK.
  if (lhs.zero_p () && op2.singleton_p ())
    {
      wide_int nz = wi::bit_not (op2.get_nonzero_bits ());
      int_range<2> tmp (type);
      tmp.set_nonzero_bits (nz);
      r.intersect (tmp);
    }
  return true;
}
// Bitwise AND is commutative, so solving for OP2 is the same as
// solving for OP1 with the operands swapped.

bool
operator_bitwise_and::op2_range (irange &r, tree type,
				 const irange &lhs,
				 const irange &op1,
				 relation_trio) const
{
  return operator_bitwise_and::op1_range (r, type, lhs, op1);
}
// Range operator for logical OR on boolean ranges (TRUTH_OR_EXPR).

class operator_logical_or : public range_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
  using range_operator::op2_range;
public:
  virtual bool fold_range (irange &r, tree type,
			   const irange &lh,
			   const irange &rh,
			   relation_trio rel = TRIO_VARYING) const;
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs,
			  const irange &op2,
			  relation_trio rel = TRIO_VARYING) const;
  virtual bool op2_range (irange &r, tree type,
			  const irange &lhs,
			  const irange &op1,
			  relation_trio rel = TRIO_VARYING) const;
} op_logical_or;
3537 bool
3538 operator_logical_or::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3539 const irange &lh,
3540 const irange &rh,
3541 relation_trio) const
3543 if (empty_range_varying (r, type, lh, rh))
3544 return true;
3546 r = lh;
3547 r.union_ (rh);
3548 return true;
3551 bool
3552 operator_logical_or::op1_range (irange &r, tree type,
3553 const irange &lhs,
3554 const irange &op2 ATTRIBUTE_UNUSED,
3555 relation_trio) const
3557 switch (get_bool_state (r, lhs, type))
3559 case BRS_FALSE:
3560 // A false result means both sides of the OR must be false.
3561 r = range_false (type);
3562 break;
3563 default:
3564 // Any other result means only one side has to be true, the
3565 // other side can be anything. so we can't be sure of any result
3566 // here.
3567 r = range_true_and_false (type);
3568 break;
3570 return true;
// Logical OR is commutative, so OP2 is derived the same way as OP1.

bool
operator_logical_or::op2_range (irange &r, tree type,
				const irange &lhs,
				const irange &op1,
				relation_trio) const
{
  return operator_logical_or::op1_range (r, type, lhs, op1);
}
// Refine R's known-bit mask using the operands of a BIT_IOR_EXPR.

void
operator_bitwise_or::update_bitmask (irange &r, const irange &lh,
				     const irange &rh) const
{
  update_known_bitmask (r, BIT_IOR_EXPR, lh, rh);
}
// Fold [LH_LB, LH_UB] | [RH_LB, RH_UB] into R using must-be/may-be
// nonzero bit analysis of both operands.

void
operator_bitwise_or::wi_fold (irange &r, tree type,
			      const wide_int &lh_lb,
			      const wide_int &lh_ub,
			      const wide_int &rh_lb,
			      const wide_int &rh_ub) const
{
  if (wi_optimize_and_or (r, BIT_IOR_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
    return;

  wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
  wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
  wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
			    maybe_nonzero_lh, mustbe_nonzero_lh);
  wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
			    maybe_nonzero_rh, mustbe_nonzero_rh);
  // Bits set in either operand must be set in the result; bits not
  // possibly set in either cannot appear in the result.
  wide_int new_lb = mustbe_nonzero_lh | mustbe_nonzero_rh;
  wide_int new_ub = maybe_nonzero_lh | maybe_nonzero_rh;
  signop sign = TYPE_SIGN (type);
  // If the input ranges contain only positive values we can
  // truncate the minimum of the result range to the maximum
  // of the input range minima.
  if (wi::ge_p (lh_lb, 0, sign)
      && wi::ge_p (rh_lb, 0, sign))
    {
      new_lb = wi::max (new_lb, lh_lb, sign);
      new_lb = wi::max (new_lb, rh_lb, sign);
    }
  // If either input range contains only negative values
  // we can truncate the minimum of the result range to the
  // respective minimum range.
  if (wi::lt_p (lh_ub, 0, sign))
    new_lb = wi::max (new_lb, lh_lb, sign);
  if (wi::lt_p (rh_ub, 0, sign))
    new_lb = wi::max (new_lb, rh_lb, sign);
  // If the limits got swapped around, return a conservative range.
  if (wi::gt_p (new_lb, new_ub, sign))
    {
      // Make sure that nonzero|X is nonzero.
      if (wi::gt_p (lh_lb, 0, sign)
	  || wi::gt_p (rh_lb, 0, sign)
	  || wi::lt_p (lh_ub, 0, sign)
	  || wi::lt_p (rh_ub, 0, sign))
	r.set_nonzero (type);
      else if (sign == SIGNED
	       && wi_optimize_signed_bitwise_op (r, type,
						 lh_lb, lh_ub,
						 rh_lb, rh_ub))
	return;
      else
	r.set_varying (type);
      return;
    }
  value_range_with_overflow (r, type, new_lb, new_ub);
}
3646 bool
3647 operator_bitwise_or::op1_range (irange &r, tree type,
3648 const irange &lhs,
3649 const irange &op2,
3650 relation_trio) const
3652 if (lhs.undefined_p ())
3653 return false;
3654 // If this is really a logical wi_fold, call that.
3655 if (types_compatible_p (type, boolean_type_node))
3656 return op_logical_or.op1_range (r, type, lhs, op2);
3658 if (lhs.zero_p ())
3660 r.set_zero (type);
3661 return true;
3663 r.set_varying (type);
3664 return true;
// Bitwise OR is commutative, so OP2 is derived the same way as OP1.

bool
operator_bitwise_or::op2_range (irange &r, tree type,
				const irange &lhs,
				const irange &op1,
				relation_trio) const
{
  return operator_bitwise_or::op1_range (r, type, lhs, op1);
}
// Refine R's known-bit mask using the operands of a BIT_XOR_EXPR.

void
operator_bitwise_xor::update_bitmask (irange &r, const irange &lh,
				      const irange &rh) const
{
  update_known_bitmask (r, BIT_XOR_EXPR, lh, rh);
}
// Fold [LH_LB, LH_UB] ^ [RH_LB, RH_UB] into R using must-be/may-be
// nonzero bit analysis of both operands.

void
operator_bitwise_xor::wi_fold (irange &r, tree type,
			       const wide_int &lh_lb,
			       const wide_int &lh_ub,
			       const wide_int &rh_lb,
			       const wide_int &rh_ub) const
{
  signop sign = TYPE_SIGN (type);
  wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
  wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
  wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
			    maybe_nonzero_lh, mustbe_nonzero_lh);
  wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
			    maybe_nonzero_rh, mustbe_nonzero_rh);

  // A result bit is known zero if both operands must have it set, or
  // if neither operand can have it set.
  wide_int result_zero_bits = ((mustbe_nonzero_lh & mustbe_nonzero_rh)
			       | ~(maybe_nonzero_lh | maybe_nonzero_rh));
  // A result bit is known one if exactly one operand must have it set
  // while the other cannot.
  wide_int result_one_bits
    = (wi::bit_and_not (mustbe_nonzero_lh, maybe_nonzero_rh)
       | wi::bit_and_not (mustbe_nonzero_rh, maybe_nonzero_lh));
  wide_int new_ub = ~result_zero_bits;
  wide_int new_lb = result_one_bits;

  // If the range has all positive or all negative values, the result
  // is better than VARYING.
  if (wi::lt_p (new_lb, 0, sign) || wi::ge_p (new_ub, 0, sign))
    value_range_with_overflow (r, type, new_lb, new_ub);
  else if (sign == SIGNED
	   && wi_optimize_signed_bitwise_op (r, type,
					     lh_lb, lh_ub,
					     rh_lb, rh_ub))
    ;  /* Do nothing.  */
  else
    r.set_varying (type);

  /* Furthermore, XOR is non-zero if its arguments can't be equal.  */
  if (wi::lt_p (lh_ub, rh_lb, sign)
      || wi::lt_p (rh_ub, lh_lb, sign)
      || wi::ne_p (result_one_bits, 0))
    {
      int_range<2> tmp;
      tmp.set_nonzero (type);
      r.intersect (tmp);
    }
}
3729 bool
3730 operator_bitwise_xor::op1_op2_relation_effect (irange &lhs_range,
3731 tree type,
3732 const irange &,
3733 const irange &,
3734 relation_kind rel) const
3736 if (rel == VREL_VARYING)
3737 return false;
3739 int_range<2> rel_range;
3741 switch (rel)
3743 case VREL_EQ:
3744 rel_range.set_zero (type);
3745 break;
3746 case VREL_NE:
3747 rel_range.set_nonzero (type);
3748 break;
3749 default:
3750 return false;
3753 lhs_range.intersect (rel_range);
3754 return true;
// Derive OP1 from LHS = OP1 ^ OP2.  Only boolean types yield useful
// information beyond passing an undefined/varying LHS through.

bool
operator_bitwise_xor::op1_range (irange &r, tree type,
				 const irange &lhs,
				 const irange &op2,
				 relation_trio) const
{
  if (lhs.undefined_p () || lhs.varying_p ())
    {
      r = lhs;
      return true;
    }
  if (types_compatible_p (type, boolean_type_node))
    {
      switch (get_bool_state (r, lhs, type))
	{
	case BRS_TRUE:
	  // A TRUE XOR means the operands differ, so OP1 is the
	  // inverse of whatever is known about OP2.
	  if (op2.varying_p ())
	    r.set_varying (type);
	  else if (op2.zero_p ())
	    r = range_true (type);
	  // See get_bool_state for the rationale
	  else if (contains_zero_p (op2))
	    r = range_true_and_false (type);
	  else
	    r = range_false (type);
	  break;
	case BRS_FALSE:
	  // A FALSE XOR means the operands are equal.
	  r = op2;
	  break;
	default:
	  break;
	}
      return true;
    }
  r.set_varying (type);
  return true;
}
// Bitwise XOR is commutative, so OP2 is derived the same way as OP1.

bool
operator_bitwise_xor::op2_range (irange &r, tree type,
				 const irange &lhs,
				 const irange &op1,
				 relation_trio) const
{
  return operator_bitwise_xor::op1_range (r, type, lhs, op1);
}
// Range operator for truncating modulus (TRUNC_MOD_EXPR).

class operator_trunc_mod : public range_operator
{
  using range_operator::op1_range;
  using range_operator::op2_range;
public:
  virtual void wi_fold (irange &r, tree type,
		        const wide_int &lh_lb,
		        const wide_int &lh_ub,
		        const wide_int &rh_lb,
		        const wide_int &rh_ub) const;
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs,
			  const irange &op2,
			  relation_trio) const;
  virtual bool op2_range (irange &r, tree type,
			  const irange &lhs,
			  const irange &op1,
			  relation_trio) const;
  void update_bitmask (irange &r, const irange &lh, const irange &rh) const
    { update_known_bitmask (r, TRUNC_MOD_EXPR, lh, rh); }
} op_trunc_mod;
// Fold [LH_LB, LH_UB] % [RH_LB, RH_UB] into R.

void
operator_trunc_mod::wi_fold (irange &r, tree type,
			     const wide_int &lh_lb,
			     const wide_int &lh_ub,
			     const wide_int &rh_lb,
			     const wide_int &rh_ub) const
{
  wide_int new_lb, new_ub, tmp;
  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);

  // Mod 0 is undefined.
  if (wi_zero_p (type, rh_lb, rh_ub))
    {
      r.set_undefined ();
      return;
    }

  // Check for constant and try to fold.
  if (lh_lb == lh_ub && rh_lb == rh_ub)
    {
      wi::overflow_type ov = wi::OVF_NONE;
      tmp = wi::mod_trunc (lh_lb, rh_lb, sign, &ov);
      if (ov == wi::OVF_NONE)
	{
	  r = int_range<2> (type, tmp, tmp);
	  return;
	}
    }

  // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
  new_ub = rh_ub - 1;
  if (sign == SIGNED)
    {
      // For signed B, -1 - RH_LB may exceed RH_UB - 1 in magnitude.
      tmp = -1 - rh_lb;
      new_ub = wi::smax (new_ub, tmp);
    }

  if (sign == UNSIGNED)
    new_lb = wi::zero (prec);
  else
    {
      // The result can be no more negative than the divisor bound
      // allows, and no more negative than A itself (when A <= 0).
      new_lb = -new_ub;
      tmp = lh_lb;
      if (wi::gts_p (tmp, 0))
	tmp = wi::zero (prec);
      new_lb = wi::smax (new_lb, tmp);
    }
  // Likewise the result cannot exceed A itself (when A >= 0).
  tmp = lh_ub;
  if (sign == SIGNED && wi::neg_p (tmp))
    tmp = wi::zero (prec);
  new_ub = wi::min (new_ub, tmp, sign);

  value_range_with_overflow (r, type, new_lb, new_ub);
}
3882 bool
3883 operator_trunc_mod::op1_range (irange &r, tree type,
3884 const irange &lhs,
3885 const irange &,
3886 relation_trio) const
3888 if (lhs.undefined_p ())
3889 return false;
3890 // PR 91029.
3891 signop sign = TYPE_SIGN (type);
3892 unsigned prec = TYPE_PRECISION (type);
3893 // (a % b) >= x && x > 0 , then a >= x.
3894 if (wi::gt_p (lhs.lower_bound (), 0, sign))
3896 r = value_range (type, lhs.lower_bound (), wi::max_value (prec, sign));
3897 return true;
3899 // (a % b) <= x && x < 0 , then a <= x.
3900 if (wi::lt_p (lhs.upper_bound (), 0, sign))
3902 r = value_range (type, wi::min_value (prec, sign), lhs.upper_bound ());
3903 return true;
3905 return false;
// Derive B from LHS = A % B.  Since |A % B| < |B|, a result bound
// away from zero excludes divisors of smaller magnitude.

bool
operator_trunc_mod::op2_range (irange &r, tree type,
			       const irange &lhs,
			       const irange &,
			       relation_trio) const
{
  if (lhs.undefined_p ())
    return false;
  // PR 91029.
  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);
  // (a % b) >= x && x > 0 , then b is in ~[-x, x] for signed
  // or b > x for unsigned.
  if (wi::gt_p (lhs.lower_bound (), 0, sign))
    {
      if (sign == SIGNED)
	r = value_range (type, wi::neg (lhs.lower_bound ()),
			 lhs.lower_bound (), VR_ANTI_RANGE);
      else if (wi::lt_p (lhs.lower_bound (), wi::max_value (prec, sign),
			 sign))
	r = value_range (type, lhs.lower_bound () + 1,
			 wi::max_value (prec, sign));
      else
	return false;
      return true;
    }
  // (a % b) <= x && x < 0 , then b is in ~[x, -x].
  if (wi::lt_p (lhs.upper_bound (), 0, sign))
    {
      if (wi::gt_p (lhs.upper_bound (), wi::min_value (prec, sign), sign))
	r = value_range (type, lhs.upper_bound (),
			 wi::neg (lhs.upper_bound ()), VR_ANTI_RANGE);
      else
	return false;
      return true;
    }
  return false;
}
// Range operator for logical NOT on boolean ranges (TRUTH_NOT_EXPR).

class operator_logical_not : public range_operator
{
  using range_operator::fold_range;
  using range_operator::op1_range;
public:
  virtual bool fold_range (irange &r, tree type,
			   const irange &lh,
			   const irange &rh,
			   relation_trio rel = TRIO_VARYING) const;
  virtual bool op1_range (irange &r, tree type,
			  const irange &lhs,
			  const irange &op2,
			  relation_trio rel = TRIO_VARYING) const;
} op_logical_not;
3963 // Folding a logical NOT, oddly enough, involves doing nothing on the
3964 // forward pass through. During the initial walk backwards, the
3965 // logical NOT reversed the desired outcome on the way back, so on the
3966 // way forward all we do is pass the range forward.
3968 // b_2 = x_1 < 20
3969 // b_3 = !b_2
3970 // if (b_3)
3971 // to determine the TRUE branch, walking backward
3972 // if (b_3) if ([1,1])
3973 // b_3 = !b_2 [1,1] = ![0,0]
3974 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
3975 // which is the result we are looking for.. so.. pass it through.
3977 bool
3978 operator_logical_not::fold_range (irange &r, tree type,
3979 const irange &lh,
3980 const irange &rh ATTRIBUTE_UNUSED,
3981 relation_trio) const
3983 if (empty_range_varying (r, type, lh, rh))
3984 return true;
3986 r = lh;
3987 if (!lh.varying_p () && !lh.undefined_p ())
3988 r.invert ();
3990 return true;
bool
operator_logical_not::op1_range (irange &r,
				 tree type,
				 const irange &lhs,
				 const irange &op2,
				 relation_trio) const
{
  // Logical NOT is involutary...do it again.
  return fold_range (r, type, lhs, op2);
}
4005 bool
4006 operator_bitwise_not::fold_range (irange &r, tree type,
4007 const irange &lh,
4008 const irange &rh,
4009 relation_trio) const
4011 if (empty_range_varying (r, type, lh, rh))
4012 return true;
4014 if (types_compatible_p (type, boolean_type_node))
4015 return op_logical_not.fold_range (r, type, lh, rh);
4017 // ~X is simply -1 - X.
4018 int_range<1> minusone (type, wi::minus_one (TYPE_PRECISION (type)),
4019 wi::minus_one (TYPE_PRECISION (type)));
4020 return range_op_handler (MINUS_EXPR).fold_range (r, type, minusone, lh);
bool
operator_bitwise_not::op1_range (irange &r, tree type,
				 const irange &lhs,
				 const irange &op2,
				 relation_trio) const
{
  if (lhs.undefined_p ())
    return false;
  // Boolean types are really a logical NOT.
  if (types_compatible_p (type, boolean_type_node))
    return op_logical_not.op1_range (r, type, lhs, op2);

  // ~X is -1 - X and since bitwise NOT is involutary...do it again.
  return fold_range (r, type, lhs, op2);
}
// Refine R's known-bit mask using the operand of a BIT_NOT_EXPR.

void
operator_bitwise_not::update_bitmask (irange &r, const irange &lh,
				      const irange &rh) const
{
  update_known_bitmask (r, BIT_NOT_EXPR, lh, rh);
}
// A constant folds to itself: simply copy the operand range through.

bool
operator_cst::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
			  const irange &lh,
			  const irange &rh ATTRIBUTE_UNUSED,
			  relation_trio) const
{
  r = lh;
  return true;
}
4057 // Determine if there is a relationship between LHS and OP1.
4059 relation_kind
4060 operator_identity::lhs_op1_relation (const irange &lhs,
4061 const irange &op1 ATTRIBUTE_UNUSED,
4062 const irange &op2 ATTRIBUTE_UNUSED,
4063 relation_kind) const
4065 if (lhs.undefined_p ())
4066 return VREL_VARYING;
4067 // Simply a copy, so they are equivalent.
4068 return VREL_EQ;
// An identity operation folds to its operand range unchanged.

bool
operator_identity::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
			       const irange &lh,
			       const irange &rh ATTRIBUTE_UNUSED,
			       relation_trio) const
{
  r = lh;
  return true;
}
// The operand of an identity is exactly the LHS range.

bool
operator_identity::op1_range (irange &r, tree type ATTRIBUTE_UNUSED,
			      const irange &lhs,
			      const irange &op2 ATTRIBUTE_UNUSED,
			      relation_trio) const
{
  r = lhs;
  return true;
}
// Range operator for tree codes whose result cannot be computed; the
// fold always produces VARYING.

class operator_unknown : public range_operator
{
  using range_operator::fold_range;
public:
  virtual bool fold_range (irange &r, tree type,
			   const irange &op1,
			   const irange &op2,
			   relation_trio rel = TRIO_VARYING) const;
} op_unknown;
// Nothing is known about the operation; the result is always VARYING.

bool
operator_unknown::fold_range (irange &r, tree type,
			      const irange &lh ATTRIBUTE_UNUSED,
			      const irange &rh ATTRIBUTE_UNUSED,
			      relation_trio) const
{
  r.set_varying (type);
  return true;
}
// Fold ABS ([LH_LB, LH_UB]) into R, being careful about the
// unrepresentable ABS (TYPE_MIN_VALUE) under wrapping semantics.

void
operator_abs::wi_fold (irange &r, tree type,
		       const wide_int &lh_lb, const wide_int &lh_ub,
		       const wide_int &rh_lb ATTRIBUTE_UNUSED,
		       const wide_int &rh_ub ATTRIBUTE_UNUSED) const
{
  wide_int min, max;
  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);

  // Pass through LH for the easy cases.
  if (sign == UNSIGNED || wi::ge_p (lh_lb, 0, sign))
    {
      r = int_range<1> (type, lh_lb, lh_ub);
      return;
    }

  // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
  // a useful range.
  wide_int min_value = wi::min_value (prec, sign);
  wide_int max_value = wi::max_value (prec, sign);
  if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lh_lb, min_value))
    {
      r.set_varying (type);
      return;
    }

  // ABS_EXPR may flip the range around, if the original range
  // included negative values.
  if (wi::eq_p (lh_lb, min_value))
    {
      // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
      // returned [-MIN,-MIN] so this preserves that behavior.  PR37078
      if (wi::eq_p (lh_ub, min_value))
	{
	  r = int_range<1> (type, min_value, min_value);
	  return;
	}
      min = max_value;
    }
  else
    min = wi::abs (lh_lb);

  if (wi::eq_p (lh_ub, min_value))
    max = max_value;
  else
    max = wi::abs (lh_ub);

  // If the range contains zero then we know that the minimum value in the
  // range will be zero.
  if (wi::le_p (lh_lb, 0, sign) && wi::ge_p (lh_ub, 0, sign))
    {
      if (wi::gt_p (min, max, sign))
	max = min;
      min = wi::zero (prec);
    }
  else
    {
      // If the range was reversed, swap MIN and MAX.
      if (wi::gt_p (min, max, sign))
	std::swap (min, max);
    }

  // If the new range has its limits swapped around (MIN > MAX), then
  // the operation caused one of them to wrap around.  The only thing
  // we know is that the result is positive.
  if (wi::gt_p (min, max, sign))
    {
      min = wi::zero (prec);
      max = max_value;
    }
  r = int_range<1> (type, min, max);
}
// Derive OP1 from LHS = ABS (OP1) by mirroring each positive subrange
// into the negatives.

bool
operator_abs::op1_range (irange &r, tree type,
			 const irange &lhs,
			 const irange &op2,
			 relation_trio) const
{
  if (empty_range_varying (r, type, lhs, op2))
    return true;
  if (TYPE_UNSIGNED (type))
    {
      r = lhs;
      return true;
    }
  // Start with the positives because negatives are an impossible result.
  int_range_max positives = range_positives (type);
  positives.intersect (lhs);
  r = positives;
  // Then add the negative of each pair:
  // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
  for (unsigned i = 0; i < positives.num_pairs (); ++i)
    r.union_ (int_range<1> (type,
			    -positives.upper_bound (i),
			    -positives.lower_bound (i)));
  // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
  // unrepresentable.  Add -TYPE_MIN_VALUE in this case.
  wide_int min_value = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
  wide_int lb = lhs.lower_bound ();
  if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lb, min_value))
    r.union_ (int_range<2> (type, lb, lb));
  return true;
}
// Refine R's known-bit mask using the operand of an ABS_EXPR.

void
operator_abs::update_bitmask (irange &r, const irange &lh,
			      const irange &rh) const
{
  update_known_bitmask (r, ABS_EXPR, lh, rh);
}
// Range operator for ABSU_EXPR: absolute value of a signed operand
// yielding an unsigned result type.

class operator_absu : public range_operator
{
 public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb, const wide_int &lh_ub,
			const wide_int &rh_lb, const wide_int &rh_ub) const;
  virtual void update_bitmask (irange &r, const irange &lh,
			       const irange &rh) const final override;
} op_absu;
// Fold ABSU ([LH_LB, LH_UB]) into the unsigned TYPE range R.

void
operator_absu::wi_fold (irange &r, tree type,
			const wide_int &lh_lb, const wide_int &lh_ub,
			const wide_int &rh_lb ATTRIBUTE_UNUSED,
			const wide_int &rh_ub ATTRIBUTE_UNUSED) const
{
  wide_int new_lb, new_ub;

  // Pass through VR0 the easy cases.
  if (wi::ges_p (lh_lb, 0))
    {
      new_lb = lh_lb;
      new_ub = lh_ub;
    }
  else
    {
      new_lb = wi::abs (lh_lb);
      new_ub = wi::abs (lh_ub);

      // If the range contains zero then we know that the minimum
      // value in the range will be zero.
      if (wi::ges_p (lh_ub, 0))
	{
	  if (wi::gtu_p (new_lb, new_ub))
	    new_ub = new_lb;
	  new_lb = wi::zero (TYPE_PRECISION (type));
	}
      else
	// An all-negative input range maps to a reversed positive
	// range, so swap the bounds.
	std::swap (new_lb, new_ub);
    }

  gcc_checking_assert (TYPE_UNSIGNED (type));
  r = int_range<1> (type, new_lb, new_ub);
}
// Refine R's known-bit mask using the operand of an ABSU_EXPR.

void
operator_absu::update_bitmask (irange &r, const irange &lh,
			       const irange &rh) const
{
  update_known_bitmask (r, ABSU_EXPR, lh, rh);
}
4279 bool
4280 operator_negate::fold_range (irange &r, tree type,
4281 const irange &lh,
4282 const irange &rh,
4283 relation_trio) const
4285 if (empty_range_varying (r, type, lh, rh))
4286 return true;
4287 // -X is simply 0 - X.
4288 return range_op_handler (MINUS_EXPR).fold_range (r, type,
4289 range_zero (type), lh);
bool
operator_negate::op1_range (irange &r, tree type,
			    const irange &lhs,
			    const irange &op2,
			    relation_trio) const
{
  // NEGATE is involutory.
  return fold_range (r, type, lhs, op2);
}
4303 bool
4304 operator_addr_expr::fold_range (irange &r, tree type,
4305 const irange &lh,
4306 const irange &rh,
4307 relation_trio) const
4309 if (empty_range_varying (r, type, lh, rh))
4310 return true;
4312 // Return a non-null pointer of the LHS type (passed in op2).
4313 if (lh.zero_p ())
4314 r = range_zero (type);
4315 else if (!contains_zero_p (lh))
4316 r = range_nonzero (type);
4317 else
4318 r.set_varying (type);
4319 return true;
bool
operator_addr_expr::op1_range (irange &r, tree type,
			       const irange &lhs,
			       const irange &op2,
			       relation_trio) const
{
  if (empty_range_varying (r, type, lhs, op2))
    return true;

  // Return a non-null pointer of the LHS type (passed in op2), but only
  // if we can't overflow; otherwise a non-zero offset could wrap to zero.
  // See PR 111009.
  if (!contains_zero_p (lhs) && TYPE_OVERFLOW_UNDEFINED (type))
    r = range_nonzero (type);
  else
    r.set_varying (type);
  return true;
}
// Initialize any integral operators to the primary table.  Operators
// shared with other types (e.g. equality) are registered elsewhere;
// these entries are the integral-only tree codes.

void
range_op_table::initialize_integral_ops ()
{
  set (TRUNC_DIV_EXPR, op_trunc_div);
  set (FLOOR_DIV_EXPR, op_floor_div);
  set (ROUND_DIV_EXPR, op_round_div);
  set (CEIL_DIV_EXPR, op_ceil_div);
  set (EXACT_DIV_EXPR, op_exact_div);
  set (LSHIFT_EXPR, op_lshift);
  set (RSHIFT_EXPR, op_rshift);
  set (TRUTH_AND_EXPR, op_logical_and);
  set (TRUTH_OR_EXPR, op_logical_or);
  set (TRUNC_MOD_EXPR, op_trunc_mod);
  set (TRUTH_NOT_EXPR, op_logical_not);
  // REALPART/IMAGPART yield no computable range information.
  set (IMAGPART_EXPR, op_unknown);
  set (REALPART_EXPR, op_unknown);
  set (ABSU_EXPR, op_absu);
  set (OP_WIDEN_MULT_SIGNED, op_widen_mult_signed);
  set (OP_WIDEN_MULT_UNSIGNED, op_widen_mult_unsigned);
  set (OP_WIDEN_PLUS_SIGNED, op_widen_plus_signed);
  set (OP_WIDEN_PLUS_UNSIGNED, op_widen_plus_unsigned);
}
4367 #if CHECKING_P
4368 #include "selftest.h"
4370 namespace selftest
4372 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
4373 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
4374 #define INT16(x) wi::shwi ((x), TYPE_PRECISION (short_integer_type_node))
4375 #define UINT16(x) wi::uhwi ((x), TYPE_PRECISION (short_unsigned_type_node))
4376 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
4377 #define UCHAR(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_char_type_node))
// Selftest: verify that range_cast performs the expected value
// transformations between signed/unsigned and narrow/wide types.

static void
range_op_cast_tests ()
{
  int_range<2> r0, r1, r2, rold;
  r0.set_varying (integer_type_node);
  wide_int maxint = r0.upper_bound ();

  // If a range is in any way outside of the range for the converted
  // to range, default to the range for the new type.
  r0.set_varying (short_integer_type_node);
  wide_int minshort = r0.lower_bound ();
  wide_int maxshort = r0.upper_bound ();
  if (TYPE_PRECISION (integer_type_node)
      > TYPE_PRECISION (short_integer_type_node))
    {
      r1 = int_range<1> (integer_type_node,
			 wi::zero (TYPE_PRECISION (integer_type_node)),
			 maxint);
      range_cast (r1, short_integer_type_node);
      ASSERT_TRUE (r1.lower_bound () == minshort
		   && r1.upper_bound() == maxshort);
    }

  // (unsigned char)[-5,-1] => [251,255].
  r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (-1));
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node,
				   UCHAR (251), UCHAR (255)));
  range_cast (r0, signed_char_type_node);
  ASSERT_TRUE (r0 == rold);

  // (signed char)[15, 150] => [-128,-106][15,127].
  r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (15), UCHAR (150));
  range_cast (r0, signed_char_type_node);
  r1 = int_range<1> (signed_char_type_node, SCHAR (15), SCHAR (127));
  r2 = int_range<1> (signed_char_type_node, SCHAR (-128), SCHAR (-106));
  r1.union_ (r2);
  ASSERT_TRUE (r1 == r0);
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == rold);

  // (unsigned char)[-5, 5] => [0,5][251,255].
  r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (5));
  range_cast (r0, unsigned_char_type_node);
  r1 = int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255));
  r2 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
  r1.union_ (r2);
  ASSERT_TRUE (r0 == r1);
  range_cast (r0, signed_char_type_node);
  ASSERT_TRUE (r0 == rold);

  // (unsigned char)[-5,5] => [0,5][251,255].
  r0 = int_range<1> (integer_type_node, INT (-5), INT (5));
  range_cast (r0, unsigned_char_type_node);
  r1 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
  r1.union_ (int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255)));
  ASSERT_TRUE (r0 == r1);

  // (unsigned char)[5U,1974U] => [0,255].
  r0 = int_range<1> (unsigned_type_node, UINT (5), UINT (1974));
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (255)));
  range_cast (r0, integer_type_node);
  // Going to a wider range should not sign extend.
  ASSERT_TRUE (r0 == int_range<1> (integer_type_node, INT (0), INT (255)));

  // (unsigned char)[-350,15] => [0,255].
  r0 = int_range<1> (integer_type_node, INT (-350), INT (15));
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == (int_range<1>
		      (unsigned_char_type_node,
		       min_limit (unsigned_char_type_node),
		       max_limit (unsigned_char_type_node))));

  // Casting [-120,20] from signed char to unsigned short.
  // => [0, 20][0xff88, 0xffff].
  r0 = int_range<1> (signed_char_type_node, SCHAR (-120), SCHAR (20));
  range_cast (r0, short_unsigned_type_node);
  r1 = int_range<1> (short_unsigned_type_node, UINT16 (0), UINT16 (20));
  r2 = int_range<1> (short_unsigned_type_node,
		     UINT16 (0xff88), UINT16 (0xffff));
  r1.union_ (r2);
  ASSERT_TRUE (r0 == r1);
  // A truncating cast back to signed char will work because [-120, 20]
  // is representable in signed char.
  range_cast (r0, signed_char_type_node);
  ASSERT_TRUE (r0 == int_range<1> (signed_char_type_node,
				   SCHAR (-120), SCHAR (20)));

  // unsigned char -> signed short
  // (signed short)[(unsigned char)25, (unsigned char)250]
  // => [(signed short)25, (signed short)250]
  r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (25), UCHAR (250));
  range_cast (r0, short_integer_type_node);
  r1 = int_range<1> (short_integer_type_node, INT16 (25), INT16 (250));
  ASSERT_TRUE (r0 == r1);
  range_cast (r0, unsigned_char_type_node);
  ASSERT_TRUE (r0 == rold);

  // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
  r0 = int_range<1> (long_long_integer_type_node,
		     min_limit (long_long_integer_type_node),
		     max_limit (long_long_integer_type_node));
  range_cast (r0, short_unsigned_type_node);
  r1 = int_range<1> (short_unsigned_type_node,
		     min_limit (short_unsigned_type_node),
		     max_limit (short_unsigned_type_node));
  ASSERT_TRUE (r0 == r1);

  // Casting NONZERO to a narrower type will wrap/overflow so
  // it's just the entire range for the narrower type.
  //
  // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32].  This is
  // is outside of the range of a smaller range, return the full
  // smaller range.
  if (TYPE_PRECISION (integer_type_node)
      > TYPE_PRECISION (short_integer_type_node))
    {
      r0 = range_nonzero (integer_type_node);
      range_cast (r0, short_integer_type_node);
      r1 = int_range<1> (short_integer_type_node,
			 min_limit (short_integer_type_node),
			 max_limit (short_integer_type_node));
      ASSERT_TRUE (r0 == r1);
    }

  // Casting NONZERO from a narrower signed to a wider signed.
  //
  // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
  // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
  r0 = range_nonzero (short_integer_type_node);
  range_cast (r0, integer_type_node);
  r1 = int_range<1> (integer_type_node, INT (-32768), INT (-1));
  r2 = int_range<1> (integer_type_node, INT (1), INT (32767));
  r1.union_ (r2);
  ASSERT_TRUE (r0 == r1);
}
4517 static void
4518 range_op_lshift_tests ()
4520 // Test that 0x808.... & 0x8.... still contains 0x8....
4521 // for a large set of numbers.
4523 int_range_max res;
4524 tree big_type = long_long_unsigned_type_node;
4525 unsigned big_prec = TYPE_PRECISION (big_type);
4526 // big_num = 0x808,0000,0000,0000
4527 wide_int big_num = wi::lshift (wi::uhwi (0x808, big_prec),
4528 wi::uhwi (48, big_prec));
4529 op_bitwise_and.fold_range (res, big_type,
4530 int_range <1> (big_type),
4531 int_range <1> (big_type, big_num, big_num));
4532 // val = 0x8,0000,0000,0000
4533 wide_int val = wi::lshift (wi::uhwi (8, big_prec),
4534 wi::uhwi (48, big_prec));
4535 ASSERT_TRUE (res.contains_p (val));
4538 if (TYPE_PRECISION (unsigned_type_node) > 31)
4540 // unsigned VARYING = op1 << 1 should be VARYING.
4541 int_range<2> lhs (unsigned_type_node);
4542 int_range<2> shift (unsigned_type_node, INT (1), INT (1));
4543 int_range_max op1;
4544 op_lshift.op1_range (op1, unsigned_type_node, lhs, shift);
4545 ASSERT_TRUE (op1.varying_p ());
4547 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4548 int_range<2> zero (unsigned_type_node, UINT (0), UINT (0));
4549 op_lshift.op1_range (op1, unsigned_type_node, zero, shift);
4550 ASSERT_TRUE (op1.num_pairs () == 2);
4551 // Remove the [0,0] range.
4552 op1.intersect (zero);
4553 ASSERT_TRUE (op1.num_pairs () == 1);
4554 // op1 << 1 should be [0x8000,0x8000] << 1,
4555 // which should result in [0,0].
4556 int_range_max result;
4557 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4558 ASSERT_TRUE (result == zero);
4560 // signed VARYING = op1 << 1 should be VARYING.
4561 if (TYPE_PRECISION (integer_type_node) > 31)
4563 // unsigned VARYING = op1 << 1 should be VARYING.
4564 int_range<2> lhs (integer_type_node);
4565 int_range<2> shift (integer_type_node, INT (1), INT (1));
4566 int_range_max op1;
4567 op_lshift.op1_range (op1, integer_type_node, lhs, shift);
4568 ASSERT_TRUE (op1.varying_p ());
4570 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4571 int_range<2> zero (integer_type_node, INT (0), INT (0));
4572 op_lshift.op1_range (op1, integer_type_node, zero, shift);
4573 ASSERT_TRUE (op1.num_pairs () == 2);
4574 // Remove the [0,0] range.
4575 op1.intersect (zero);
4576 ASSERT_TRUE (op1.num_pairs () == 1);
4577 // op1 << 1 should be [0x8000,0x8000] << 1,
4578 // which should result in [0,0].
4579 int_range_max result;
4580 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4581 ASSERT_TRUE (result == zero);
4585 static void
4586 range_op_rshift_tests ()
4588 // unsigned: [3, MAX] = OP1 >> 1
4590 int_range_max lhs (unsigned_type_node,
4591 UINT (3), max_limit (unsigned_type_node));
4592 int_range_max one (unsigned_type_node,
4593 wi::one (TYPE_PRECISION (unsigned_type_node)),
4594 wi::one (TYPE_PRECISION (unsigned_type_node)));
4595 int_range_max op1;
4596 op_rshift.op1_range (op1, unsigned_type_node, lhs, one);
4597 ASSERT_FALSE (op1.contains_p (UINT (3)));
4600 // signed: [3, MAX] = OP1 >> 1
4602 int_range_max lhs (integer_type_node,
4603 INT (3), max_limit (integer_type_node));
4604 int_range_max one (integer_type_node, INT (1), INT (1));
4605 int_range_max op1;
4606 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4607 ASSERT_FALSE (op1.contains_p (INT (-2)));
4610 // This is impossible, so OP1 should be [].
4611 // signed: [MIN, MIN] = OP1 >> 1
4613 int_range_max lhs (integer_type_node,
4614 min_limit (integer_type_node),
4615 min_limit (integer_type_node));
4616 int_range_max one (integer_type_node, INT (1), INT (1));
4617 int_range_max op1;
4618 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4619 ASSERT_TRUE (op1.undefined_p ());
4622 // signed: ~[-1] = OP1 >> 31
4623 if (TYPE_PRECISION (integer_type_node) > 31)
4625 int_range_max lhs (integer_type_node, INT (-1), INT (-1), VR_ANTI_RANGE);
4626 int_range_max shift (integer_type_node, INT (31), INT (31));
4627 int_range_max op1;
4628 op_rshift.op1_range (op1, integer_type_node, lhs, shift);
4629 int_range_max negatives = range_negatives (integer_type_node);
4630 negatives.intersect (op1);
4631 ASSERT_TRUE (negatives.undefined_p ());
4635 static void
4636 range_op_bitwise_and_tests ()
4638 int_range_max res;
4639 wide_int min = min_limit (integer_type_node);
4640 wide_int max = max_limit (integer_type_node);
4641 wide_int tiny = wi::add (min, wi::one (TYPE_PRECISION (integer_type_node)));
4642 int_range_max i1 (integer_type_node, tiny, max);
4643 int_range_max i2 (integer_type_node, INT (255), INT (255));
4645 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
4646 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4647 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4649 // VARYING = OP1 & 255: OP1 is VARYING
4650 i1 = int_range<1> (integer_type_node);
4651 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4652 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4654 // For 0 = x & MASK, x is ~MASK.
4656 int_range<2> zero (integer_type_node, INT (0), INT (0));
4657 int_range<2> mask = int_range<2> (integer_type_node, INT (7), INT (7));
4658 op_bitwise_and.op1_range (res, integer_type_node, zero, mask);
4659 wide_int inv = wi::shwi (~7U, TYPE_PRECISION (integer_type_node));
4660 ASSERT_TRUE (res.get_nonzero_bits () == inv);
4663 // (NONZERO | X) is nonzero.
4664 i1.set_nonzero (integer_type_node);
4665 i2.set_varying (integer_type_node);
4666 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4667 ASSERT_TRUE (res.nonzero_p ());
4669 // (NEGATIVE | X) is nonzero.
4670 i1 = int_range<1> (integer_type_node, INT (-5), INT (-3));
4671 i2.set_varying (integer_type_node);
4672 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4673 ASSERT_FALSE (res.contains_p (INT (0)));
4676 static void
4677 range_relational_tests ()
4679 int_range<2> lhs (unsigned_char_type_node);
4680 int_range<2> op1 (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4681 int_range<2> op2 (unsigned_char_type_node, UCHAR (20), UCHAR (20));
4683 // Never wrapping additions mean LHS > OP1.
4684 relation_kind code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4685 ASSERT_TRUE (code == VREL_GT);
4687 // Most wrapping additions mean nothing...
4688 op1 = int_range<2> (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4689 op2 = int_range<2> (unsigned_char_type_node, UCHAR (0), UCHAR (255));
4690 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4691 ASSERT_TRUE (code == VREL_VARYING);
4693 // However, always wrapping additions mean LHS < OP1.
4694 op1 = int_range<2> (unsigned_char_type_node, UCHAR (1), UCHAR (255));
4695 op2 = int_range<2> (unsigned_char_type_node, UCHAR (255), UCHAR (255));
4696 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4697 ASSERT_TRUE (code == VREL_LT);
4700 void
4701 range_op_tests ()
4703 range_op_rshift_tests ();
4704 range_op_lshift_tests ();
4705 range_op_bitwise_and_tests ();
4706 range_op_cast_tests ();
4707 range_relational_tests ();
4709 extern void range_op_float_tests ();
4710 range_op_float_tests ();
4713 } // namespace selftest
4715 #endif // CHECKING_P